# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-25 23:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ChatMessage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('room', models.CharField(max_length=64)),
('message', models.CharField(max_length=1024)),
],
),
]
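# Hedged reconstruction (not part of this migration file): the models.py that
# this initial migration implies, with field names and types read off the
# CreateModel operation above; the app layout and module path are assumptions.
#
# from django.db import models
#
# class ChatMessage(models.Model):
#     room = models.CharField(max_length=64)
#     message = models.CharField(max_length=1024)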
|
"""Support for French FAI Bouygues Bbox routers."""
from collections import namedtuple
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN, PLATFORM_SCHEMA, DeviceScanner)
from homeassistant.const import CONF_HOST
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
DEFAULT_HOST = '192.168.1.254'
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=60)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
})
def get_scanner(hass, config):
"""Validate the configuration and return a Bbox scanner."""
scanner = BboxDeviceScanner(config[DOMAIN])
return scanner if scanner.success_init else None
Device = namedtuple('Device', ['mac', 'name', 'ip', 'last_update'])
class BboxDeviceScanner(DeviceScanner):
"""This class scans for devices connected to the bbox."""
    def __init__(self, config):
        """Initialize the scanner and get the host from config."""
        from typing import List  # noqa: pylint: disable=unused-import
        self.host = config[CONF_HOST]
        self.last_results = []  # type: List[Device]
        self.success_init = self._update_info()
        _LOGGER.info("Scanner initialized")
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self._update_info()
return [device.mac for device in self.last_results]
def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
filter_named = [result.name for result in self.last_results if
result.mac == device]
if filter_named:
return filter_named[0]
return None
@Throttle(MIN_TIME_BETWEEN_SCANS)
def _update_info(self):
"""Check the Bbox for devices.
        Returns a boolean indicating whether the scan was successful.
"""
_LOGGER.info("Scanning...")
import pybbox
box = pybbox.Bbox(ip=self.host)
result = box.get_all_connected_devices()
now = dt_util.now()
last_results = []
for device in result:
if device['active'] != 1:
continue
last_results.append(
Device(device['macaddress'], device['hostname'],
device['ipaddress'], now))
self.last_results = last_results
_LOGGER.info("Scan successful")
return True
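# Hedged usage sketch (not how Home Assistant drives this platform in practice,
# where get_scanner() is invoked by the device_tracker setup): it only
# illustrates the call sequence, assuming a reachable Bbox router at
# DEFAULT_HOST and an installed pybbox package.
if __name__ == '__main__':
    scanner = BboxDeviceScanner({CONF_HOST: DEFAULT_HOST})
    if scanner.success_init:
        for mac in scanner.scan_devices():
            print(mac, scanner.get_device_name(mac))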
|
import pytest
from mergesort import mergesort, merge
def test_empty_list_returns_empty_list():
"""Test mergesort on empty list returns same."""
empty = []
assert mergesort(empty) == []
def test_list_with_one_value():
"""Test mergesort on empty list returns same."""
lst = [8]
assert mergesort(lst) == [8]
def test_list_with_two_values():
"""Test mergesort on empty list returns same."""
lst = [8, 3]
assert mergesort(lst) == [3, 8]
def test_list_with_odd_number_of_values():
"""Test odd number of values returns ordered list."""
lst = [8, 3, 7, 9, 5]
assert mergesort(lst) == [3, 5, 7, 8, 9]
def test_list_with_unbalanced_halves():
"""Test list heavy weighted on one half returns ordered list."""
lst = [2, 4, 3, 8, 1, 9, 10, 13]
assert mergesort(lst) == [1, 2, 3, 4, 8, 9, 10, 13]
def test_merge_merges_two_pairs():
"""Test merge function separate of mergesort."""
L = [1, 3, 5]
R = [2, 4, 6]
assert merge(L, R) == [1, 2, 3, 4, 5, 6]
def test_merge_merges_uneven_lists():
    """Test merge on lists of unequal length."""
L = [1, 3, 5]
R = [2, 4]
assert merge(L, R) == [1, 2, 3, 4, 5]
def test_merge_on_unbalanced_lists():
"""Test list heavy weighted on one half returns ordered list."""
L = [2, 3, 4, 8]
R = [1, 9, 10, 13]
assert merge(L, R) == [1, 2, 3, 4, 8, 9, 10, 13]
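# The mergesort module under test is not included here. The helpers below are a
# hypothetical reference sketch of the behaviour these tests assume (a top-down
# merge sort returning a new sorted list); they are named _reference_* so they
# do not shadow the imported mergesort/merge functions.
def _reference_merge(left, right):
    """Merge two already-sorted lists into a single sorted list."""
    result, i, j = [], 0, 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            result.append(left[i])
            i += 1
        else:
            result.append(right[j])
            j += 1
    result.extend(left[i:])
    result.extend(right[j:])
    return result
def _reference_mergesort(lst):
    """Sort a list by recursively splitting it and merging the sorted halves."""
    if len(lst) <= 1:
        return list(lst)
    mid = len(lst) // 2
    return _reference_merge(_reference_mergesort(lst[:mid]),
                            _reference_mergesort(lst[mid:]))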
|
"""
Implement input sentence encoder.
"""
import torch.nn as nn
from torch.nn.utils.rnn import pad_packed_sequence as unpack
from torch.nn.utils.rnn import pack_padded_sequence as pack
from .config import *
from common.constants import DEVICE
from util.tensor_utils import to_sorted_tensor, to_original_tensor
class Encoder(nn.Module):
"""
Transform embeddings to encoding representations.
"""
    def __init__(self, config, input_size, dropout=0.1):
        """
        Initialize a GRU encoder.
        :param config: configuration, including total encoder size, whether bi-directional, etc.
        :param input_size: input dimension.
        :param dropout: default dropout rate (the GRU itself uses config.dropout).
        """
super(Encoder, self).__init__()
self.config = config
self.layers = config.layers
self.num_directions = 2 if config.brnn else 1
assert config.enc_rnn_size % self.num_directions == 0
self.hidden_size = config.enc_rnn_size // self.num_directions
self.rnn = nn.GRU(
input_size, self.hidden_size,
num_layers=config.layers, dropout=config.dropout,
bidirectional=config.brnn, batch_first=True)
def forward(self, input_emb, lengths, hidden=None):
"""
Given input embeddings and input seq lengths, calculate encoding representations.
:param input_emb: embedding of a batch.
Input shape - [seq_len, batch_size, hidden_dim]
:param lengths: lengths of each sample.
        :param hidden: hidden state from the previous layer. Default None.
        :return: encoding of a batch.
            Output shape - [unpadded_max_thisbatch_seq_len, batch_size, hidden_dim * num_directions]
TODO: revise code to make input and output shape be [batch, length, dim]
"""
# input_emb shape: [seq_len, batch_size, hidden_dim] [100, 32, 412]
# sorted_emb shape: [seq_len, batch_size, hidden_dim] [100, 32, 412]
sorted_input_emb, sorted_lengths, sorted_idx = to_sorted_tensor(
input_emb, lengths, sort_dim=1, device=DEVICE)
emb = pack(sorted_input_emb, sorted_lengths, batch_first=False)
self.rnn.flatten_parameters()
outputs, hidden_t = self.rnn(emb, hidden)
        # hidden_t shape: [num_layers * num_directions, batch_size, hidden_dim] [2, 32, 256]
        # outputs shape: [unpadded_seq_len, batch_size, hidden_dim * num_directions] [79, 32, 512]
# !!! NOTICE: it will unpack to max_unpadded_length.
outputs = unpack(outputs, batch_first=False)[0]
outputs = to_original_tensor(
outputs, sorted_idx, sort_dim=1, device=DEVICE)
return hidden_t, outputs
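# Hypothetical shape sketch (not part of the original module): it reproduces the
# pack -> GRU -> unpack flow above with a plain bidirectional GRU so the
# [seq_len, batch, num_directions * hidden] output convention is visible without
# the project-specific config and sorting helpers.
def _shape_sketch():
    import torch
    seq_len, batch, input_dim, hidden = 7, 3, 16, 8
    lengths = torch.tensor([7, 5, 2])  # already sorted in decreasing order
    emb = torch.randn(seq_len, batch, input_dim)
    rnn = nn.GRU(input_dim, hidden, num_layers=1, bidirectional=True)
    packed = pack(emb, lengths, batch_first=False)
    outputs, hidden_t = rnn(packed)
    outputs = unpack(outputs, batch_first=False)[0]
    # outputs: [max(lengths), batch, 2 * hidden]; hidden_t: [2, batch, hidden]
    return outputs.shape, hidden_t.shape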
|
from dublinbus.serializers import UserSerializer
def my_jwt_response_handler(token, user=None, request=None):
    ''' JWT response handler.
    Returns the user's serialized data together with the generated token when a token is issued.
    '''
response = UserSerializer(user, context={'request': request}).data
response["token"] = token
return response
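# Hedged wiring sketch: with djangorestframework-jwt this handler is typically
# registered in settings.py through the JWT_AUTH dict; the dotted path below is
# an assumption and must point at wherever this function actually lives.
#
# JWT_AUTH = {
#     'JWT_RESPONSE_PAYLOAD_HANDLER': 'dublinbus.jwt_handlers.my_jwt_response_handler',
# }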
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
import scipy.stats
from config import ATOL, DEVICES, RTOL
from parameterize import TEST_CASE_NAME, parameterize_cls, place, xrand
paddle.enable_static()
@place(DEVICES)
@parameterize_cls((TEST_CASE_NAME, 'concentration'),
[('test-one-dim', np.random.rand(89) + 5.0)])
class TestDirichlet(unittest.TestCase):
def setUp(self):
self.program = paddle.static.Program()
self.executor = paddle.static.Executor()
with paddle.static.program_guard(self.program):
conc = paddle.static.data('conc', self.concentration.shape,
self.concentration.dtype)
self._paddle_diric = paddle.distribution.Dirichlet(conc)
self.feeds = {'conc': self.concentration}
def test_mean(self):
with paddle.static.program_guard(self.program):
[out] = self.executor.run(self.program,
feed=self.feeds,
fetch_list=[self._paddle_diric.mean])
np.testing.assert_allclose(
out,
scipy.stats.dirichlet.mean(self.concentration),
rtol=RTOL.get(str(self.concentration.dtype)),
atol=ATOL.get(str(self.concentration.dtype)))
def test_variance(self):
with paddle.static.program_guard(self.program):
[out] = self.executor.run(self.program,
feed=self.feeds,
fetch_list=[self._paddle_diric.variance])
np.testing.assert_allclose(
out,
scipy.stats.dirichlet.var(self.concentration),
rtol=RTOL.get(str(self.concentration.dtype)),
atol=ATOL.get(str(self.concentration.dtype)))
def test_prob(self):
with paddle.static.program_guard(self.program):
random_number = np.random.rand(*self.concentration.shape)
random_number = random_number / random_number.sum()
feeds = dict(self.feeds, value=random_number)
value = paddle.static.data('value', random_number.shape,
random_number.dtype)
out = self._paddle_diric.prob(value)
[out] = self.executor.run(self.program,
feed=feeds,
fetch_list=[out])
np.testing.assert_allclose(
out,
scipy.stats.dirichlet.pdf(random_number, self.concentration),
rtol=RTOL.get(str(self.concentration.dtype)),
atol=ATOL.get(str(self.concentration.dtype)))
def test_log_prob(self):
with paddle.static.program_guard(self.program):
random_number = np.random.rand(*self.concentration.shape)
random_number = random_number / random_number.sum()
feeds = dict(self.feeds, value=random_number)
value = paddle.static.data('value', random_number.shape,
random_number.dtype)
out = self._paddle_diric.log_prob(value)
[out] = self.executor.run(self.program,
feed=feeds,
fetch_list=[out])
np.testing.assert_allclose(
out,
scipy.stats.dirichlet.logpdf(random_number, self.concentration),
rtol=RTOL.get(str(self.concentration.dtype)),
atol=ATOL.get(str(self.concentration.dtype)))
def test_entropy(self):
with paddle.static.program_guard(self.program):
[out] = self.executor.run(
self.program,
feed=self.feeds,
fetch_list=[self._paddle_diric.entropy()])
np.testing.assert_allclose(
out,
scipy.stats.dirichlet.entropy(self.concentration),
rtol=RTOL.get(str(self.concentration.dtype)),
atol=ATOL.get(str(self.concentration.dtype)))
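# Hedged dynamic-graph sketch (separate from the static-graph tests above): the
# same Dirichlet-vs-scipy comparison in imperative mode, assuming it is safe to
# toggle paddle.disable_static()/enable_static() in the calling context; the
# tolerances below are arbitrary.
def _dygraph_mean_sketch():
    paddle.disable_static()
    conc = np.random.rand(5) + 5.0
    diric = paddle.distribution.Dirichlet(paddle.to_tensor(conc))
    np.testing.assert_allclose(
        diric.mean.numpy(),
        scipy.stats.dirichlet.mean(conc),
        rtol=1e-6,
        atol=1e-6)
    paddle.enable_static()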
|
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
import re # noqa: F401
import sys # noqa: F401
from datadog_api_client.v2.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from datadog_api_client.v2.model.api_key_update_attributes import APIKeyUpdateAttributes
from datadog_api_client.v2.model.api_keys_type import APIKeysType
globals()["APIKeyUpdateAttributes"] = APIKeyUpdateAttributes
globals()["APIKeysType"] = APIKeysType
class APIKeyUpdateData(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
"attributes": (APIKeyUpdateAttributes,), # noqa: E501
"id": (str,), # noqa: E501
"type": (APIKeysType,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
"attributes": "attributes", # noqa: E501
"id": "id", # noqa: E501
"type": "type", # noqa: E501
}
_composed_schemas = {}
required_properties = set(
[
"_data_store",
"_check_type",
"_spec_property_naming",
"_path_to_item",
"_configuration",
"_visited_composed_classes",
]
)
@convert_js_args_to_python_args
def __init__(self, attributes, id, type, *args, **kwargs): # noqa: E501
"""APIKeyUpdateData - a model defined in OpenAPI
Args:
attributes (APIKeyUpdateAttributes):
id (str): ID of the API key.
type (APIKeysType):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
                          composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop("_check_type", True)
_spec_property_naming = kwargs.pop("_spec_property_naming", False)
_path_to_item = kwargs.pop("_path_to_item", ())
_configuration = kwargs.pop("_configuration", None)
_visited_composed_classes = kwargs.pop("_visited_composed_classes", ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments."
% (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.attributes = attributes
self.id = id
self.type = type
for var_name, var_value in kwargs.items():
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
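# Hypothetical construction sketch (not part of the generated model): how this
# class is typically instantiated, assuming the lazily imported attribute/type
# models accept the values shown; the field values below are placeholders.
#
# from datadog_api_client.v2.model.api_key_update_attributes import APIKeyUpdateAttributes
# from datadog_api_client.v2.model.api_keys_type import APIKeysType
#
# body = APIKeyUpdateData(
#     attributes=APIKeyUpdateAttributes(name="Renamed API key"),
#     id="00000000-0000-0000-0000-000000000000",
#     type=APIKeysType("api_keys"),
# )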
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for iterator_utils.py"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.ops import lookup_ops
from ..utils import iterator_utils
class IteratorUtilsTest(tf.test.TestCase):
def testGetIterator(self):
tf.set_random_seed(1)
tgt_vocab_table = src_vocab_table = lookup_ops.index_table_from_tensor(
tf.constant(["a", "b", "c", "eos", "sos"]))
src_dataset = tf.data.Dataset.from_tensor_slices(
tf.constant(["f e a g", "c c a", "d", "c a"]))
tgt_dataset = tf.data.Dataset.from_tensor_slices(
tf.constant(["c c", "a b", "", "b c"]))
hparams = tf.contrib.training.HParams(
random_seed=3,
num_buckets=5,
eos="eos",
sos="sos")
batch_size = 2
src_max_len = 3
iterator = iterator_utils.get_iterator(
src_dataset=src_dataset,
tgt_dataset=tgt_dataset,
src_vocab_table=src_vocab_table,
tgt_vocab_table=tgt_vocab_table,
batch_size=batch_size,
sos=hparams.sos,
eos=hparams.eos,
random_seed=hparams.random_seed,
num_buckets=hparams.num_buckets,
src_max_len=src_max_len,
reshuffle_each_iteration=False)
table_initializer = tf.tables_initializer()
source = iterator.source
target_input = iterator.target_input
target_output = iterator.target_output
src_seq_len = iterator.source_sequence_length
tgt_seq_len = iterator.target_sequence_length
self.assertEqual([None, None], source.shape.as_list())
self.assertEqual([None, None], target_input.shape.as_list())
self.assertEqual([None, None], target_output.shape.as_list())
self.assertEqual([None], src_seq_len.shape.as_list())
self.assertEqual([None], tgt_seq_len.shape.as_list())
with self.test_session() as sess:
sess.run(table_initializer)
sess.run(iterator.initializer)
(source_v, src_len_v, target_input_v, target_output_v, tgt_len_v) = (
sess.run((source, src_seq_len, target_input, target_output,
tgt_seq_len)))
self.assertAllEqual(
[[-1, -1, 0], # "f" == unknown, "e" == unknown, a
[2, 0, 3]], # c a eos -- eos is padding
source_v)
self.assertAllEqual([3, 2], src_len_v)
self.assertAllEqual(
[[4, 2, 2], # sos c c
[4, 1, 2]], # sos b c
target_input_v)
self.assertAllEqual(
[[2, 2, 3], # c c eos
[1, 2, 3]], # b c eos
target_output_v)
self.assertAllEqual([3, 3], tgt_len_v)
(source_v, src_len_v, target_input_v, target_output_v, tgt_len_v) = (
sess.run((source, src_seq_len, target_input, target_output,
tgt_seq_len)))
self.assertAllEqual(
[[2, 2, 0]], # c c a
source_v)
self.assertAllEqual([3], src_len_v)
self.assertAllEqual(
[[4, 0, 1]], # sos a b
target_input_v)
self.assertAllEqual(
[[0, 1, 3]], # a b eos
target_output_v)
self.assertAllEqual([3], tgt_len_v)
with self.assertRaisesOpError("End of sequence"):
sess.run(source)
def testGetIteratorWithShard(self):
tf.set_random_seed(1)
tgt_vocab_table = src_vocab_table = lookup_ops.index_table_from_tensor(
tf.constant(["a", "b", "c", "eos", "sos"]))
src_dataset = tf.data.Dataset.from_tensor_slices(
tf.constant(["c c a", "f e a g", "d", "c a"]))
tgt_dataset = tf.data.Dataset.from_tensor_slices(
tf.constant(["a b", "c c", "", "b c"]))
hparams = tf.contrib.training.HParams(
random_seed=3,
num_buckets=5,
eos="eos",
sos="sos")
batch_size = 2
src_max_len = 3
iterator = iterator_utils.get_iterator(
src_dataset=src_dataset,
tgt_dataset=tgt_dataset,
src_vocab_table=src_vocab_table,
tgt_vocab_table=tgt_vocab_table,
batch_size=batch_size,
sos=hparams.sos,
eos=hparams.eos,
random_seed=hparams.random_seed,
num_buckets=hparams.num_buckets,
src_max_len=src_max_len,
num_shards=2,
shard_index=1,
reshuffle_each_iteration=False)
table_initializer = tf.tables_initializer()
source = iterator.source
target_input = iterator.target_input
target_output = iterator.target_output
src_seq_len = iterator.source_sequence_length
tgt_seq_len = iterator.target_sequence_length
self.assertEqual([None, None], source.shape.as_list())
self.assertEqual([None, None], target_input.shape.as_list())
self.assertEqual([None, None], target_output.shape.as_list())
self.assertEqual([None], src_seq_len.shape.as_list())
self.assertEqual([None], tgt_seq_len.shape.as_list())
with self.test_session() as sess:
sess.run(table_initializer)
sess.run(iterator.initializer)
(source_v, src_len_v, target_input_v, target_output_v, tgt_len_v) = (
sess.run((source, src_seq_len, target_input, target_output,
tgt_seq_len)))
self.assertAllEqual(
[[-1, -1, 0], # "f" == unknown, "e" == unknown, a
[2, 0, 3]], # c a eos -- eos is padding
source_v)
self.assertAllEqual([3, 2], src_len_v)
self.assertAllEqual(
[[4, 2, 2], # sos c c
[4, 1, 2]], # sos b c
target_input_v)
self.assertAllEqual(
[[2, 2, 3], # c c eos
[1, 2, 3]], # b c eos
target_output_v)
self.assertAllEqual([3, 3], tgt_len_v)
with self.assertRaisesOpError("End of sequence"):
sess.run(source)
def testGetIteratorWithSkipCount(self):
tf.set_random_seed(1)
tgt_vocab_table = src_vocab_table = lookup_ops.index_table_from_tensor(
tf.constant(["a", "b", "c", "eos", "sos"]))
src_dataset = tf.data.Dataset.from_tensor_slices(
tf.constant(["c a", "c c a", "d", "f e a g"]))
tgt_dataset = tf.data.Dataset.from_tensor_slices(
tf.constant(["b c", "a b", "", "c c"]))
hparams = tf.contrib.training.HParams(
random_seed=3,
num_buckets=5,
eos="eos",
sos="sos")
batch_size = 2
src_max_len = 3
skip_count = tf.placeholder(shape=(), dtype=tf.int64)
iterator = iterator_utils.get_iterator(
src_dataset=src_dataset,
tgt_dataset=tgt_dataset,
src_vocab_table=src_vocab_table,
tgt_vocab_table=tgt_vocab_table,
batch_size=batch_size,
sos=hparams.sos,
eos=hparams.eos,
random_seed=hparams.random_seed,
num_buckets=hparams.num_buckets,
src_max_len=src_max_len,
skip_count=skip_count,
reshuffle_each_iteration=False)
table_initializer = tf.tables_initializer()
source = iterator.source
target_input = iterator.target_input
target_output = iterator.target_output
src_seq_len = iterator.source_sequence_length
tgt_seq_len = iterator.target_sequence_length
self.assertEqual([None, None], source.shape.as_list())
self.assertEqual([None, None], target_input.shape.as_list())
self.assertEqual([None, None], target_output.shape.as_list())
self.assertEqual([None], src_seq_len.shape.as_list())
self.assertEqual([None], tgt_seq_len.shape.as_list())
with self.test_session() as sess:
sess.run(table_initializer)
sess.run(iterator.initializer, feed_dict={skip_count: 3})
(source_v, src_len_v, target_input_v, target_output_v, tgt_len_v) = (
sess.run((source, src_seq_len, target_input, target_output,
tgt_seq_len)))
self.assertAllEqual(
[[-1, -1, 0]], # "f" == unknown, "e" == unknown, a
source_v)
self.assertAllEqual([3], src_len_v)
self.assertAllEqual(
[[4, 2, 2]], # sos c c
target_input_v)
self.assertAllEqual(
[[2, 2, 3]], # c c eos
target_output_v)
self.assertAllEqual([3], tgt_len_v)
with self.assertRaisesOpError("End of sequence"):
sess.run(source)
# Re-init iterator with skip_count=0.
sess.run(iterator.initializer, feed_dict={skip_count: 0})
(source_v, src_len_v, target_input_v, target_output_v, tgt_len_v) = (
sess.run((source, src_seq_len, target_input, target_output,
tgt_seq_len)))
self.assertAllEqual(
[[2, 0, 3], # c a eos -- eos is padding
[-1, -1, 0]], # "f" == unknown, "e" == unknown, a
source_v)
self.assertAllEqual([2, 3], src_len_v)
self.assertAllEqual(
[[4, 1, 2], # sos b c
[4, 2, 2]], # sos c c
target_input_v)
self.assertAllEqual(
[[1, 2, 3], # b c eos
[2, 2, 3]], # c c eos
target_output_v)
self.assertAllEqual([3, 3], tgt_len_v)
(source_v, src_len_v, target_input_v, target_output_v, tgt_len_v) = (
sess.run((source, src_seq_len, target_input, target_output,
tgt_seq_len)))
self.assertAllEqual(
[[2, 2, 0]], # c c a
source_v)
self.assertAllEqual([3], src_len_v)
self.assertAllEqual(
[[4, 0, 1]], # sos a b
target_input_v)
self.assertAllEqual(
[[0, 1, 3]], # a b eos
target_output_v)
self.assertAllEqual([3], tgt_len_v)
with self.assertRaisesOpError("End of sequence"):
sess.run(source)
def testGetInferIterator(self):
src_vocab_table = lookup_ops.index_table_from_tensor(
tf.constant(["a", "b", "c", "eos", "sos"]))
src_dataset = tf.data.Dataset.from_tensor_slices(
tf.constant(["c c a", "c a", "d", "f e a g"]))
hparams = tf.contrib.training.HParams(
random_seed=3,
eos="eos",
sos="sos")
batch_size = 2
src_max_len = 3
iterator = iterator_utils.get_infer_iterator(
src_dataset=src_dataset,
src_vocab_table=src_vocab_table,
batch_size=batch_size,
eos=hparams.eos,
src_max_len=src_max_len)
table_initializer = tf.tables_initializer()
source = iterator.source
seq_len = iterator.source_sequence_length
self.assertEqual([None, None], source.shape.as_list())
self.assertEqual([None], seq_len.shape.as_list())
with self.test_session() as sess:
sess.run(table_initializer)
sess.run(iterator.initializer)
(source_v, seq_len_v) = sess.run((source, seq_len))
self.assertAllEqual(
[[2, 2, 0], # c c a
[2, 0, 3]], # c a eos
source_v)
self.assertAllEqual([3, 2], seq_len_v)
(source_v, seq_len_v) = sess.run((source, seq_len))
self.assertAllEqual(
[[-1, 3, 3], # "d" == unknown, eos eos
[-1, -1, 0]], # "f" == unknown, "e" == unknown, a
source_v)
self.assertAllEqual([1, 3], seq_len_v)
with self.assertRaisesOpError("End of sequence"):
sess.run((source, seq_len))
if __name__ == "__main__":
tf.test.main()
|
import os
import matplotlib
import matplotlib.dates
import json
from datetime import datetime
from matplotlib import pyplot
def show_results_graph(timer, name=None):
    with open('light_plot.json', 'r') as f:
        data = json.load(f)
    with open('light_plot_imporved.json', 'r') as f:
        data_improved = json.load(f)
os.remove('light_plot.json')
os.remove('light_plot_imporved.json')
x = []
y = []
x_improved = []
y_improved = []
for item in data:
date = datetime.strptime(item['x'], "%Y-%m-%d %H:%M:%S")
x.append(date)
        if item['y'] == 1:
            y.append(item['y'] + 0.1)  # offset to distinguish the normal light state from the improved one
else:
y.append(item['y'])
for item in data_improved:
date = datetime.strptime(item['x'], "%Y-%m-%d %H:%M:%S")
x_improved.append(date)
y_improved.append(item['y'])
dates_normal = matplotlib.dates.date2num(x)
dates_improved = matplotlib.dates.date2num(x_improved)
    pyplot.plot_date(dates_normal, y, 'b-', label="Regular data", linewidth=2)
    pyplot.plot_date(dates_improved, y_improved, 'r-', label="Possible improvement", linewidth=2)
pyplot.title("Compare actual data and possible improvement ({} minutes)".format(timer))
pyplot.legend()
    if name:
        pyplot.savefig(name)
pyplot.show()
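# Hypothetical smoke-test sketch (not part of the original script): it writes two
# tiny sample series in the {"x": timestamp, "y": value} format the function
# expects, so show_results_graph() can be exercised without the real producer;
# the sample timestamps and values are made up.
if __name__ == '__main__':
    sample = [{"x": "2020-01-01 08:00:00", "y": 0}, {"x": "2020-01-01 09:00:00", "y": 1}]
    with open('light_plot.json', 'w') as f:
        json.dump(sample, f)
    with open('light_plot_imporved.json', 'w') as f:
        json.dump(sample, f)
    show_results_graph(timer=60)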
|
#
#
#
import unittest, sys
import IfxPy
import config
from testfunctions import IfxPyTestFunctions
class IfxPyTestCase(unittest.TestCase):
def test_148_CallSPDiffBindPattern_01(self):
obj = IfxPyTestFunctions()
obj.assert_expect(self.run_test_148)
def run_test_148(self):
conn = IfxPy.connect(config.ConnStr, config.user, config.password)
if conn:
##### Set up #####
serverinfo = IfxPy.server_info( conn )
server = serverinfo.DBMS_NAME[0:3]
try:
sql = "DROP TABLE sptb"
IfxPy.exec_immediate(conn, sql)
except:
pass
try:
sql = "DROP PROCEDURE sp"
IfxPy.exec_immediate(conn, sql)
except:
pass
sql = "CREATE TABLE sptb (c1 INTEGER, c2 FLOAT, c3 VARCHAR(10), c4 INT8, c5 VARCHAR(20))"
IfxPy.exec_immediate(conn, sql)
sql = "INSERT INTO sptb (c1, c2, c3, c4, c5) VALUES (1, 5.01, 'varchar', 3271982, 'varchar data')"
IfxPy.exec_immediate(conn, sql)
sql = """CREATE PROCEDURE sp(OUT out1 INTEGER, OUT out2 FLOAT, OUT out3 VARCHAR(10), OUT out4 INT8, OUT out5 VARCHAR(20));
SELECT c1, c2, c3, c4, c5 INTO out1, out2, out3, out4, out5 FROM sptb; END PROCEDURE;"""
IfxPy.exec_immediate(conn, sql)
#############################
##### Run the test #####
out1 = 0
out2 = 0.00
out3 = ""
out4 = 0
out5 = ""
stmt, out1, out2, out3, out4, out5 = IfxPy.callproc(conn, 'sp', (out1, out2, out3, out4, out5))
print "out 1:"
print out1
print "out 2:"
print out2
print "out 3:"
print out3
print "out 4:"
print out4
print "out 5:"
print out5
#############################
else:
print "Connection failed."
#__END__
#__IDS_EXPECTED__
#out 1:
#1
#out 2:
#5.01
#out 3:
#varchar
#out 4:
#3271982
#out 5:
#varchar data
|
# Copyright 2020 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import functools
import queue
from horovod.common.exceptions import HorovodInternalError, HostsUpdatedInterrupt
from horovod.runner.elastic.worker import HostUpdateResult, WorkerNotificationManager
notification_manager = WorkerNotificationManager()
class State(object):
"""State representation used for tracking in memory state across workers.
Args:
bcast_object: Function used to broadcast a variable from rank 0 to the other workers.
get_rank: Function that returns the current rank of this worker.
"""
def __init__(self, bcast_object, get_rank):
self._bcast_object = bcast_object
self._rank = get_rank
self._host_messages = queue.Queue()
self._last_updated_timestamp = 0
self._reset_callbacks = []
def register_reset_callbacks(self, callbacks):
"""Register callbacks that will be invoked following a reset event (worker added or removed).
For example, a common use of a reset callback would be to update the learning rate scale with the
new number of workers.
Args:
callbacks: list of functions to execute.
"""
self._reset_callbacks.extend(callbacks)
def on_reset(self):
self._host_messages = queue.Queue()
self.reset()
for callback in self._reset_callbacks:
callback()
def on_hosts_updated(self, timestamp, update_res):
self._host_messages.put((timestamp, update_res))
def commit(self):
"""Commits all modifications to state tracked by this object to host memory.
This call will also check for any changes to known hosts, and raise a `HostsUpdatedInterrupt`
if any were detected.
        Because commits are a heavy operation involving data copy (potentially from GPU to host), it is
        recommended to consider committing less frequently than once per batch. This allows users to trade off
        between per-batch execution time and lost training steps in the event of a worker failure.
"""
self.save()
self.check_host_updates()
def check_host_updates(self):
"""Checks that a notification has been sent indicating that hosts can be added or will be removed.
Raises a `HostsUpdatedInterrupt` if such a notification has been received.
"""
# Iterate through the update messages sent from the server. If the update timestamp
        # is greater than the last update timestamp, then trigger a HostsUpdatedInterrupt.
last_updated_timestamp = prev_timestamp = self._last_updated_timestamp
all_update = HostUpdateResult.no_update
while not self._host_messages.empty():
timestamp, update = self._host_messages.get()
if timestamp > last_updated_timestamp:
last_updated_timestamp = timestamp
all_update |= update
# In order to ensure all workers raise the exception at the same time, we need to sync
# the updated state across all the workers.
# TODO(travis): this should be a max allreduce to account for changes in rank 0
prev_timestamp, self._last_updated_timestamp, all_update = \
self._bcast_object((prev_timestamp, last_updated_timestamp, all_update))
# At this point, updated state is globally consistent across all ranks.
if self._last_updated_timestamp > prev_timestamp:
raise HostsUpdatedInterrupt(all_update == HostUpdateResult.removed)
def save(self):
"""Saves state to host memory."""
raise NotImplementedError()
def restore(self):
"""Restores the last committed state, undoing any uncommitted modifications."""
raise NotImplementedError()
def sync(self):
"""Synchronize state across workers."""
raise NotImplementedError()
def reset(self):
"""Reset objects and variables following a reset event (before synchronization)."""
pass
class ObjectState(State):
"""State for simple Python objects.
Every object is specified as a keyword argument, and will be assigned as an attribute.
Args:
bcast_object: Horovod broadcast object function used to sync state dictionary.
        get_rank: Horovod rank function used to identify whether this process is the coordinator.
kwargs: Properties to sync, will be exposed as attributes of the object.
"""
def __init__(self, bcast_object, get_rank, **kwargs):
self._bcast_object = bcast_object
self._saved_state = kwargs
self._set_attrs()
super(ObjectState, self).__init__(bcast_object=bcast_object, get_rank=get_rank)
def save(self):
new_state = {}
for attr in self._saved_state.keys():
new_state[attr] = getattr(self, attr)
self._saved_state = new_state
def restore(self):
self._set_attrs()
def sync(self):
if self._saved_state:
self._saved_state = self._bcast_object(self._saved_state)
self._set_attrs()
def _set_attrs(self):
for attr, value in self._saved_state.items():
setattr(self, attr, value)
def run_fn(func, reset):
@functools.wraps(func)
def wrapper(state, *args, **kwargs):
notification_manager.init()
notification_manager.register_listener(state)
skip_sync = False
try:
while True:
try:
if not skip_sync:
state.sync()
return func(state, *args, **kwargs)
except HorovodInternalError:
state.restore()
skip_sync = False
except HostsUpdatedInterrupt as e:
skip_sync = e.skip_sync
reset()
state.on_reset()
finally:
notification_manager.remove_listener(state)
return wrapper
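# Hedged usage sketch (not one of Horovod's shipped examples): ObjectState only
# needs a broadcast callable and a rank callable, so a single-process identity
# broadcast is enough to show the save/restore/commit round trip; in real
# elastic training these callables come from the framework bindings.
def _object_state_sketch():
    state = ObjectState(bcast_object=lambda obj, root_rank=0: obj,
                        get_rank=lambda: 0,
                        epoch=0, batch=0)
    state.epoch = 3   # uncommitted modification
    state.restore()   # roll back to the last saved state
    assert state.epoch == 0
    state.epoch = 3
    state.commit()    # save state and check for host updates
    state.restore()
    assert state.epoch == 3
    return state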
|
import multiprocessing
import random
import time
from pysys.constants import *
from xpybuild.xpybuild_basetest import XpybuildBaseTest
class PySysTest(XpybuildBaseTest):
buildRoot = None # can override this with -XbuildRoot=path to measure your own build
def execute(self):
buildroot = self.buildRoot if self.buildRoot else self.input
        assert os.path.isdir(buildroot), buildroot
cpus = multiprocessing.cpu_count()
pending = set()
pending.add(1)
pending.add(cpus*1//5)
pending.add(cpus*2//5)
pending.add(cpus*3//5)
pending.add(cpus*4//5)
pending.add(cpus)
#for i in range(1, (cpus)/4 + 1):
# pending.add(i*4)
#pending.add(1)
pending = sorted(p for p in pending if p > 0)
self.log.info('This machine has %d CPUs', cpus)
self.log.info('Planning to run with workers=%s', pending)
random.shuffle(pending) # shuffle to reduce impact of caching; also means if we cycle this test we'll get more useful data
self.bestSoFar = 10000000000
self.bestSoFarWorkers = 1
self.results = {}
starttime = time.time()
def runbuild(workers):
assert workers <= cpus, workers
assert workers > 0
self.log.info('(%d/%d) Building with workers=%d (approx %0.1f hours left)', len(self.results)+1, len(pending), workers,
-1 if (len(self.results)==0) else ( # avoid div by zero on first one
(len(pending)-len(self.results) + 2) # number left; add 2 for possible extra runs
*(time.time()-starttime)/len(self.results) # average time per result
/60.0/60.0 # secs to hours
)
)
t = time.time()
#time.sleep(1)
env = dict(os.environ) if self.buildRoot else None # inherit full parent env for custom builds
self.xpybuild(args=['--workers', str(workers),
'%s=%s'%(getattr(self, 'buildOutputDirProperty', 'OUTPUT_DIR'), self.output+'/output%d'%workers)], buildfile=buildroot+'/root.xpybuild.py', stdouterr='xpybuild-j%d'%workers, timeout=2*60*60, env=env, setOutputDir=False)
t = time.time()-t
self.reportPerformanceResult(t, 'Total build time with %d worker threads'%workers, 's', resultDetails={'workers':workers})
self.results[workers] = t
if t < self.bestSoFar:
self.bestSoFar, self.bestSoFarWorkers = t, workers
self.deletedir(self.output+'/output%d'%workers)
self.log.info('')
for w in pending:
runbuild(w)
# explore slightly more or less than the best to find the optimum, even if not in the pending list
while self.bestSoFarWorkers < cpus and self.bestSoFarWorkers+1 not in self.results:
self.log.info('Best so far is %d; running an extra test for one extra worker', self.bestSoFarWorkers)
runbuild(self.bestSoFarWorkers+1)
while self.bestSoFarWorkers>1 and self.bestSoFarWorkers-1 not in self.results:
self.log.info('Best so far is %d; running an extra test for one less worker', self.bestSoFarWorkers)
runbuild(self.bestSoFarWorkers-1)
for w in sorted(self.results):
self.log.info('Time for % 2d workers: %0.1f', w, self.results[w])
self.log.info('')
self.log.info('Optimum number of workers is %d', self.bestSoFarWorkers)
self.log.info('... which is a multiplier of %0.2f for this %d CPU machine', self.bestSoFarWorkers/float(cpus), cpus)
self.log.info('(for a more accurate result, run with multiple cycles and plot the results .csv in a spreadsheet)')
def validate(self):
pass
|
import json
from os import environ
from pathlib import Path
try:
CONFIG_DIR = Path(environ['XDG_CONFIG_HOME'], __package__)
except KeyError:
CONFIG_DIR = Path.home() / '.config' / __package__
if not CONFIG_DIR.exists():
    CONFIG_DIR.mkdir(parents=True)
CONFIG_FILE = CONFIG_DIR / 'config.json'
with open(CONFIG_FILE) as f:
config = json.load(f)
TOKEN = config['token']
try:
DB_PATH = Path(config['db-path'])
except KeyError:
try:
DB_PATH = Path(environ['XDG_DATA_HOME'], __package__, 'messages.db')
except KeyError:
DB_PATH = Path.home() / '.local/share' / __package__ / 'messages.db'
DB_DIR = DB_PATH.parent
if not DB_DIR.exists():
    DB_DIR.mkdir(parents=True)
DB_URI = f'sqlite:///{DB_PATH}'
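# Illustrative only (assumed values, not computed here): with no XDG variables
# set and __package__ == 'mybot', the fallbacks above resolve roughly to
#   CONFIG_FILE -> ~/.config/mybot/config.json
#   DB_PATH     -> ~/.local/share/mybot/messages.db
#   DB_URI      -> 'sqlite:////home/<user>/.local/share/mybot/messages.db'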
|
#!/usr/bin/env python3
import pyglet
import run_demos
import glooey.themes.golden as golden
window = pyglet.window.Window()
gui = golden.Gui(window)
button = golden.BasicButton('Lorem Ipsum')
gui.add(button)
pyglet.app.run()
|
# Copyright 2019 the authors.
# This file is part of Hy, which is free software licensed under the Expat
# license. See the LICENSE.
import sys
import traceback
import pytest
from math import isnan
from hy.models import (HyExpression, HyInteger, HyFloat, HyComplex, HySymbol,
HyString, HyDict, HyList, HySet, HyKeyword)
from hy.lex import tokenize
from hy.lex.exceptions import LexException, PrematureEndOfInput
from hy.errors import hy_exc_handler
def peoi(): return pytest.raises(PrematureEndOfInput)
def lexe(): return pytest.raises(LexException)
def check_ex(execinfo, expected):
output = traceback.format_exception_only(execinfo.type, execinfo.value)
assert output[:-1] == expected[:-1]
# Python 2.7 doesn't give the full exception name, so we compensate.
assert output[-1].endswith(expected[-1])
def check_trace_output(capsys, execinfo, expected):
sys.__excepthook__(execinfo.type, execinfo.value, execinfo.tb)
captured_wo_filtering = capsys.readouterr()[-1].strip('\n')
hy_exc_handler(execinfo.type, execinfo.value, execinfo.tb)
captured_w_filtering = capsys.readouterr()[-1].strip('\n')
output = captured_w_filtering.split('\n')
# Make sure the filtered frames aren't the same as the unfiltered ones.
assert output[:-1] != captured_wo_filtering.split('\n')[:-1]
# Remove the origin frame lines.
assert output[3:-1] == expected[:-1]
# Python 2.7 doesn't give the full exception name, so we compensate.
assert output[-1].endswith(expected[-1])
def test_lex_exception():
""" Ensure tokenize throws a fit on a partial input """
with peoi(): tokenize("(foo")
with peoi(): tokenize("{foo bar")
with peoi(): tokenize("(defn foo [bar]")
with peoi(): tokenize("(foo \"bar")
def test_unbalanced_exception():
"""Ensure the tokenization fails on unbalanced expressions"""
with lexe(): tokenize("(bar))")
with lexe(): tokenize("(baz [quux]])")
def test_lex_single_quote_err():
"Ensure tokenizing \"' \" throws a LexException that can be stringified"
# https://github.com/hylang/hy/issues/1252
with lexe() as execinfo:
tokenize("' ")
check_ex(execinfo, [
' File "<string>", line 1\n',
" '\n",
' ^\n',
'LexException: Could not identify the next token.\n'])
def test_lex_expression_symbols():
""" Make sure that expressions produce symbols """
objs = tokenize("(foo bar)")
assert objs == [HyExpression([HySymbol("foo"), HySymbol("bar")])]
def test_lex_expression_strings():
""" Test that expressions can produce strings """
objs = tokenize("(foo \"bar\")")
assert objs == [HyExpression([HySymbol("foo"), HyString("bar")])]
def test_lex_expression_integer():
""" Make sure expressions can produce integers """
objs = tokenize("(foo 2)")
assert objs == [HyExpression([HySymbol("foo"), HyInteger(2)])]
def test_lex_symbols():
""" Make sure that symbols are valid expressions"""
objs = tokenize("foo ")
assert objs == [HySymbol("foo")]
def test_lex_strings():
""" Make sure that strings are valid expressions"""
objs = tokenize('"foo"')
assert objs == [HyString("foo")]
# Make sure backslash-escaped newlines work (see issue #831)
objs = tokenize(r"""
"a\
bc"
""")
assert objs == [HyString("abc")]
def test_lex_strings_exception():
""" Make sure tokenize throws when codec can't decode some bytes"""
with lexe() as execinfo:
tokenize('\"\\x8\"')
check_ex(execinfo, [
' File "<string>", line 1\n',
' "\\x8"\n',
' ^\n',
'LexException: Can\'t convert "\\x8" to a HyString\n'])
def test_lex_bracket_strings():
objs = tokenize("#[my delim[hello world]my delim]")
assert objs == [HyString("hello world")]
assert objs[0].brackets == "my delim"
objs = tokenize("#[[squid]]")
assert objs == [HyString("squid")]
assert objs[0].brackets == ""
def test_lex_integers():
""" Make sure that integers are valid expressions"""
objs = tokenize("42 ")
assert objs == [HyInteger(42)]
def test_lex_fractions():
""" Make sure that fractions are valid expressions"""
objs = tokenize("1/2")
assert objs == [HyExpression([HySymbol("fraction"), HyInteger(1),
HyInteger(2)])]
def test_lex_expression_float():
""" Make sure expressions can produce floats """
objs = tokenize("(foo 2.)")
assert objs == [HyExpression([HySymbol("foo"), HyFloat(2.)])]
objs = tokenize("(foo -0.5)")
assert objs == [HyExpression([HySymbol("foo"), HyFloat(-0.5)])]
objs = tokenize("(foo 1.e7)")
assert objs == [HyExpression([HySymbol("foo"), HyFloat(1.e7)])]
def test_lex_big_float():
# https://github.com/hylang/hy/issues/1448
assert tokenize("1e900") == [HyFloat(1e900)]
assert tokenize("1e900-1e900j") == [HyComplex(1e900, -1e900)]
def test_lex_nan_and_inf():
assert isnan(tokenize("NaN")[0])
assert tokenize("Nan") == [HySymbol("Nan")]
assert tokenize("nan") == [HySymbol("nan")]
assert tokenize("NAN") == [HySymbol("NAN")]
assert tokenize("Inf") == [HyFloat(float("inf"))]
assert tokenize("inf") == [HySymbol("inf")]
assert tokenize("INF") == [HySymbol("INF")]
assert tokenize("-Inf") == [HyFloat(float("-inf"))]
assert tokenize("-inf") == [HySymbol("-inf")]
assert tokenize("-INF") == [HySymbol("-INF")]
def test_lex_expression_complex():
""" Make sure expressions can produce complex """
def t(x): return tokenize("(foo {})".format(x))
def f(x): return [HyExpression([HySymbol("foo"), x])]
assert t("2.j") == f(HyComplex(2.j))
assert t("-0.5j") == f(HyComplex(-0.5j))
assert t("1.e7j") == f(HyComplex(1e7j))
assert t("j") == f(HySymbol("j"))
assert isnan(t("NaNj")[0][1].imag)
assert t("nanj") == f(HySymbol("nanj"))
assert t("Inf+Infj") == f(HyComplex(complex(float("inf"), float("inf"))))
assert t("Inf-Infj") == f(HyComplex(complex(float("inf"), float("-inf"))))
assert t("Inf-INFj") == f(HySymbol("Inf-INFj"))
def test_lex_digit_separators():
assert tokenize("1_000_000") == [HyInteger(1000000)]
assert tokenize("1,000,000") == [HyInteger(1000000)]
assert tokenize("1,000_000") == [HyInteger(1000000)]
assert tokenize("1_000,000") == [HyInteger(1000000)]
assert tokenize("0x_af") == [HyInteger(0xaf)]
assert tokenize("0x,af") == [HyInteger(0xaf)]
assert tokenize("0b_010") == [HyInteger(0b010)]
assert tokenize("0b,010") == [HyInteger(0b010)]
assert tokenize("0o_373") == [HyInteger(0o373)]
assert tokenize("0o,373") == [HyInteger(0o373)]
assert tokenize('1_2.3,4') == [HyFloat(12.34)]
assert tokenize('1_2e3,4') == [HyFloat(12e34)]
assert (tokenize("1,2/3_4") ==
[HyExpression([HySymbol("fraction"),
HyInteger(12), HyInteger(34)])])
assert tokenize("1,0_00j") == [HyComplex(1000j)]
assert tokenize("1,,,,___,____,,__,,2__,,,__") == [HyInteger(12)]
assert (tokenize("_1,,,,___,____,,__,,2__,,,__") ==
[HySymbol("_1,,,,___,____,,__,,2__,,,__")])
assert (tokenize("1,,,,___,____,,__,,2__,q,__") ==
[HySymbol("1,,,,___,____,,__,,2__,q,__")])
def test_lex_bad_attrs():
with lexe() as execinfo:
tokenize("1.foo")
check_ex(execinfo, [
' File "<string>", line 1\n',
' 1.foo\n',
' ^\n',
'LexException: Cannot access attribute on anything other'
' than a name (in order to get attributes of expressions,'
' use `(. <expression> <attr>)` or `(.<attr> <expression>)`)\n'])
with lexe(): tokenize("0.foo")
with lexe(): tokenize("1.5.foo")
with lexe(): tokenize("1e3.foo")
with lexe(): tokenize("5j.foo")
with lexe(): tokenize("3+5j.foo")
with lexe(): tokenize("3.1+5.1j.foo")
assert tokenize("j.foo")
with lexe(): tokenize("3/4.foo")
assert tokenize("a/1.foo")
assert tokenize("1/a.foo")
with lexe(): tokenize(":hello.foo")
def test_lex_line_counting():
""" Make sure we can count lines / columns """
entry = tokenize("(foo (one two))")[0]
assert entry.start_line == 1
assert entry.start_column == 1
assert entry.end_line == 1
assert entry.end_column == 15
entry = entry[1]
assert entry.start_line == 1
assert entry.start_column == 6
assert entry.end_line == 1
assert entry.end_column == 14
def test_lex_line_counting_multi():
""" Make sure we can do multi-line tokenization """
entries = tokenize("""
(foo (one two))
(foo bar)
""")
entry = entries[0]
assert entry.start_line == 2
assert entry.start_column == 1
assert entry.end_line == 2
assert entry.end_column == 15
entry = entries[1]
assert entry.start_line == 3
assert entry.start_column == 1
assert entry.end_line == 3
assert entry.end_column == 9
def test_lex_line_counting_multi_inner():
""" Make sure we can do multi-line tokenization (inner) """
entry = tokenize("""(foo
bar)""")[0]
inner = entry[0]
assert inner.start_line == 1
assert inner.start_column == 2
inner = entry[1]
assert inner.start_line == 2
assert inner.start_column == 5
def test_dicts():
""" Ensure that we can tokenize a dict. """
objs = tokenize("{foo bar bar baz}")
assert objs == [HyDict(["foo", "bar", "bar", "baz"])]
objs = tokenize("(bar {foo bar bar baz})")
assert objs == [HyExpression([HySymbol("bar"),
HyDict(["foo", "bar",
"bar", "baz"])])]
objs = tokenize("{(foo bar) (baz quux)}")
assert objs == [HyDict([
HyExpression([HySymbol("foo"), HySymbol("bar")]),
HyExpression([HySymbol("baz"), HySymbol("quux")])
])]
def test_sets():
""" Ensure that we can tokenize a set. """
objs = tokenize("#{1 2}")
assert objs == [HySet([HyInteger(1), HyInteger(2)])]
objs = tokenize("(bar #{foo bar baz})")
assert objs == [HyExpression([HySymbol("bar"),
HySet(["foo", "bar", "baz"])])]
objs = tokenize("#{(foo bar) (baz quux)}")
assert objs == [HySet([
HyExpression([HySymbol("foo"), HySymbol("bar")]),
HyExpression([HySymbol("baz"), HySymbol("quux")])
])]
# Duplicate items in a literal set should be okay (and should
# be preserved).
objs = tokenize("#{1 2 1 1 2 1}")
assert objs == [HySet([HyInteger(n) for n in [1, 2, 1, 1, 2, 1]])]
assert len(objs[0]) == 6
# https://github.com/hylang/hy/issues/1120
objs = tokenize("#{a 1}")
assert objs == [HySet([HySymbol("a"), HyInteger(1)])]
def test_nospace():
""" Ensure we can tokenize without spaces if we have to """
entry = tokenize("(foo(one two))")[0]
assert entry.start_line == 1
assert entry.start_column == 1
assert entry.end_line == 1
assert entry.end_column == 14
entry = entry[1]
assert entry.start_line == 1
assert entry.start_column == 5
assert entry.end_line == 1
assert entry.end_column == 13
def test_escapes():
""" Ensure we can escape things """
entry = tokenize(r"""(foo "foo\n")""")[0]
assert entry[1] == "foo\n"
entry = tokenize(r"""(foo r"foo\s")""")[0]
assert entry[1] == r"foo\s"
def test_unicode_escapes():
"""Ensure unicode escapes are handled correctly"""
s = r'"a\xac\u1234\u20ac\U00008000"'
assert len(s) == 29
entry = tokenize(s)[0]
assert len(entry) == 5
assert [ord(x) for x in entry] == [97, 172, 4660, 8364, 32768]
def test_complex():
"""Ensure we tokenize complex numbers properly"""
# This is a regression test for #143
entry = tokenize("(1j)")[0][0]
assert entry == HyComplex("1.0j")
entry = tokenize("(j)")[0][0]
assert entry == HySymbol("j")
def test_tag_macro():
"""Ensure tag macros are handled properly"""
entry = tokenize("#^()")
assert entry[0][0] == HySymbol("dispatch-tag-macro")
assert entry[0][1] == HyString("^")
assert len(entry[0]) == 3
def test_lex_comment_382():
"""Ensure that we can tokenize sources with a comment at the end"""
entry = tokenize("foo ;bar\n;baz")
assert entry == [HySymbol("foo")]
def test_discard():
"""Check that discarded terms are removed properly."""
# empty
assert tokenize("") == []
# single
assert tokenize("#_1") == []
# multiple
assert tokenize("#_1 #_2") == []
assert tokenize("#_1 #_2 #_3") == []
# nested discard
assert tokenize("#_ #_1 2") == []
assert tokenize("#_ #_ #_1 2 3") == []
# trailing
assert tokenize("0") == [0]
assert tokenize("0 #_1") == [0]
assert tokenize("0 #_1 #_2") == [0]
# leading
assert tokenize("2") == [2]
assert tokenize("#_1 2") == [2]
assert tokenize("#_0 #_1 2") == [2]
assert tokenize("#_ #_0 1 2") == [2]
# both
assert tokenize("#_1 2 #_3") == [2]
assert tokenize("#_0 #_1 2 #_ #_3 4") == [2]
# inside
assert tokenize("0 #_1 2") == [0, 2]
assert tokenize("0 #_1 #_2 3") == [0, 3]
assert tokenize("0 #_ #_1 2 3") == [0, 3]
# in HyList
assert tokenize("[]") == [HyList([])]
assert tokenize("[#_1]") == [HyList([])]
assert tokenize("[#_1 #_2]") == [HyList([])]
assert tokenize("[#_ #_1 2]") == [HyList([])]
assert tokenize("[0]") == [HyList([HyInteger(0)])]
assert tokenize("[0 #_1]") == [HyList([HyInteger(0)])]
assert tokenize("[0 #_1 #_2]") == [HyList([HyInteger(0)])]
assert tokenize("[2]") == [HyList([HyInteger(2)])]
assert tokenize("[#_1 2]") == [HyList([HyInteger(2)])]
assert tokenize("[#_0 #_1 2]") == [HyList([HyInteger(2)])]
assert tokenize("[#_ #_0 1 2]") == [HyList([HyInteger(2)])]
# in HySet
assert tokenize("#{}") == [HySet()]
assert tokenize("#{#_1}") == [HySet()]
assert tokenize("#{0 #_1}") == [HySet([HyInteger(0)])]
assert tokenize("#{#_1 0}") == [HySet([HyInteger(0)])]
# in HyDict
assert tokenize("{}") == [HyDict()]
assert tokenize("{#_1}") == [HyDict()]
assert tokenize("{#_0 1 2}") == [HyDict([HyInteger(1), HyInteger(2)])]
assert tokenize("{1 #_0 2}") == [HyDict([HyInteger(1), HyInteger(2)])]
assert tokenize("{1 2 #_0}") == [HyDict([HyInteger(1), HyInteger(2)])]
# in HyExpression
assert tokenize("()") == [HyExpression()]
assert tokenize("(#_foo)") == [HyExpression()]
assert tokenize("(#_foo bar)") == [HyExpression([HySymbol("bar")])]
assert tokenize("(foo #_bar)") == [HyExpression([HySymbol("foo")])]
assert tokenize("(foo :bar 1)") == [HyExpression([HySymbol("foo"), HyKeyword("bar"), HyInteger(1)])]
assert tokenize("(foo #_:bar 1)") == [HyExpression([HySymbol("foo"), HyInteger(1)])]
assert tokenize("(foo :bar #_1)") == [HyExpression([HySymbol("foo"), HyKeyword("bar")])]
# discard term with nesting
assert tokenize("[1 2 #_[a b c [d e [f g] h]] 3 4]") == [
HyList([HyInteger(1), HyInteger(2), HyInteger(3), HyInteger(4)])
]
# discard with other prefix syntax
assert tokenize("a #_'b c") == [HySymbol("a"), HySymbol("c")]
assert tokenize("a '#_b c") == [HySymbol("a"), HyExpression([HySymbol("quote"), HySymbol("c")])]
assert tokenize("a '#_b #_c d") == [HySymbol("a"), HyExpression([HySymbol("quote"), HySymbol("d")])]
assert tokenize("a '#_ #_b c d") == [HySymbol("a"), HyExpression([HySymbol("quote"), HySymbol("d")])]
def test_lex_exception_filtering(capsys):
"""Confirm that the exception filtering works for lexer errors."""
# First, test for PrematureEndOfInput
with peoi() as execinfo:
tokenize(" \n (foo\n \n")
check_trace_output(capsys, execinfo, [
' File "<string>", line 2',
' (foo',
' ^',
'PrematureEndOfInput: Premature end of input'])
# Now, for a generic LexException
with lexe() as execinfo:
tokenize(" \n\n 1.foo ")
check_trace_output(capsys, execinfo, [
' File "<string>", line 3',
' 1.foo',
' ^',
'LexException: Cannot access attribute on anything other'
' than a name (in order to get attributes of expressions,'
' use `(. <expression> <attr>)` or `(.<attr> <expression>)`)'])
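# Hedged illustration (not part of the Hy test suite): nested expressions
# tokenize into nested model objects in the same way the tests above exercise;
# the exact program below is just an assumed example.
def _tokenize_sketch():
    objs = tokenize("(print (+ 1 2))")
    assert objs == [HyExpression([HySymbol("print"),
                                  HyExpression([HySymbol("+"),
                                                HyInteger(1),
                                                HyInteger(2)])])]
    return objs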
|
import os
import metricbeat
import unittest
KUBERNETES_FIELDS = metricbeat.COMMON_FIELDS + ["kubernetes"]
class Test(metricbeat.BaseTest):
@unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test")
def test_kubelet_node(self):
""" Kubernetes kubelet node metricset tests """
self._test_metricset('node', 1, self.get_kubelet_hosts())
@unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test")
def test_kubelet_system(self):
""" Kubernetes kubelet system metricset tests """
self._test_metricset('system', 2, self.get_kubelet_hosts())
@unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test")
def test_kubelet_pod(self):
""" Kubernetes kubelet pod metricset tests """
self._test_metricset('pod', 1, self.get_kubelet_hosts())
@unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test")
def test_kubelet_container(self):
""" Kubernetes kubelet container metricset tests """
self._test_metricset('container', 1, self.get_kubelet_hosts())
@unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test")
def test_state_node(self):
""" Kubernetes state node metricset tests """
self._test_metricset('state_node', 1, self.get_kube_state_hosts())
@unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test")
def test_state_pod(self):
""" Kubernetes state pod metricset tests """
self._test_metricset('state_pod', 1, self.get_kube_state_hosts())
@unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test")
def test_state_container(self):
""" Kubernetes state container metricset tests """
self._test_metricset('state_container', 1, self.get_kube_state_hosts())
def _test_metricset(self, metricset, expected_events, hosts):
self.render_config_template(modules=[{
"name": "kubernetes",
"enabled": "true",
"metricsets": [metricset],
"hosts": hosts,
"period": "5s"
}])
proc = self.start_beat()
self.wait_until(lambda: self.output_lines() > 0)
proc.check_kill_and_wait()
# Ensure no errors or warnings exist in the log.
log = self.get_log()
self.assertNotRegexpMatches(log.replace("WARN BETA", ""), "ERR|WARN")
output = self.read_output_json()
self.assertEqual(len(output), expected_events)
evt = output[0]
self.assertItemsEqual(self.de_dot(KUBERNETES_FIELDS), evt.keys(), evt)
self.assert_fields_are_documented(evt)
@classmethod
def get_kubelet_hosts(cls):
return [
"http://" +
os.getenv('KUBELET_HOST', 'localhost') + ':' +
os.getenv('KUBELET_PORT', '10255')
]
@classmethod
def get_kube_state_hosts(cls):
return [
"http://" +
os.getenv('KUBE_STATE_METRICS_HOST', 'localhost') + ':' +
os.getenv('KUBE_STATE_METRICS_PORT', '18080')
]
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import oneflow as flow
import oneflow.typing as oft
import oneflow._oneflow_internal
from typing import Tuple
@flow.unittest.skip_unless_1n4d()
class TestFunctionInputOutput(flow.unittest.TestCase):
def test_FixedTensorDef(test_case):
@flow.global_function()
def Foo(x: oft.Numpy.Placeholder((2, 5))):
return x
data = np.ones((2, 5), dtype=np.float32)
of_ret = Foo(data).get()
test_case.assertEqual(of_ret.numpy().max(), 1)
test_case.assertEqual(of_ret.numpy().min(), 1)
test_case.assertTrue(np.allclose(of_ret.numpy(), data))
def test_FixedTensorDef_2_device(test_case):
flow.config.gpu_device_num(2)
@flow.global_function()
def Foo(x: oft.Numpy.Placeholder((2, 5))):
return x
data = np.ones((2, 5), dtype=np.float32)
of_ret = Foo(data).get()
test_case.assertEqual(of_ret.numpy().max(), 1)
test_case.assertEqual(of_ret.numpy().min(), 1)
test_case.assertTrue(np.allclose(of_ret.numpy(), data))
def test_MirroredTensorDef(test_case):
func_config = flow.FunctionConfig()
func_config.default_logical_view(flow.scope.mirrored_view())
@flow.global_function(function_config=func_config)
def Foo(x: oft.ListNumpy.Placeholder((2, 5))):
return x
data = np.ones((1, 5), dtype=np.float32)
ndarray_list = Foo([data]).get().numpy_list()
test_case.assertEqual(len(ndarray_list), 1)
test_case.assertTrue(np.allclose(ndarray_list[0], data))
if __name__ == "__main__":
unittest.main()
|
import pytest
pytest.importorskip("foo")
from stories.contrib.sentry.django import ( # FIXME: # isort:skip # pragma: no cover # noqa
DjangoClient,
)
|
import enum
from typing import Any
import click
import pandas as pd
import numpy as np
import structlog
import pathlib
import pydantic
import datetime
import zoltpy.util
from covidactnow.datapublic import common_init, common_df
from scripts import helpers
from covidactnow.datapublic.common_fields import (
GetByValueMixin,
CommonFields,
FieldNameAndCommonField,
)
DATA_ROOT = pathlib.Path(__file__).parent.parent / "data"
_logger = structlog.get_logger(__name__)
class ForecastModel(enum.Enum):
""""""
ENSEMBLE = "COVIDhub-ensemble"
BASELINE = "COVIDhub-baseline"
GOOGLE = "Google_Harvard-CPF"
class Fields(GetByValueMixin, FieldNameAndCommonField, enum.Enum):
MODEL_ABBR = "model_abbr", CommonFields.MODEL_ABBR
REGION = "unit", CommonFields.FIPS
FORECAST_DATE = "forecast_date", CommonFields.FORECAST_DATE
TARGET_DATE = "target_date", CommonFields.DATE
QUANTILE = "quantile", CommonFields.QUANTILE
WEEKLY_NEW_CASES = "case", CommonFields.WEEKLY_NEW_CASES
WEEKLY_NEW_DEATHS = "death", CommonFields.WEEKLY_NEW_DEATHS
class ForecastHubUpdater(pydantic.BaseModel):
"""Updates Forecast Lab Data Set with the Latest Available Forecast
"""
FORECAST_PROJECT_NAME = "COVID-19 Forecasts"
RAW_CSV_FILENAME = "raw.csv"
conn: Any # A valid zoltpy connection
model: ForecastModel # The model to cache from Zoltar
raw_data_root: pathlib.Path
timeseries_output_path: pathlib.Path
@classmethod
def make_with_data_root(
cls, model: ForecastModel, conn: Any, data_root: pathlib.Path,
) -> "ForecastHubUpdater":
return cls(
model=model,
conn=conn,
raw_data_root=data_root / "forecast-hub",
timeseries_output_path=data_root / "forecast-hub" / "timeseries-common.csv",
)
@property
def raw_path(self):
return self.raw_data_root / self.RAW_CSV_FILENAME
def write_version_file(self, forecast_date) -> None:
stamp = datetime.datetime.utcnow().isoformat()
version_path = self.raw_data_root / "version.txt"
with version_path.open("w") as vf:
vf.write(f"Updated on {stamp}\n")
vf.write(f"Using forecast from {forecast_date}\n")
def update_source_data(self):
"""
See https://github.com/reichlab/zoltpy/tree/master for instructions.
Note: Requires environment variables for Z_USERNAME and Z_PASSWORD with correct
permissions.
"""
_logger.info(f"Updating {self.model.name} from ForecastHub")
latest_forecast_date = get_latest_forecast_date(
self.conn, self.FORECAST_PROJECT_NAME, self.model.value
)
# TODO: Save a call to the Forecast Hub by checking if latest_forecast_date is newer than
# the current one saved in version.txt. We expect the cache to be invalidated only once a
# week.
ensemble = zoltpy.util.download_forecast(
self.conn, self.FORECAST_PROJECT_NAME, self.model.value, latest_forecast_date
)
df = zoltpy.util.dataframe_from_json_io_dict(ensemble)
df["forecast_date"] = pd.to_datetime(latest_forecast_date)
df["model_abbr"] = self.model.value
df.to_csv(self.raw_path, index=False)
self.write_version_file(forecast_date=latest_forecast_date)
def load_source_data(self) -> pd.DataFrame:
_logger.info("Updating ForecastHub Ensemble dataset.")
data = pd.read_csv(
self.raw_path, parse_dates=["forecast_date"], dtype={"unit": str}, low_memory=False
)
return data
@staticmethod
def transform(df: pd.DataFrame) -> pd.DataFrame:
df["target_date"] = df.apply(
lambda x: x.forecast_date + pd.Timedelta(weeks=int(x.target.split(" ")[0])),
axis="columns",
)
# The targets have the form "X wk inc/cum cases/deaths"
# Take the final split (death/cases) and use that as target type
df["target_type"] = df.target.str.split(" ").str[-1]
# Take the penultimate split (inc/cum) and use that as aggregation type
df["target_summation"] = df.target.str.split(" ").str[-2]
masks = [
df["unit"] != "US", # Drop the national forecast
df["quantile"].notna(), # Point forecasts are duplicate of quantile = 0.5
df["target_summation"] == "inc", # Only return incidence values
# Some models return both incidence and cumulative values
# Only keep incidence targets (drop cumulative targets)
df["target_date"] <= df["forecast_date"] + pd.Timedelta(weeks=4)
# Time Horizon - Only keep up to 4 week forecasts.
# Almost all forecasts only provide 4 wks.
]
mask = np.logical_and.reduce(masks)
# The raw data is in long form and we need to pivot this to create a column for
# WEEKLY_NEW_CASES and WEEKLY_NEW_DEATHS. "target_type" has either death or cases. "value"
# has the predicted value. The rest of the columns create a unique index. For right now only
# one model and one forecast_date are being served, but we need to maintain the option of
# multiple values.
COLUMNS = [
Fields.MODEL_ABBR,
Fields.REGION,
Fields.FORECAST_DATE,
Fields.TARGET_DATE,
"target_type",
Fields.QUANTILE,
"value",
]
df = df[mask][COLUMNS].copy()
df = df.set_index(
[
Fields.MODEL_ABBR,
Fields.REGION,
Fields.FORECAST_DATE,
Fields.TARGET_DATE,
Fields.QUANTILE,
]
)
pivot = df.pivot(columns="target_type")
pivot = pivot.droplevel(level=0, axis=1).reset_index()
# This cleans up a MultiIndex Column that is an artifact of the pivot in preparation for a
# standard csv dump.
# Rename and remove any columns without a CommonField
data = helpers.rename_fields(pivot, Fields, set(), _logger)
# Need to make the quantiles into a wide form for easier downstream processing
# Mangling the column names into f"weekly_new_{cases/deaths}_{quantile}". This
# would be a good candidate to handle in long/tidy-form and we could remove both pivots.
# Using common_field because this is done after helpers.rename_fields
# TODO(michael): Not sure why pylint is confused about the common_field member not existing.
# pylint: disable=no-member
wide_df = data.set_index(
[
Fields.REGION.common_field,
Fields.TARGET_DATE.common_field,
Fields.MODEL_ABBR.common_field,
Fields.FORECAST_DATE.common_field,
]
).pivot(columns=Fields.QUANTILE.common_field)
# TODO: Once requirements have settled, explicitly pass only the quantiles needed.
wide_df.columns = [x[0] + "_" + str(x[1]) for x in wide_df.columns.to_flat_index()]
wide_df = wide_df.reset_index()
return wide_df
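# Illustrative shape of the wide frame returned by ``transform`` above
# (example column names only, not taken from the original data): after the
# second pivot the quantile columns are mangled into strings such as
# "weekly_new_cases_0.5" or "weekly_new_deaths_0.975", i.e.
# f"{common_field}_{quantile}", with one row per region, target date, model
# and forecast date.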
def get_latest_forecast_date(conn, project_name: str, model_abbr: str) -> str:
"""
Return the date string 'YYYY-MM-DD' of the latest submitted forecast for a given model in a
given zoltar project
https://github.com/reichlab/zoltpy/issues/42
Return the str date representation of the latest forecast if available, else the empty string.
"""
project = [project for project in conn.projects if project.name == project_name][0]
model = [model for model in project.models if model.abbreviation == model_abbr][0]
latest_forecast_date = model.latest_forecast.timezero.timezero_date
# Note: model.latest_forecast.timezero.timezero_date is of type datetime.datetime or None
if latest_forecast_date:
_logger.info(f"Latest forecast for {model_abbr} is {latest_forecast_date}")
return str(latest_forecast_date)
else:
_logger.info(f"No forecasts found for {model_abbr} in {project_name}")
return ""
@click.command()
@click.option("--fetch/--no-fetch", default=True)
def main(fetch: bool):
common_init.configure_logging()
connection = zoltpy.util.authenticate()
transformer = ForecastHubUpdater.make_with_data_root(
ForecastModel.ENSEMBLE, connection, DATA_ROOT
)
if fetch:
_logger.info("Fetching new data.")
transformer.update_source_data()
data = transformer.load_source_data()
data = transformer.transform(data)
common_df.write_csv(data, transformer.timeseries_output_path, _logger)
if __name__ == "__main__":
main() # pylint: disable=no-value-for-parameter
|
# Copyright 2020 Konstruktor, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import abstractmethod
from typing import Any
from serobj.utils.serobj_calls import SerobjCallsBase
class FilterBase(SerobjCallsBase):
"""
Base class for all pipe filters. Each filter should provide
an `execute` method that returns the score.
"""
_SEROBJ__ATTRS = []
@abstractmethod
def execute(self, data_packet: Any, **kwargs) -> float:
"""
        The method the pipe executes before sending data.

        :param data_packet: any data object
        :return: score (:class:`float`) carrying any user-defined value;
            in many cases the values 1 and 0 are enough.
"""
raise NotImplementedError
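# A minimal sketch of a concrete filter built on the contract documented
# above; "NonEmptyFilter" is an illustrative name and is not part of the
# original module.
class NonEmptyFilter(FilterBase):
    """Score 1.0 for truthy data packets and 0.0 for everything else."""

    _SEROBJ__ATTRS = []

    def execute(self, data_packet: Any, **kwargs) -> float:
        # Map the [1, 0] convention mentioned in the base docstring onto a
        # simple truthiness check of the incoming packet.
        return 1.0 if data_packet else 0.0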
|
"""
Find the first non-repeated character in a string
https://www.codeeval.com/open_challenges/12/
"""
import unittest
def first_unique_character(s):
    # Return the first character that occurs exactly once in s, or None if
    # every character repeats.
    return next((ch for ch in s if s.count(ch) == 1), None)
class FirstUniqueCharacterTest(unittest.TestCase):
def test_yellow(self):
self.assertEquals('y', first_unique_character('yellow'))
if __name__ == '__main__':
unittest.main(exit=False)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Filename: models.py
# Project: tests
# Author: Brian Cherinka
# Created: Friday, 15th February 2019 2:44:13 pm
# License: BSD 3-clause "New" or "Revised" License
# Copyright (c) 2019 Brian Cherinka
# Last Modified: Sunday, 3rd March 2019 4:47:18 pm
# Modified By: Brian Cherinka
from __future__ import print_function, division, absolute_import
from sqlalchemy import Column, String, BigInteger, Integer, Float
from .database import Base, engine, Session
import factory
import factory.fuzzy
from pytest_factoryboy import register
class ModelA(Base):
__tablename__ = 'modela'
pk = Column(BigInteger, primary_key=True)
name = Column(String, nullable=False)
x = Column(Integer, nullable=False)
y = Column(Integer, nullable=False)
def __repr__(self):
return f'<ModelA(pk={self.pk},name={self.name},x={self.x},y={self.y})>'
class ModelB(Base):
__tablename__ = 'modelb'
pk = Column(BigInteger, primary_key=True)
z = Column(Float, nullable=False)
def __repr__(self):
return f'<ModelB(pk={self.pk},z={self.z})>'
@register
class ModelAFactory(factory.alchemy.SQLAlchemyModelFactory):
class Meta:
model = ModelA
sqlalchemy_session = Session # the SQLAlchemy session object
pk = factory.Sequence(lambda n: n)
x = factory.Faker('pyint', min_value=0, max_value=20)
y = factory.Faker('pyint', min_value=0, max_value=20)
name = factory.fuzzy.FuzzyText(prefix='model', length=3)
@register
class ModelBFactory(factory.alchemy.SQLAlchemyModelFactory):
class Meta:
model = ModelB
sqlalchemy_session = Session # the SQLAlchemy session object
pk = factory.Sequence(lambda n: n)
z = factory.Faker('pyint', min_value=0, max_value=20)
Base.metadata.create_all(engine)
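# A hedged sketch of how the registered factories are typically consumed in a
# test; the fixture names assume pytest_factoryboy's usual convention (factory
# class name minus "Factory", snake_cased) and are not taken from this file:
#
#     def test_modela_defaults(model_a_factory):
#         obj = model_a_factory(name='modelX', x=1, y=2)
#         assert (obj.x, obj.y) == (1, 2)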
|
from __future__ import division, absolute_import, print_function
import itertools
import numpy as np
from numpy import exp
from numpy.testing import assert_, assert_equal
from scipy.optimize import root
def test_performance():
# Compare performance results to those listed in
# [Cheng & Li, IMA J. Num. An. 29, 814 (2008)]
# and
# [W. La Cruz, J.M. Martinez, M. Raydan, Math. Comp. 75, 1429 (2006)].
# and those produced by dfsane.f from M. Raydan's website.
#
# Where the results disagree, the largest limits are taken.
e_a = 1e-5
e_r = 1e-4
table_1 = [
dict(F=F_1, x0=x0_1, n=1000, nit=5, nfev=5),
dict(F=F_1, x0=x0_1, n=10000, nit=2, nfev=2),
dict(F=F_2, x0=x0_2, n=500, nit=11, nfev=11),
dict(F=F_2, x0=x0_2, n=2000, nit=11, nfev=11),
# dict(F=F_4, x0=x0_4, n=999, nit=243, nfev=1188), removed: too sensitive to rounding errors
dict(F=F_6, x0=x0_6, n=100, nit=6, nfev=6), # Results from dfsane.f; papers list nit=3, nfev=3
dict(F=F_7, x0=x0_7, n=99, nit=23, nfev=29), # Must have n%3==0, typo in papers?
dict(F=F_7, x0=x0_7, n=999, nit=23, nfev=29), # Must have n%3==0, typo in papers?
dict(F=F_9, x0=x0_9, n=100, nit=12, nfev=18), # Results from dfsane.f; papers list nit=nfev=6?
dict(F=F_9, x0=x0_9, n=1000, nit=12, nfev=18),
dict(F=F_10, x0=x0_10, n=1000, nit=5, nfev=5), # Results from dfsane.f; papers list nit=2, nfev=12
]
# Check also scaling invariance
for xscale, yscale, line_search in itertools.product([1.0, 1e-10, 1e10], [1.0, 1e-10, 1e10],
['cruz', 'cheng']):
for problem in table_1:
n = problem['n']
func = lambda x, n: yscale*problem['F'](x/xscale, n)
args = (n,)
x0 = problem['x0'](n) * xscale
fatol = np.sqrt(n) * e_a * yscale + e_r * np.linalg.norm(func(x0, n))
sigma_eps = 1e-10 * min(yscale/xscale, xscale/yscale)
sigma_0 = xscale/yscale
with np.errstate(over='ignore'):
sol = root(func, x0, args=args,
options=dict(ftol=0, fatol=fatol, maxfev=problem['nfev'] + 1,
sigma_0=sigma_0, sigma_eps=sigma_eps,
line_search=line_search),
method='DF-SANE')
err_msg = repr([xscale, yscale, line_search, problem, np.linalg.norm(func(sol.x, n)),
fatol, sol.success, sol.nit, sol.nfev])
assert_(sol.success, err_msg)
assert_(sol.nfev <= problem['nfev'] + 1, err_msg) # nfev+1: dfsane.f doesn't count first eval
assert_(sol.nit <= problem['nit'], err_msg)
assert_(np.linalg.norm(func(sol.x, n)) <= fatol, err_msg)
def test_complex():
def func(z):
return z**2 - 1 + 2j
x0 = 2.0j
ftol = 1e-4
sol = root(func, x0, tol=ftol, method='DF-SANE')
assert_(sol.success)
f0 = np.linalg.norm(func(x0))
fx = np.linalg.norm(func(sol.x))
assert_(fx <= ftol*f0)
def test_linear_definite():
# The DF-SANE paper proves convergence for "strongly isolated"
# solutions.
#
# For linear systems F(x) = A x - b = 0, with A positive or
# negative definite, the solution is strongly isolated.
def check_solvability(A, b, line_search='cruz'):
func = lambda x: A.dot(x) - b
xp = np.linalg.solve(A, b)
eps = np.linalg.norm(func(xp)) * 1e3
sol = root(func, b, options=dict(fatol=eps, ftol=0, maxfev=17523, line_search=line_search),
method='DF-SANE')
assert_(sol.success)
assert_(np.linalg.norm(func(sol.x)) <= eps)
n = 90
# Test linear pos.def. system
np.random.seed(1234)
A = np.arange(n*n).reshape(n, n)
A = A + n*n * np.diag(1 + np.arange(n))
assert_(np.linalg.eigvals(A).min() > 0)
b = np.arange(n) * 1.0
check_solvability(A, b, 'cruz')
check_solvability(A, b, 'cheng')
# Test linear neg.def. system
check_solvability(-A, b, 'cruz')
check_solvability(-A, b, 'cheng')
def test_shape():
def f(x, arg):
return x - arg
for dt in [float, complex]:
x = np.zeros([2,2])
arg = np.ones([2,2], dtype=dt)
sol = root(f, x, args=(arg,), method='DF-SANE')
assert_(sol.success)
assert_equal(sol.x.shape, x.shape)
# Some of the test functions and initial guesses listed in
# [W. La Cruz, M. Raydan. Optimization Methods and Software, 18, 583 (2003)]
def F_1(x, n):
g = np.zeros([n])
i = np.arange(2, n+1)
g[0] = exp(x[0] - 1) - 1
g[1:] = i*(exp(x[1:] - 1) - x[1:])
return g
def x0_1(n):
x0 = np.empty([n])
x0.fill(n/(n-1))
return x0
def F_2(x, n):
g = np.zeros([n])
i = np.arange(2, n+1)
g[0] = exp(x[0]) - 1
g[1:] = 0.1*i*(exp(x[1:]) + x[:-1] - 1)
return g
def x0_2(n):
x0 = np.empty([n])
x0.fill(1/n**2)
return x0
def F_4(x, n):
assert_equal(n % 3, 0)
g = np.zeros([n])
# Note: the first line is typoed in some of the references;
# correct in original [Gasparo, Optimization Meth. 13, 79 (2000)]
g[::3] = 0.6 * x[::3] + 1.6 * x[1::3]**3 - 7.2 * x[1::3]**2 + 9.6 * x[1::3] - 4.8
g[1::3] = 0.48 * x[::3] - 0.72 * x[1::3]**3 + 3.24 * x[1::3]**2 - 4.32 * x[1::3] - x[2::3] + 0.2 * x[2::3]**3 + 2.16
g[2::3] = 1.25 * x[2::3] - 0.25*x[2::3]**3
return g
def x0_4(n):
assert_equal(n % 3, 0)
x0 = np.array([-1, 1/2, -1] * (n//3))
return x0
def F_6(x, n):
c = 0.9
mu = (np.arange(1, n+1) - 0.5)/n
return x - 1/(1 - c/(2*n) * (mu[:,None]*x / (mu[:,None] + mu)).sum(axis=1))
def x0_6(n):
return np.ones([n])
def F_7(x, n):
assert_equal(n % 3, 0)
def phi(t):
v = 0.5*t - 2
v[t > -1] = ((-592*t**3 + 888*t**2 + 4551*t - 1924)/1998)[t > -1]
v[t >= 2] = (0.5*t + 2)[t >= 2]
return v
g = np.zeros([n])
g[::3] = 1e4 * x[1::3]**2 - 1
g[1::3] = exp(-x[::3]) + exp(-x[1::3]) - 1.0001
g[2::3] = phi(x[2::3])
return g
def x0_7(n):
assert_equal(n % 3, 0)
return np.array([1e-3, 18, 1] * (n//3))
def F_9(x, n):
g = np.zeros([n])
i = np.arange(2, n)
g[0] = x[0]**3/3 + x[1]**2/2
g[1:-1] = -x[1:-1]**2/2 + i*x[1:-1]**3/3 + x[2:]**2/2
g[-1] = -x[-1]**2/2 + n*x[-1]**3/3
return g
def x0_9(n):
return np.ones([n])
def F_10(x, n):
return np.log(1 + x) - x/n
def x0_10(n):
return np.ones([n])
|
from django.apps import AppConfig
class CecyrdConfig(AppConfig):
name = 'apps.cecyrd'
verbose_name = 'Evaluación del proveedor'
|
from sys import getsizeof
from typing import (
TYPE_CHECKING,
Any,
Hashable,
Iterable,
List,
Optional,
Sequence,
Tuple,
Union,
)
import warnings
import numpy as np
from pandas._config import get_option
from pandas._libs import algos as libalgos, index as libindex, lib
from pandas._libs.hashtable import duplicated_int64
from pandas._typing import AnyArrayLike, ArrayLike, Scalar
from pandas.compat.numpy import function as nv
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.util._decorators import Appender, cache_readonly
from pandas.core.dtypes.cast import coerce_indexer_dtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_platform_int,
is_categorical_dtype,
is_hashable,
is_integer,
is_iterator,
is_list_like,
is_object_dtype,
is_scalar,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import ABCDataFrame
from pandas.core.dtypes.missing import array_equivalent, isna
import pandas.core.algorithms as algos
from pandas.core.arrays import Categorical
from pandas.core.arrays.categorical import factorize_from_iterables
import pandas.core.common as com
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import (
Index,
InvalidIndexError,
_index_shared_docs,
ensure_index,
)
from pandas.core.indexes.frozen import FrozenList
import pandas.core.missing as missing
from pandas.core.sorting import (
get_group_index,
indexer_from_factorized,
lexsort_indexer,
)
from pandas.io.formats.printing import (
format_object_attrs,
format_object_summary,
pprint_thing,
)
if TYPE_CHECKING:
from pandas import Series # noqa:F401
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(
dict(klass="MultiIndex", target_klass="MultiIndex or list of tuples")
)
class MultiIndexUIntEngine(libindex.BaseMultiIndexCodesEngine, libindex.UInt64Engine):
"""
This class manages a MultiIndex by mapping label combinations to positive
integers.
"""
_base = libindex.UInt64Engine
def _codes_to_ints(self, codes):
"""
Transform combination(s) of uint64 in one uint64 (each), in a strictly
monotonic way (i.e. respecting the lexicographic order of integer
combinations): see BaseMultiIndexCodesEngine documentation.
Parameters
----------
codes : 1- or 2-dimensional array of dtype uint64
Combinations of integers (one per row)
Returns
-------
scalar or 1-dimensional array, of dtype uint64
Integer(s) representing one combination (each).
"""
# Shift the representation of each level by the pre-calculated number
# of bits:
codes <<= self.offsets
# Now sum and OR are in fact interchangeable. This is a simple
# composition of the (disjunct) significant bits of each level (i.e.
# each column in "codes") in a single positive integer:
if codes.ndim == 1:
# Single key
return np.bitwise_or.reduce(codes)
# Multiple keys
return np.bitwise_or.reduce(codes, axis=1)
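# Worked example of the shift-and-OR encoding above (illustrative numbers, not
# taken from the original source): with pre-computed offsets [2, 0], the codes
# row [5, 3] maps to (5 << 2) | (3 << 0) == 23, and comparing these integers
# reproduces the lexicographic order of the original code tuples.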
class MultiIndexPyIntEngine(libindex.BaseMultiIndexCodesEngine, libindex.ObjectEngine):
"""
This class manages those (extreme) cases in which the number of possible
label combinations overflows the 64 bits integers, and uses an ObjectEngine
containing Python integers.
"""
_base = libindex.ObjectEngine
def _codes_to_ints(self, codes):
"""
Transform combination(s) of uint64 in one Python integer (each), in a
strictly monotonic way (i.e. respecting the lexicographic order of
integer combinations): see BaseMultiIndexCodesEngine documentation.
Parameters
----------
codes : 1- or 2-dimensional array of dtype uint64
Combinations of integers (one per row)
Returns
-------
int, or 1-dimensional array of dtype object
Integer(s) representing one combination (each).
"""
# Shift the representation of each level by the pre-calculated number
# of bits. Since this can overflow uint64, first make sure we are
# working with Python integers:
codes = codes.astype("object") << self.offsets
# Now sum and OR are in fact interchangeable. This is a simple
# composition of the (disjunct) significant bits of each level (i.e.
# each column in "codes") in a single positive integer (per row):
if codes.ndim == 1:
# Single key
return np.bitwise_or.reduce(codes)
# Multiple keys
return np.bitwise_or.reduce(codes, axis=1)
class MultiIndex(Index):
"""
A multi-level, or hierarchical, index object for pandas objects.
Parameters
----------
levels : sequence of arrays
The unique labels for each level.
codes : sequence of arrays
Integers for each level designating which label at each location.
.. versionadded:: 0.24.0
sortorder : optional int
Level of sortedness (must be lexicographically sorted by that
level).
names : optional sequence of objects
Names for each of the index levels. (name is accepted for compat).
copy : bool, default False
Copy the meta-data.
verify_integrity : bool, default True
Check that the levels/codes are consistent and valid.
Attributes
----------
names
levels
codes
nlevels
levshape
Methods
-------
from_arrays
from_tuples
from_product
from_frame
set_levels
set_codes
to_frame
to_flat_index
is_lexsorted
sortlevel
droplevel
swaplevel
reorder_levels
remove_unused_levels
get_locs
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_product : Create a MultiIndex from the cartesian product
of iterables.
MultiIndex.from_tuples : Convert list of tuples to a MultiIndex.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Index : The base pandas Index type.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html>`_
for more.
Examples
--------
A new ``MultiIndex`` is typically constructed using one of the helper
methods :meth:`MultiIndex.from_arrays`, :meth:`MultiIndex.from_product`
and :meth:`MultiIndex.from_tuples`. For example (using ``.from_arrays``):
>>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
>>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))
MultiIndex([(1, 'red'),
(1, 'blue'),
(2, 'red'),
(2, 'blue')],
names=['number', 'color'])
See further examples for how to construct a MultiIndex in the doc strings
of the mentioned helper methods.
"""
_deprecations = Index._deprecations | frozenset()
# initialize to zero-length tuples to make everything work
_typ = "multiindex"
_names = FrozenList()
_levels = FrozenList()
_codes = FrozenList()
_comparables = ["names"]
rename = Index.set_names
_tuples = None
sortorder: Optional[int]
# --------------------------------------------------------------------
# Constructors
def __new__(
cls,
levels=None,
codes=None,
sortorder=None,
names=None,
dtype=None,
copy=False,
name=None,
verify_integrity: bool = True,
_set_identity: bool = True,
):
# compat with Index
if name is not None:
names = name
if levels is None or codes is None:
raise TypeError("Must pass both levels and codes")
if len(levels) != len(codes):
raise ValueError("Length of levels and codes must be the same.")
if len(levels) == 0:
raise ValueError("Must pass non-zero number of levels/codes")
result = object.__new__(MultiIndex)
# we've already validated levels and codes, so shortcut here
result._set_levels(levels, copy=copy, validate=False)
result._set_codes(codes, copy=copy, validate=False)
result._names = [None] * len(levels)
if names is not None:
# handles name validation
result._set_names(names)
if sortorder is not None:
result.sortorder = int(sortorder)
else:
result.sortorder = sortorder
if verify_integrity:
new_codes = result._verify_integrity()
result._codes = new_codes
if _set_identity:
result._reset_identity()
return result
def _validate_codes(self, level: List, code: List):
"""
Reassign code values as -1 if their corresponding levels are NaN.
Parameters
----------
code : list
Code to reassign.
level : list
Level to check for missing values (NaN, NaT, None).
Returns
-------
new code where code value = -1 if it corresponds
to a level with missing values (NaN, NaT, None).
"""
null_mask = isna(level)
if np.any(null_mask):
code = np.where(null_mask[code], -1, code)
return code
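    # Illustrative behaviour of ``_validate_codes`` (example values only): for
    # level ['a', NaN, 'c'] and code [0, 1, 2, 1], the positions that point at
    # the NaN level entry are rewritten to -1, giving [0, -1, 2, -1].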
def _verify_integrity(
self, codes: Optional[List] = None, levels: Optional[List] = None
):
"""
Parameters
----------
codes : optional list
Codes to check for validity. Defaults to current codes.
levels : optional list
Levels to check for validity. Defaults to current levels.
Raises
------
ValueError
If length of levels and codes don't match, if the codes for any
level would exceed level bounds, or there are any duplicate levels.
Returns
-------
new codes where code value = -1 if it corresponds to a
NaN level.
"""
        # NOTE: Currently does not check, among other things, that cached
        # nlevels matches nor that sortorder matches the actual sort order.
codes = codes or self.codes
levels = levels or self.levels
if len(levels) != len(codes):
raise ValueError(
"Length of levels and codes must match. NOTE: "
"this index is in an inconsistent state."
)
codes_length = len(codes[0])
for i, (level, level_codes) in enumerate(zip(levels, codes)):
if len(level_codes) != codes_length:
raise ValueError(
f"Unequal code lengths: {[len(code_) for code_ in codes]}"
)
if len(level_codes) and level_codes.max() >= len(level):
raise ValueError(
f"On level {i}, code max ({level_codes.max()}) >= length of "
f"level ({len(level)}). NOTE: this index is in an "
"inconsistent state"
)
if len(level_codes) and level_codes.min() < -1:
raise ValueError(f"On level {i}, code value ({level_codes.min()}) < -1")
if not level.is_unique:
raise ValueError(
f"Level values must be unique: {list(level)} on level {i}"
)
if self.sortorder is not None:
if self.sortorder > self._lexsort_depth():
raise ValueError(
"Value for sortorder must be inferior or equal to actual "
f"lexsort_depth: sortorder {self.sortorder} "
f"with lexsort_depth {self._lexsort_depth()}"
)
codes = [
self._validate_codes(level, code) for level, code in zip(levels, codes)
]
new_codes = FrozenList(codes)
return new_codes
@classmethod
def from_arrays(cls, arrays, sortorder=None, names=lib.no_default):
"""
Convert arrays to MultiIndex.
Parameters
----------
arrays : list / sequence of array-likes
Each array-like gives one level's value for each data point.
len(arrays) is the number of levels.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of str, optional
Names for the levels in the index.
Returns
-------
MultiIndex
See Also
--------
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Examples
--------
>>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
>>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))
MultiIndex([(1, 'red'),
(1, 'blue'),
(2, 'red'),
(2, 'blue')],
names=['number', 'color'])
"""
error_msg = "Input must be a list / sequence of array-likes."
if not is_list_like(arrays):
raise TypeError(error_msg)
elif is_iterator(arrays):
arrays = list(arrays)
# Check if elements of array are list-like
for array in arrays:
if not is_list_like(array):
raise TypeError(error_msg)
# Check if lengths of all arrays are equal or not,
# raise ValueError, if not
for i in range(1, len(arrays)):
if len(arrays[i]) != len(arrays[i - 1]):
raise ValueError("all arrays must be same length")
codes, levels = factorize_from_iterables(arrays)
if names is lib.no_default:
names = [getattr(arr, "name", None) for arr in arrays]
return MultiIndex(
levels=levels,
codes=codes,
sortorder=sortorder,
names=names,
verify_integrity=False,
)
@classmethod
def from_tuples(cls, tuples, sortorder=None, names=None):
"""
Convert list of tuples to MultiIndex.
Parameters
----------
tuples : list / sequence of tuple-likes
Each tuple is the index of one row/column.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of str, optional
Names for the levels in the index.
Returns
-------
MultiIndex
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Examples
--------
>>> tuples = [(1, 'red'), (1, 'blue'),
... (2, 'red'), (2, 'blue')]
>>> pd.MultiIndex.from_tuples(tuples, names=('number', 'color'))
MultiIndex([(1, 'red'),
(1, 'blue'),
(2, 'red'),
(2, 'blue')],
names=['number', 'color'])
"""
if not is_list_like(tuples):
raise TypeError("Input must be a list / sequence of tuple-likes.")
elif is_iterator(tuples):
tuples = list(tuples)
if len(tuples) == 0:
if names is None:
raise TypeError("Cannot infer number of levels from empty list")
arrays = [[]] * len(names)
elif isinstance(tuples, (np.ndarray, Index)):
if isinstance(tuples, Index):
tuples = tuples._values
arrays = list(lib.tuples_to_object_array(tuples).T)
elif isinstance(tuples, list):
arrays = list(lib.to_object_array_tuples(tuples).T)
else:
arrays = zip(*tuples)
return MultiIndex.from_arrays(arrays, sortorder=sortorder, names=names)
@classmethod
def from_product(cls, iterables, sortorder=None, names=lib.no_default):
"""
Make a MultiIndex from the cartesian product of multiple iterables.
Parameters
----------
iterables : list / sequence of iterables
Each iterable has unique labels for each level of the index.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of str, optional
Names for the levels in the index.
.. versionchanged:: 1.0.0
If not explicitly provided, names will be inferred from the
elements of iterables if an element has a name attribute
Returns
-------
MultiIndex
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Examples
--------
>>> numbers = [0, 1, 2]
>>> colors = ['green', 'purple']
>>> pd.MultiIndex.from_product([numbers, colors],
... names=['number', 'color'])
MultiIndex([(0, 'green'),
(0, 'purple'),
(1, 'green'),
(1, 'purple'),
(2, 'green'),
(2, 'purple')],
names=['number', 'color'])
"""
from pandas.core.reshape.util import cartesian_product
if not is_list_like(iterables):
raise TypeError("Input must be a list / sequence of iterables.")
elif is_iterator(iterables):
iterables = list(iterables)
codes, levels = factorize_from_iterables(iterables)
if names is lib.no_default:
names = [getattr(it, "name", None) for it in iterables]
codes = cartesian_product(codes)
return MultiIndex(levels, codes, sortorder=sortorder, names=names)
@classmethod
def from_frame(cls, df, sortorder=None, names=None):
"""
Make a MultiIndex from a DataFrame.
.. versionadded:: 0.24.0
Parameters
----------
df : DataFrame
DataFrame to be converted to MultiIndex.
sortorder : int, optional
Level of sortedness (must be lexicographically sorted by that
level).
names : list-like, optional
If no names are provided, use the column names, or tuple of column
names if the columns is a MultiIndex. If a sequence, overwrite
names with the given sequence.
Returns
-------
MultiIndex
The MultiIndex representation of the given DataFrame.
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables.
Examples
--------
>>> df = pd.DataFrame([['HI', 'Temp'], ['HI', 'Precip'],
... ['NJ', 'Temp'], ['NJ', 'Precip']],
... columns=['a', 'b'])
>>> df
a b
0 HI Temp
1 HI Precip
2 NJ Temp
3 NJ Precip
>>> pd.MultiIndex.from_frame(df)
MultiIndex([('HI', 'Temp'),
('HI', 'Precip'),
('NJ', 'Temp'),
('NJ', 'Precip')],
names=['a', 'b'])
Using explicit names, instead of the column names
>>> pd.MultiIndex.from_frame(df, names=['state', 'observation'])
MultiIndex([('HI', 'Temp'),
('HI', 'Precip'),
('NJ', 'Temp'),
('NJ', 'Precip')],
names=['state', 'observation'])
"""
if not isinstance(df, ABCDataFrame):
raise TypeError("Input must be a DataFrame")
column_names, columns = zip(*df.items())
names = column_names if names is None else names
return cls.from_arrays(columns, sortorder=sortorder, names=names)
# --------------------------------------------------------------------
@property
def _values(self):
# We override here, since our parent uses _data, which we don't use.
return self.values
@property
def values(self):
if self._tuples is not None:
return self._tuples
values = []
for i in range(self.nlevels):
vals = self._get_level_values(i)
if is_categorical_dtype(vals):
vals = vals._internal_get_values()
if isinstance(vals.dtype, ExtensionDtype) or hasattr(vals, "_box_values"):
vals = vals.astype(object)
vals = np.array(vals, copy=False)
values.append(vals)
self._tuples = lib.fast_zip(values)
return self._tuples
@property
def array(self):
"""
Raises a ValueError for `MultiIndex` because there's no single
array backing a MultiIndex.
Raises
------
ValueError
"""
raise ValueError(
"MultiIndex has no single backing array. Use "
"'MultiIndex.to_numpy()' to get a NumPy array of tuples."
)
@property
def shape(self):
"""
Return a tuple of the shape of the underlying data.
"""
# overriding the base Index.shape definition to avoid materializing
# the values (GH-27384, GH-27775)
return (len(self),)
def __len__(self) -> int:
return len(self.codes[0])
# --------------------------------------------------------------------
# Levels Methods
@cache_readonly
def levels(self):
# Use cache_readonly to ensure that self.get_locs doesn't repeatedly
# create new IndexEngine
# https://github.com/pandas-dev/pandas/issues/31648
result = [
x._shallow_copy(name=name) for x, name in zip(self._levels, self._names)
]
for level in result:
# disallow midx.levels[0].name = "foo"
level._no_setting_name = True
return FrozenList(result)
def _set_levels(
self, levels, level=None, copy=False, validate=True, verify_integrity=False
):
# This is NOT part of the levels property because it should be
# externally not allowed to set levels. User beware if you change
# _levels directly
if validate:
if len(levels) == 0:
raise ValueError("Must set non-zero number of levels.")
if level is None and len(levels) != self.nlevels:
raise ValueError("Length of levels must match number of levels.")
if level is not None and len(levels) != len(level):
raise ValueError("Length of levels must match length of level.")
if level is None:
new_levels = FrozenList(
ensure_index(lev, copy=copy)._shallow_copy() for lev in levels
)
else:
level_numbers = [self._get_level_number(lev) for lev in level]
new_levels = list(self._levels)
for lev_num, lev in zip(level_numbers, levels):
new_levels[lev_num] = ensure_index(lev, copy=copy)._shallow_copy()
new_levels = FrozenList(new_levels)
if verify_integrity:
new_codes = self._verify_integrity(levels=new_levels)
self._codes = new_codes
names = self.names
self._levels = new_levels
if any(names):
self._set_names(names)
self._tuples = None
self._reset_cache()
def set_levels(self, levels, level=None, inplace=False, verify_integrity=True):
"""
Set new levels on MultiIndex. Defaults to returning new index.
Parameters
----------
levels : sequence or list of sequence
New level(s) to apply.
level : int, level name, or sequence of int/level names (default None)
Level(s) to set (None for all levels).
inplace : bool
If True, mutates in place.
verify_integrity : bool, default True
If True, checks that levels and codes are compatible.
Returns
-------
new index (of same type and class...etc)
Examples
--------
>>> idx = pd.MultiIndex.from_tuples([(1, 'one'), (1, 'two'),
(2, 'one'), (2, 'two'),
(3, 'one'), (3, 'two')],
names=['foo', 'bar'])
>>> idx.set_levels([['a', 'b', 'c'], [1, 2]])
MultiIndex([('a', 1),
('a', 2),
('b', 1),
('b', 2),
('c', 1),
('c', 2)],
names=['foo', 'bar'])
>>> idx.set_levels(['a', 'b', 'c'], level=0)
MultiIndex([('a', 'one'),
('a', 'two'),
('b', 'one'),
('b', 'two'),
('c', 'one'),
('c', 'two')],
names=['foo', 'bar'])
>>> idx.set_levels(['a', 'b'], level='bar')
MultiIndex([(1, 'a'),
(1, 'b'),
(2, 'a'),
(2, 'b'),
(3, 'a'),
(3, 'b')],
names=['foo', 'bar'])
If any of the levels passed to ``set_levels()`` exceeds the
existing length, all of the values from that argument will
be stored in the MultiIndex levels, though the values will
be truncated in the MultiIndex output.
>>> idx.set_levels([['a', 'b', 'c'], [1, 2, 3, 4]], level=[0, 1])
MultiIndex([('a', 1),
('a', 2),
('b', 1),
('b', 2)],
names=['foo', 'bar'])
>>> idx.set_levels([['a', 'b', 'c'], [1, 2, 3, 4]], level=[0, 1]).levels
FrozenList([['a', 'b', 'c'], [1, 2, 3, 4]])
"""
if is_list_like(levels) and not isinstance(levels, Index):
levels = list(levels)
if level is not None and not is_list_like(level):
if not is_list_like(levels):
raise TypeError("Levels must be list-like")
if is_list_like(levels[0]):
raise TypeError("Levels must be list-like")
level = [level]
levels = [levels]
elif level is None or is_list_like(level):
if not is_list_like(levels) or not is_list_like(levels[0]):
raise TypeError("Levels must be list of lists-like")
if inplace:
idx = self
else:
idx = self._shallow_copy()
idx._reset_identity()
idx._set_levels(
levels, level=level, validate=True, verify_integrity=verify_integrity
)
if not inplace:
return idx
@property
def nlevels(self) -> int:
"""
Integer number of levels in this MultiIndex.
"""
return len(self._levels)
@property
def levshape(self):
"""
A tuple with the length of each level.
"""
return tuple(len(x) for x in self.levels)
# --------------------------------------------------------------------
# Codes Methods
@property
def codes(self):
return self._codes
def _set_codes(
self, codes, level=None, copy=False, validate=True, verify_integrity=False
):
if validate:
if level is None and len(codes) != self.nlevels:
raise ValueError("Length of codes must match number of levels")
if level is not None and len(codes) != len(level):
raise ValueError("Length of codes must match length of levels.")
if level is None:
new_codes = FrozenList(
_coerce_indexer_frozen(level_codes, lev, copy=copy).view()
for lev, level_codes in zip(self._levels, codes)
)
else:
level_numbers = [self._get_level_number(lev) for lev in level]
new_codes = list(self._codes)
for lev_num, level_codes in zip(level_numbers, codes):
lev = self.levels[lev_num]
new_codes[lev_num] = _coerce_indexer_frozen(level_codes, lev, copy=copy)
new_codes = FrozenList(new_codes)
if verify_integrity:
new_codes = self._verify_integrity(codes=new_codes)
self._codes = new_codes
self._tuples = None
self._reset_cache()
def set_codes(self, codes, level=None, inplace=False, verify_integrity=True):
"""
Set new codes on MultiIndex. Defaults to returning
new index.
.. versionadded:: 0.24.0
New name for deprecated method `set_labels`.
Parameters
----------
codes : sequence or list of sequence
New codes to apply.
level : int, level name, or sequence of int/level names (default None)
Level(s) to set (None for all levels).
inplace : bool
If True, mutates in place.
verify_integrity : bool (default True)
If True, checks that levels and codes are compatible.
Returns
-------
new index (of same type and class...etc)
Examples
--------
>>> idx = pd.MultiIndex.from_tuples([(1, 'one'),
(1, 'two'),
(2, 'one'),
(2, 'two')],
names=['foo', 'bar'])
>>> idx.set_codes([[1, 0, 1, 0], [0, 0, 1, 1]])
MultiIndex([(2, 'one'),
(1, 'one'),
(2, 'two'),
(1, 'two')],
names=['foo', 'bar'])
>>> idx.set_codes([1, 0, 1, 0], level=0)
MultiIndex([(2, 'one'),
(1, 'two'),
(2, 'one'),
(1, 'two')],
names=['foo', 'bar'])
>>> idx.set_codes([0, 0, 1, 1], level='bar')
MultiIndex([(1, 'one'),
(1, 'one'),
(2, 'two'),
(2, 'two')],
names=['foo', 'bar'])
>>> idx.set_codes([[1, 0, 1, 0], [0, 0, 1, 1]], level=[0, 1])
MultiIndex([(2, 'one'),
(1, 'one'),
(2, 'two'),
(1, 'two')],
names=['foo', 'bar'])
"""
if level is not None and not is_list_like(level):
if not is_list_like(codes):
raise TypeError("Codes must be list-like")
if is_list_like(codes[0]):
raise TypeError("Codes must be list-like")
level = [level]
codes = [codes]
elif level is None or is_list_like(level):
if not is_list_like(codes) or not is_list_like(codes[0]):
raise TypeError("Codes must be list of lists-like")
if inplace:
idx = self
else:
idx = self._shallow_copy()
idx._reset_identity()
idx._set_codes(codes, level=level, verify_integrity=verify_integrity)
if not inplace:
return idx
# --------------------------------------------------------------------
# Index Internals
@cache_readonly
def _engine(self):
# Calculate the number of bits needed to represent labels in each
# level, as log2 of their sizes (including -1 for NaN):
sizes = np.ceil(np.log2([len(l) + 1 for l in self.levels]))
# Sum bit counts, starting from the _right_....
lev_bits = np.cumsum(sizes[::-1])[::-1]
# ... in order to obtain offsets such that sorting the combination of
# shifted codes (one for each level, resulting in a unique integer) is
# equivalent to sorting lexicographically the codes themselves. Notice
# that each level needs to be shifted by the number of bits needed to
# represent the _previous_ ones:
offsets = np.concatenate([lev_bits[1:], [0]]).astype("uint64")
# Check the total number of bits needed for our representation:
if lev_bits[0] > 64:
# The levels would overflow a 64 bit uint - use Python integers:
return MultiIndexPyIntEngine(self.levels, self.codes, offsets)
return MultiIndexUIntEngine(self.levels, self.codes, offsets)
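    # Worked example of the sizing above (illustrative numbers only): for two
    # levels of lengths 7 and 3, sizes == [3, 2] bits, lev_bits == [5, 2] and
    # offsets == [2, 0]; the 5-bit total fits in a uint64, so the
    # MultiIndexUIntEngine is selected.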
@property
def _constructor(self):
return MultiIndex.from_tuples
@Appender(Index._shallow_copy.__doc__)
def _shallow_copy(self, values=None, **kwargs):
if values is not None:
names = kwargs.pop("names", kwargs.pop("name", self.names))
# discards freq
kwargs.pop("freq", None)
return MultiIndex.from_tuples(values, names=names, **kwargs)
return self.copy(**kwargs)
def _shallow_copy_with_infer(self, values, **kwargs):
# On equal MultiIndexes the difference is empty.
# Therefore, an empty MultiIndex is returned GH13490
if len(values) == 0:
return MultiIndex(
levels=[[] for _ in range(self.nlevels)],
codes=[[] for _ in range(self.nlevels)],
**kwargs,
)
return self._shallow_copy(values, **kwargs)
# --------------------------------------------------------------------
def copy(
self,
names=None,
dtype=None,
levels=None,
codes=None,
deep=False,
name=None,
_set_identity=False,
):
"""
Make a copy of this object. Names, dtype, levels and codes can be
passed and will be set on new copy.
Parameters
----------
names : sequence, optional
dtype : numpy dtype or pandas type, optional
levels : sequence, optional
codes : sequence, optional
deep : bool, default False
name : Label
Kept for compatibility with 1-dimensional Index. Should not be used.
Returns
-------
MultiIndex
Notes
-----
In most cases, there should be no functional difference from using
``deep``, but if ``deep`` is passed it will attempt to deepcopy.
This could be potentially expensive on large MultiIndex objects.
"""
names = self._validate_names(name=name, names=names, deep=deep)
if deep:
from copy import deepcopy
if levels is None:
levels = deepcopy(self.levels)
if codes is None:
codes = deepcopy(self.codes)
else:
if levels is None:
levels = self.levels
if codes is None:
codes = self.codes
return MultiIndex(
levels=levels,
codes=codes,
names=names,
sortorder=self.sortorder,
verify_integrity=False,
_set_identity=_set_identity,
)
def __array__(self, dtype=None) -> np.ndarray:
""" the array interface, return my values """
return self.values
def view(self, cls=None):
""" this is defined as a copy with the same identity """
result = self.copy()
result._id = self._id
return result
@Appender(Index.__contains__.__doc__)
def __contains__(self, key: Any) -> bool:
hash(key)
try:
self.get_loc(key)
return True
except (LookupError, TypeError, ValueError):
return False
@cache_readonly
def dtype(self) -> np.dtype:
return np.dtype("O")
def _is_memory_usage_qualified(self) -> bool:
""" return a boolean if we need a qualified .info display """
def f(l):
return "mixed" in l or "string" in l or "unicode" in l
return any(f(l) for l in self._inferred_type_levels)
@Appender(Index.memory_usage.__doc__)
def memory_usage(self, deep: bool = False) -> int:
# we are overwriting our base class to avoid
# computing .values here which could materialize
# a tuple representation unnecessarily
return self._nbytes(deep)
@cache_readonly
def nbytes(self) -> int:
""" return the number of bytes in the underlying data """
return self._nbytes(False)
def _nbytes(self, deep: bool = False) -> int:
"""
return the number of bytes in the underlying data
deeply introspect the level data if deep=True
include the engine hashtable
        *this is an internal routine*
"""
# for implementations with no useful getsizeof (PyPy)
objsize = 24
level_nbytes = sum(i.memory_usage(deep=deep) for i in self.levels)
label_nbytes = sum(i.nbytes for i in self.codes)
names_nbytes = sum(getsizeof(i, objsize) for i in self.names)
result = level_nbytes + label_nbytes + names_nbytes
# include our engine hashtable
result += self._engine.sizeof(deep=deep)
return result
# --------------------------------------------------------------------
# Rendering Methods
def _formatter_func(self, tup):
"""
Formats each item in tup according to its level's formatter function.
"""
formatter_funcs = [level._formatter_func for level in self.levels]
return tuple(func(val) for func, val in zip(formatter_funcs, tup))
def _format_data(self, name=None):
"""
Return the formatted data as a unicode string
"""
return format_object_summary(
self, self._formatter_func, name=name, line_break_each_value=True
)
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value).
"""
return format_object_attrs(self, include_dtype=False)
def _format_native_types(self, na_rep="nan", **kwargs):
new_levels = []
new_codes = []
# go through the levels and format them
for level, level_codes in zip(self.levels, self.codes):
level = level._format_native_types(na_rep=na_rep, **kwargs)
# add nan values, if there are any
mask = level_codes == -1
if mask.any():
nan_index = len(level)
level = np.append(level, na_rep)
assert not level_codes.flags.writeable # i.e. copy is needed
level_codes = level_codes.copy() # make writeable
level_codes[mask] = nan_index
new_levels.append(level)
new_codes.append(level_codes)
if len(new_levels) == 1:
# a single-level multi-index
return Index(new_levels[0].take(new_codes[0]))._format_native_types()
else:
# reconstruct the multi-index
mi = MultiIndex(
levels=new_levels,
codes=new_codes,
names=self.names,
sortorder=self.sortorder,
verify_integrity=False,
)
return mi.values
def format(
self,
space=2,
sparsify=None,
adjoin=True,
names=False,
na_rep=None,
formatter=None,
):
if len(self) == 0:
return []
stringified_levels = []
for lev, level_codes in zip(self.levels, self.codes):
na = na_rep if na_rep is not None else _get_na_rep(lev.dtype.type)
if len(lev) > 0:
formatted = lev.take(level_codes).format(formatter=formatter)
# we have some NA
mask = level_codes == -1
if mask.any():
formatted = np.array(formatted, dtype=object)
formatted[mask] = na
formatted = formatted.tolist()
else:
# weird all NA case
formatted = [
pprint_thing(na if isna(x) else x, escape_chars=("\t", "\r", "\n"))
for x in algos.take_1d(lev._values, level_codes)
]
stringified_levels.append(formatted)
result_levels = []
for lev, name in zip(stringified_levels, self.names):
level = []
if names:
level.append(
pprint_thing(name, escape_chars=("\t", "\r", "\n"))
if name is not None
else ""
)
level.extend(np.array(lev, dtype=object))
result_levels.append(level)
if sparsify is None:
sparsify = get_option("display.multi_sparse")
if sparsify:
sentinel = ""
# GH3547
# use value of sparsify as sentinel, unless it's an obvious
# "Truthy" value
if sparsify not in [True, 1]:
sentinel = sparsify
# little bit of a kludge job for #1217
result_levels = _sparsify(
result_levels, start=int(names), sentinel=sentinel
)
if adjoin:
from pandas.io.formats.format import _get_adjustment
adj = _get_adjustment()
return adj.adjoin(space, *result_levels).split("\n")
else:
return result_levels
# --------------------------------------------------------------------
# Names Methods
def _get_names(self):
return FrozenList(self._names)
def _set_names(self, names, level=None, validate=True):
"""
Set new names on index. Each name has to be a hashable type.
Parameters
----------
values : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
validate : boolean, default True
validate that the names match level lengths
Raises
------
TypeError if each name is not hashable.
Notes
-----
sets names on levels. WARNING: mutates!
Note that you generally want to set this *after* changing levels, so
that it only acts on copies
"""
# GH 15110
# Don't allow a single string for names in a MultiIndex
if names is not None and not is_list_like(names):
raise ValueError("Names should be list-like for a MultiIndex")
names = list(names)
if validate:
if level is not None and len(names) != len(level):
raise ValueError("Length of names must match length of level.")
if level is None and len(names) != self.nlevels:
raise ValueError(
"Length of names must match number of levels in MultiIndex."
)
if level is None:
level = range(self.nlevels)
else:
level = [self._get_level_number(lev) for lev in level]
# set the name
for lev, name in zip(level, names):
if name is not None:
# GH 20527
# All items in 'names' need to be hashable:
if not is_hashable(name):
raise TypeError(
f"{type(self).__name__}.name must be a hashable type"
)
self._names[lev] = name
# If .levels has been accessed, the names in our cache will be stale.
self._reset_cache()
names = property(
fset=_set_names, fget=_get_names, doc="""\nNames of levels in MultiIndex.\n"""
)
# --------------------------------------------------------------------
@Appender(Index._get_grouper_for_level.__doc__)
def _get_grouper_for_level(self, mapper, level):
indexer = self.codes[level]
level_index = self.levels[level]
if mapper is not None:
# Handle group mapping function and return
level_values = self.levels[level].take(indexer)
grouper = level_values.map(mapper)
return grouper, None, None
codes, uniques = algos.factorize(indexer, sort=True)
if len(uniques) > 0 and uniques[0] == -1:
# Handle NAs
mask = indexer != -1
ok_codes, uniques = algos.factorize(indexer[mask], sort=True)
codes = np.empty(len(indexer), dtype=indexer.dtype)
codes[mask] = ok_codes
codes[~mask] = -1
if len(uniques) < len(level_index):
# Remove unobserved levels from level_index
level_index = level_index.take(uniques)
else:
# break references back to us so that setting the name
# on the output of a groupby doesn't reflect back here.
level_index = level_index.copy()
if level_index._can_hold_na:
grouper = level_index.take(codes, fill_value=True)
else:
grouper = level_index.take(codes)
return grouper, codes, level_index
@cache_readonly
def inferred_type(self) -> str:
return "mixed"
def _get_level_number(self, level) -> int:
count = self.names.count(level)
if (count > 1) and not is_integer(level):
raise ValueError(
f"The name {level} occurs multiple times, use a level number"
)
try:
level = self.names.index(level)
except ValueError as err:
if not is_integer(level):
raise KeyError(f"Level {level} not found") from err
elif level < 0:
level += self.nlevels
if level < 0:
orig_level = level - self.nlevels
raise IndexError(
f"Too many levels: Index has only {self.nlevels} levels, "
f"{orig_level} is not a valid level number"
) from err
# Note: levels are zero-based
elif level >= self.nlevels:
raise IndexError(
f"Too many levels: Index has only {self.nlevels} levels, "
f"not {level + 1}"
) from err
return level
@property
def _has_complex_internals(self) -> bool:
# used to avoid libreduction code paths, which raise or require conversion
return True
@cache_readonly
def is_monotonic_increasing(self) -> bool:
"""
return if the index is monotonic increasing (only equal or
increasing) values.
"""
if all(x.is_monotonic for x in self.levels):
# If each level is sorted, we can operate on the codes directly. GH27495
return libalgos.is_lexsorted(
[x.astype("int64", copy=False) for x in self.codes]
)
# reversed() because lexsort() wants the most significant key last.
values = [
self._get_level_values(i).values for i in reversed(range(len(self.levels)))
]
try:
sort_order = np.lexsort(values)
return Index(sort_order).is_monotonic
except TypeError:
# we have mixed types and np.lexsort is not happy
return Index(self.values).is_monotonic
@cache_readonly
def is_monotonic_decreasing(self) -> bool:
"""
return if the index is monotonic decreasing (only equal or
decreasing) values.
"""
# monotonic decreasing if and only if reverse is monotonic increasing
return self[::-1].is_monotonic_increasing
@cache_readonly
def _inferred_type_levels(self):
""" return a list of the inferred types, one for each level """
return [i.inferred_type for i in self.levels]
@Appender(Index.duplicated.__doc__)
def duplicated(self, keep="first"):
shape = map(len, self.levels)
ids = get_group_index(self.codes, shape, sort=False, xnull=False)
return duplicated_int64(ids, keep)
def fillna(self, value=None, downcast=None):
"""
fillna is not implemented for MultiIndex
"""
raise NotImplementedError("isna is not defined for MultiIndex")
@Appender(Index.dropna.__doc__)
def dropna(self, how="any"):
nans = [level_codes == -1 for level_codes in self.codes]
if how == "any":
indexer = np.any(nans, axis=0)
elif how == "all":
indexer = np.all(nans, axis=0)
else:
raise ValueError(f"invalid how option: {how}")
new_codes = [level_codes[~indexer] for level_codes in self.codes]
return self.copy(codes=new_codes, deep=True)
def _get_level_values(self, level, unique=False):
"""
Return vector of label values for requested level,
equal to the length of the index
**this is an internal method**
Parameters
----------
level : int level
unique : bool, default False
if True, drop duplicated values
Returns
-------
values : ndarray
"""
lev = self.levels[level]
level_codes = self.codes[level]
name = self._names[level]
if unique:
level_codes = algos.unique(level_codes)
filled = algos.take_1d(lev._values, level_codes, fill_value=lev._na_value)
return lev._shallow_copy(filled, name=name)
def get_level_values(self, level):
"""
Return vector of label values for requested level,
equal to the length of the index.
Parameters
----------
level : int or str
``level`` is either the integer position of the level in the
MultiIndex, or the name of the level.
Returns
-------
values : Index
Values is a level of this MultiIndex converted to
a single :class:`Index` (or subclass thereof).
Examples
--------
Create a MultiIndex:
>>> mi = pd.MultiIndex.from_arrays((list('abc'), list('def')))
>>> mi.names = ['level_1', 'level_2']
Get level values by supplying level as either integer or name:
>>> mi.get_level_values(0)
Index(['a', 'b', 'c'], dtype='object', name='level_1')
>>> mi.get_level_values('level_2')
Index(['d', 'e', 'f'], dtype='object', name='level_2')
"""
level = self._get_level_number(level)
values = self._get_level_values(level)
return values
@Appender(Index.unique.__doc__)
def unique(self, level=None):
if level is None:
return super().unique()
else:
level = self._get_level_number(level)
return self._get_level_values(level=level, unique=True)
def _to_safe_for_reshape(self):
""" convert to object if we are a categorical """
return self.set_levels([i._to_safe_for_reshape() for i in self.levels])
def to_frame(self, index=True, name=None):
"""
Create a DataFrame with the levels of the MultiIndex as columns.
Column ordering is determined by the DataFrame constructor with data as
a dict.
.. versionadded:: 0.24.0
Parameters
----------
index : bool, default True
Set the index of the returned DataFrame as the original MultiIndex.
name : list / sequence of str, optional
The passed names should substitute index level names.
Returns
-------
DataFrame : a DataFrame containing the original MultiIndex data.
See Also
--------
DataFrame
"""
from pandas import DataFrame
if name is not None:
if not is_list_like(name):
raise TypeError("'name' must be a list / sequence of column names.")
if len(name) != len(self.levels):
raise ValueError(
"'name' should have same length as number of levels on index."
)
idx_names = name
else:
idx_names = self.names
# Guarantee resulting column order - PY36+ dict maintains insertion order
result = DataFrame(
{
(level if lvlname is None else lvlname): self._get_level_values(level)
for lvlname, level in zip(idx_names, range(len(self.levels)))
},
copy=False,
)
if index:
result.index = self
return result
def to_flat_index(self):
"""
Convert a MultiIndex to an Index of Tuples containing the level values.
.. versionadded:: 0.24.0
Returns
-------
pd.Index
Index with the MultiIndex data represented in Tuples.
Notes
-----
This method will simply return the caller if called by anything other
than a MultiIndex.
Examples
--------
>>> index = pd.MultiIndex.from_product(
... [['foo', 'bar'], ['baz', 'qux']],
... names=['a', 'b'])
>>> index.to_flat_index()
Index([('foo', 'baz'), ('foo', 'qux'),
('bar', 'baz'), ('bar', 'qux')],
dtype='object')
"""
return Index(self.values, tupleize_cols=False)
@property
def is_all_dates(self) -> bool:
return False
def is_lexsorted(self) -> bool:
"""
Return True if the codes are lexicographically sorted.
Returns
-------
bool
"""
return self.lexsort_depth == self.nlevels
@cache_readonly
def lexsort_depth(self):
if self.sortorder is not None:
return self.sortorder
return self._lexsort_depth()
def _lexsort_depth(self) -> int:
"""
Compute and return the lexsort_depth, the number of levels of the
MultiIndex that are sorted lexically
Returns
-------
int
"""
int64_codes = [ensure_int64(level_codes) for level_codes in self.codes]
for k in range(self.nlevels, 0, -1):
if libalgos.is_lexsorted(int64_codes[:k]):
return k
return 0
def _sort_levels_monotonic(self):
"""
This is an *internal* function.
Create a new MultiIndex from the current to monotonically sorted
items IN the levels. This does not actually make the entire MultiIndex
monotonic, JUST the levels.
The resulting MultiIndex will have the same outward
appearance, meaning the same .values and ordering. It will also
be .equals() to the original.
Returns
-------
MultiIndex
Examples
--------
>>> mi = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
... codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
>>> mi
MultiIndex([('a', 'bb'),
('a', 'aa'),
('b', 'bb'),
('b', 'aa')],
)
>>> mi.sort_values()
MultiIndex([('a', 'aa'),
('a', 'bb'),
('b', 'aa'),
('b', 'bb')],
)
"""
if self.is_lexsorted() and self.is_monotonic:
return self
new_levels = []
new_codes = []
for lev, level_codes in zip(self.levels, self.codes):
if not lev.is_monotonic:
try:
# indexer to reorder the levels
indexer = lev.argsort()
except TypeError:
pass
else:
lev = lev.take(indexer)
# indexer to reorder the level codes
indexer = ensure_int64(indexer)
ri = lib.get_reverse_indexer(indexer, len(indexer))
level_codes = algos.take_1d(ri, level_codes)
new_levels.append(lev)
new_codes.append(level_codes)
return MultiIndex(
new_levels,
new_codes,
names=self.names,
sortorder=self.sortorder,
verify_integrity=False,
)
def remove_unused_levels(self):
"""
Create a new MultiIndex from the current that removes
unused levels, meaning that they are not expressed in the labels.
The resulting MultiIndex will have the same outward
appearance, meaning the same .values and ordering. It will also
be .equals() to the original.
Returns
-------
MultiIndex
Examples
--------
>>> mi = pd.MultiIndex.from_product([range(2), list('ab')])
>>> mi
MultiIndex([(0, 'a'),
(0, 'b'),
(1, 'a'),
(1, 'b')],
)
>>> mi[2:]
MultiIndex([(1, 'a'),
(1, 'b')],
)
The 0 from the first level is not represented
and can be removed
>>> mi2 = mi[2:].remove_unused_levels()
>>> mi2.levels
FrozenList([[1], ['a', 'b']])
"""
new_levels = []
new_codes = []
changed = False
for lev, level_codes in zip(self.levels, self.codes):
# Since few levels are typically unused, bincount() is more
# efficient than unique() - however it only accepts positive values
# (and drops order):
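            # Illustrative example: with level_codes = [2, 2, 3],
            # np.bincount(level_codes + 1) gives [0, 0, 0, 2, 1], so
            # np.where(... > 0)[0] - 1 gives [2, 3], i.e. only codes 2 and 3
            # are in use (a leading -1 would flag missing values).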
uniques = np.where(np.bincount(level_codes + 1) > 0)[0] - 1
has_na = int(len(uniques) and (uniques[0] == -1))
if len(uniques) != len(lev) + has_na:
# We have unused levels
changed = True
# Recalculate uniques, now preserving order.
# Can easily be cythonized by exploiting the already existing
# "uniques" and stop parsing "level_codes" when all items
# are found:
uniques = algos.unique(level_codes)
if has_na:
na_idx = np.where(uniques == -1)[0]
# Just ensure that -1 is in first position:
uniques[[0, na_idx[0]]] = uniques[[na_idx[0], 0]]
# codes get mapped from uniques to 0:len(uniques)
# -1 (if present) is mapped to last position
code_mapping = np.zeros(len(lev) + has_na)
# ... and reassigned value -1:
code_mapping[uniques] = np.arange(len(uniques)) - has_na
level_codes = code_mapping[level_codes]
# new levels are simple
lev = lev.take(uniques[has_na:])
new_levels.append(lev)
new_codes.append(level_codes)
result = self.view()
if changed:
result._reset_identity()
result._set_levels(new_levels, validate=False)
result._set_codes(new_codes, validate=False)
return result
# --------------------------------------------------------------------
# Pickling Methods
def __reduce__(self):
"""Necessary for making this object picklable"""
d = dict(
levels=list(self.levels),
codes=list(self.codes),
sortorder=self.sortorder,
names=list(self.names),
)
return ibase._new_Index, (type(self), d), None
# --------------------------------------------------------------------
def __getitem__(self, key):
if is_scalar(key):
key = com.cast_scalar_indexer(key)
retval = []
for lev, level_codes in zip(self.levels, self.codes):
if level_codes[key] == -1:
retval.append(np.nan)
else:
retval.append(lev[level_codes[key]])
return tuple(retval)
else:
if com.is_bool_indexer(key):
key = np.asarray(key, dtype=bool)
sortorder = self.sortorder
else:
# cannot be sure whether the result will be sorted
sortorder = None
if isinstance(key, Index):
key = np.asarray(key)
new_codes = [level_codes[key] for level_codes in self.codes]
return MultiIndex(
levels=self.levels,
codes=new_codes,
names=self.names,
sortorder=sortorder,
verify_integrity=False,
)
@Appender(_index_shared_docs["take"] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs):
nv.validate_take(tuple(), kwargs)
indices = ensure_platform_int(indices)
taken = self._assert_take_fillable(
self.codes,
indices,
allow_fill=allow_fill,
fill_value=fill_value,
na_value=-1,
)
return MultiIndex(
levels=self.levels, codes=taken, names=self.names, verify_integrity=False
)
def _assert_take_fillable(
self, values, indices, allow_fill=True, fill_value=None, na_value=None
):
""" Internal method to handle NA filling of take """
# only fill if we are passing a non-None fill_value
if allow_fill and fill_value is not None:
if (indices < -1).any():
msg = (
"When allow_fill=True and fill_value is not None, "
"all indices must be >= -1"
)
raise ValueError(msg)
taken = [lab.take(indices) for lab in self.codes]
mask = indices == -1
if mask.any():
masked = []
for new_label in taken:
label_values = new_label
label_values[mask] = na_value
masked.append(np.asarray(label_values))
taken = masked
else:
taken = [lab.take(indices) for lab in self.codes]
return taken
def append(self, other):
"""
Append a collection of Index options together
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
"""
if not isinstance(other, (list, tuple)):
other = [other]
if all(
(isinstance(o, MultiIndex) and o.nlevels >= self.nlevels) for o in other
):
arrays = []
for i in range(self.nlevels):
label = self._get_level_values(i)
appended = [o._get_level_values(i) for o in other]
arrays.append(label.append(appended))
return MultiIndex.from_arrays(arrays, names=self.names)
to_concat = (self.values,) + tuple(k._values for k in other)
new_tuples = np.concatenate(to_concat)
# if all(isinstance(x, MultiIndex) for x in other):
try:
return MultiIndex.from_tuples(new_tuples, names=self.names)
except (TypeError, IndexError):
return Index(new_tuples)
def argsort(self, *args, **kwargs) -> np.ndarray:
return self.values.argsort(*args, **kwargs)
@Appender(_index_shared_docs["repeat"] % _index_doc_kwargs)
def repeat(self, repeats, axis=None):
nv.validate_repeat(tuple(), dict(axis=axis))
repeats = ensure_platform_int(repeats)
return MultiIndex(
levels=self.levels,
codes=[
level_codes.view(np.ndarray).astype(np.intp).repeat(repeats)
for level_codes in self.codes
],
names=self.names,
sortorder=self.sortorder,
verify_integrity=False,
)
def where(self, cond, other=None):
raise NotImplementedError(".where is not supported for MultiIndex operations")
def drop(self, codes, level=None, errors="raise"):
"""
Make new MultiIndex with passed list of codes deleted
Parameters
----------
codes : array-like
Must be a list of tuples
level : int or level name, default None
errors : str, default 'raise'
Returns
-------
dropped : MultiIndex
"""
if level is not None:
return self._drop_from_level(codes, level, errors)
if not isinstance(codes, (np.ndarray, Index)):
try:
codes = com.index_labels_to_array(codes, dtype=object)
except ValueError:
pass
inds = []
for level_codes in codes:
try:
loc = self.get_loc(level_codes)
# get_loc returns either an integer, a slice, or a boolean
# mask
if isinstance(loc, int):
inds.append(loc)
elif isinstance(loc, slice):
inds.extend(range(loc.start, loc.stop))
elif com.is_bool_indexer(loc):
if self.lexsort_depth == 0:
warnings.warn(
"dropping on a non-lexsorted multi-index "
"without a level parameter may impact performance.",
PerformanceWarning,
stacklevel=3,
)
loc = loc.nonzero()[0]
inds.extend(loc)
else:
msg = f"unsupported indexer of type {type(loc)}"
raise AssertionError(msg)
except KeyError:
if errors != "ignore":
raise
return self.delete(inds)
def _drop_from_level(self, codes, level, errors="raise"):
codes = com.index_labels_to_array(codes)
i = self._get_level_number(level)
index = self.levels[i]
values = index.get_indexer(codes)
mask = ~algos.isin(self.codes[i], values)
if mask.all() and errors != "ignore":
raise KeyError(f"labels {codes} not found in level")
return self[mask]
def swaplevel(self, i=-2, j=-1):
"""
Swap level i with level j.
Calling this method does not change the ordering of the values.
Parameters
----------
i : int, str, default -2
First level of index to be swapped. Can pass level name as string.
Type of parameters can be mixed.
j : int, str, default -1
Second level of index to be swapped. Can pass level name as string.
Type of parameters can be mixed.
Returns
-------
MultiIndex
A new MultiIndex.
See Also
--------
Series.swaplevel : Swap levels i and j in a MultiIndex.
        DataFrame.swaplevel : Swap levels i and j in a MultiIndex on a
particular axis.
Examples
--------
>>> mi = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
... codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
>>> mi
MultiIndex([('a', 'bb'),
('a', 'aa'),
('b', 'bb'),
('b', 'aa')],
)
>>> mi.swaplevel(0, 1)
MultiIndex([('bb', 'a'),
('aa', 'a'),
('bb', 'b'),
('aa', 'b')],
)
"""
new_levels = list(self.levels)
new_codes = list(self.codes)
new_names = list(self.names)
i = self._get_level_number(i)
j = self._get_level_number(j)
new_levels[i], new_levels[j] = new_levels[j], new_levels[i]
new_codes[i], new_codes[j] = new_codes[j], new_codes[i]
new_names[i], new_names[j] = new_names[j], new_names[i]
return MultiIndex(
levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
)
def reorder_levels(self, order):
"""
Rearrange levels using input order. May not drop or duplicate levels.
Parameters
----------
order : list of int or list of str
List representing new level order. Reference level by number
(position) or by key (label).
Returns
-------
MultiIndex
"""
order = [self._get_level_number(i) for i in order]
if len(order) != self.nlevels:
raise AssertionError(
f"Length of order must be same as number of levels ({self.nlevels}), "
f"got {len(order)}"
)
new_levels = [self.levels[i] for i in order]
new_codes = [self.codes[i] for i in order]
new_names = [self.names[i] for i in order]
return MultiIndex(
levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
)
def _get_codes_for_sorting(self):
"""
        Categorize our codes using the available categories (all, not just
        the observed ones), excluding any missing values (-1); this is in
        preparation for sorting, where we need to disambiguate that -1 is
        not a valid value.
"""
def cats(level_codes):
return np.arange(
np.array(level_codes).max() + 1 if len(level_codes) else 0,
dtype=level_codes.dtype,
)
return [
Categorical.from_codes(level_codes, cats(level_codes), ordered=True)
for level_codes in self.codes
]
def sortlevel(self, level=0, ascending=True, sort_remaining=True):
"""
Sort MultiIndex at the requested level. The result will respect the
original ordering of the associated factor at that level.
Parameters
----------
level : list-like, int or str, default 0
If a string is given, must be a name of the level.
If list-like must be names or ints of levels.
ascending : bool, default True
False to sort in descending order.
Can also be a list to specify a directed ordering.
        sort_remaining : bool, default True
            If True, also sort by the remaining levels after ``level``.
Returns
-------
sorted_index : pd.MultiIndex
Resulting index.
indexer : np.ndarray
Indices of output values in original index.
"""
if isinstance(level, (str, int)):
level = [level]
level = [self._get_level_number(lev) for lev in level]
sortorder = None
# we have a directed ordering via ascending
if isinstance(ascending, list):
if not len(level) == len(ascending):
raise ValueError("level must have same length as ascending")
indexer = lexsort_indexer(
[self.codes[lev] for lev in level], orders=ascending
)
# level ordering
else:
codes = list(self.codes)
shape = list(self.levshape)
# partition codes and shape
primary = tuple(codes[lev] for lev in level)
primshp = tuple(shape[lev] for lev in level)
# Reverse sorted to retain the order of
            # smaller indices that need to be removed
for lev in sorted(level, reverse=True):
codes.pop(lev)
shape.pop(lev)
if sort_remaining:
primary += primary + tuple(codes)
primshp += primshp + tuple(shape)
else:
sortorder = level[0]
indexer = indexer_from_factorized(primary, primshp, compress=False)
if not ascending:
indexer = indexer[::-1]
indexer = ensure_platform_int(indexer)
new_codes = [level_codes.take(indexer) for level_codes in self.codes]
new_index = MultiIndex(
codes=new_codes,
levels=self.levels,
names=self.names,
sortorder=sortorder,
verify_integrity=False,
)
return new_index, indexer
def reindex(self, target, method=None, level=None, limit=None, tolerance=None):
"""
Create index with target's values (move/add/delete values as necessary)
Returns
-------
new_index : pd.MultiIndex
Resulting index
indexer : np.ndarray or None
Indices of output values in original index.
"""
# GH6552: preserve names when reindexing to non-named target
# (i.e. neither Index nor Series).
preserve_names = not hasattr(target, "names")
if level is not None:
if method is not None:
raise TypeError("Fill method not supported if level passed")
# GH7774: preserve dtype/tz if target is empty and not an Index.
# target may be an iterator
target = ibase._ensure_has_len(target)
if len(target) == 0 and not isinstance(target, Index):
idx = self.levels[level]
attrs = idx._get_attributes_dict()
attrs.pop("freq", None) # don't preserve freq
target = type(idx)._simple_new(np.empty(0, dtype=idx.dtype), **attrs)
else:
target = ensure_index(target)
target, indexer, _ = self._join_level(
target, level, how="right", return_indexers=True, keep_order=False
)
else:
target = ensure_index(target)
if self.equals(target):
indexer = None
else:
if self.is_unique:
indexer = self.get_indexer(
target, method=method, limit=limit, tolerance=tolerance
)
else:
raise ValueError("cannot handle a non-unique multi-index!")
if not isinstance(target, MultiIndex):
if indexer is None:
target = self
elif (indexer >= 0).all():
target = self.take(indexer)
else:
# hopefully?
target = MultiIndex.from_tuples(target)
if (
preserve_names
and target.nlevels == self.nlevels
and target.names != self.names
):
target = target.copy(deep=False)
target.names = self.names
return target, indexer
# --------------------------------------------------------------------
# Indexing Methods
def get_value(self, series, key):
# Label-based
if not is_hashable(key) or is_iterator(key):
# We allow tuples if they are hashable, whereas other Index
# subclasses require scalar.
# We have to explicitly exclude generators, as these are hashable.
raise InvalidIndexError(key)
try:
loc = self.get_loc(key)
except KeyError:
if is_integer(key):
loc = key
else:
raise
return self._get_values_for_loc(series, loc, key)
def _get_values_for_loc(self, series: "Series", loc, key):
"""
Do a positional lookup on the given Series, returning either a scalar
or a Series.
Assumes that `series.index is self`
"""
new_values = series._values[loc]
if is_scalar(loc):
return new_values
new_index = self[loc]
new_index = maybe_droplevels(new_index, key)
new_ser = series._constructor(new_values, index=new_index, name=series.name)
return new_ser.__finalize__(series)
def _convert_listlike_indexer(self, keyarr):
"""
Parameters
----------
keyarr : list-like
Indexer to convert.
Returns
-------
tuple (indexer, keyarr)
indexer is an ndarray or None if cannot convert
keyarr are tuple-safe keys
"""
indexer, keyarr = super()._convert_listlike_indexer(keyarr)
# are we indexing a specific level
if indexer is None and len(keyarr) and not isinstance(keyarr[0], tuple):
level = 0
_, indexer = self.reindex(keyarr, level=level)
# take all
if indexer is None:
indexer = np.arange(len(self))
check = self.levels[0].get_indexer(keyarr)
mask = check == -1
if mask.any():
raise KeyError(f"{keyarr[mask]} not in index")
return indexer, keyarr
def _get_partial_string_timestamp_match_key(self, key):
"""
Translate any partial string timestamp matches in key, returning the
new key.
Only relevant for MultiIndex.
"""
# GH#10331
if isinstance(key, str) and self.levels[0]._supports_partial_string_indexing:
# Convert key '2016-01-01' to
# ('2016-01-01'[, slice(None, None, None)]+)
key = tuple([key] + [slice(None)] * (len(self.levels) - 1))
if isinstance(key, tuple):
# Convert (..., '2016-01-01', ...) in tuple to
# (..., slice('2016-01-01', '2016-01-01', None), ...)
new_key = []
for i, component in enumerate(key):
if (
isinstance(component, str)
and self.levels[i]._supports_partial_string_indexing
):
new_key.append(slice(component, component, None))
else:
new_key.append(component)
key = tuple(new_key)
return key
@Appender(_index_shared_docs["get_indexer"] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
method = missing.clean_reindex_fill_method(method)
target = ensure_index(target)
# empty indexer
if is_list_like(target) and not len(target):
return ensure_platform_int(np.array([]))
if not isinstance(target, MultiIndex):
try:
target = MultiIndex.from_tuples(target)
except (TypeError, ValueError):
# let's instead try with a straight Index
if method is None:
return Index(self.values).get_indexer(
target, method=method, limit=limit, tolerance=tolerance
)
if not self.is_unique:
raise ValueError("Reindexing only valid with uniquely valued Index objects")
if method == "pad" or method == "backfill":
if tolerance is not None:
raise NotImplementedError(
"tolerance not implemented yet for MultiIndex"
)
indexer = self._engine.get_indexer(target, method, limit)
elif method == "nearest":
raise NotImplementedError(
"method='nearest' not implemented yet "
"for MultiIndex; see GitHub issue 9365"
)
else:
indexer = self._engine.get_indexer(target)
return ensure_platform_int(indexer)
@Appender(_index_shared_docs["get_indexer_non_unique"] % _index_doc_kwargs)
def get_indexer_non_unique(self, target):
return super().get_indexer_non_unique(target)
def get_slice_bound(
self, label: Union[Hashable, Sequence[Hashable]], side: str, kind: str
) -> int:
"""
For an ordered MultiIndex, compute slice bound
that corresponds to given label.
        Returns leftmost (one-past-the-rightmost if ``side=='right'``) position
of given label.
Parameters
----------
label : object or tuple of objects
side : {'left', 'right'}
kind : {'loc', 'getitem'}
Returns
-------
int
Index of label.
Notes
-----
This method only works if level 0 index of the MultiIndex is lexsorted.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abbc'), list('gefd')])
Get the locations from the leftmost 'b' in the first level
until the end of the multiindex:
>>> mi.get_slice_bound('b', side="left", kind="loc")
1
Like above, but if you get the locations from the rightmost
'b' in the first level and 'f' in the second level:
>>> mi.get_slice_bound(('b','f'), side="right", kind="loc")
3
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
"""
if not isinstance(label, tuple):
label = (label,)
return self._partial_tup_index(label, side=side)
def slice_locs(self, start=None, end=None, step=None, kind=None):
"""
For an ordered MultiIndex, compute the slice locations for input
labels.
The input labels can be tuples representing partial levels, e.g. for a
MultiIndex with 3 levels, you can pass a single value (corresponding to
the first level), or a 1-, 2-, or 3-tuple.
Parameters
----------
start : label or tuple, default None
If None, defaults to the beginning
end : label or tuple
If None, defaults to the end
step : int or None
Slice step
kind : string, optional, defaults None
Returns
-------
(start, end) : (int, int)
Notes
-----
This method only works if the MultiIndex is properly lexsorted. So,
if only the first 2 levels of a 3-level MultiIndex are lexsorted,
you can only pass two levels to ``.slice_locs``.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abbd'), list('deff')],
... names=['A', 'B'])
Get the slice locations from the beginning of 'b' in the first level
until the end of the multiindex:
>>> mi.slice_locs(start='b')
(1, 4)
Like above, but stop at the end of 'b' in the first level and 'f' in
the second level:
>>> mi.slice_locs(start='b', end=('b', 'f'))
(1, 3)
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
"""
# This function adds nothing to its parent implementation (the magic
# happens in get_slice_bound method), but it adds meaningful doc.
return super().slice_locs(start, end, step, kind=kind)
def _partial_tup_index(self, tup, side="left"):
if len(tup) > self.lexsort_depth:
raise UnsortedIndexError(
f"Key length ({len(tup)}) was greater than MultiIndex lexsort depth "
f"({self.lexsort_depth})"
)
n = len(tup)
start, end = 0, len(self)
zipped = zip(tup, self.levels, self.codes)
for k, (lab, lev, labs) in enumerate(zipped):
section = labs[start:end]
if lab not in lev and not isna(lab):
if not lev.is_type_compatible(lib.infer_dtype([lab], skipna=False)):
raise TypeError(f"Level type mismatch: {lab}")
# short circuit
loc = lev.searchsorted(lab, side=side)
if side == "right" and loc >= 0:
loc -= 1
return start + section.searchsorted(loc, side=side)
idx = self._get_loc_single_level_index(lev, lab)
if k < n - 1:
end = start + section.searchsorted(idx, side="right")
start = start + section.searchsorted(idx, side="left")
else:
return start + section.searchsorted(idx, side=side)
def _get_loc_single_level_index(self, level_index: Index, key: Hashable) -> int:
"""
        If key is an NA value, its location is unified as -1.
Parameters
----------
level_index: Index
key : label
Returns
-------
loc : int
If key is NA value, loc is -1
Else, location of key in index.
See Also
--------
Index.get_loc : The get_loc method for (single-level) index.
"""
if is_scalar(key) and isna(key):
return -1
else:
return level_index.get_loc(key)
def get_loc(self, key, method=None):
"""
Get location for a label or a tuple of labels as an integer, slice or
boolean mask.
Parameters
----------
key : label or tuple of labels (one for each level)
method : None
Returns
-------
loc : int, slice object or boolean mask
If the key is past the lexsort depth, the return may be a
boolean mask array, otherwise it is always a slice or int.
See Also
--------
Index.get_loc : The get_loc method for (single-level) index.
MultiIndex.slice_locs : Get slice location given start label(s) and
end label(s).
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
Notes
-----
The key cannot be a slice, list of same-level labels, a boolean mask,
or a sequence of such. If you want to use those, use
:meth:`MultiIndex.get_locs` instead.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')])
>>> mi.get_loc('b')
slice(1, 3, None)
>>> mi.get_loc(('b', 'e'))
1
"""
if method is not None:
raise NotImplementedError(
"only the default get_loc method is "
"currently supported for MultiIndex"
)
def _maybe_to_slice(loc):
"""convert integer indexer to boolean mask or slice if possible"""
if not isinstance(loc, np.ndarray) or loc.dtype != "int64":
return loc
loc = lib.maybe_indices_to_slice(loc, len(self))
if isinstance(loc, slice):
return loc
mask = np.empty(len(self), dtype="bool")
mask.fill(False)
mask[loc] = True
return mask
if not isinstance(key, (tuple, list)):
# not including list here breaks some indexing, xref #30892
loc = self._get_level_indexer(key, level=0)
return _maybe_to_slice(loc)
keylen = len(key)
if self.nlevels < keylen:
raise KeyError(
f"Key length ({keylen}) exceeds index depth ({self.nlevels})"
)
if keylen == self.nlevels and self.is_unique:
return self._engine.get_loc(key)
# -- partial selection or non-unique index
# break the key into 2 parts based on the lexsort_depth of the index;
# the first part returns a continuous slice of the index; the 2nd part
# needs linear search within the slice
i = self.lexsort_depth
lead_key, follow_key = key[:i], key[i:]
start, stop = (
self.slice_locs(lead_key, lead_key) if lead_key else (0, len(self))
)
if start == stop:
raise KeyError(key)
if not follow_key:
return slice(start, stop)
warnings.warn(
"indexing past lexsort depth may impact performance.",
PerformanceWarning,
stacklevel=10,
)
loc = np.arange(start, stop, dtype="int64")
for i, k in enumerate(follow_key, len(lead_key)):
mask = self.codes[i][loc] == self._get_loc_single_level_index(
self.levels[i], k
)
if not mask.all():
loc = loc[mask]
if not len(loc):
raise KeyError(key)
return _maybe_to_slice(loc) if len(loc) != stop - start else slice(start, stop)
def get_loc_level(self, key, level=0, drop_level: bool = True):
"""
Get both the location for the requested label(s) and the
resulting sliced index.
Parameters
----------
key : label or sequence of labels
level : int/level name or list thereof, optional
drop_level : bool, default True
If ``False``, the resulting index will not drop any level.
Returns
-------
loc : A 2-tuple where the elements are:
Element 0: int, slice object or boolean array
Element 1: The resulting sliced multiindex/index. If the key
contains all levels, this will be ``None``.
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')],
... names=['A', 'B'])
>>> mi.get_loc_level('b')
(slice(1, 3, None), Index(['e', 'f'], dtype='object', name='B'))
>>> mi.get_loc_level('e', level='B')
(array([False, True, False], dtype=bool),
Index(['b'], dtype='object', name='A'))
>>> mi.get_loc_level(['b', 'e'])
(1, None)
"""
# different name to distinguish from maybe_droplevels
def maybe_mi_droplevels(indexer, levels, drop_level: bool):
if not drop_level:
return self[indexer]
# kludgearound
orig_index = new_index = self[indexer]
levels = [self._get_level_number(i) for i in levels]
for i in sorted(levels, reverse=True):
try:
new_index = new_index.droplevel(i)
except ValueError:
# no dropping here
return orig_index
return new_index
if isinstance(level, (tuple, list)):
if len(key) != len(level):
raise AssertionError(
"Key for location must have same length as number of levels"
)
result = None
for lev, k in zip(level, key):
loc, new_index = self.get_loc_level(k, level=lev)
if isinstance(loc, slice):
mask = np.zeros(len(self), dtype=bool)
mask[loc] = True
loc = mask
result = loc if result is None else result & loc
return result, maybe_mi_droplevels(result, level, drop_level)
level = self._get_level_number(level)
# kludge for #1796
if isinstance(key, list):
key = tuple(key)
if isinstance(key, tuple) and level == 0:
try:
if key in self.levels[0]:
indexer = self._get_level_indexer(key, level=level)
new_index = maybe_mi_droplevels(indexer, [0], drop_level)
return indexer, new_index
except (TypeError, InvalidIndexError):
pass
if not any(isinstance(k, slice) for k in key):
# partial selection
# optionally get indexer to avoid re-calculation
def partial_selection(key, indexer=None):
if indexer is None:
indexer = self.get_loc(key)
ilevels = [
i for i in range(len(key)) if key[i] != slice(None, None)
]
return indexer, maybe_mi_droplevels(indexer, ilevels, drop_level)
if len(key) == self.nlevels and self.is_unique:
# Complete key in unique index -> standard get_loc
try:
return (self._engine.get_loc(key), None)
except KeyError as e:
raise KeyError(key) from e
else:
return partial_selection(key)
else:
indexer = None
for i, k in enumerate(key):
if not isinstance(k, slice):
k = self._get_level_indexer(k, level=i)
if isinstance(k, slice):
# everything
if k.start == 0 and k.stop == len(self):
k = slice(None, None)
else:
k_index = k
if isinstance(k, slice):
if k == slice(None, None):
continue
else:
raise TypeError(key)
if indexer is None:
indexer = k_index
else: # pragma: no cover
indexer &= k_index
if indexer is None:
indexer = slice(None, None)
ilevels = [i for i in range(len(key)) if key[i] != slice(None, None)]
return indexer, maybe_mi_droplevels(indexer, ilevels, drop_level)
else:
indexer = self._get_level_indexer(key, level=level)
return indexer, maybe_mi_droplevels(indexer, [level], drop_level)
def _get_level_indexer(self, key, level=0, indexer=None):
# return an indexer, boolean array or a slice showing where the key is
# in the totality of values
# if the indexer is provided, then use this
level_index = self.levels[level]
level_codes = self.codes[level]
def convert_indexer(start, stop, step, indexer=indexer, codes=level_codes):
# given the inputs and the codes/indexer, compute an indexer set
# if we have a provided indexer, then this need not consider
# the entire labels set
r = np.arange(start, stop, step)
if indexer is not None and len(indexer) != len(codes):
                # we have an indexer that maps locations within the labels we
                # have already selected (it is not an indexer over the entire
                # set); examining the full label set would be wasteful, so we
                # only look at locations in this subset. The result is then
                # mapped back to positions within the selected set.
from pandas import Series
mapper = Series(indexer)
indexer = codes.take(ensure_platform_int(indexer))
result = Series(Index(indexer).isin(r).nonzero()[0])
m = result.map(mapper)._ndarray_values
else:
m = np.zeros(len(codes), dtype=bool)
m[np.in1d(codes, r, assume_unique=Index(codes).is_unique)] = True
return m
if isinstance(key, slice):
# handle a slice, returning a slice if we can
# otherwise a boolean indexer
try:
if key.start is not None:
start = level_index.get_loc(key.start)
else:
start = 0
if key.stop is not None:
stop = level_index.get_loc(key.stop)
else:
stop = len(level_index) - 1
step = key.step
except KeyError:
# we have a partial slice (like looking up a partial date
# string)
start = stop = level_index.slice_indexer(
key.start, key.stop, key.step, kind="loc"
)
step = start.step
if isinstance(start, slice) or isinstance(stop, slice):
# we have a slice for start and/or stop
# a partial date slicer on a DatetimeIndex generates a slice
# note that the stop ALREADY includes the stopped point (if
# it was a string sliced)
start = getattr(start, "start", start)
stop = getattr(stop, "stop", stop)
return convert_indexer(start, stop, step)
elif level > 0 or self.lexsort_depth == 0 or step is not None:
            # need the same right-inclusive semantics as when we search
            # with a slice, so pass stop + 1 so that the stop label
            # itself is included
return convert_indexer(start, stop + 1, step)
else:
# sorted, so can return slice object -> view
i = level_codes.searchsorted(start, side="left")
j = level_codes.searchsorted(stop, side="right")
return slice(i, j, step)
else:
code = self._get_loc_single_level_index(level_index, key)
if level > 0 or self.lexsort_depth == 0:
# Desired level is not sorted
locs = np.array(level_codes == code, dtype=bool, copy=False)
if not locs.any():
# The label is present in self.levels[level] but unused:
raise KeyError(key)
return locs
i = level_codes.searchsorted(code, side="left")
j = level_codes.searchsorted(code, side="right")
if i == j:
# The label is present in self.levels[level] but unused:
raise KeyError(key)
return slice(i, j)
def get_locs(self, seq):
"""
Get location for a sequence of labels.
Parameters
----------
seq : label, slice, list, mask or a sequence of such
You should use one of the above for each level.
If a level should not be used, set it to ``slice(None)``.
Returns
-------
numpy.ndarray
NumPy array of integers suitable for passing to iloc.
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.slice_locs : Get slice location given start label(s) and
end label(s).
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')])
>>> mi.get_locs('b') # doctest: +SKIP
array([1, 2], dtype=int64)
>>> mi.get_locs([slice(None), ['e', 'f']]) # doctest: +SKIP
array([1, 2], dtype=int64)
>>> mi.get_locs([[True, False, True], slice('e', 'f')]) # doctest: +SKIP
array([2], dtype=int64)
"""
from pandas.core.indexes.numeric import Int64Index
# must be lexsorted to at least as many levels
true_slices = [i for (i, s) in enumerate(com.is_true_slices(seq)) if s]
if true_slices and true_slices[-1] >= self.lexsort_depth:
raise UnsortedIndexError(
"MultiIndex slicing requires the index to be lexsorted: slicing "
f"on levels {true_slices}, lexsort depth {self.lexsort_depth}"
)
# indexer
# this is the list of all values that we want to select
n = len(self)
indexer = None
def _convert_to_indexer(r):
# return an indexer
if isinstance(r, slice):
m = np.zeros(n, dtype=bool)
m[r] = True
r = m.nonzero()[0]
elif com.is_bool_indexer(r):
if len(r) != n:
raise ValueError(
"cannot index with a boolean indexer "
"that is not the same length as the "
"index"
)
r = r.nonzero()[0]
return Int64Index(r)
def _update_indexer(idxr, indexer=indexer):
if indexer is None:
indexer = Index(np.arange(n))
if idxr is None:
return indexer
return indexer & idxr
for i, k in enumerate(seq):
if com.is_bool_indexer(k):
# a boolean indexer, must be the same length!
k = np.asarray(k)
indexer = _update_indexer(_convert_to_indexer(k), indexer=indexer)
elif is_list_like(k):
# a collection of labels to include from this level (these
# are or'd)
indexers = None
for x in k:
try:
idxrs = _convert_to_indexer(
self._get_level_indexer(x, level=i, indexer=indexer)
)
indexers = idxrs if indexers is None else indexers | idxrs
except KeyError:
# ignore not founds
continue
if indexers is not None:
indexer = _update_indexer(indexers, indexer=indexer)
else:
# no matches we are done
return Int64Index([])._ndarray_values
elif com.is_null_slice(k):
# empty slice
indexer = _update_indexer(None, indexer=indexer)
elif isinstance(k, slice):
# a slice, include BOTH of the labels
indexer = _update_indexer(
_convert_to_indexer(
self._get_level_indexer(k, level=i, indexer=indexer)
),
indexer=indexer,
)
else:
# a single label
indexer = _update_indexer(
_convert_to_indexer(
self.get_loc_level(k, level=i, drop_level=False)[0]
),
indexer=indexer,
)
# empty indexer
if indexer is None:
return Int64Index([])._ndarray_values
indexer = self._reorder_indexer(seq, indexer)
return indexer._ndarray_values
def _reorder_indexer(
self, seq: Tuple[Union[Scalar, Iterable, AnyArrayLike], ...], indexer: ArrayLike
) -> ArrayLike:
"""
        Reorder an indexer of a MultiIndex (self) so that the labels are in the
same order as given in seq
Parameters
----------
seq : label/slice/list/mask or a sequence of such
indexer: an Int64Index indexer of self
Returns
-------
indexer : a sorted Int64Index indexer of self ordered as seq
"""
# If the index is lexsorted and the list_like label in seq are sorted
# then we do not need to sort
if self.is_lexsorted():
need_sort = False
for i, k in enumerate(seq):
if is_list_like(k):
if not need_sort:
k_codes = self.levels[i].get_indexer(k)
k_codes = k_codes[k_codes >= 0] # Filter absent keys
# True if the given codes are not ordered
need_sort = (k_codes[:-1] > k_codes[1:]).any()
# Bail out if both index and seq are sorted
if not need_sort:
return indexer
n = len(self)
keys: Tuple[np.ndarray, ...] = tuple()
# For each level of the sequence in seq, map the level codes with the
        # order they appear in the list-like sequence.
        # This mapping is then used to reorder the indexer.
for i, k in enumerate(seq):
if com.is_bool_indexer(k):
new_order = np.arange(n)[indexer]
elif is_list_like(k):
# Generate a map with all level codes as sorted initially
key_order_map = np.ones(len(self.levels[i]), dtype=np.uint64) * len(
self.levels[i]
)
# Set order as given in the indexer list
level_indexer = self.levels[i].get_indexer(k)
level_indexer = level_indexer[level_indexer >= 0] # Filter absent keys
key_order_map[level_indexer] = np.arange(len(level_indexer))
new_order = key_order_map[self.codes[i][indexer]]
else:
# For all other case, use the same order as the level
new_order = np.arange(n)[indexer]
keys = (new_order,) + keys
# Find the reordering using lexsort on the keys mapping
ind = np.lexsort(keys)
return indexer[ind]
def truncate(self, before=None, after=None):
"""
Slice index between two labels / tuples, return new MultiIndex
Parameters
----------
before : label or tuple, can be partial. Default None
None defaults to start
after : label or tuple, can be partial. Default None
None defaults to end
Returns
-------
truncated : MultiIndex
"""
if after and before and after < before:
raise ValueError("after < before")
i, j = self.levels[0].slice_locs(before, after)
left, right = self.slice_locs(before, after)
new_levels = list(self.levels)
new_levels[0] = new_levels[0][i:j]
new_codes = [level_codes[left:right] for level_codes in self.codes]
new_codes[0] = new_codes[0] - i
return MultiIndex(levels=new_levels, codes=new_codes, verify_integrity=False)
def equals(self, other) -> bool:
"""
Determines if two MultiIndex objects have the same labeling information
(the levels themselves do not necessarily have to be the same)
See Also
--------
equal_levels
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
if not isinstance(other, MultiIndex):
# d-level MultiIndex can equal d-tuple Index
if not is_object_dtype(other.dtype):
if self.nlevels != other.nlevels:
return False
other_vals = com.values_from_object(ensure_index(other))
return array_equivalent(self._ndarray_values, other_vals)
if self.nlevels != other.nlevels:
return False
if len(self) != len(other):
return False
for i in range(self.nlevels):
self_codes = self.codes[i]
self_codes = self_codes[self_codes != -1]
self_values = algos.take_nd(
np.asarray(self.levels[i]._values), self_codes, allow_fill=False
)
other_codes = other.codes[i]
other_codes = other_codes[other_codes != -1]
other_values = algos.take_nd(
np.asarray(other.levels[i]._values), other_codes, allow_fill=False
)
# since we use NaT both datetime64 and timedelta64
# we can have a situation where a level is typed say
# timedelta64 in self (IOW it has other values than NaT)
# but types datetime64 in other (where its all NaT)
# but these are equivalent
if len(self_values) == 0 and len(other_values) == 0:
continue
if not array_equivalent(self_values, other_values):
return False
return True
def equal_levels(self, other) -> bool:
"""
Return True if the levels of both MultiIndex objects are the same
"""
if self.nlevels != other.nlevels:
return False
for i in range(self.nlevels):
if not self.levels[i].equals(other.levels[i]):
return False
return True
# --------------------------------------------------------------------
# Set Methods
def union(self, other, sort=None):
"""
Form the union of two MultiIndex objects
Parameters
----------
other : MultiIndex or array / Index of tuples
sort : False or None, default None
Whether to sort the resulting Index.
* None : Sort the result, except when
1. `self` and `other` are equal.
2. `self` has length 0.
3. Some values in `self` or `other` cannot be compared.
A RuntimeWarning is issued in this case.
* False : do not sort the result.
.. versionadded:: 0.24.0
.. versionchanged:: 0.24.1
Changed the default value from ``True`` to ``None``
(without change in behaviour).
Returns
-------
Index
>>> index.union(index2)
"""
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other, result_names = self._convert_can_do_setop(other)
if len(other) == 0 or self.equals(other):
return self
# TODO: Index.union returns other when `len(self)` is 0.
uniq_tuples = lib.fast_unique_multiple(
[self._ndarray_values, other._ndarray_values], sort=sort
)
return MultiIndex.from_arrays(
zip(*uniq_tuples), sortorder=0, names=result_names
)
def intersection(self, other, sort=False):
"""
Form the intersection of two MultiIndex objects.
Parameters
----------
other : MultiIndex or array / Index of tuples
sort : False or None, default False
Sort the resulting MultiIndex if possible
.. versionadded:: 0.24.0
.. versionchanged:: 0.24.1
Changed the default from ``True`` to ``False``, to match
behaviour from before 0.24.0
Returns
-------
Index
"""
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other, result_names = self._convert_can_do_setop(other)
if self.equals(other):
return self
lvals = self._ndarray_values
rvals = other._ndarray_values
        uniq_tuples = None  # flag whether _inner_indexer was successful
if self.is_monotonic and other.is_monotonic:
try:
uniq_tuples = self._inner_indexer(lvals, rvals)[0]
sort = False # uniq_tuples is already sorted
except TypeError:
pass
if uniq_tuples is None:
other_uniq = set(rvals)
seen = set()
uniq_tuples = [
x for x in lvals if x in other_uniq and not (x in seen or seen.add(x))
]
if sort is None:
uniq_tuples = sorted(uniq_tuples)
if len(uniq_tuples) == 0:
return MultiIndex(
levels=self.levels,
codes=[[]] * self.nlevels,
names=result_names,
verify_integrity=False,
)
else:
return MultiIndex.from_arrays(
zip(*uniq_tuples), sortorder=0, names=result_names
)
def difference(self, other, sort=None):
"""
Compute set difference of two MultiIndex objects
Parameters
----------
other : MultiIndex
sort : False or None, default None
Sort the resulting MultiIndex if possible
.. versionadded:: 0.24.0
.. versionchanged:: 0.24.1
Changed the default value from ``True`` to ``None``
(without change in behaviour).
Returns
-------
diff : MultiIndex
"""
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other, result_names = self._convert_can_do_setop(other)
if len(other) == 0:
return self
if self.equals(other):
return MultiIndex(
levels=self.levels,
codes=[[]] * self.nlevels,
names=result_names,
verify_integrity=False,
)
this = self._get_unique_index()
indexer = this.get_indexer(other)
indexer = indexer.take((indexer != -1).nonzero()[0])
label_diff = np.setdiff1d(np.arange(this.size), indexer, assume_unique=True)
difference = this.values.take(label_diff)
if sort is None:
difference = sorted(difference)
if len(difference) == 0:
return MultiIndex(
levels=[[]] * self.nlevels,
codes=[[]] * self.nlevels,
names=result_names,
verify_integrity=False,
)
else:
return MultiIndex.from_tuples(difference, sortorder=0, names=result_names)
def _convert_can_do_setop(self, other):
result_names = self.names
if not hasattr(other, "names"):
if len(other) == 0:
other = MultiIndex(
levels=[[]] * self.nlevels,
codes=[[]] * self.nlevels,
verify_integrity=False,
)
else:
msg = "other must be a MultiIndex or a list of tuples"
try:
other = MultiIndex.from_tuples(other)
except TypeError as err:
raise TypeError(msg) from err
else:
result_names = self.names if self.names == other.names else None
return other, result_names
# --------------------------------------------------------------------
@Appender(Index.astype.__doc__)
def astype(self, dtype, copy=True):
dtype = pandas_dtype(dtype)
if is_categorical_dtype(dtype):
msg = "> 1 ndim Categorical are not supported at this time"
raise NotImplementedError(msg)
elif not is_object_dtype(dtype):
raise TypeError(
f"Setting {type(self)} dtype to anything other "
"than object is not supported"
)
elif copy is True:
return self._shallow_copy()
return self
def insert(self, loc: int, item):
"""
Make new MultiIndex inserting new item at location
Parameters
----------
loc : int
item : tuple
Must be same length as number of levels in the MultiIndex
Returns
-------
new_index : Index
"""
# Pad the key with empty strings if lower levels of the key
# aren't specified:
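        # (e.g. inserting 'x' into a 2-level MultiIndex becomes ('x', ''))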
if not isinstance(item, tuple):
item = (item,) + ("",) * (self.nlevels - 1)
elif len(item) != self.nlevels:
raise ValueError("Item must have length equal to number of levels.")
new_levels = []
new_codes = []
for k, level, level_codes in zip(item, self.levels, self.codes):
if k not in level:
# have to insert into level
# must insert at end otherwise you have to recompute all the
# other codes
lev_loc = len(level)
level = level.insert(lev_loc, k)
else:
lev_loc = level.get_loc(k)
new_levels.append(level)
new_codes.append(np.insert(ensure_int64(level_codes), loc, lev_loc))
return MultiIndex(
levels=new_levels, codes=new_codes, names=self.names, verify_integrity=False
)
def delete(self, loc):
"""
Make new index with passed location deleted
Returns
-------
new_index : MultiIndex
"""
new_codes = [np.delete(level_codes, loc) for level_codes in self.codes]
return MultiIndex(
levels=self.levels,
codes=new_codes,
names=self.names,
verify_integrity=False,
)
def _wrap_joined_index(self, joined, other):
names = self.names if self.names == other.names else None
return MultiIndex.from_tuples(joined, names=names)
@Appender(Index.isin.__doc__)
def isin(self, values, level=None):
if level is None:
values = MultiIndex.from_tuples(values, names=self.names).values
return algos.isin(self.values, values)
else:
num = self._get_level_number(level)
levs = self.get_level_values(num)
if levs.size == 0:
return np.zeros(len(levs), dtype=np.bool_)
return levs.isin(values)
MultiIndex._add_numeric_methods_disabled()
MultiIndex._add_numeric_methods_add_sub_disabled()
MultiIndex._add_logical_methods_disabled()
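# Illustrative example: _sparsify([['a', 'a', 'b'], ['x', 'y', 'z']]) returns
# [('a', '', 'b'), ('x', 'y', 'z')] -- repeated leading labels are replaced by
# the sentinel so a pretty-printed MultiIndex only shows each group once.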
def _sparsify(label_list, start: int = 0, sentinel=""):
pivoted = list(zip(*label_list))
k = len(label_list)
result = pivoted[: start + 1]
prev = pivoted[start]
for cur in pivoted[start + 1 :]:
sparse_cur = []
for i, (p, t) in enumerate(zip(prev, cur)):
if i == k - 1:
sparse_cur.append(t)
result.append(sparse_cur)
break
if p == t:
sparse_cur.append(sentinel)
else:
sparse_cur.extend(cur[i:])
result.append(sparse_cur)
break
prev = cur
return list(zip(*result))
def _get_na_rep(dtype) -> str:
return {np.datetime64: "NaT", np.timedelta64: "NaT"}.get(dtype, "NaN")
def maybe_droplevels(index, key):
"""
Attempt to drop level or levels from the given index.
Parameters
----------
index: Index
key : scalar or tuple
Returns
-------
Index
"""
# drop levels
original_index = index
if isinstance(key, tuple):
for _ in key:
try:
index = index.droplevel(0)
except ValueError:
# we have dropped too much, so back out
return original_index
else:
try:
index = index.droplevel(0)
except ValueError:
pass
return index
def _coerce_indexer_frozen(array_like, categories, copy: bool = False) -> np.ndarray:
"""
Coerce the array_like indexer to the smallest integer dtype that can encode all
of the given categories.
Parameters
----------
array_like : array-like
categories : array-like
copy : bool
Returns
-------
np.ndarray
Non-writeable.
"""
array_like = coerce_indexer_dtype(array_like, categories)
if copy:
array_like = array_like.copy()
array_like.flags.writeable = False
return array_like
|
from sql.entity import Entity
class StockItemGroup(Entity):
def __init__(self, data):
super().__init__(data)
self.code = self._get_str('Code') # Primary Key
self.description = self._get_str('Description')
self.is_active = self._get_bool('IsActive')
self.last_modified = self._get_int('LastModified')
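# Rough usage sketch (hypothetical field values; assumes the base Entity
# reads the given keys from a row dict supplied by the surrounding sql package):
#
#   row = {'Code': 'GRP-001', 'Description': 'Raw materials',
#          'IsActive': True, 'LastModified': 1590000000}
#   group = StockItemGroup(row)
#   print(group.code, group.is_active)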
|
# -*- coding: utf-8 -*-
"""
utils
~~~~~
"""
import numpy as np
def sizes_to_indices(sizes):
"""Converting sizes to corresponding indices.
Args:
sizes (numpy.dnarray):
An array consist of non-negative number.
Returns:
list{range}:
List the indices.
"""
u_id = np.cumsum(sizes)
l_id = np.insert(u_id[:-1], 0, 0)
return [
np.arange(l, u) for l, u in zip(l_id, u_id)
]
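# Illustrative usage:
#   sizes_to_indices(np.array([2, 3, 1]))
#   -> [array([0, 1]), array([2, 3, 4]), array([5])]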
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import qualities
class UnistraIE(InfoExtractor):
_VALID_URL = r'http://utv\.unistra\.fr/(?:index|video)\.php\?id_video\=(?P<id>\d+)'
_TESTS = [
{
'url': 'http://utv.unistra.fr/video.php?id_video=154',
'md5': '736f605cfdc96724d55bb543ab3ced24',
'info_dict': {
'id': '154',
'ext': 'mp4',
'title': 'M!ss Yella',
'description': 'md5:104892c71bd48e55d70b902736b81bbf',
},
},
{
'url': 'http://utv.unistra.fr/index.php?id_video=437',
'md5': '1ddddd6cccaae76f622ce29b8779636d',
'info_dict': {
'id': '437',
'ext': 'mp4',
'title': 'Prix Louise Weiss 2014',
'description': 'md5:cc3a8735f079f4fb6b0b570fc10c135a',
},
}
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
files = set(re.findall(r'file\s*:\s*"([^"]+)"', webpage))
quality = qualities(['SD', 'HD'])
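        # qualities() returns a ranking callable: 'SD' -> 0, 'HD' -> 1, so HD
        # formats are preferred when youtube-dl sorts the available formats.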
formats = []
for file_path in files:
format_id = 'HD' if file_path.endswith('-HD.mp4') else 'SD'
formats.append({
'url': 'http://vod-flash.u-strasbg.fr:8080%s' % file_path,
'format_id': format_id,
'quality': quality(format_id)
})
title = self._html_search_regex(
r'<title>UTV - (.*?)</', webpage, 'title')
description = self._html_search_regex(
r'<meta name="Description" content="(.*?)"', webpage, 'description', flags=re.DOTALL)
thumbnail = self._search_regex(
r'image: "(.*?)"', webpage, 'thumbnail')
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'formats': formats
}
|
import tensorflow as tf
import numpy as np
tf.set_random_seed(777) # reproducibility
sample = " if you want you"
idx2char = list(set(sample)) # index -> char
char2idx = {c: i for i, c in enumerate(idx2char)}  # char -> index
# hyper parameters
dic_size = len(char2idx) # RNN input size (one hot size)
rnn_hidden_size = len(char2idx) # RNN output size
num_classes = len(char2idx) # final output size (RNN or softmax, etc.)
batch_size = 1 # one sample data, one batch
sequence_length = len(sample) - 1 # number of lstm rollings (unit #)
sample_idx = [char2idx[c] for c in sample] # char to index
x_data = [sample_idx[:-1]] # X data sample (0 ~ n-1) hello: hell
y_data = [sample_idx[1:]] # Y label sample (1 ~ n) hello: ello
X = tf.placeholder(tf.int32, [None, sequence_length]) # X data
Y = tf.placeholder(tf.int32, [None, sequence_length]) # Y label
x_one_hot = tf.one_hot(X, num_classes) # one hot: 1 -> 0 1 0 0 0 0 0 0 0 0
cell = tf.contrib.rnn.BasicLSTMCell(
num_units=rnn_hidden_size, state_is_tuple=True)
initial_state = cell.zero_state(batch_size, tf.float32)
outputs, _states = tf.nn.dynamic_rnn(
cell, x_one_hot, initial_state=initial_state, dtype=tf.float32)
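# outputs has shape (batch_size, sequence_length, rnn_hidden_size); it is fed
# to sequence_loss directly as the logits, which only lines up here because
# rnn_hidden_size == num_classes (there is no separate output projection).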
weights = tf.ones([batch_size, sequence_length])
sequence_loss = tf.contrib.seq2seq.sequence_loss(
logits=outputs, targets=Y, weights=weights)
loss = tf.reduce_mean(sequence_loss)
train = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(loss)
prediction = tf.argmax(outputs, axis=2)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(3000):
l, _ = sess.run([loss, train], feed_dict={X: x_data, Y: y_data})
result = sess.run(prediction, feed_dict={X: x_data})
# print char using dic
result_str = [idx2char[c] for c in np.squeeze(result)]
print(i, "loss:", l, "Prediction:", ''.join(result_str))
'''
0 loss: 2.29895 Prediction: nnuffuunnuuuyuy
1 loss: 2.29675 Prediction: nnuffuunnuuuyuy
2 loss: 2.29459 Prediction: nnuffuunnuuuyuy
3 loss: 2.29247 Prediction: nnuffuunnuuuyuy
...
1413 loss: 1.3745 Prediction: if you want you
1414 loss: 1.3743 Prediction: if you want you
1415 loss: 1.3741 Prediction: if you want you
1416 loss: 1.3739 Prediction: if you want you
1417 loss: 1.3737 Prediction: if you want you
1418 loss: 1.37351 Prediction: if you want you
1419 loss: 1.37331 Prediction: if you want you
'''
|
import numpy as np
import random
import copy
from collections import namedtuple, deque
import torch
import torch.nn.functional as F
import torch.optim as optim
from model_ddpg import Actor, Critic
from replay_buffer import ReplayBuffer, PrioritizedReplayBuffer
BUFFER_SIZE = int(1e6) # replay buffer size
START_SIZE = 1024 # when to start training
BATCH_SIZE = 512 # minibatch size
GAMMA = 0.99 # discount factor
TAU = 1e-3 # for soft update of target parameters
LR_ACTOR = 1e-3 # learning rate of the actor
LR_CRITIC = 1e-3 # learning rate of the critic
WEIGHT_DECAY = 0 # L2 weight decay
TRAIN_EVERY = 5 # how often to train a batch
TRAIN_STEPS = 3 # how many training steps when a batch is trained
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Agent():
"""Interacts with and learns from the environment."""
def __init__(self, num_agents, state_size, action_size, random_seed, use_per=False):
"""Initialize an Agent object.
Params
======
num_agents (int): number of agents
state_size (int): dimension of each state
action_size (int): dimension of each action
random_seed (int): random seed
use_per (bool): whether to use prioritized replay buffer
"""
self.num_agents = num_agents
self.state_size = state_size
self.action_size = action_size
self.seed = random.seed(random_seed)
self.use_per = use_per
# Actor Network (w/ Target Network)
self.actor_local = Actor(state_size, action_size, random_seed).to(device)
self.actor_target = Actor(state_size, action_size, random_seed).to(device)
self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=LR_ACTOR)
# Critic Network (w/ Target Network)
self.critic_local = Critic(state_size, action_size, random_seed).to(device)
self.critic_target = Critic(state_size, action_size, random_seed).to(device)
self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=LR_CRITIC, weight_decay=WEIGHT_DECAY)
# Noise process
self.noise = OUNoise(action_size, random_seed)
# Replay memory
if use_per:
self.memory = PrioritizedReplayBuffer(BUFFER_SIZE, BATCH_SIZE)
else:
self.memory = ReplayBuffer(BUFFER_SIZE, BATCH_SIZE, random_seed)
# Initialize time step
self.t_step = 0
def get_critic_Q(self, states, actions, rewards, next_states, dones, gamma, is_train=True):
# Get max predicted Q values (for next states) from target model
if is_train:
actions_next = self.actor_target(next_states)
Q_targets_next = self.critic_target(next_states, actions_next)
# Compute Q targets for current states (y_i)
Q_targets = rewards + (gamma * (1 - dones) * Q_targets_next)
Q_expected = self.critic_local(states, actions)
else:
self.actor_local.eval()
self.actor_target.eval()
self.critic_local.eval()
self.critic_target.eval()
with torch.no_grad():
actions_next = self.actor_target(next_states)
Q_targets_next = self.critic_target(next_states, actions_next)
# Compute Q targets for current states (y_i)
Q_targets = rewards + (gamma * (1 - dones) * Q_targets_next)
Q_expected = self.critic_local(states, actions)
self.actor_local.train()
self.actor_target.train()
self.critic_local.train()
self.critic_target.train()
return Q_expected, Q_targets
def step(self, states, actions, rewards, next_states, dones):
"""Save experience in replay memory, and use random sample from buffer to learn."""
# Save experience / reward
if self.use_per:
# Convert numpy array to torch tensor
states = torch.from_numpy(states).float().to(device)
actions = torch.from_numpy(actions).float().to(device)
rewards = torch.from_numpy(np.array(rewards)).float().unsqueeze(1).to(device)
next_states = torch.from_numpy(next_states).float().to(device)
dones = torch.from_numpy(np.array(dones).astype(np.uint8)).float().unsqueeze(1).to(device)
# Get max predicted Q values (for next states) from target model
Q_expected, Q_targets = self.get_critic_Q(states, actions, rewards, next_states, dones, GAMMA, is_train=False)
# Convert torch tensor to numpy array
states = states.cpu().data.numpy()
actions = actions.cpu().data.numpy()
rewards = rewards.cpu().data.numpy().squeeze(1).tolist()
next_states = next_states.cpu().data.numpy()
dones = dones.cpu().data.numpy().squeeze(1).astype(np.bool).tolist()
# Calculate error
errors = Q_expected - Q_targets
errors = errors.cpu().data.numpy().squeeze(1)
for i in range(self.num_agents):
self.memory.add(states[i], actions[i], rewards[i], next_states[i], dones[i], errors[i])
else:
for i in range(self.num_agents):
self.memory.add(states[i], actions[i], rewards[i], next_states[i], dones[i])
# Update time step
self.t_step += 1
# If enough samples are available in memory,
if len(self.memory) >= START_SIZE:
# Get random subset and learn every TRAIN_EVERY time steps,
if self.t_step % TRAIN_EVERY == 0:
for _ in range(TRAIN_STEPS):
if self.use_per:
experiences, idx_tree, is_weight = self.memory.sample()
self.learn(experiences, GAMMA, idx_tree, is_weight)
else:
experiences = self.memory.sample()
self.learn(experiences, GAMMA)
def act(self, states, add_noise=True):
"""Returns epsilon-greedy actions for given state as per current policy."""
states = torch.from_numpy(states).float().to(device)
self.actor_local.eval()
with torch.no_grad():
actions = self.actor_local(states).cpu().data.numpy()
self.actor_local.train()
if add_noise:
actions += np.concatenate([np.expand_dims(self.noise.sample(), axis=0) for _ in range(self.num_agents)], axis=0)
return np.clip(actions, -1, 1)
def reset(self):
self.noise.reset()
def learn(self, experiences, gamma, idx_tree=None, is_weight=None):
"""Update policy and value parameters using given batch of experience tuples.
Q_targets = r + γ * critic_target(next_state, actor_target(next_state))
where:
actor_target(state) -> action
critic_target(state, action) -> Q-value
Params
======
experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
gamma (float): discount factor
idx_tree (list): tree indices of the sampled experiences (PER only)
is_weight (np.ndarray): importance-sampling weights (PER only)
"""
states, actions, rewards, next_states, dones = experiences
# ---------------------------- update critic ---------------------------- #
# Get predicted next-state actions and Q values from target models
Q_expected, Q_targets = self.get_critic_Q(states, actions, rewards, next_states, dones, gamma, is_train=True)
# Compute critic loss
if self.use_per:
assert ((is_weight is not None) and (is_weight.size > 0))
is_weight = torch.from_numpy(is_weight).float().to(device)
critic_loss = (is_weight * F.smooth_l1_loss(Q_expected, Q_targets, reduction='none').squeeze()).mean()
else:
critic_loss = F.mse_loss(Q_expected, Q_targets)
# Minimize the loss
self.critic_optimizer.zero_grad()
critic_loss.backward()
# torch.nn.utils.clip_grad_norm_(self.critic_local.parameters(), 1) # use gradient norm clipping
self.critic_optimizer.step()
# ---------------------------- update actor ---------------------------- #
# Compute actor loss
actions_pred = self.actor_local(states)
actor_loss = -self.critic_local(states, actions_pred).mean()
# Minimize the loss
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
# ----------------------- update target networks ----------------------- #
self.soft_update(self.critic_local, self.critic_target, TAU)
self.soft_update(self.actor_local, self.actor_target, TAU)
# update priority
if self.use_per:
assert((idx_tree is not None) and (len(idx_tree) > 0))
errors = Q_expected - Q_targets
errors = errors.cpu().data.numpy().squeeze()
for i in range(self.memory.batch_size):
self.memory.update(idx_tree[i], errors[i])
def soft_update(self, local_model, target_model, tau):
"""Soft update model parameters.
θ_target = τ*θ_local + (1 - τ)*θ_target
Params
======
local_model: PyTorch model (weights will be copied from)
target_model: PyTorch model (weights will be copied to)
tau (float): interpolation parameter
"""
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
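# Illustration (not part of the original code): with e.g. tau = 1e-3, each call
# moves the target weights only 0.1% of the way toward the local weights,
# i.e. theta_target <- 0.001 * theta_local + 0.999 * theta_target,
# so the target networks change slowly and stabilize the bootstrapped targets.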
class OUNoise:
"""Ornstein-Uhlenbeck process."""
def __init__(self, size, seed, mu=0., theta=0.15, sigma=0.2):
"""Initialize parameters and noise process."""
self.mu = mu * np.ones(size)
self.theta = theta
self.sigma = sigma
self.seed = random.seed(seed)
self.reset()
def reset(self):
"""Reset the internal state (= noise) to mean (mu)."""
self.state = copy.copy(self.mu)
def sample(self):
"""Update internal state and return it as a noise sample."""
x = self.state
# Gaussian increments give a standard zero-mean Ornstein-Uhlenbeck process.
dx = self.theta * (self.mu - x) + self.sigma * np.random.standard_normal(len(x))
self.state = x + dx
return self.state
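# Usage sketch (illustrative, not part of the original module):
#   noise = OUNoise(size=2, seed=0)
#   noise.reset()                        # state starts at mu (zeros here)
#   sample = noise.sample()              # mean-reverting exploration noise
#   action = np.clip(raw_action + sample, -1, 1)
# The defaults theta=0.15 and sigma=0.2 set the pull toward mu and the noise
# magnitude; this mirrors how act() above perturbs the actor's actions.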
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Environment functions."""
from builtins import range
import ast
import functools
import os
import re
import six
import socket
import sys
import yaml
try:
from shlex import quote
except ImportError:
from pipes import quote
# Tools supporting customization of options via ADDITIONAL_{TOOL_NAME}_OPTIONS.
# FIXME: Support ADDITIONAL_UBSAN_OPTIONS and ADDITIONAL_LSAN_OPTIONS in an
# ASAN instrumented build.
SUPPORTED_MEMORY_TOOLS_FOR_OPTIONS = [
'HWASAN', 'ASAN', 'CFI', 'MSAN', 'TSAN', 'UBSAN'
]
SANITIZER_NAME_MAP = {
'ASAN': 'address',
'CFI': 'cfi',
'MSAN': 'memory',
'TSAN': 'thread',
'UBSAN': 'undefined',
}
COMMON_SANITIZER_OPTIONS = {
'handle_abort': 1,
'handle_segv': 1,
'handle_sigbus': 1,
'handle_sigfpe': 1,
'handle_sigill': 1,
'print_summary': 1,
'use_sigaltstack': 1,
}
def _eval_value(value_string):
"""Returns evaluated value."""
try:
return ast.literal_eval(value_string)
except:
# String fallback.
return value_string
def join_memory_tool_options(options):
"""Joins a dict holding memory tool options into a string that can be set in
the environment."""
return ':'.join('%s=%s' % (key, str(value))
for key, value in sorted(six.iteritems(options)))
def _maybe_convert_to_int(value):
"""Returns the int representation contained by string |value| if it contains
one. Otherwise returns |value|."""
try:
return int(value)
except ValueError:
return value
# Matches anything that isn't an unquoted (ie: not between two single or two
# double quotes) colon.
UNQUOTED_COLON_REGEX = re.compile('((?:[^\'":]|\'[^\']*\'|"[^"]*")+)')
def _parse_memory_tool_options(options_str):
"""Parses memory tool options into a dict."""
parsed = {}
for item in UNQUOTED_COLON_REGEX.split(options_str):
# Regex split can give us empty strings at the beginning and the end. Skip
# these.
if not item:
continue
# Regex split gives us each ':'. Skip these.
if item == ':':
continue
values = item.split('=', 1)
if len(values) != 2:
# TODO(mbarbella): Factor this out of environment, and switch to logging
# an error and continuing. This error should be recoverable.
raise ValueError('Invalid memory tool option "%s"' % item)
option_name = values[0]
option_value = _maybe_convert_to_int(values[1])
parsed[option_name] = option_value
return parsed
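# Example (illustrative): round-tripping memory tool options between the
# colon-separated environment string and the parsed dict.
#   >>> _parse_memory_tool_options('redzone=64:print_summary=1')
#   {'redzone': 64, 'print_summary': 1}
#   >>> join_memory_tool_options({'redzone': 64, 'print_summary': 1})
#   'print_summary=1:redzone=64'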
def _quote_value_if_needed(value):
"""Quote environment value as needed for certain platforms like Windows."""
result = value
bot_platform = platform()
if bot_platform == 'WINDOWS':
result = '"%s"' % result
return result
def copy():
"""Return a safe copy of the environment."""
environment_copy = os.environ.copy()
return environment_copy
def get_asan_options(redzone_size, malloc_context_size, quarantine_size_mb,
bot_platform, leaks):
"""Generates default ASAN options."""
asan_options = {}
# Default options needed for all cases.
asan_options['alloc_dealloc_mismatch'] = 0
asan_options['print_scariness'] = 1
asan_options['strict_memcmp'] = 0
# Set provided redzone size.
if redzone_size:
asan_options['redzone'] = redzone_size
# This value is used in determining whether to report OOM crashes or not.
set_value('REDZONE', redzone_size)
# Set maximum number of stack frames to report.
if malloc_context_size:
asan_options['malloc_context_size'] = malloc_context_size
# Set quarantine size.
if quarantine_size_mb:
asan_options['quarantine_size_mb'] = quarantine_size_mb
# Test for leaks if this is an LSan-enabled job type.
if get_value('LSAN') and leaks:
lsan_options = join_memory_tool_options(get_lsan_options())
set_value('LSAN_OPTIONS', lsan_options)
asan_options['detect_leaks'] = 1
else:
remove_key('LSAN_OPTIONS')
asan_options['detect_leaks'] = 0
# FIXME: Support container overflow on Android.
if bot_platform == 'ANDROID':
asan_options['detect_container_overflow'] = 0
# Enable stack use-after-return.
asan_options['detect_stack_use_after_return'] = 1
asan_options['max_uar_stack_size_log'] = 16
# Other less important default options for all cases.
asan_options.update({
'allocator_may_return_null': 1,
'allow_user_segv_handler': 0,
'check_malloc_usable_size': 0,
'detect_odr_violation': 0,
'fast_unwind_on_fatal': 1,
'print_suppressions': 0,
})
# Add common sanitizer options.
asan_options.update(COMMON_SANITIZER_OPTIONS)
# FIXME: For Windows, rely on online symbolization since llvm-symbolizer.exe
# in build archive does not work.
asan_options['symbolize'] = int(bot_platform == 'WINDOWS')
# For Android, allow user defined segv handler to work.
if bot_platform == 'ANDROID':
asan_options['allow_user_segv_handler'] = 1
# Check if UBSAN is enabled as well for this ASAN build.
# If yes, set UBSAN_OPTIONS and enable suppressions.
if get_value('UBSAN'):
ubsan_options = get_ubsan_options()
# Remove |symbolize| explicitly to avoid overriding ASan defaults.
ubsan_options.pop('symbolize', None)
set_value('UBSAN_OPTIONS', join_memory_tool_options(ubsan_options))
return asan_options
def get_cpu_arch():
"""Return cpu architecture."""
if platform() == 'ANDROID':
# FIXME: Handle this import in a cleaner way.
from platforms import android
return android.settings.get_cpu_arch()
# FIXME: Add support for desktop architectures as needed.
return None
def get_current_memory_tool_var():
"""Get the environment variable name for the current job type's sanitizer."""
memory_tool_name = get_memory_tool_name(get_value('JOB_NAME'))
if not memory_tool_name:
return None
return memory_tool_name + '_OPTIONS'
def get_memory_tool_options(env_var, default_value=None):
"""Get the current memory tool options as a dict. Returns |default_value| if
|env_var| isn't set. Otherwise returns a dictionary containing the memory tool
options and their values."""
env_value = get_value(env_var)
if env_value is not None:
return _parse_memory_tool_options(env_value)
return default_value
def get_instrumented_libraries_paths():
"""Get the instrumented libraries path for the current sanitizer."""
memory_tool_name = get_memory_tool_name(get_value('JOB_NAME'))
if not memory_tool_name:
return None
if memory_tool_name == 'MSAN':
if 'no-origins' in get_value('BUILD_URL', ''):
memory_tool_name += '_NO_ORIGINS'
else:
memory_tool_name += '_CHAINED'
paths = get_value('INSTRUMENTED_LIBRARIES_PATHS_' + memory_tool_name)
if not paths:
return None
return paths.split(':')
def get_default_tool_path(tool_name):
"""Get the default tool for this platform (from scripts/ dir)."""
if platform().lower() == 'android':
# For android devices, we do symbolization on the host machine, which is
# linux. So, we use the linux version of llvm-symbolizer.
platform_override = 'linux'
else:
# No override needed, use default.
platform_override = None
tool_filename = get_executable_filename(tool_name)
tool_path = os.path.join(
get_platform_resources_directory(platform_override), tool_filename)
return tool_path
def get_environment_settings_as_string():
"""Return environment settings as a string. Includes settings for memory
debugging tools (e.g. ASAN_OPTIONS for ASAN), application binary revision,
application command line, etc."""
environment_string = ''
# Add Android specific variables.
if platform() == 'ANDROID':
# FIXME: Handle this import in a cleaner way.
from platforms import android
environment_string += '[Environment] Build fingerprint: %s\n' % (
get_value('BUILD_FINGERPRINT'))
environment_string += ('[Environment] Patch level: %s\n' %
android.settings.get_security_patch_level())
environment_string += (
'[Environment] Local properties file "%s" with contents:\n%s\n' %
(android.device.LOCAL_PROP_PATH,
android.adb.read_data_from_file(android.device.LOCAL_PROP_PATH)))
command_line = get_value('COMMAND_LINE_PATH')
if command_line:
environment_string += (
'[Environment] Command line file "%s" with contents:\n%s\n' %
(command_line, android.adb.read_data_from_file(command_line)))
asan_options = get_value('ASAN_OPTIONS')
if asan_options:
# FIXME: Need better documentation for Chrome builds. Chrome builds use
# asan_device_setup.sh and we send this options file path as an include
# to extra-options parameter.
sanitizer_options_file_path = (
android.sanitizer.get_options_file_path('ASAN'))
environment_string += (
'[Environment] ASAN options file "%s" with contents:\n%s\n' %
(sanitizer_options_file_path, asan_options))
else:
# For desktop platforms, add |*_OPTIONS| variables from environment.
for sanitizer_option in get_sanitizer_options_for_display():
environment_string += '[Environment] %s\n' % sanitizer_option
return environment_string
def get_sanitizer_options_for_display():
"""Return a list of sanitizer options with quoted values."""
result = []
for tool in SUPPORTED_MEMORY_TOOLS_FOR_OPTIONS:
options_variable = tool + '_OPTIONS'
options_value = os.getenv(options_variable)
if not options_value:
continue
result.append('{options_variable}="{options_value}"'.format(
options_variable=options_variable, options_value=quote(options_value)))
return result
def get_llvm_symbolizer_path():
"""Get the path of the llvm-symbolizer binary."""
llvm_symbolizer_path = get_value('LLVM_SYMBOLIZER_PATH')
# Use the default llvm symbolizer when:
# 1. The |LLVM_SYMBOLIZER_PATH| env variable is not set, or
# 2. The configured path no longer exists (e.g. the build was deleted).
if not llvm_symbolizer_path or not os.path.exists(llvm_symbolizer_path):
llvm_symbolizer_path = get_default_tool_path('llvm-symbolizer')
# Make sure that llvm symbolizer binary exists.
if not os.path.exists(llvm_symbolizer_path):
return None
# Make sure that llvm symbolizer binary is executable.
os.chmod(llvm_symbolizer_path, 0o750)
return llvm_symbolizer_path
def get_root_directory():
"""Return root directory."""
return get_value('ROOT_DIR')
def get_startup_scripts_directory():
"""Return path to startup scripts."""
return os.path.join(get_value('ROOT_DIR'), 'src', 'python', 'bot', 'startup')
def get_config_directory():
"""Return the path to the configs directory."""
config_dir = get_value('CONFIG_DIR_OVERRIDE')
if config_dir:
return config_dir
if is_running_on_app_engine():
# Root is already src/appengine.
return 'config'
# Running on bot, give path to config folder inside appengine dir.
return os.path.join(get_root_directory(), 'src', 'appengine', 'config')
def get_gae_config_directory():
"""Return the path to the google appengine configs directory."""
return os.path.join(get_config_directory(), 'gae')
def get_gce_config_directory():
"""Return the path to the google compute engine configs directory."""
return os.path.join(get_config_directory(), 'gce')
def get_resources_directory():
"""Return the path to the resources directory."""
return os.path.join(get_root_directory(), 'resources')
def get_platform_resources_directory(platform_override=None):
"""Return the path to platform-specific resources directory."""
return os.path.join(get_resources_directory(), 'platform',
platform_override or platform().lower())
def get_suppressions_directory():
"""Return the path to the suppressions directory."""
return os.path.join(get_config_directory(), 'suppressions')
def get_suppressions_file(sanitizer, suffix='suppressions'):
"""Return the path to sanitizer suppressions file, if exists."""
sanitizer_suppressions_filename = '{sanitizer}_{suffix}.txt'.format(
sanitizer=sanitizer, suffix=suffix)
sanitizer_suppressions_file_path = os.path.join(
get_suppressions_directory(), sanitizer_suppressions_filename)
if not os.path.exists(sanitizer_suppressions_file_path):
return None
if not os.path.getsize(sanitizer_suppressions_file_path):
return None
return sanitizer_suppressions_file_path
def get_lsan_options():
"""Generates default LSAN options."""
lsan_suppressions_path = get_suppressions_file('lsan')
lsan_options = {
'print_suppressions': 0,
}
# Add common sanitizer options.
lsan_options.update(COMMON_SANITIZER_OPTIONS)
if lsan_suppressions_path:
lsan_options['suppressions'] = lsan_suppressions_path
return lsan_options
def get_msan_options():
"""Generates default MSAN options."""
msan_options = {'symbolize': 0}
# Add common sanitizer options.
msan_options.update(COMMON_SANITIZER_OPTIONS)
return msan_options
def get_platform_id():
"""Return a platform id as a lowercase string."""
bot_platform = platform()
if bot_platform == 'ANDROID':
# FIXME: Handle this import in a cleaner way.
from platforms import android
platform_id = get_value('PLATFORM_ID', android.settings.get_platform_id())
return platform_id.lower()
return bot_platform.lower()
def get_platform_group():
"""Return the platform group (specified via QUEUE_OVERRIDE) if it
exists, otherwise platform()."""
platform_group = get_value('QUEUE_OVERRIDE')
if platform_group:
return platform_group
return platform()
def get_memory_tool_name(job_name):
"""Figures out name of memory debugging tool."""
for tool in SUPPORTED_MEMORY_TOOLS_FOR_OPTIONS:
if tool_matches(tool, job_name):
return tool
# If no tool specified, assume it is ASAN. Also takes care of LSAN job type.
return 'ASAN'
def get_memory_tool_display_string(job_name):
"""Return memory tool string for a testcase."""
memory_tool_name = get_memory_tool_name(job_name)
sanitizer_name = SANITIZER_NAME_MAP.get(memory_tool_name)
if not sanitizer_name:
return 'Memory Tool: %s' % memory_tool_name
return 'Sanitizer: %s (%s)' % (sanitizer_name, memory_tool_name)
def get_executable_filename(executable_name):
"""Return the filename for the given executable."""
if platform() != 'WINDOWS':
return executable_name
extension = '.exe'
if executable_name.endswith(extension):
return executable_name
return executable_name + extension
def get_tsan_options():
"""Generates default TSAN options."""
tsan_suppressions_path = get_suppressions_file('tsan')
tsan_options = {
'atexit_sleep_ms': 200,
'flush_memory_ms': 2000,
'history_size': 3,
'print_suppressions': 0,
'report_thread_leaks': 0,
'report_signal_unsafe': 0,
'stack_trace_format': 'DEFAULT',
'symbolize': 1,
}
# Add common sanitizer options.
tsan_options.update(COMMON_SANITIZER_OPTIONS)
if tsan_suppressions_path:
tsan_options['suppressions'] = tsan_suppressions_path
return tsan_options
def get_ubsan_options():
"""Generates default UBSAN options."""
# Note that UBSAN can work together with ASAN as well.
ubsan_suppressions_path = get_suppressions_file('ubsan')
ubsan_options = {
'halt_on_error': 1,
'print_stacktrace': 1,
'print_suppressions': 0,
# We use -fsanitize=unsigned-integer-overflow as an additional coverage
# signal and do not want those errors to be reported by UBSan as bugs.
# See https://github.com/google/oss-fuzz/issues/910 for additional info.
'silence_unsigned_overflow': 1,
'symbolize': 1,
}
# Add common sanitizer options.
ubsan_options.update(COMMON_SANITIZER_OPTIONS)
# TODO(crbug.com/877070): Make this code configurable on a per job basis.
if ubsan_suppressions_path and not is_chromeos_system_job():
ubsan_options['suppressions'] = ubsan_suppressions_path
return ubsan_options
def get_value(environment_variable, default_value=None):
"""Return an environment variable value."""
value_string = os.getenv(environment_variable)
# value_string will be None if the variable is not defined.
if value_string is None:
return default_value
# Exception for ANDROID_SERIAL. Sometimes serial can be just numbers,
# so we don't want to eval it.
if environment_variable == 'ANDROID_SERIAL':
return value_string
# Evaluate the value of the environment variable with string fallback.
return _eval_value(value_string)
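# Example (illustrative, with made-up variable names): values are evaluated as
# Python literals where possible, with a plain-string fallback.
#   >>> os.environ['MAX_FUZZ_THREADS'] = '4'
#   >>> get_value('MAX_FUZZ_THREADS')
#   4
#   >>> os.environ['APP_ARGS'] = '--no-sandbox'
#   >>> get_value('APP_ARGS')
#   '--no-sandbox'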
def _job_substring_match(search_string, job_name):
"""Return a bool on whether a string exists in a provided job name or
use from environment if available (case insensitive)."""
job_name = job_name or get_value('JOB_NAME')
if not job_name:
return False
return search_string in job_name.lower()
def is_afl_job(job_name=None):
"""Return true if the current job uses AFL."""
# Prefix matching is not sufficient.
return _job_substring_match('afl', job_name)
def is_chromeos_job(job_name=None):
"""Return True if the current job is for ChromeOS."""
return _job_substring_match('chromeos', job_name)
def is_chromeos_system_job(job_name=None):
"""Return True if the current job is for ChromeOS system (i.e. not libFuzzer
or entire Chrome browser for Chrome on ChromeOS)."""
return is_chromeos_job(job_name) and get_value('CHROMEOS_SYSTEM')
def is_libfuzzer_job(job_name=None):
"""Return true if the current job uses libFuzzer."""
# Prefix matching is not sufficient.
return _job_substring_match('libfuzzer', job_name)
def is_engine_fuzzer_job(job_name=None):
"""Return if true is this is an engine fuzzer."""
return is_afl_job(job_name) or is_libfuzzer_job(job_name)
def is_posix():
"""Return true if we are on a posix platform (linux/unix and mac os)."""
return os.name == 'posix'
def is_trusted_host(ensure_connected=True):
"""Return whether or not the current bot is a trusted host."""
return get_value('TRUSTED_HOST') and (not ensure_connected or
get_value('WORKER_BOT_NAME'))
def is_untrusted_worker():
"""Return whether or not the current bot is an untrusted worker."""
return get_value('UNTRUSTED_WORKER')
def is_running_on_app_engine():
"""Return True if we are running on appengine (local or production)."""
return (is_running_on_app_engine_development() or
os.getenv('SERVER_SOFTWARE', '').startswith('Google App Engine/'))
def is_running_on_app_engine_development():
"""Return True if running on the local development appengine server."""
return os.getenv('SERVER_SOFTWARE', '').startswith('Development/')
def parse_environment_definition(environment_string):
"""Parses a job's environment definition."""
if not environment_string:
return {}
definitions = [environment_string.splitlines()]
values = {}
for definition in definitions:
for line in definition:
if line.startswith('#') or not line.strip():
continue
m = re.match('([^ =]+)[ ]*=[ ]*(.*)', line)
if m:
key = m.group(1).strip()
value = m.group(2).strip()
values[key] = value
return values
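# Example (illustrative, with made-up keys): comment and blank lines are
# skipped and values are kept as raw strings.
#   >>> parse_environment_definition('APP_NAME = chrome\n# comment\nTIMEOUT = 25')
#   {'APP_NAME': 'chrome', 'TIMEOUT': '25'}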
def platform():
"""Return the operating system type, unless an override is provided."""
environment_override = get_value('OS_OVERRIDE')
if environment_override:
return environment_override.upper()
if sys.platform.startswith('win'):
return 'WINDOWS'
elif sys.platform.startswith('linux'):
return 'LINUX'
elif sys.platform == 'darwin':
return 'MAC'
raise ValueError('Unsupported platform "%s".' % sys.platform)
def remove_key(key_name):
"""Remove environment |key| and its associated value."""
if not key_name:
return
if key_name not in os.environ:
return
del os.environ[key_name]
# Used by reset_environment to store the initial environment.
_initial_environment = None
def reset_environment():
"""Resets environment variables to their initial state. Saves the initial
state on first call."""
global _initial_environment
if _initial_environment is None:
_initial_environment = copy()
# There is nothing to reset if we are initializing for the first time.
else:
# Clean current environment.
os.environ.clear()
# Add shared variables with values from _initial_environment.
os.environ.update(_initial_environment)
if is_trusted_host():
from bot.untrusted_runner import environment as untrusted_env
untrusted_env.reset_environment()
def set_common_environment_variables():
"""Sets environment variables common for different memory debugging tools."""
# G_SLICE = always-malloc: make glib use system malloc.
# NSS_DISABLE_UNLOAD = 1: make nss skip dlclosing dynamically loaded modules,
# which would result in "obj:*" in backtraces.
# NSS_DISABLE_ARENA_FREE_LIST = 1: make nss use system malloc.
set_value('G_SLICE', 'always-malloc')
set_value('NSS_DISABLE_UNLOAD', 1)
set_value('NSS_DISABLE_ARENA_FREE_LIST', 1)
set_value('NACL_DANGEROUS_SKIP_QUALIFICATION_TEST', 1)
def set_memory_tool_options(env_var, options_dict):
"""Set current memory tool options."""
set_value(env_var, join_memory_tool_options(options_dict))
def set_environment_parameters_from_file(file_path):
"""Set environment variables from a file."""
if not os.path.exists(file_path):
return
with open(file_path, 'r') as f:
file_data = f.read()
for line in file_data.splitlines():
if line.startswith('#') or not line.strip():
continue
m = re.match('([^ =]+)[ ]*=[ ]*(.*)', line)
if m:
environment_variable = m.group(1)
environment_variable_value = m.group(2)
set_value(environment_variable, environment_variable_value)
def reset_current_memory_tool_options(redzone_size=0,
malloc_context_size=0,
leaks=True,
symbolize_inline_frames=False,
quarantine_size_mb=None):
"""Resets environment variables for memory debugging tool to default
values."""
# FIXME: Handle these imports in a cleaner way.
from platforms import android
# Set common environment variable useful for memory debugging tools.
set_common_environment_variables()
# Set memory tool name in our environment for easy access.
job_name = get_value('JOB_NAME')
tool_name = get_memory_tool_name(job_name)
set_value('MEMORY_TOOL', tool_name)
bot_platform = platform()
# Default options for the memory debugging tool used.
if tool_name in ['ASAN', 'HWASAN']:
tool_options = get_asan_options(redzone_size, malloc_context_size,
quarantine_size_mb, bot_platform, leaks)
elif tool_name == 'MSAN':
tool_options = get_msan_options()
elif tool_name == 'TSAN':
tool_options = get_tsan_options()
elif tool_name in ['UBSAN', 'CFI']:
tool_options = get_ubsan_options()
# Additional options. These override the defaults.
additional_tool_options = get_value('ADDITIONAL_%s_OPTIONS' % tool_name)
if additional_tool_options:
tool_options.update(_parse_memory_tool_options(additional_tool_options))
if tool_options.get('symbolize') == 1:
if 'external_symbolizer_path' not in tool_options:
llvm_symbolizer_path_arg = _quote_value_if_needed(
get_llvm_symbolizer_path())
tool_options.update({
'external_symbolizer_path': llvm_symbolizer_path_arg
})
if 'symbolize_inline_frames' not in tool_options:
tool_options.update({
'symbolize_inline_frames': str(symbolize_inline_frames).lower()
})
# Join the options.
joined_tool_options = join_memory_tool_options(tool_options)
tool_options_variable_name = '%s_OPTIONS' % tool_name
set_value(tool_options_variable_name, joined_tool_options)
# CFI handles various signals through the UBSan runtime, so need to set
# UBSAN_OPTIONS explicitly. See crbug.com/716235#c25
if tool_name == 'CFI':
set_value('UBSAN_OPTIONS', joined_tool_options)
# For Android, we need to set shell property |asan.options|.
# For engine-based fuzzers, this is not needed since the options variable is
# passed directly to the shell.
if bot_platform == 'ANDROID' and not is_engine_fuzzer_job():
android.sanitizer.set_options(tool_name, joined_tool_options)
def set_default_vars():
"""Set default environment vars and values."""
env_file_path = os.path.join(get_value('ROOT_DIR'), 'bot', 'env.yaml')
with open(env_file_path) as file_handle:
env_file_contents = file_handle.read()
env_vars_and_values = yaml.safe_load(env_file_contents)
for variable, value in six.iteritems(env_vars_and_values):
# We cannot call set_value here.
os.environ[variable] = str(value)
def set_bot_environment():
"""Set environment for the bots."""
root_dir = get_value('ROOT_DIR')
if not root_dir:
# Error, bail out.
return False
# Reset our current working directory. Our last job might have left us in a
# non-existent temp directory, or ROOT_DIR might have been deleted and recreated.
os.chdir(root_dir)
# Set some default directories. These can be overridden by config files below.
bot_dir = os.path.join(root_dir, 'bot')
if is_trusted_host(ensure_connected=False):
worker_root_dir = os.environ['WORKER_ROOT_DIR']
os.environ['BUILDS_DIR'] = os.path.join(worker_root_dir, 'bot', 'builds')
else:
os.environ['BUILDS_DIR'] = os.path.join(bot_dir, 'builds')
os.environ['BUILD_URLS_DIR'] = os.path.join(bot_dir, 'build-urls')
os.environ['LOG_DIR'] = os.path.join(bot_dir, 'logs')
os.environ['CACHE_DIR'] = os.path.join(bot_dir, 'cache')
inputs_dir = os.path.join(bot_dir, 'inputs')
os.environ['INPUT_DIR'] = inputs_dir
os.environ['CRASH_STACKTRACES_DIR'] = os.path.join(inputs_dir, 'crash-stacks')
os.environ['FUZZERS_DIR'] = os.path.join(inputs_dir, 'fuzzers')
os.environ['DATA_BUNDLES_DIR'] = os.path.join(inputs_dir, 'data-bundles')
os.environ['FUZZ_INPUTS'] = os.path.join(inputs_dir, 'fuzzer-testcases')
os.environ['FUZZ_INPUTS_MEMORY'] = os.environ['FUZZ_INPUTS']
os.environ['FUZZ_INPUTS_DISK'] = os.path.join(inputs_dir,
'fuzzer-testcases-disk')
os.environ['MUTATOR_PLUGINS_DIR'] = os.path.join(inputs_dir,
'mutator-plugins')
os.environ['FUZZ_DATA'] = os.path.join(inputs_dir,
'fuzzer-common-data-bundles')
os.environ['IMAGES_DIR'] = os.path.join(inputs_dir, 'images')
os.environ['SYMBOLS_DIR'] = os.path.join(inputs_dir, 'symbols')
os.environ['USER_PROFILE_ROOT_DIR'] = os.path.join(inputs_dir,
'user-profile-dirs')
# Set bot name.
if not get_value('BOT_NAME'):
# If not defined, default to host name.
os.environ['BOT_NAME'] = socket.gethostname().lower()
# Set BOT_TMPDIR if not already set.
if not get_value('BOT_TMPDIR'):
os.environ['BOT_TMPDIR'] = os.path.join(bot_dir, 'tmp')
# Add common environment variables needed by Bazel test runner.
# See https://docs.bazel.build/versions/master/test-encyclopedia.html.
os.environ['TEST_TMPDIR'] = get_value('BOT_TMPDIR')
# Sets the default configuration. Can be overridden by job environment.
set_default_vars()
# Set environment variable from local project configuration.
from config import local_config
local_config.ProjectConfig().set_environment()
# Success.
return True
def set_tsan_max_history_size():
"""Sets maximum history size for TSAN tool."""
tsan_options = get_value('TSAN_OPTIONS')
if not tsan_options:
return
tsan_max_history_size = 7
for i in range(tsan_max_history_size):
tsan_options = (
tsan_options.replace('history_size=%d' % i,
'history_size=%d' % tsan_max_history_size))
set_value('TSAN_OPTIONS', tsan_options)
def set_value(environment_variable, value):
"""Set an environment variable."""
value_str = str(value)
environment_variable_str = str(environment_variable)
value_str = value_str.replace('%ROOT_DIR%', os.environ['ROOT_DIR'])
os.environ[environment_variable_str] = value_str
if is_trusted_host():
from bot.untrusted_runner import environment as untrusted_env
untrusted_env.forward_environment_variable(environment_variable_str,
value_str)
def tool_matches(tool_name, job_name):
"""Return if the memory debugging tool is used in this job."""
match_prefix = '(.*[^a-zA-Z]|^)%s'
matches_tool = re.match(match_prefix % tool_name.lower(), job_name.lower())
return bool(matches_tool)
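# Example (illustrative): the tool name must start the job name or follow a
# non-letter character, so substrings of other tool names do not match.
#   >>> tool_matches('ASAN', 'libfuzzer_asan_chrome')
#   True
#   >>> tool_matches('ASAN', 'libfuzzer_hwasan_chrome')
#   False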
def appengine_noop(func):
"""Wrap a function into no-op and return None if running on App Engine."""
@functools.wraps(func)
def wrapper(*args, **kwargs):
if is_running_on_app_engine():
return None
return func(*args, **kwargs)
return wrapper
def bot_noop(func):
"""Wrap a function into no-op and return None if running on bot."""
@functools.wraps(func)
def wrapper(*args, **kwargs):
is_bot = not is_running_on_app_engine()
if is_bot:
return None
return func(*args, **kwargs)
return wrapper
def is_local_development():
"""Return true if running in local development environment (e.g. running
a bot locally, excludes tests)."""
return bool(get_value('LOCAL_DEVELOPMENT') and not get_value('PY_UNITTESTS'))
def local_noop(func):
"""Wrap a function into no-op and return None if running in local
development environment."""
@functools.wraps(func)
def wrapper(*args, **kwargs):
if (is_local_development() or is_running_on_app_engine_development()):
return None
return func(*args, **kwargs)
return wrapper
def is_ephemeral():
"""Return whether or not we are an ephemeral bot."""
return get_value('EPHEMERAL')
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import copy
import gast
from collections import defaultdict
from paddle.fluid import unique_name
from paddle.fluid.dygraph.dygraph_to_static.static_analysis import AstNodeWrapper
from paddle.fluid.dygraph.dygraph_to_static.static_analysis import StaticAnalysisVisitor
from paddle.fluid.dygraph.dygraph_to_static.utils import ast_to_source_code
from paddle.fluid.dygraph.dygraph_to_static.utils import generate_name_node
from paddle.fluid.dygraph.dygraph_to_static.utils import get_constant_variable_node
from paddle.fluid.dygraph.dygraph_to_static.utils import get_attribute_full_name
from paddle.fluid.dygraph.dygraph_to_static.variable_trans_func import create_static_variable_gast_node
from paddle.fluid.dygraph.dygraph_to_static.variable_trans_func import to_static_variable_gast_node
__all__ = ['LoopTransformer', 'NameVisitor']
WHILE_CONDITION_PREFIX = 'while_condition'
WHILE_BODY_PREFIX = 'while_body'
FOR_CONDITION_PREFIX = 'for_loop_condition'
FOR_BODY_PREFIX = 'for_loop_body'
def create_while_node(condition_name, body_name, loop_var_names):
while_args = []
while_args.append(
gast.Name(
id=condition_name,
ctx=gast.Param(),
annotation=None,
type_comment=None))
while_args.append(
gast.Name(
id=body_name, ctx=gast.Param(), annotation=None, type_comment=None))
assign_targets = [
gast.Name(
id=var_name, ctx=gast.Param(), annotation=None, type_comment=None)
for var_name in loop_var_names
]
while_args.append(gast.List(elts=assign_targets, ctx=gast.Param()))
while_func_id = gast.parse('fluid.layers.while_loop').body[0].value
while_node = gast.Call(func=while_func_id, args=while_args, keywords=[])
assign_node = gast.Assign(
targets=[gast.Tuple(
elts=assign_targets, ctx=gast.Store())],
value=while_node)
return assign_node
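# Example (illustrative): with condition_name='while_condition_0',
# body_name='while_body_0' and loop_var_names=['i', 'x'], the returned Assign
# node corresponds to the source line
#   i, x = fluid.layers.while_loop(while_condition_0, while_body_0, [i, x])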
class LogicalOpTransformer(gast.NodeTransformer):
"""
Transform python boolean op into Paddle logical op
"""
def __init__(self, node):
self.root = node
def transform(self):
return self.visit(self.root)
def visit_UnaryOp(self, node):
self.generic_visit(node)
if isinstance(node.op, gast.Not):
arg = ast_to_source_code(node.operand)
new_node_str = "fluid.layers.logical_not({})".format(arg)
# gast.parse returns Module(body=[expr(value=...)])
new_node = gast.parse(new_node_str).body[0].value
return new_node
return node
def visit_BoolOp(self, node):
self.generic_visit(node)
if isinstance(node.op, gast.And):
new_node = self._create_bool_op_node(node.values, 'and')
elif isinstance(node.op, gast.Or):
new_node = self._create_bool_op_node(node.values, 'or')
else:
raise TypeError(
"Only supports and/or syntax in control flow if statement.")
return new_node
def _create_bool_op_node(self, nodes, api_type):
assert len(
nodes
) > 1, "The length of BoolOp should be at least 2, but received {}.".format(
len(nodes))
if len(nodes) > 2:
# Creates logic_and/logic_or node recursively.
pre_assign_node = self._create_bool_op_node(nodes[:2], api_type)
nodes = [pre_assign_node] + nodes[2:]
args = [ast_to_source_code(child) for child in nodes]
new_node_str = "fluid.layers.logical_{}(x={}, y={})".format(
api_type, args[0], args[1])
# gast.parse returns Module(body=[expr(...)])
new_node = gast.parse(new_node_str).body[0].value
return new_node
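# Example (illustrative): `a and b and c` is rewritten by recursing on the
# first two operands, producing roughly
#   fluid.layers.logical_and(x=fluid.layers.logical_and(x=a, y=b), y=c)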
class NameVisitor(gast.NodeVisitor):
'''
Analyzes name liveness for the loop transformer.
'''
def __init__(self, root_node):
# Set of gast.Name or gast.Attribute for variables
self.current_seen_vars = set()
# List of gast.While/gast.For nodes
self.current_loop = []
# Mapping from gast.While/gast.For to variable nodes
self.before_loop_body_vars = defaultdict(set)
self.in_loop_vars = defaultdict(set)
self.static_analysis_visitor = StaticAnalysisVisitor(root_node)
self.node_to_wrapper_map = self.static_analysis_visitor.get_node_to_wrapper_map(
)
self.visit(root_node)
def is_control_flow_loop(self, node):
# TODO: make a better condition
return True
def get_loop_var_names(self, node):
assert isinstance(
node, (gast.While, gast.For)), "Input node is not gast loop node"
loop_var_names = set()
create_var_names = set()
read_context = {type(gast.Load()), type(gast.AugLoad())}
in_loop_vars = self.in_loop_vars[node]
in_loop_name_strs = self._var_nodes_to_names(in_loop_vars)
before_loop_body_vars = self.before_loop_body_vars[node]
before_loop_name_strs = self._var_nodes_to_names(before_loop_body_vars)
after_loop_vars = self.current_seen_vars - before_loop_body_vars - in_loop_vars
after_loop_name_strs = self._var_nodes_to_names(after_loop_vars,
read_context)
for name in in_loop_name_strs:
if name in before_loop_name_strs:
# If a variable is used in loop and created before loop, it
# should be in loop_var as input
loop_var_names.add(name)
elif name in after_loop_name_strs:
# If a variable is created in the while loop and read after
# loop, it should be in loop_var and we should create it
loop_var_names.add(name)
create_var_names.add(name)
return loop_var_names, create_var_names
def visit_Name(self, node):
if self._is_call_func_name_node(node):
self.generic_visit(node)
return
if node.id == "False" or node.id == "True":
self.generic_visit(node)
return
self.current_seen_vars.add(node)
for loop_node in self.current_loop:
self.in_loop_vars[loop_node].add(node)
self.generic_visit(node)
def visit(self, node):
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
ret = visitor(node)
return ret
def visit_Attribute(self, node):
if self._is_call_func_name_node(node):
return
attr_full_name = get_attribute_full_name(node)
self.current_seen_vars.add(node)
for loop_node in self.current_loop:
self.in_loop_vars[loop_node].add(node)
# sub-nodes are visited during get_attribute_full_name and we shouldn't
# visit again
def visit_For(self, node):
self.current_loop.append(node)
self.visit(node.target)
self.before_loop_body_vars[node] = copy.copy(self.current_seen_vars)
self.generic_visit(node)
self.current_loop.pop()
def visit_While(self, node):
self.current_loop.append(node)
self.visit(node.test)
self.before_loop_body_vars[node] = copy.copy(self.current_seen_vars)
self.generic_visit(node)
self.current_loop.pop()
def _var_nodes_to_names(self, node_set, ctx_filter_set=None):
ret = set()
for node in node_set:
if ctx_filter_set is None or type(node.ctx) in ctx_filter_set:
if isinstance(node, gast.Name):
ret.add(node.id)
elif isinstance(node, gast.Attribute):
ret.add(get_attribute_full_name(node))
return ret
def _is_call_func_name_node(self, node):
parent_node = self.node_to_wrapper_map[node].parent.node
if isinstance(parent_node, gast.Call) and parent_node.func == node:
return True
return False
class LoopTransformer(gast.NodeTransformer):
"""
This class transforms python while/for statement into Static Graph Ast
"""
def __init__(self, wrapper_root):
assert isinstance(
wrapper_root, AstNodeWrapper
), "Input non-AstNodeWrapper node for the initialization of WhileTransformer."
self.wrapper_root = wrapper_root
self.root = wrapper_root.node
self.name_visitor = NameVisitor(self.root)
def transform(self):
self.visit(self.root)
def visit(self, node):
self.generic_visit(node)
# All parent nodes that may contain gast.While/gast.For
if hasattr(node, 'body'):
self.replace_stmt_list(node.body)
if hasattr(node, 'orelse'):
self.replace_stmt_list(node.orelse)
return node
def replace_stmt_list(self, body_list):
if not isinstance(body_list, list):
return
i = 0
while i < len(body_list):
if isinstance(body_list[i], gast.While):
new_stmts = self.get_while_stmt_nodes(body_list[i])
body_list[i:i + 1] = new_stmts
i += len(new_stmts)
elif isinstance(body_list[i], gast.For):
new_stmts = self.get_for_stmt_nodes(body_list[i])
body_list[i:i + 1] = new_stmts
i += len(new_stmts)
else:
i += 1
def get_for_range_node(self, node):
if not isinstance(node.iter, gast.Call):
return None
if not isinstance(node.iter.func, gast.Name):
return None
if node.iter.func.id != "range":
return None
return node.iter
def get_for_args_stmts(self, iter_name, args_list):
'''
Returns 3 gast stmt nodes for the range() arguments:
1. Initialization of the iteration variable
2. Loop condition
3. Statement that advances the iteration variable on each iteration
NOTE(TODO): Python allows access to the iteration variable after the loop,
e.g. "for i in range(10)" leaves i = 9 after the loop, but the current
conversion makes i = 10. We should find a way to change it.
'''
len_range_args = len(args_list)
assert len_range_args >= 1 and len_range_args <= 3, "range() function takes 1 to 3 arguments"
if len_range_args == 1:
init_stmt = get_constant_variable_node(iter_name, 0)
else:
init_stmt = gast.Assign(
targets=[
gast.Name(
id=iter_name,
ctx=gast.Store(),
annotation=None,
type_comment=None)
],
value=args_list[0])
range_max_node = args_list[0] if len_range_args == 1 else args_list[1]
step_node = args_list[2] if len_range_args == 3 else gast.Constant(
value=1, kind=None)
cond_stmt = gast.Compare(
left=gast.BinOp(
left=gast.Name(
id=iter_name,
ctx=gast.Load(),
annotation=None,
type_comment=None),
op=gast.Add(),
right=step_node),
ops=[gast.LtE()],
comparators=[range_max_node])
change_stmt = gast.AugAssign(
target=gast.Name(
id=iter_name,
ctx=gast.Store(),
annotation=None,
type_comment=None),
op=gast.Add(),
value=step_node)
return init_stmt, cond_stmt, change_stmt
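# Example (illustrative): for `for i in range(10)` the three returned nodes
# correspond to
#   i = 0            # init_stmt
#   i + 1 <= 10      # cond_stmt (evaluated before each iteration)
#   i += 1           # change_stmt (appended to the loop body)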
def get_for_stmt_nodes(self, node):
# TODO: consider for - else in python
if not self.name_visitor.is_control_flow_loop(node):
return [node]
# TODO: support non-range case
range_call_node = self.get_for_range_node(node)
if range_call_node is None:
return [node]
if not isinstance(node.target, gast.Name):
return [node]
iter_var_name = node.target.id
init_stmt, cond_stmt, change_stmt = self.get_for_args_stmts(
iter_var_name, range_call_node.args)
loop_var_names, create_var_names = self.name_visitor.get_loop_var_names(
node)
new_stmts = []
# Python can create variable in loop and use it out of loop, E.g.
#
# for x in range(10):
# y += x
# print(x) # x = 10
#
# We need to create static variable for those variables
for name in create_var_names:
new_stmts.append(create_static_variable_gast_node(name))
new_stmts.append(init_stmt)
# for x in range(10) in dygraph should be converted into static tensor + 1 <= 10
for name in loop_var_names:
new_stmts.append(to_static_variable_gast_node(name))
condition_func_node = gast.FunctionDef(
name=unique_name.generate(FOR_CONDITION_PREFIX),
args=gast.arguments(
args=[
gast.Name(
id=name,
ctx=gast.Param(),
annotation=None,
type_comment=None) for name in loop_var_names
],
posonlyargs=[],
vararg=None,
kwonlyargs=[],
kw_defaults=None,
kwarg=None,
defaults=[]),
body=[gast.Return(value=cond_stmt)],
decorator_list=[],
returns=None,
type_comment=None)
new_stmts.append(condition_func_node)
new_body = node.body
new_body.append(change_stmt)
new_body.append(
gast.Return(value=generate_name_node(
loop_var_names, ctx=gast.Load())))
body_func_node = gast.FunctionDef(
name=unique_name.generate(FOR_BODY_PREFIX),
args=gast.arguments(
args=[
gast.Name(
id=name,
ctx=gast.Param(),
annotation=None,
type_comment=None) for name in loop_var_names
],
posonlyargs=[],
vararg=None,
kwonlyargs=[],
kw_defaults=None,
kwarg=None,
defaults=[]),
body=new_body,
decorator_list=[],
returns=None,
type_comment=None)
new_stmts.append(body_func_node)
while_loop_node = create_while_node(condition_func_node.name,
body_func_node.name, loop_var_names)
new_stmts.append(while_loop_node)
return new_stmts
def get_while_stmt_nodes(self, node):
# TODO: consider while - else in python
if not self.name_visitor.is_control_flow_loop(node):
return [node]
loop_var_names, create_var_names = self.name_visitor.get_loop_var_names(
node)
new_stmts = []
# Python can create variable in loop and use it out of loop, E.g.
#
# while x < 10:
# x += 1
# y = x
# z = y
#
# We need to create static variable for those variables
for name in create_var_names:
new_stmts.append(create_static_variable_gast_node(name))
# while x < 10 in dygraph should be converted into static tensor < 10
for name in loop_var_names:
new_stmts.append(to_static_variable_gast_node(name))
logical_op_transformer = LogicalOpTransformer(node.test)
cond_value_node = logical_op_transformer.transform()
condition_func_node = gast.FunctionDef(
name=unique_name.generate(WHILE_CONDITION_PREFIX),
args=gast.arguments(
args=[
gast.Name(
id=name,
ctx=gast.Param(),
annotation=None,
type_comment=None) for name in loop_var_names
],
posonlyargs=[],
vararg=None,
kwonlyargs=[],
kw_defaults=None,
kwarg=None,
defaults=[]),
body=[gast.Return(value=cond_value_node)],
decorator_list=[],
returns=None,
type_comment=None)
new_stmts.append(condition_func_node)
new_body = node.body
new_body.append(
gast.Return(value=generate_name_node(
loop_var_names, ctx=gast.Load())))
body_func_node = gast.FunctionDef(
name=unique_name.generate(WHILE_BODY_PREFIX),
args=gast.arguments(
args=[
gast.Name(
id=name,
ctx=gast.Param(),
annotation=None,
type_comment=None) for name in loop_var_names
],
posonlyargs=[],
vararg=None,
kwonlyargs=[],
kw_defaults=None,
kwarg=None,
defaults=[]),
body=new_body,
decorator_list=[],
returns=None,
type_comment=None)
new_stmts.append(body_func_node)
while_loop_node = create_while_node(condition_func_node.name,
body_func_node.name, loop_var_names)
new_stmts.append(while_loop_node)
return new_stmts
|
"""
Test module for NotFollowerTwFriend model
"""
from django.test import TestCase
from ..models import NotFollowerTwFriend
# Create your tests here.
class NotFollowerTwFriendTestCase(TestCase):
"""
Test class for NotFollowerTwFriend model
"""
def setUp(self):
NotFollowerTwFriend.objects.create(
id_str='123456789',
screen_name='tw_user',
name='Twitter User',
created_at='Mon Jan 01 00:00:00 +0000 2018'
)
def test_create_not_follower_tw_friend(self):
self.assertEqual(NotFollowerTwFriend.objects.count(), 1)
self.assertEqual(NotFollowerTwFriend.objects.get().id_str, '123456789')
self.assertEqual(NotFollowerTwFriend.objects.get().screen_name, 'tw_user')
self.assertEqual(NotFollowerTwFriend.objects.get().name, 'Twitter User')
self.assertEqual(NotFollowerTwFriend.objects.get().description, '')
self.assertEqual(NotFollowerTwFriend.objects.get().statuses_count, 0)
self.assertEqual(NotFollowerTwFriend.objects.get().followers_count, 0)
self.assertEqual(NotFollowerTwFriend.objects.get().friends_count, 0)
self.assertEqual(
NotFollowerTwFriend.objects.get().created_at, 'Mon Jan 01 00:00:00 +0000 2018')
self.assertEqual(NotFollowerTwFriend.objects.get().location, '')
self.assertEqual(NotFollowerTwFriend.objects.get().avg_tweetsperday, 0.00)
self.assertEqual(NotFollowerTwFriend.objects.get().tff_ratio, 0.00)
self.assertEqual(NotFollowerTwFriend.objects.get().need_unfollow, True)
|
from flask import url_for
from app.questionnaire.rules import evaluate_skip_conditions
from app.templating.summary.question import Question
class Block:
def __init__(self, block_schema, group_id, answer_store, metadata, schema, group_instance):
self.id = block_schema['id']
self.title = block_schema.get('title')
self.number = block_schema.get('number')
self.link = self._build_link(block_schema, group_id, metadata, group_instance)
self.questions = self._build_questions(block_schema, answer_store, metadata, schema, group_instance)
@staticmethod
def _build_link(block_schema, group_id, metadata, group_instance):
return url_for('questionnaire.get_block',
eq_id=metadata['eq_id'],
form_type=metadata['form_type'],
collection_id=metadata['collection_exercise_sid'],
group_id=group_id,
group_instance=group_instance,
block_id=block_schema['id'])
@staticmethod
def _build_questions(block_schema, answer_store, metadata, schema, group_instance):
questions = []
for question_schema in block_schema.get('questions', []):
is_skipped = evaluate_skip_conditions(question_schema.get('skip_conditions'), schema, metadata, answer_store)
if not is_skipped:
question = Question(question_schema, answer_store, metadata, schema, group_instance).serialize()
questions.append(question)
return questions
def serialize(self):
return {
'id': self.id,
'title': self.title,
'number': self.number,
'link': self.link,
'questions': self.questions,
}
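# Example (illustrative, with made-up values) of the serialized shape:
#   {
#       'id': 'household-block',
#       'title': 'Household',
#       'number': None,
#       'link': <url returned by questionnaire.get_block>,
#       'questions': [<serialized Question dicts>],
#   }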
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# Author Cleoner S. Pietralonga
# e-mail: cleonerp@gmail.com
# Apache License
from cmath import *
from logicqubit.hilbert import *
"""
In this class, the numerical definition of operators is performed,
and the quantum gates methods performs the tensor product with the matrices in the correct order.
It is necessary to enter the qubit id as an input parameter.
"""
class Gates(Hilbert):
def __init__(self, number_of_qubits=1):
self.__number_of_qubits = number_of_qubits
# basic matrices for the generation of operators
# .......................................
def ID(self):
M = Matrix([[1, 0], [0, 1]], self.getCuda())
return M
def P0(self):
M = Matrix([[1, 0], [0, 0]], self.getCuda()) # |0><0|
return M
def P1(self):
M = Matrix([[0, 0], [0, 1]], self.getCuda()) # |1><1|
return M
def L0(self):
M = Matrix([[0, 1], [0, 0]], self.getCuda()) # |0><1|
return M
def L1(self):
M = Matrix([[0, 0], [1, 0]], self.getCuda()) # |1><0|
return M
# One qubit gates
# input parameters: target
# .......................................
def X(self, target=1):
M = Matrix([[0, 1], [1, 0]], self.getCuda())
list = self.getOrdListSimpleGate(target, M)
operator = self.kronProduct(list)
return operator
def Y(self, target=1):
M = Matrix([[0, -1j], [1j, 0]], self.getCuda())
list = self.getOrdListSimpleGate(target, M)
operator = self.kronProduct(list)
return operator
def Z(self, target=1):
M = Matrix([[1, 0], [0, -1]], self.getCuda())
list = self.getOrdListSimpleGate(target, M)
operator = self.kronProduct(list)
return operator
def V(self, target=1, adjoint=False):
M = Matrix([[1, -1j], [-1j, 1]], self.getCuda()) * ((1j + 1) / 2) # sqrt(X) or sqrt(NOT)
if adjoint:
M = M.adjoint()
list = self.getOrdListSimpleGate(target, M)
operator = self.kronProduct(list)
return operator
def S(self, target=1, adjoint=False):
M = Matrix([[1, 0], [0, 1j]], self.getCuda()) # sqrt(Z)
if adjoint:
M = M.adjoint()
list = self.getOrdListSimpleGate(target, M)
operator = self.kronProduct(list)
return operator
def T(self, target=1, adjoint=False):
M = Matrix([[1, 0], [0, (1 + 1j) / sqrt(2)]], self.getCuda()) # sqrt(S)
if adjoint:
M = M.adjoint()
list = self.getOrdListSimpleGate(target, M)
operator = self.kronProduct(list)
return operator
def H(self, target=1):
M = Matrix([[1, 1], [1, -1]], self.getCuda()) * (1 / sqrt(2))
list = self.getOrdListSimpleGate(target, M)
operator = self.kronProduct(list)
return operator
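# Example (illustrative, assuming isFirstLeft() is True and kronProduct()
# multiplies the list left to right, as provided by the Hilbert base class):
# with 3 qubits, H(target=2) builds the list [ID, H, ID], i.e. the operator
# I (x) H (x) I acting only on qubit 2.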
def U(self, target, *argv): # U or theta, phi and _lambda
if len(argv) == 1:
M = Matrix(argv[0][0], self.getCuda())
else:
theta = argv[0]
phi = argv[1]
_lambda = argv[2]
M = Matrix(
[[exp(-1j * (phi + _lambda) / 2) * cos(theta / 2), -exp(-1j * (phi - _lambda) / 2) * sin(theta / 2)],
# second row is the unitary completion of the first row
[exp(1j * (phi - _lambda) / 2) * sin(theta / 2), exp(1j * (phi + _lambda) / 2) * cos(theta / 2)]],
self.getCuda())
list = self.getOrdListSimpleGate(target, M)
operator = self.kronProduct(list)
return operator
def U3(self, target, theta, phi, _lambda):
M = Matrix([[cos(theta / 2), -exp(1j * _lambda) * sin(theta / 2)],
[exp(1j * phi) * sin(theta / 2), exp(1j * (phi + _lambda)) * cos(theta / 2)]], self.getCuda())
list = self.getOrdListSimpleGate(target, M)
operator = self.kronProduct(list)
return operator
def U2(self, target, phi, _lambda):
M = Matrix([[1, -exp(1j * _lambda)], [exp(1j * phi), exp(1j * (phi + _lambda))]], self.getCuda())
list = self.getOrdListSimpleGate(target, M)
operator = self.kronProduct(list)
return operator
def U1(self, target, _lambda):
M = Matrix([[1, 0], [0, exp(1j * _lambda)]], self.getCuda())
list = self.getOrdListSimpleGate(target, M)
operator = self.kronProduct(list)
return operator
def RX(self, target, theta):
M = Matrix([[cos(theta / 2), -1j * sin(theta / 2)],
[-1j * sin(theta / 2), cos(theta / 2)]], self.getCuda())
list = self.getOrdListSimpleGate(target, M)
operator = self.kronProduct(list)
return operator
def RY(self, target, theta):
M = Matrix([[cos(theta / 2), -sin(theta / 2)],
[sin(theta / 2), cos(theta / 2)]], self.getCuda())
list = self.getOrdListSimpleGate(target, M)
operator = self.kronProduct(list)
return operator
def RZ(self, target, phi):
M = Matrix([[exp(-1j * phi / 2), 0], [0, exp(1j * phi / 2)]], self.getCuda())
list = self.getOrdListSimpleGate(target, M)
operator = self.kronProduct(list)
return operator
# Two qubit gates
# input parameters: control and target
# .......................................
def CX(self, control, target):
M = Matrix([[0, 1], [1, 0]], self.getCuda()) # X
list1, list2 = self.getOrdListCtrlGate(control, target, M)
operator = self.kronProduct(list1) + self.kronProduct(list2)
return operator
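# Example (illustrative, first qubit leftmost): CX(control=1, target=2) on two
# qubits sums kron([P0, ID]) and kron([P1, X]), i.e.
#   |0><0| (x) I + |1><1| (x) X
# which is the standard CNOT matrix.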
def CNOT(self, control, target):
return self.CX(control, target)
def CY(self, control, target):
M = Matrix([[0, -1j], [1j, 0]], self.getCuda())
list1, list2 = self.getOrdListCtrlGate(control, target, M)
operator = self.kronProduct(list1) + self.kronProduct(list2)
return operator
def CZ(self, control, target):
M = Matrix([[1, 0], [0, -1]], self.getCuda())
list1, list2 = self.getOrdListCtrlGate(control, target, M)
operator = self.kronProduct(list1) + self.kronProduct(list2)
return operator
def CV(self, control, target, adjoint=False):
M = Matrix([[1, -1j], [-1j, 1]], self.getCuda()) * ((1j + 1) / 2) # sqrt(X) or sqrt(NOT)
if adjoint:
M = M.adjoint()
list1, list2 = self.getOrdListCtrlGate(control, target, M)
operator = self.kronProduct(list1) + self.kronProduct(list2)
return operator
def CS(self, control, target, adjoint=False):
M = Matrix([[1, 0], [0, 1j]], self.getCuda()) # sqrt(Z)
if adjoint:
M = M.adjoint()
list1, list2 = self.getOrdListCtrlGate(control, target, M)
operator = self.kronProduct(list1) + self.kronProduct(list2)
return operator
def CT(self, control, target, adjoint=False):
M = Matrix([[1, 0], [0, (1 + 1j) / sqrt(2)]], self.getCuda()) # sqrt(S)
if adjoint:
M = M.adjoint()
list1, list2 = self.getOrdListCtrlGate(control, target, M)
operator = self.kronProduct(list1) + self.kronProduct(list2)
return operator
def CRX(self, control, target, theta):
M = Matrix([[cos(theta / 2), -1j * sin(theta / 2)],
[-1j * sin(theta / 2), cos(theta / 2)]], self.getCuda())
list1, list2 = self.getOrdListCtrlGate(control, target, M)
operator = self.kronProduct(list1) + self.kronProduct(list2)
return operator
def CRY(self, control, target, theta):
M = Matrix([[cos(theta / 2), -sin(theta / 2)],
[sin(theta / 2), cos(theta / 2)]], self.getCuda())
list1, list2 = self.getOrdListCtrlGate(control, target, M)
operator = self.kronProduct(list1) + self.kronProduct(list2)
return operator
def CRZ(self, control, target, phi):
M = Matrix([[exp(-1j * phi / 2), 0], [0, exp(1j * phi / 2)]], self.getCuda())
list1, list2 = self.getOrdListCtrlGate(control, target, M)
operator = self.kronProduct(list1) + self.kronProduct(list2)
return operator
# generic controlled gate
def CU(self, control, target, *argv): # U or theta, phi and _lambda
if len(argv) == 1:
M = Matrix(argv[0][0], self.getCuda())
else:
theta = argv[0]
phi = argv[1]
_lambda = argv[2]
M = Matrix(
[[exp(-1j * (phi + _lambda) / 2) * cos(theta / 2), -exp(-1j * (phi - _lambda) / 2) * sin(theta / 2)],
# /2 included in the (phi + _lambda) phase so the controlled block is unitary
[exp(1j * (phi - _lambda) / 2) * sin(theta / 2), exp(1j * (phi + _lambda) / 2) * cos(theta / 2)]],
self.getCuda())
list1, list2 = self.getOrdListCtrlGate(control, target, M)
operator = self.kronProduct(list1) + self.kronProduct(list2)
return operator
def CU3(self, control, target, theta, phi, _lambda):
M = Matrix([[cos(theta / 2), -exp(1j * _lambda) * sin(theta / 2)],
[exp(1j * phi) * sin(theta / 2), exp(1j * (phi + _lambda)) * cos(theta / 2)]], self.getCuda())
list1, list2 = self.getOrdListCtrlGate(control, target, M)
operator = self.kronProduct(list1) + self.kronProduct(list2)
return operator
def CU2(self, control, target, phi, _lambda):
M = Matrix([[1, -exp(1j * _lambda)], [exp(1j * phi), exp(1j * (phi + _lambda))]], self.getCuda())
list1, list2 = self.getOrdListCtrlGate(control, target, M)
operator = self.kronProduct(list1) + self.kronProduct(list2)
return operator
def CU1(self, control, target, _lambda):
M = Matrix([[1, 0], [0, exp(1j * _lambda)]], self.getCuda())
list1, list2 = self.getOrdListCtrlGate(control, target, M)
operator = self.kronProduct(list1) + self.kronProduct(list2)
return operator
# swaps the states of two qubits
def SWAP(self, target1, target2):
list1, list2, list3, list4 = self.getOrdListSWAP(target1, target2)
operator = self.kronProduct(list1) + self.kronProduct(list2) + self.kronProduct(list3) + self.kronProduct(list4)
return operator
# Three qubit gates
# input parameters: control1, control2, and target
# .......................................
def CCX(self, control1, control2, target):
Gate = Matrix([[0, 1], [1, 0]], self.getCuda()) - self.ID()
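        # X - I: together with the all-identity term this yields I + |11><11| (x) (X - I), i.e. the Toffoli operator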
list1, list2 = self.getOrdListCtrl2Gate(control1, control2, target, Gate)
operator = self.kronProduct(list1) + self.kronProduct(list2)
return operator
def Toffoli(self, control1, control2, target):
return self.CCX(control1, control2, target)
# it's a controlled SWAP
def Fredkin(self, control, target1, target2):
list1, list2, list3, list4, list5, list6 = self.getOrdListFredkin(control, target1, target2)
ID = self.kronProduct(list1)
P1_SWAP = self.kronProduct(list2) + self.kronProduct(list3) + self.kronProduct(list4) + self.kronProduct(list5)
P1_ID = self.kronProduct(list6)
operator = ID + (P1_SWAP-P1_ID)
return operator
    # orders the matrices for the tensor product of single-qubit operations
def getOrdListSimpleGate(self, target, Gate):
list = []
if self.isFirstLeft():
plist = range(1, self.__number_of_qubits + 1)
else:
plist = reversed(range(1, self.__number_of_qubits + 1))
for i in plist:
if i == target:
list.append(Gate)
else:
list.append(Matrix([[1, 0], [0, 1]], self.getCuda()))
return list
    # orders the matrices for the tensor product of two-qubit operations
def getOrdListCtrlGate(self, control, target, Gate):
list1 = []
list2 = []
if self.isFirstLeft():
plist = range(1, self.__number_of_qubits + 1)
else:
plist = reversed(range(1, self.__number_of_qubits + 1))
for i in plist:
if i == control:
list1.append(self.P0()) # |0><0|
list2.append(self.P1()) # |1><1|
elif i == target:
list1.append(self.ID())
list2.append(Gate)
else:
list1.append(self.ID())
list2.append(self.ID())
return list1, list2
    # orders the matrices for the tensor product of three-qubit operations
def getOrdListCtrl2Gate(self, control1, control2, target, Gate):
list1 = []
list2 = []
if self.isFirstLeft():
plist = range(1, self.__number_of_qubits + 1)
else:
plist = reversed(range(1, self.__number_of_qubits + 1))
for i in plist:
if i == control1 or i == control2:
list1.append(self.ID())
list2.append(self.P1()) # |1><1|
elif i == target:
list1.append(self.ID())
list2.append(Gate)
else:
list1.append(self.ID())
list2.append(self.ID())
return list1, list2
# orders the matrices for the tensor product of the SWAP gate operation
def getOrdListSWAP(self, target1, target2):
list1 = []
list2 = []
list3 = []
list4 = []
if self.isFirstLeft():
plist = range(1, self.__number_of_qubits + 1)
else:
plist = reversed(range(1, self.__number_of_qubits + 1))
for i in plist:
if i == target1:
list1.append(self.P0()) # |0><0|
list2.append(self.L0()) # |0><1|
list3.append(self.L1()) # |1><0|
list4.append(self.P1()) # |1><1|
elif i == target2:
list1.append(self.P0()) # |0><0|
list2.append(self.L1()) # |1><0|
list3.append(self.L0()) # |0><1|
list4.append(self.P1()) # |1><1|
else:
list1.append(self.ID())
list2.append(self.ID())
list3.append(self.ID())
list4.append(self.ID())
return list1, list2, list3, list4
# orders the matrices for the tensor product of the Fredkin gate operation
def getOrdListFredkin(self, control, target1, target2):
list1 = []
list2 = []
list3 = []
list4 = []
list5 = []
list6 = []
if self.isFirstLeft():
plist = range(1, self.__number_of_qubits + 1)
else:
plist = reversed(range(1, self.__number_of_qubits + 1))
for i in plist:
if i == control:
list1.append(self.ID()) # ID
list2.append(self.P1()) # SWAP P0xP0
list3.append(self.P1()) # SWAP L0xL1
list4.append(self.P1()) # SWAP L1xL0
list5.append(self.P1()) # SWAP P1xP1
list6.append(self.P1()) # -ID
elif i == target1:
list1.append(self.ID())
list2.append(self.P0()) # |0><0|
list3.append(self.L0()) # |0><1|
list4.append(self.L1()) # |1><0|
list5.append(self.P1()) # |1><1|
list6.append(self.ID())
elif i == target2:
list1.append(self.ID())
list2.append(self.P0()) # |0><0|
list3.append(self.L1()) # |1><0|
list4.append(self.L0()) # |0><1|
list5.append(self.P1()) # |1><1|
list6.append(self.ID())
else:
list1.append(self.ID())
list2.append(self.ID())
list3.append(self.ID())
list4.append(self.ID())
list5.append(self.ID())
list6.append(self.ID())
return list1, list2, list3, list4, list5, list6
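# --- Hypothetical usage sketch (not part of the original class) ---
# Every controlled gate above is assembled from the same decomposition:
# P0 (x) I (x) ... + P1 (x) ... (x) Gate, i.e. act as the identity when the
# control qubit is |0> and apply Gate when it is |1>; SWAP is likewise the sum
# P0(x)P0 + L0(x)L1 + L1(x)L0 + P1(x)P1. A minimal NumPy check of both:
if __name__ == "__main__":
    import numpy as np
    P0 = np.array([[1, 0], [0, 0]])   # |0><0|
    P1 = np.array([[0, 0], [0, 1]])   # |1><1|
    L0 = np.array([[0, 1], [0, 0]])   # |0><1|
    L1 = np.array([[0, 0], [1, 0]])   # |1><0|
    I2 = np.eye(2)
    X = np.array([[0, 1], [1, 0]])
    cnot = np.kron(P0, I2) + np.kron(P1, X)
    assert np.allclose(cnot, [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
    swap = np.kron(P0, P0) + np.kron(L0, L1) + np.kron(L1, L0) + np.kron(P1, P1)
    assert np.allclose(swap, [[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])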
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible_collections.willguibr.zpacloud.plugins.module_utils.zpa_client import (
ZPAClientHelper,
delete_none,
)
class TrustedNetworksService:
def __init__(self, module, customer_id):
self.module = module
self.customer_id = customer_id
self.rest = ZPAClientHelper(module)
def getByIDOrName(self, id, name):
network = None
if id is not None:
network = self.getByID(id)
if network is None and name is not None:
network = self.getByName(name)
return network
def getByID(self, id):
response = self.rest.get(
"/mgmtconfig/v1/admin/customers/%s/network/%s" % (self.customer_id, id)
)
status_code = response.status_code
if status_code != 200:
return None
return self.mapRespJSONToApp(response.json)
def getAll(self):
list = self.rest.get_paginated_data(
base_url="/mgmtconfig/v2/admin/customers/%s/network" % (self.customer_id),
data_key_name="list",
)
networks = []
for network in list:
networks.append(self.mapRespJSONToApp(network))
return networks
def getByName(self, name):
networks = self.getAll()
for network in networks:
if network.get("name") == name:
return network
return None
@delete_none
def mapRespJSONToApp(self, resp_json):
if resp_json is None:
return {}
return {
"creation_time": resp_json.get("creationTime"),
"domain": resp_json.get("domain"),
"id": resp_json.get("id"),
"master_customer_id": resp_json.get("masterCustomerId"),
"modified_by": resp_json.get("modifiedBy"),
"modified_time": resp_json.get("modifiedTime"),
"name": resp_json.get("name"),
"network_id": resp_json.get("networkId"),
"zscaler_cloud": resp_json.get("zscalerCloud"),
}
@delete_none
def mapAppToJSON(self, network):
if network is None:
return {}
return {
"creationTime": network.get("creation_time"),
"domain": network.get("domain"),
"id": network.get("id"),
"masterCustomerId": network.get("master_customer_id"),
"modifiedBy": network.get("modified_by"),
"modifiedTime": network.get("modified_time"),
"name": network.get("name"),
"networkId": network.get("network_id"),
"zscalerCloud": network.get("zscaler_cloud"),
}
|
from django.contrib import admin
from .models import BoardGame, Participant, Event
# Register your models here.
admin.site.register(BoardGame)
admin.site.register(Participant)
admin.site.register(Event)
|
# ------------------------------------------------------------------------------
# CodeHawk Binary Analyzer
# Author: Henny Sipma
# ------------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2021 Aarno Labs LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ------------------------------------------------------------------------------
"""Represents a shared memory segment created by shmget.
From: https://pubs.opengroup.org/onlinepubs/9699919799/
int shmget(key_t key, size_t size, int shmflg);
The shmget() function shall return the shared memory identifier associated with key.
A shared memory identifier, associated data structure, and shared memory segment of
at least size bytes are created for key if one of the following is true:
- The argument key is equal to IPC_PRIVATE ( (key_t) 0).
- The argument key does not already have a shared memory identifier associated with
  it and (shmflg & IPC_CREAT) is non-zero (#define IPC_CREAT 0001000)
Upon successful completion, shmget() shall return a non-negative integer, namely a
shared memory identifier.
"""
from typing import Dict, List, Optional, TYPE_CHECKING
from chb.simulation.SimMemory import SimMemory
import chb.simulation.SimSymbolicValue as SSV
import chb.simulation.SimUtil as SU
import chb.simulation.SimValue as SV
import chb.util.fileutil as UF
if TYPE_CHECKING:
from chb.simulation.SimulationState import SimulationState
class SimSharedMemory(SimMemory):
def __init__(
self,
simstate: "SimulationState",
shmid: int,
key: str, # hex value
buffersize: int = 4096) -> None:
SimMemory.__init__(self, simstate, True, "shared:" + str(shmid))
self._shmid = shmid
self._buffersize = buffersize
# segments may be mapped in multiple locations
self._baseoffsets: List[int] = []
@property
def simstate(self) -> "SimulationState":
return self._simstate
@property
def shmid(self) -> int:
return self._shmid
@property
def bigendian(self) -> bool:
return self.simstate.bigendian
@property
def has_offset(self) -> bool:
return len(self._baseoffsets) > 0
@property
def baseoffsets(self) -> List[int]:
return self._baseoffsets
def set_baseoffset(self, offset: int) -> None:
self._baseoffsets.append(offset)
@property
def buffersize(self) -> int:
return self._buffersize
def has_address(self, addr: int) -> bool:
for offset in self.baseoffsets:
if addr >= offset and addr < offset + self.buffersize:
return True
else:
return False
def initialize(self, iaddr: str):
addr = SSV.mk_global_address(0, "shared")
for i in range(0, self.buffersize):
SimMemory.set(self, iaddr, addr.add_offset(i), SV.simZerobyte)
def set(self,
iaddr: str,
address: SSV.SimAddress,
srcval: SV.SimValue) -> None:
for base in self.baseoffsets:
if (
address.offsetvalue >= base
and address.offsetvalue < base + self.buffersize):
address = address.add_offset(-base)
SimMemory.set(self, iaddr, address, srcval)
break
else:
raise SU.CHBSimError(
self.simstate, iaddr, "Invalid shared memory address: " + str(address))
def get(self,
iaddr: str,
address: SSV.SimAddress,
size: int) -> SV.SimValue:
try:
for base in self.baseoffsets:
if (
address.offsetvalue >= base
and address.offsetvalue < base + self.buffersize):
address = address.add_offset(-base)
try:
memval = SimMemory.get(self, iaddr, address, size)
except SU.CHBSimError:
memval = SV.mk_simvalue(0, size=size)
return memval
else:
raise SU.CHBSimError(
self.simstate,
iaddr,
"invalid shared memory address: " + str(address))
except SU.CHBSimError as e:
print("Error in shared memory: " + str(e))
name = (self.name
+ '['
+ str(address.offsetvalue)
+ ']'
+ ' (value not retrieved: '
+ str(e)
+ ')')
return SSV.SimSymbol(name)
def __str__(self) -> str:
lines: List[str] = []
if self.has_offset:
try:
for a in range(0, self.buffersize, 4):
if a in self._mem:
address = self.mk_address(a)
try:
charstring = self.char_string("", address, 4)
except UF.CHBError:
charstring = "?"
memval = SimMemory.get(self, "0", address, 4)
lines.append(str(hex(a)).rjust(12)
+ " " + str(a).rjust(12)
+ " " + str(memval)
+ " " + str(charstring))
except Exception:
pass
return "\n".join(lines)
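# --- Hypothetical sketch (not part of the original module): the shmget() call
# described in the module docstring, issued directly against libc via ctypes.
# Linux-only; IPC_PRIVATE is (key_t) 0 and IPC_CREAT is 0o1000 as quoted above.
if __name__ == "__main__":
    import ctypes
    libc = ctypes.CDLL(None, use_errno=True)
    libc.shmget.argtypes = [ctypes.c_int, ctypes.c_size_t, ctypes.c_int]
    IPC_PRIVATE = 0
    IPC_RMID = 0
    shmid = libc.shmget(IPC_PRIVATE, 4096, 0o600)  # private 4 KiB segment
    print("shmget returned shared memory identifier:", shmid)
    if shmid >= 0:
        libc.shmctl(shmid, IPC_RMID, None)  # remove the segment again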
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from compas.datastructures.mesh.smoothing import mesh_smooth_area
from compas.datastructures.mesh.operations import trimesh_collapse_edge
from compas.datastructures.mesh.operations import trimesh_swap_edge
from compas.datastructures.mesh.operations import trimesh_split_edge
__all__ = [
'trimesh_remesh',
]
def trimesh_remesh(mesh,
target,
kmax=100,
tol=0.1,
divergence=0.01,
verbose=False,
allow_boundary_split=False,
allow_boundary_swap=False,
allow_boundary_collapse=False,
smooth=True,
fixed=None,
callback=None,
callback_args=None):
"""Remesh until all edges have a specified target length.
Parameters
----------
mesh : Mesh
A triangle mesh.
target : float
The target length for the mesh edges.
kmax : int, optional [100]
The number of iterations.
tol : float, optional [0.1]
Length deviation tolerance.
divergence : float, optional [0.01]
        Stop criterion: break once the relative change in vertex count between checks drops below this value (only checked after half of ``kmax`` iterations).
verbose : bool, optional [False]
Print feedback messages.
allow_boundary_split : bool, optional [False]
Allow boundary edges to be split.
allow_boundary_swap : bool, optional [False]
Allow boundary edges or edges connected to the boundary to be swapped.
allow_boundary_collapse : bool, optional [False]
Allow boundary edges or edges connected to the boundary to be collapsed.
smooth : bool, optional [True]
Apply smoothing at every iteration.
fixed : list, optional [None]
A list of vertices that have to stay fixed.
callback : callable, optional [None]
A user-defined function that is called after every iteration.
callback_args : list, optional [None]
A list of additional parameters to be passed to the callback function.
Returns
-------
None
Notes
-----
This algorithm not only changes the geometry of the mesh, but also its
topology as needed to achieve the specified target lengths.
Topological changes are made such that vertex valencies are well-balanced
and close to six. This involves three operations:
* split edges that are longer than a maximum length,
* collapse edges that are shorter than a minimum length,
* swap edges if this improves the valency error.
The minimum and maximum lengths are calculated based on a desired target
length.
For more info, see [1]_.
References
----------
.. [1] Botsch, M. & Kobbelt, L., 2004. *A remeshing approach to multiresolution modeling*.
Proceedings of the 2004 Eurographics/ACM SIGGRAPH symposium on Geometry processing - SGP '04, p.185.
Available at: http://portal.acm.org/citation.cfm?doid=1057432.1057457.
Examples
--------
>>>
"""
if verbose:
print(target)
lmin = (1 - tol) * (4.0 / 5.0) * target
lmax = (1 + tol) * (4.0 / 3.0) * target
edge_lengths = [mesh.edge_length(u, v) for u, v in mesh.edges()]
target_start = max(edge_lengths) / 2.0
fac = target_start / target
boundary = set(mesh.vertices_on_boundary())
fixed = fixed or []
fixed = set(fixed)
count = 0
kmax_start = kmax / 2.0
for k in range(kmax):
if k <= kmax_start:
scale = fac * (1.0 - k / kmax_start)
dlmin = lmin * scale
dlmax = lmax * scale
else:
dlmin = 0
dlmax = 0
if verbose:
print(k)
count += 1
if k % 20 == 0:
num_vertices_1 = mesh.number_of_vertices()
# split
if count == 1:
visited = set()
for u, v in list(mesh.edges()):
if u in visited or v in visited:
continue
if mesh.edge_length(u, v) <= lmax + dlmax:
continue
if verbose:
print('split edge: {0} - {1}'.format(u, v))
trimesh_split_edge(mesh, u, v, allow_boundary=allow_boundary_split)
visited.add(u)
visited.add(v)
# collapse
elif count == 2:
visited = set()
for u, v in list(mesh.edges()):
if u in visited or v in visited:
continue
if mesh.edge_length(u, v) >= lmin - dlmin:
continue
if verbose:
print('collapse edge: {0} - {1}'.format(u, v))
trimesh_collapse_edge(mesh, u, v, allow_boundary=allow_boundary_collapse, fixed=fixed)
visited.add(u)
visited.add(v)
visited.update(mesh.halfedge[u])
# swap
elif count == 3:
visited = set()
for u, v in list(mesh.edges()):
if u in visited or v in visited:
continue
f1 = mesh.halfedge[u][v]
f2 = mesh.halfedge[v][u]
if f1 is None or f2 is None:
continue
face1 = mesh.face[f1]
face2 = mesh.face[f2]
v1 = face1[face1.index(u) - 1]
v2 = face2[face2.index(v) - 1]
valency1 = mesh.vertex_degree(u)
valency2 = mesh.vertex_degree(v)
valency3 = mesh.vertex_degree(v1)
valency4 = mesh.vertex_degree(v2)
if u in boundary:
valency1 += 2
if v in boundary:
valency2 += 2
if v1 in boundary:
valency3 += 2
if v2 in boundary:
valency4 += 2
current_error = abs(valency1 - 6) + abs(valency2 - 6) + abs(valency3 - 6) + abs(valency4 - 6)
flipped_error = abs(valency1 - 7) + abs(valency2 - 7) + abs(valency3 - 5) + abs(valency4 - 5)
if current_error <= flipped_error:
continue
if verbose:
print('swap edge: {0} - {1}'.format(u, v))
trimesh_swap_edge(mesh, u, v, allow_boundary=allow_boundary_swap)
visited.add(u)
visited.add(v)
# count
else:
count = 0
if (k - 10) % 20 == 0:
num_vertices_2 = mesh.number_of_vertices()
if abs(1 - num_vertices_1 / num_vertices_2) < divergence and k > kmax_start:
break
# smoothen
if smooth:
if allow_boundary_split:
boundary = set(mesh.vertices_on_boundary())
mesh_smooth_area(mesh, fixed=fixed.union(boundary), kmax=1)
# callback
if callback:
callback(mesh, k, callback_args)
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
pass
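    # Hypothetical worked example (not part of the original module): the split and
    # collapse thresholds used by trimesh_remesh follow Botsch & Kobbelt, i.e.
    # lmin = (1 - tol) * 4/5 * target and lmax = (1 + tol) * 4/3 * target.
    target, tol = 1.0, 0.1
    lmin = (1 - tol) * (4.0 / 5.0) * target   # 0.72
    lmax = (1 + tol) * (4.0 / 3.0) * target   # ~1.47
    print("split edges longer than", lmax, "collapse edges shorter than", lmin)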
|
import unittest
from unittest.mock import MagicMock
from samsung_multiroom.service import REPEAT_ALL
from samsung_multiroom.service import REPEAT_OFF
from samsung_multiroom.service.tunein import TuneInPlayer
def _get_player():
api = MagicMock()
api.get_preset_list.return_value = [
{
'kind': 'speaker',
'title': 'Radio 1',
'description': 'Radio 1 description',
'thumbnail': 'http://radio1.org/thumbnail.png',
'contentid': '0',
'mediaid': '1111',
},
{
'kind': 'speaker',
'title': 'Radio 2',
'description': 'Radio 2 description',
'thumbnail': 'http://radio2.org/thumbnail.png',
'contentid': '1',
'mediaid': '2222',
},
{
'kind': 'speaker',
'title': 'Radio 3',
'description': 'Radio 3 description',
'thumbnail': 'http://radio3.org/thumbnail.png',
'contentid': '2',
'mediaid': '3333',
},
{
'kind': 'my',
'title': 'Radio 4',
'description': 'Radio 4 description',
'thumbnail': 'http://radio4.org/thumbnail.png',
'contentid': '3',
'mediaid': '4444',
},
{
'kind': 'my',
'title': 'Radio 5',
'description': 'Radio 5 description',
'thumbnail': 'http://radio5.org/thumbnail.png',
'contentid': '4',
'mediaid': '5555',
},
]
api.get_radio_info.return_value = {
'cpname': 'TuneIn',
'root': 'Favorites',
'presetindex': '0',
'title': 'Radio 1',
'description': 'Radio 1 description',
'thumbnail': 'http://radio1.org/thumbnail.png',
'mediaid': '1111',
'allowfeedback': '0',
'timestamp': '2018-12-28T18:07:07Z',
'no_queue': '1',
'playstatus': 'play',
}
player = TuneInPlayer(api)
return (player, api)
class TestTuneInPlayer(unittest.TestCase):
def test_is_supported(self):
player, api = _get_player()
self.assertTrue(player.is_play_supported())
self.assertFalse(player.is_jump_supported())
self.assertTrue(player.is_resume_supported())
self.assertFalse(player.is_stop_supported())
self.assertTrue(player.is_pause_supported())
self.assertTrue(player.is_next_supported())
self.assertTrue(player.is_previous_supported())
self.assertFalse(player.is_repeat_supported())
self.assertFalse(player.is_shuffle_supported())
def test_play(self):
playlist = [
type('Item', (object, ), {
'object_id': '1',
'object_type': 'some_type',
'title': 'title 1',
}),
type('Item', (object, ), {
'object_id': '2',
'object_type': 'tunein_radio',
'title': 'radio 2',
}),
type('Item', (object, ), {
'object_id': '3',
'object_type': 'tunein_radio',
'title': 'radio 3',
}),
type('Item', (object, ), {
'object_id': '4',
'object_type': 'some_type2',
'title': 'title 4',
})
]
player, api = _get_player()
player.play(playlist)
api.set_play_select.assert_called_once_with('2')
def test_play_returns_false_for_unsupported_playlist(self):
playlist = [
type('Item', (object, ), {
'object_id': '1',
'object_type': 'some_type',
'title': 'title 1',
}),
type('Item', (object, ), {
'object_id': '4',
'object_type': 'some_type2',
'title': 'title 4',
})
]
player, api = _get_player()
self.assertFalse(player.play(playlist))
api.set_play_select.assert_not_called()
def test_jump(self):
player, api = _get_player()
player.jump(50)
api.set_search_time.assert_not_called()
def test_resume(self):
player, api = _get_player()
player.resume()
api.set_select_radio.assert_called_once()
@unittest.skip('Pending implementation')
def test_stop(self):
player, api = _get_player()
player.stop()
def test_pause(self):
player, api = _get_player()
player.pause()
api.set_playback_control.assert_called_once_with('pause')
@unittest.mock.patch('samsung_multiroom.api.api._get_callable_parameters')
def test_next(self, signature):
signature.return_value = ['start_index', 'list_count']
player, api = _get_player()
player.next()
api.get_preset_list.assert_called_once_with(start_index=0, list_count=30)
api.get_radio_info.assert_called_once()
api.set_play_preset.assert_called_once_with(1, 1)
api.set_select_radio.assert_called_once()
@unittest.mock.patch('samsung_multiroom.api.api._get_callable_parameters')
def test_previous(self, signature):
signature.return_value = ['start_index', 'list_count']
player, api = _get_player()
player.previous()
api.get_preset_list.assert_called_once_with(start_index=0, list_count=30)
api.get_radio_info.assert_called_once()
api.set_play_preset.assert_called_once_with(0, 4)
api.set_select_radio.assert_called_once()
def test_repeat(self):
player, api = _get_player()
player.repeat(REPEAT_ALL)
api.set_repeat_mode.assert_not_called()
def test_shuffle(self):
player, api = _get_player()
player.shuffle(True)
api.set_shuffle_mode.assert_not_called()
def test_get_repeat(self):
player, api = _get_player()
repeat = player.get_repeat()
self.assertEqual(repeat, REPEAT_OFF)
api.get_repeat_mode.assert_not_called()
def test_get_shuffle(self):
player, api = _get_player()
shuffle = player.get_shuffle()
self.assertFalse(shuffle)
api.get_repeat_mode.assert_not_called()
def test_get_current_track(self):
player, api = _get_player()
track = player.get_current_track()
api.get_radio_info.assert_called_once()
self.assertEqual(track.title, 'Radio 1 description')
self.assertEqual(track.artist, 'Radio 1')
self.assertEqual(track.album, None)
self.assertEqual(track.duration, None)
self.assertEqual(track.position, None)
self.assertEqual(track.thumbnail_url, 'http://radio1.org/thumbnail.png')
self.assertEqual(track.object_id, None)
self.assertEqual(track.object_type, 'tunein_radio')
def test_is_active(self):
player, api = _get_player()
self.assertTrue(player.is_active('wifi', 'cp'))
self.assertFalse(player.is_active('wifi', 'dlna'))
self.assertFalse(player.is_active('bt'))
|
query += f"""
SELECT country,"""
for days in test_days_list:
query += f"""
P_alive_{days}d,
pcii_forecast_{days}d,
n_order_forecast_{days}d,
COALESCE(s{days}d.pcii_actual, 0) pcii_actual_{days}d,
CAST(COALESCE(s{days}d.n_order_actual, 0) AS DOUBLE) n_order_actual_{days}d,
CASE WHEN s{days}d.n_order_actual>0 THEN true ELSE false END is_alive_{days}d"""
query += """
FROM clv_inference ci"""
for days in test_days_list:
query += f"""
LEFT JOIN sales_{days}d s{days}d
ON ci.sk_customer = s{days}d.sk_customer"""
query += f"""
WHERE dt = {dt} AND run_id = '{run_id}'
"""
sql(query).createOrReplaceTempView("test_data")
|
import egypt_model
import unittest
class TestAggregateMethods(unittest.TestCase):
def test_aggregates(self):
model = egypt_model.EgyptModel(31, 30, starting_settlements=9, starting_households=5, starting_household_size=5, starting_grain=1000)
self.assertEqual(egypt_model.compute_total_population(model), 9 * 5 * 5)
self.assertEqual(egypt_model.compute_total_wealth(model), 9 * 5 * 1000)
self.assertEqual(egypt_model.compute_mean_population(model), 5 * 5)
self.assertEqual(egypt_model.compute_mean_wealth(model), 5 * 1000)
model = egypt_model.EgyptModel(31, 30, starting_settlements=0, starting_households=5, starting_household_size=5, starting_grain=1000)
self.assertEqual(egypt_model.compute_total_population(model), 0)
self.assertEqual(egypt_model.compute_total_wealth(model), 0)
self.assertEqual(egypt_model.compute_mean_population(model), 0)
self.assertEqual(egypt_model.compute_mean_wealth(model), 0)
model = egypt_model.EgyptModel(31, 30, starting_settlements=9, starting_households=0, starting_household_size=5, starting_grain=1000)
self.assertEqual(egypt_model.compute_total_population(model), 0)
self.assertEqual(egypt_model.compute_total_wealth(model), 0)
self.assertEqual(egypt_model.compute_mean_population(model), 0)
self.assertEqual(egypt_model.compute_mean_wealth(model), 0)
model = egypt_model.EgyptModel(31, 30, starting_settlements=9, starting_households=5, starting_household_size=0, starting_grain=1000)
self.assertEqual(egypt_model.compute_total_population(model), 0)
self.assertEqual(egypt_model.compute_total_wealth(model), 9 * 5 * 1000)
self.assertEqual(egypt_model.compute_mean_population(model), 0)
self.assertEqual(egypt_model.compute_mean_wealth(model), 5 * 1000)
class TestSettlementMethods(unittest.TestCase):
def setUp(self):
self.model = egypt_model.EgyptModel(31, 30, starting_settlements=9, starting_households=5, starting_household_size=5, starting_grain=1000)
self.settlement = self.model.settlements[0]
def test_settlement_workers(self):
self.assertEqual(self.settlement.workers(), 5*5)
for household in self.settlement.households:
household.workers += 1
self.assertEqual(self.settlement.workers(), 5*6)
self.settlement.households.remove(self.settlement.households[0])
self.assertEqual(self.settlement.workers(), 4*6)
def test_settlement_grain(self):
self.assertEqual(self.settlement.grain(), 5*1000)
for household in self.settlement.households:
household.grain += 1
self.assertEqual(self.settlement.grain(), 5*1001)
self.settlement.households.remove(self.settlement.households[0])
self.assertEqual(self.settlement.grain(), 4*1001)
class TestHouseholdMethods(unittest.TestCase):
def setUp(self):
self.model = egypt_model.EgyptModel(31, 30, starting_settlements=9, starting_households=5, starting_household_size=5, starting_grain=1000)
self.household = self.model.households[0]
self.assertEqual(self.household.grain, 1000)
def test_workers(self):
self.assertEqual(self.household.workers, 5)
self.assertEqual(self.household.workers_worked, 0)
def test_storage_loss(self):
grain = 1000
self.household.grain = grain
self.household.storage_loss()
grain -= grain * 0.1
self.assertEqual(self.household.grain, grain)
self.household.storage_loss()
grain -= grain * 0.1
self.assertEqual(self.household.grain, grain)
self.household.storage_loss()
grain -= grain * 0.1
self.assertEqual(self.household.grain, grain)
def test_consume_grain(self):
workers = 5
grain = workers * egypt_model.ANNUAL_PER_PERSON_GRAIN_CONSUMPTION + 1
self.household.grain = grain
self.household.workers = workers
self.household.consume_grain()
self.assertEqual(self.household.grain, grain - workers * egypt_model.ANNUAL_PER_PERSON_GRAIN_CONSUMPTION)
self.assertEqual(self.household.workers, workers)
grain = workers * egypt_model.ANNUAL_PER_PERSON_GRAIN_CONSUMPTION
self.household.grain = grain
self.household.workers = workers
self.household.consume_grain()
self.assertEqual(self.household.grain, 0)
self.assertEqual(self.household.workers, workers - 1)
workers = 5
grain = workers * egypt_model.ANNUAL_PER_PERSON_GRAIN_CONSUMPTION - 1
self.household.grain = grain
self.household.workers = workers
self.household.consume_grain()
self.assertEqual(self.household.grain, 0)
self.assertEqual(self.household.workers, workers - 1)
def test_competency_increase(self):
self.household.competency = 0.5
self.model.annual_competency_increase = 5
self.assertEqual(self.household.competency, 0.5)
self.household.competency_increase()
self.assertEqual(self.household.competency, 0.525)
self.household.competency_increase()
self.assertEqual(self.household.competency, 0.55125)
self.model.annual_competency_increase = 0
self.household.competency_increase()
self.assertEqual(self.household.competency, 0.55125)
def test_generation_changeover(self):
self.model.min_ambition = 0.2
self.model.min_competency = 0.5
self.household.generation_changeover_countdown = 3
self.household.competency = 0.8
self.household.ambition = 0.4
self.household.generation_changeover()
self.assertEqual(self.household.competency, 0.8)
self.assertEqual(self.household.ambition, 0.4)
self.household.generation_changeover()
self.assertEqual(self.household.competency, 0.8)
self.assertEqual(self.household.ambition, 0.4)
self.household.generation_changeover()
self.assertNotEqual(self.household.competency, 0.8)
        self.assertNotEqual(self.household.ambition, 0.4)
        self.assertTrue(self.household.competency >= 0.5 and self.household.competency <= 1)
        self.assertTrue(self.household.ambition >= 0.2 and self.household.ambition <= 1)
class TestFieldMethods(unittest.TestCase):
def setUp(self):
self.model = egypt_model.EgyptModel(31, 30, starting_settlements=9, starting_households=5, starting_household_size=5, starting_grain=1000)
self.model.fallow_limit = 10
self.household = self.model.households[0]
self.field = egypt_model.FieldAgent(1, self.model, self.household)
self.household.fields.append(self.field)
self.model.fields.append(self.field)
self.model.grid.position_agent(self.field, 0,0)
self.assertEqual(self.field.unique_id, 1)
self.assertEqual(self.field.years_fallowed,0)
self.assertFalse(self.field.harvested)
def test_changeover(self):
for i in range(10):
self.field.harvested = True
self.field.changeover()
self.assertEqual(self.field.years_fallowed, 0)
self.assertEqual(self.household, self.field.household)
for i in range(9):
self.field.changeover()
self.assertEqual(self.field.years_fallowed, 9)
self.assertEqual(self.household, self.field.household)
self.field.changeover()
self.assertEqual(self.field.years_fallowed, self.model.fallow_limit)
self.assertTrue(self.field not in self.household.fields)
self.assertTrue(self.field not in self.model.fields)
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
"""
lantz.drivers.sutter
~~~~~~~~~~~~~~~~~~~~
:company: Sutter Instrument.
:description: Biomedical and scientific instrumentation.
:website: http://www.sutter.com/
---
:copyright: 2015 by Lantz Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from .lambda103 import Lambda103
__all__ = ['Lambda103', ]
|
"""
WSGI config for editor project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
application = get_wsgi_application()
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import os
import re
from abc import ABC, abstractmethod
from textwrap import dedent
from typing import Callable, ClassVar, Iterator, Optional, cast
from typing_extensions import final
from pants.backend.docker.registries import ALL_DEFAULT_REGISTRIES
from pants.base.build_environment import get_buildroot
from pants.core.goals.run import RestartableField
from pants.engine.addresses import Address
from pants.engine.fs import GlobMatchErrorBehavior
from pants.engine.target import (
COMMON_TARGET_FIELDS,
AsyncFieldMixin,
BoolField,
Dependencies,
DictStringToStringField,
InvalidFieldException,
OptionalSingleSourceField,
StringField,
StringSequenceField,
Target,
)
from pants.util.docutil import doc_url
# Common help text to be applied to each field that supports value interpolation.
_interpolation_help = (
"{kind} may use placeholders in curly braces to be interpolated. The placeholders are derived "
"from various sources, such as the Dockerfile instructions and build args.\n\n"
)
class DockerImageBuildArgsField(StringSequenceField):
alias = "extra_build_args"
default = ()
help = (
"Build arguments (`--build-arg`) to use when building this image. "
"Entries are either strings in the form `ARG_NAME=value` to set an explicit value; "
"or just `ARG_NAME` to copy the value from Pants's own environment.\n\n"
"Use `[docker].build_args` to set default build args for all images."
)
class DockerImageContextRootField(StringField):
alias = "context_root"
help = (
"Specify which directory to use as the Docker build context root. This affects the file "
"paths to use for the `COPY` and `ADD` instructions. For example, whether "
"`COPY files/f.txt` should look for the file relative to the build root: "
"`<build root>/files/f.txt` vs relative to the BUILD file: "
"`<build root>/path_to_build_file/files/f.txt`.\n\n"
"Specify the `context_root` path as `files` for relative to build root, or as `./files` "
"for relative to the BUILD file.\n\n"
"If `context_root` is not specified, it defaults to `[docker].default_context_root`."
)
@classmethod
def compute_value(cls, raw_value: Optional[str], address: Address) -> Optional[str]:
value_or_default = super().compute_value(raw_value, address=address)
if isinstance(value_or_default, str) and value_or_default.startswith("/"):
val = value_or_default.strip("/")
raise InvalidFieldException(
f"The `{cls.alias}` field in target {address} must be a relative path, but was "
f"{value_or_default!r}. Use {val!r} for a path relative to the build root, or "
f"{'./' + val!r} for a path relative to the BUILD file (i.e. {os.path.join(address.spec_path, val)!r})."
)
return value_or_default
class DockerImageSourceField(OptionalSingleSourceField):
default = "Dockerfile"
# When the default glob value is in effect, we don't want the normal glob match error behavior
# to kick in for a missing Dockerfile, in case there are `instructions` provided, in which case
# we generate the Dockerfile instead. If there are no `instructions`, or there are both
# `instructions` and a Dockerfile hydrated from the `source` glob, we error out with a message
# to the user.
default_glob_match_error_behavior = GlobMatchErrorBehavior.ignore
help = (
"The Dockerfile to use when building the Docker image.\n\n"
"Use the `instructions` field instead if you prefer not having the Dockerfile in your "
"source tree."
)
class DockerImageInstructionsField(StringSequenceField):
alias = "instructions"
required = False
help = (
"The `Dockerfile` content, typically one instruction per list item.\n\n"
"Use the `source` field instead if you prefer having the Dockerfile in your source tree."
"\n\n"
+ dedent(
"""\
Example:
# example/BUILD
docker_image(
instructions=[
"FROM base/image:1.0",
"RUN echo example",
],
)
"""
)
)
class DockerImageTagsField(StringSequenceField):
alias = "image_tags"
default = ("latest",)
help = (
"Any tags to apply to the Docker image name (the version is usually applied as a tag).\n\n"
+ _interpolation_help.format(kind="tag")
+ f"See {doc_url('tagging-docker-images')}."
)
class DockerImageTargetStageField(StringField):
alias = "target_stage"
help = (
"Specify target build stage, rather than building the entire `Dockerfile`.\n\n"
"When using multi-stage build, you may name your stages, and can target them when building "
"to only selectively build a certain stage. See also the `--docker-build-target-stage` "
"option.\n\n"
"Read more about [multi-stage Docker builds]"
"(https://docs.docker.com/develop/develop-images/multistage-build/#stop-at-a-specific-build-stage)"
)
class DockerImageDependenciesField(Dependencies):
supports_transitive_excludes = True
class DockerImageRegistriesField(StringSequenceField):
alias = "registries"
default = (ALL_DEFAULT_REGISTRIES,)
help = (
"List of addresses or configured aliases to any Docker registries to use for the "
"built image.\n\n"
"The address is a domain name with optional port for your registry, and any registry "
"aliases are prefixed with `@` for addresses in the [docker].registries configuration "
"section.\n\n"
"By default, all configured registries with `default = true` are used.\n\n"
+ dedent(
"""\
Example:
# pants.toml
[docker.registries.my-registry-alias]
address = "myregistrydomain:port"
default = false # optional
# example/BUILD
docker_image(
registries = [
"@my-registry-alias",
"myregistrydomain:port",
],
)
"""
)
+ (
"The above example shows two valid `registry` options: using an alias to a configured "
"registry and the address to a registry verbatim in the BUILD file."
)
)
class DockerImageRepositoryField(StringField):
alias = "repository"
help = (
'The repository name for the Docker image. e.g. "<repository>/<name>".\n\n'
"It uses the `[docker].default_repository` by default.\n\n"
+ _interpolation_help.format(kind="repository")
+ "Additional placeholders for the repository field are: `name`, `directory` and "
"`parent_directory`.\n\nSee the documentation for `[docker].default_repository` for more "
"information."
)
class DockerImageSkipPushField(BoolField):
alias = "skip_push"
default = False
help = "If set to true, do not push this image to registries when running `./pants publish`."
OptionValueFormatter = Callable[[str], str]
class DockerBuildOptionFieldMixin(ABC):
"""Inherit this mixin class to provide options to `docker build`."""
docker_build_option: ClassVar[str]
@abstractmethod
def option_values(self, *, value_formatter: OptionValueFormatter) -> Iterator[str]:
"""Subclasses must implement this, to turn their `self.value` into none, one or more option
values."""
@final
def options(self, value_formatter: OptionValueFormatter) -> Iterator[str]:
for value in self.option_values(value_formatter=value_formatter):
yield from (self.docker_build_option, value)
class DockerImageBuildImageLabelsOptionField(DockerBuildOptionFieldMixin, DictStringToStringField):
alias = "image_labels"
help = (
"Provide image metadata.\n\n"
+ _interpolation_help.format(kind="label value")
+ "See [Docker labels](https://docs.docker.com/config/labels-custom-metadata/"
"#manage-labels-on-objects) for more information."
)
docker_build_option = "--label"
def option_values(self, value_formatter: OptionValueFormatter) -> Iterator[str]:
for label, value in (self.value or {}).items():
yield f"{label}={value_formatter(value)}"
class DockerImageBuildSecretsOptionField(
AsyncFieldMixin, DockerBuildOptionFieldMixin, DictStringToStringField
):
alias = "secrets"
help = (
"Secret files to expose to the build (only if BuildKit enabled).\n\n"
"Secrets may use absolute paths, or paths relative to your build root, or the BUILD file "
"if prefixed with `./`. The id should be valid as used by the Docker build `--secret` "
"option. See [Docker secrets](https://docs.docker.com/engine/swarm/secrets/) for more "
"information.\n\n"
+ dedent(
"""\
Example:
docker_image(
secrets={
"mysecret": "/var/secrets/some-secret",
"repo-secret": "src/proj/secrets/some-secret",
"target-secret": "./secrets/some-secret",
}
)
"""
)
)
docker_build_option = "--secret"
def option_values(self, **kwargs) -> Iterator[str]:
# os.path.join() discards preceding parts if encountering an abs path, e.g. if the secret
# `path` is an absolute path, the `buildroot` and `spec_path` will not be considered. Also,
# an empty path part is ignored.
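        # Illustration (using the hypothetical paths from the field's help text), with a
        # build root of "/repo" and the BUILD file in "src/proj":
        #   "/var/secrets/some-secret"     -> join("/repo", "", "/var/secrets/some-secret")      == "/var/secrets/some-secret"
        #   "src/proj/secrets/some-secret" -> join("/repo", "", "src/proj/secrets/some-secret")  == "/repo/src/proj/secrets/some-secret"
        #   "./secrets/some-secret"        -> join("/repo", "src/proj", "./secrets/some-secret") -> "/repo/src/proj/secrets/some-secret" after normpath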
for secret, path in (self.value or {}).items():
full_path = os.path.join(
get_buildroot(),
self.address.spec_path if re.match(r"\.{1,2}/", path) else "",
path,
)
yield f"id={secret},src={os.path.normpath(full_path)}"
class DockerImageBuildSSHOptionField(DockerBuildOptionFieldMixin, StringSequenceField):
alias = "ssh"
default = ()
help = (
"SSH agent socket or keys to expose to the build (only if BuildKit enabled) "
"(format: default|<id>[=<socket>|<key>[,<key>]])\n\n"
"The exposed agent and/or keys can then be used in your `Dockerfile` by mounting them in "
"your `RUN` instructions:\n\n"
" RUN --mount=type=ssh ...\n\n"
"See [Docker documentation](https://docs.docker.com/develop/develop-images"
"/build_enhancements/#using-ssh-to-access-private-data-in-builds) for more information."
)
docker_build_option = "--ssh"
def option_values(self, **kwargs) -> Iterator[str]:
yield from cast("tuple[str]", self.value)
class DockerImageTarget(Target):
alias = "docker_image"
core_fields = (
*COMMON_TARGET_FIELDS,
DockerImageBuildArgsField,
DockerImageDependenciesField,
DockerImageSourceField,
DockerImageInstructionsField,
DockerImageContextRootField,
DockerImageTagsField,
DockerImageRegistriesField,
DockerImageRepositoryField,
DockerImageBuildImageLabelsOptionField,
DockerImageBuildSecretsOptionField,
DockerImageBuildSSHOptionField,
DockerImageSkipPushField,
DockerImageTargetStageField,
RestartableField,
)
help = (
"The `docker_image` target describes how to build and tag a Docker image.\n\n"
"Any dependencies, as inferred or explicitly specified, will be included in the Docker "
"build context, after being packaged if applicable.\n\n"
"By default, will use a Dockerfile from the same directory as the BUILD file this target "
"is defined in. Point at another file with the `source` field, or use the `instructions` "
"field to have the Dockerfile contents verbatim directly in the BUILD file.\n\n"
"Dependencies on upstream/base images defined by another `docker_image` are inferred if "
"referenced by a build argument with a default value of the target address.\n\n"
+ dedent(
"""\
Example:
# src/docker/downstream/Dockerfile
ARG BASE=src/docker/upstream:image
FROM $BASE
...
"""
)
)
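# --- Hypothetical sketch (not part of Pants itself): what the
# DockerBuildOptionFieldMixin machinery above contributes to the `docker build`
# command line. For example, `image_labels={"maintainer": "team-x"}` yields the
# repeated flag/value pair `--label maintainer=team-x`, one pair per entry.
if __name__ == "__main__":
    labels = {"maintainer": "team-x", "stage": "dev"}  # made-up example values
    args = []
    for label, value in labels.items():
        args.extend(["--label", f"{label}={value}"])
    print(args)  # ['--label', 'maintainer=team-x', '--label', 'stage=dev']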
|
import unittest
from policy_sentry.writing.minimize import minimize_statement_actions
from policy_sentry.querying.all import get_all_actions
class MinimizeWildcardActionsTestCase(unittest.TestCase):
def test_minimize_statement_actions(self):
actions_to_minimize = [
"kms:CreateGrant",
"kms:CreateCustomKeyStore",
"ec2:AuthorizeSecurityGroupEgress",
"ec2:AuthorizeSecurityGroupIngress",
]
desired_result = ["ec2:authorizes*", "kms:createc*", "kms:createg*"]
all_actions = get_all_actions(lowercase=True)
minchars = None
self.maxDiff = None
# minimized_actions_list = minimize_statement_actions(desired_actions, all_actions, minchars)
self.assertListEqual(
sorted(
minimize_statement_actions(actions_to_minimize, all_actions, minchars)
),
sorted(desired_result),
)
def test_minimize_statement_actions_funky_case(self):
actions_to_minimize = [
"kms:creategrant",
"kms:createcustomkeystore",
"ec2:authorizesecuritygroupegress",
"ec2:authorizesecuritygroupingress",
]
desired_result = ["ec2:authorizes*", "kms:createc*", "kms:createg*"]
all_actions = get_all_actions(lowercase=True)
minchars = None
self.maxDiff = None
# minimized_actions_list = minimize_statement_actions(desired_actions, all_actions, minchars)
self.assertListEqual(
sorted(
minimize_statement_actions(actions_to_minimize, all_actions, minchars)
),
sorted(desired_result),
)
|
# encoding: utf-8
import os
import pytest
import ckan.model as model
import ckan.lib.mailer as mailer
from ckan.tests import factories
from ckan.lib.base import render
from ckan.common import config
from ckan.tests.lib.test_mailer import MailerBase
@pytest.mark.usefixtures("with_request_context", "clean_db", "with_plugins")
@pytest.mark.ckan_config("ckan.plugins", "example_theme_custom_emails")
class TestExampleCustomEmailsPlugin(MailerBase):
def _get_template_content(self, name):
templates_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "templates", "emails"
)
with open(os.path.join(templates_path, name), "r") as f:
return f.read()
def test_reset_password_custom_subject(self, mail_server):
user = factories.User()
user_obj = model.User.by_name(user["name"])
mailer.send_reset_link(user_obj)
# check it went to the mock smtp server
msgs = mail_server.get_smtp_messages()
assert len(msgs) == 1
msg = msgs[0]
extra_vars = {"site_title": config.get_value("ckan.site_title")}
expected = render(
"emails/reset_password_subject.txt", extra_vars
)
expected = expected.split("\n")[0]
subject = self.get_email_subject(msg[3])
assert expected == subject
assert "**test**" in subject
def test_reset_password_custom_body(self, mail_server):
user = factories.User()
user_obj = model.User.by_name(user["name"])
mailer.send_reset_link(user_obj)
# check it went to the mock smtp server
msgs = mail_server.get_smtp_messages()
assert len(msgs) == 1
msg = msgs[0]
extra_vars = {"reset_link": mailer.get_reset_link(user_obj)}
expected = render("emails/reset_password.txt", extra_vars)
body = self.get_email_body(msg[3]).decode()
assert expected == body.strip()
assert "**test**" in body
def test_invite_user_custom_subject(self, mail_server):
user = factories.User()
user_obj = model.User.by_name(user["name"])
mailer.send_invite(user_obj)
# check it went to the mock smtp server
msgs = mail_server.get_smtp_messages()
assert len(msgs) == 1
msg = msgs[0]
extra_vars = {
"site_title": config.get_value("ckan.site_title"),
}
expected = render("emails/invite_user_subject.txt", extra_vars)
expected = expected.split("\n")[0]
subject = self.get_email_subject(msg[3])
assert expected == subject
assert "**test**" in subject
def test_invite_user_custom_body(self, mail_server):
user = factories.User()
user_obj = model.User.by_name(user["name"])
mailer.send_invite(user_obj)
# check it went to the mock smtp server
msgs = mail_server.get_smtp_messages()
assert len(msgs) == 1
msg = msgs[0]
extra_vars = {
"reset_link": mailer.get_reset_link(user_obj),
"user_name": user["name"],
"site_title": config.get_value("ckan.site_title"),
}
expected = render("emails/invite_user.txt", extra_vars)
body = self.get_email_body(msg[3]).decode()
assert expected == body.strip()
assert "**test**" in body
|
from selenium import webdriver
from selenium.webdriver.support.ui import Select
import pandas as pd
import re
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import PyQt5
import time
"""[Initial Settings]
Initial settings.
"""
options = webdriver.ChromeOptions()
options.add_argument('--headless')
options.add_argument('--disable-gpu')
options.add_argument('--lang-ja')
browser = webdriver.Chrome(chrome_options=options, executable_path='./chromedriver')
"""[CSS Selector Settings]
CSS selector settings.
"""
PAGER = "li.pager-next a"
word = input("Enter the keyword you want to search for: ")
df_main = pd.DataFrame(columns=['Availability', 'Title', 'Price', 'URL'])
df_graf = pd.DataFrame(columns=['SOLD','PRICE'])
n = 1
url = "https://www.mercari.com/jp/search/?page=" + str(n) + "&keyword=" + word
print(url)
browser.get(url)
while True:
if PAGER:
item_boxlist = browser.find_elements_by_css_selector(".items-box")
for item_box in item_boxlist:
try:
if len(item_box.find_elements_by_css_selector(".item-sold-out-badge")) > 0:
sold = "SOLD"
else:
sold = "NOT SOLD"
sub_title = item_box.find_element_by_class_name("items-box-body")
title = sub_title.find_element_by_tag_name("h3").text
item_price = item_box.find_element_by_css_selector(".items-box-price")
price_text = item_price.text
price_text = re.sub(r",", "", price_text).lstrip("¥ ")
price_text_int = int(price_text)
print(price_text_int)
url = item_box.find_element_by_tag_name("a").get_attribute("href")
data = pd.Series( [ sold,title,price_text_int,url ], index=df_main.columns )
grdata = pd.Series( [ sold,price_text_int ], index=df_graf.columns )
df_main = df_main.append( data, ignore_index=True )
df_graf = df_graf.append( grdata, ignore_index=True )
except Exception as e:
print(e)
        # Follow the "next page" link if it exists; otherwise stop.
        next_links = browser.find_elements_by_css_selector(PAGER)
        if not next_links:
            print('No items anymore...')
            break
        btn = next_links[0].get_attribute('href')
        n += 1
        print('next url:{}'.format(btn))
        time.sleep(3)
        browser.get(btn)
        print('Moving to next page...')
print(df_main)
sns.stripplot(x='SOLD', y='PRICE', data=df_graf)
plt.show()
sns.pairplot(df_graf,hue="SOLD")
plt.show()
print('Writing out to CSV file...')
df_main.to_csv("pricedata.csv", encoding="utf_8_sig")
print("Done")
|
import argparse
import os
import glob
import copy
import csv
import json
import numpy as np
from PIL import Image
import nrrd
import radiomics
from radiomics import featureextractor
import SimpleITK as sitk
_pwd_ = os.getcwd()
data_Table = {}
Feature_Table = {}
hyperparameters = {}
hyperparameters['setting'] = {}
hyperparameters['force2D'] = True
hyperparameters['force2Ddimension'] = 0
def assert_paser_valid(args):
assert (os.path.exists(args.input_root)), "The image root folder cannot be found"
if args.Table != None:
assert (os.path.exists(args.Table)), "The data table cannot be found"
assert (len(args.Volume) != 0), "Input volume cannot be found"
assert (len(args.Mask) != 0), "Input Mask cannot be found"
assert (len(args.Mask) == len(args.Volume)), "The number of Masks is not consistent with the number of Volumes."
if os.path.exists(args.output_folder) == False:
os.mkdir(args.output_folder)
if args.Volume[0] == 'all':
assert (args.Mask[0]) == 'all', "-Mask: should be \'all\'"
assert (isinstance(eval(args.width), float) or
isinstance(eval(args.width), int)), "-width: should be a float/int number"
assert (isinstance(eval(args.level), float) or
isinstance(eval(args.level), int)), "-level: should be a float/int number"
def read_data_Table(Table_path):
global data_Table
data_csv = open(Table_path, 'r')
csv_reader = csv.reader(data_csv, delimiter = ',')
for row in csv_reader:
ID = row[0]
data_Table[ID] = row
data_csv.close()
def read_data(args):
global Feature_Table
Vols = []
Segs = []
Folder_Vol = os.path.join(args.input_root,
'crop_vol')
Folder_Seg = os.path.join(args.input_root,
'crop_msk')
if args.Volume[0] == 'all':
Vols = sorted( glob.glob( os.path.join(Folder_Vol,
'UC*')))
Segs = sorted( glob.glob( os.path.join(Folder_Seg,
'UC*')))
for _index_ in range(len(Vols)):
ID = os.path.basename(Vols[_index_]).split('_')[0]
Feature_Table[ID] = {}
Feature_Table[ID]['Type'] = 'UTUC'
Feature_Table[ID]['Sex'] = data_Table[ID][2]
Grade_info = data_Table[ID][4]
if ('High' in Grade_info or
'high' in Grade_info):
Feature_Table[ID]['Histological grade'] = 'HG'
elif ('Low' in Grade_info or
'low' in Grade_info):
Feature_Table[ID]['Histological grade'] = 'LG'
else:
Feature_Table[ID]['Histological grade'] = 'None'
if (data_Table[ID][6] == '' or
data_Table[ID][6] == None):
Feature_Table[ID]['T stage'] = 'None'
elif data_Table[ID][6] == 'A':
Feature_Table[ID]['T stage'] = 'a'
else:
Feature_Table[ID]['T stage'] = data_Table[ID][6]
Feature_Table[ID]['Lymph-Invasion'] = data_Table[ID][9]
Feature_Table[ID]['tumor'] = glob.glob( os.path.join(Vols[_index_],
'*.tif'))[0]
Feature_Table[ID]['mask'] = glob.glob( os.path.join(Segs[_index_],
'*.png'))[0]
else:
N = len(args.Volume)
for _index_ in range(N):
Vol = glob.glob( os.path.join(Folder_Vol,
f'{args.Volume[_index_]}*'))[0]
Seg = glob.glob( os.path.join(Folder_Seg,
f'{args.Mask[_index_]}*'))[0]
ID = os.path.basename(Vol).split('_')[0]
Feature_Table[ID] = {}
Feature_Table[ID]['Type'] = 'UTUC'
Feature_Table[ID]['Sex'] = data_Table[ID][2]
Grade_info = data_Table[ID][4]
if ('High' in Grade_info or
'high' in Grade_info):
Feature_Table[ID]['Histological grade'] = 'HG'
elif ('Low' in Grade_info or
'low' in Grade_info):
Feature_Table[ID]['Histological grade'] = 'LG'
else:
Feature_Table[ID]['Histological grade'] = 'None'
if (data_Table[ID][6] == '' or
data_Table[ID][6] == None):
Feature_Table[ID]['T stage'] = 'None'
else:
Feature_Table[ID]['T stage'] = data_Table[ID][6]
Feature_Table[ID]['Lymph-Invasion'] = data_Table[ID][9]
Feature_Table[ID]['tumor'] = glob.glob( os.path.join(Vol,
'*.tif'))[0]
Feature_Table[ID]['mask'] = glob.glob( os.path.join(Seg,
'*.png'))[0]
def Extract_features(args):
import matplotlib.pyplot as plt
global Feature_Table
global hyperparameters
args.width = eval(args.width)
args.level = eval(args.level)
Lower_bound = (args.level - (args.width/2))
hyperparameters['setting']['voxelArrayShift'] = Lower_bound
extractor = featureextractor.RadiomicsFeatureExtractor(**hyperparameters)
extractor.enableImageTypeByName('Wavelet',
customArgs={'level':1})
extractor.enableImageTypeByName('Square')
extractor.enableImageTypeByName('SquareRoot')
extractor.enableImageTypeByName('Logarithm')
extractor.enableImageTypeByName('Exponential')
extractor.enableImageTypeByName('Gradient',
customArgs={'gradientUseSpacing':False})
extractor.enableImageTypeByName('LBP2D',
customArgs={'lbp2Dmethod':'default',
'lbp2DRadius':3,
'lbp2DSamples':36})
extractor.enableAllFeatures()
for ID in Feature_Table.keys():
imageFilepath = Feature_Table[ID]['tumor']
maskFilepath = Feature_Table[ID]['mask']
img = sitk.ReadImage(imageFilepath)
np_img = sitk.GetArrayFromImage(img)
np_img = np_img * (args.width/65535) + Lower_bound
np_img = np_img.astype(np.int)
#plt.imshow(np_img, cmap='gray')
#plt.show()
IMG = sitk.GetImageFromArray(np_img)
features = extractor.execute(IMG,
maskFilepath,
255)
F = {}
print(f'analyzing {ID}')
F['Original'] = {}
F['Wavelet'] = {}
F['Square'] = {}
F['SquareRoot'] = {}
F['Logarithm'] = {}
F['Exponential'] = {}
F['Gradient'] = {}
F['LBP2D'] = {}
for key in features.keys():
#print(f"Compute {key} : {features[key]}")
if 'diagnostics' in key:
continue
if 'original' in key:
F['Original'][key.split('original_')[1]] = float(features[key])
continue
if 'wavelet' in key:
F['Wavelet'][key.split('wavelet-')[1]] = float(features[key])
continue
if 'square_' in key:
F['Square'][key.split('square_')[1]] = float(features[key])
continue
if 'squareroot_' in key:
F['SquareRoot'][key.split('squareroot_')[1]] = float(features[key])
continue
            if 'logarithm_' in key:
                F['Logarithm'][key.split('logarithm_')[1]] = float(features[key])
                continue
if 'exponential' in key:
F['Exponential'][key.split('exponential_')[1]] = float(features[key])
continue
if 'gradient' in key:
F['Gradient'][key.split('gradient_')[1]] = float(features[key])
continue
if 'lbp-2D_' in key:
F['LBP2D'][key.split('lbp-2D_')[1]] = float(features[key])
continue
Feature_Table[ID]['Features'] = F
def normalization():
NumberOfpatients = len(list(Feature_Table.keys()))
base_ID = list(Feature_Table.keys())[0]
F = Feature_Table[base_ID]['Features']
buffer_list = [0.0] * NumberOfpatients
for _filter_ in list(F.keys()):
feature_types = list(F[_filter_].keys())
for _feature_ in feature_types:
_index_ = 0
_Max_ = Feature_Table[base_ID]['Features'][_filter_][_feature_]
_Min_ = Feature_Table[base_ID]['Features'][_filter_][_feature_]
for ID in list(Feature_Table.keys()):
feature_value = Feature_Table[ID]['Features'][_filter_][_feature_]
buffer_list[_index_] = feature_value
print(_filter_,
_feature_,
feature_value,
_Max_,
_Min_)
if feature_value > _Max_:
_Max_ = feature_value
if feature_value < _Min_:
_Min_ = feature_value
_index_ += 1
#Normalize to the range of [0, 1]
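            # i.e. x' = (x - min) / (max - min), mapping each subject's value into [0, 1]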
offset = 0.0
if (_Max_ - _Min_) == 0:
continue
scale_factor = (1.0 - 0.0)/(_Max_ - _Min_)
_index_ = 0
for ID in list(Feature_Table.keys()):
Feature_Table[ID]['Features'][_filter_][_feature_] = (offset +
scale_factor*(buffer_list[_index_] -
_Min_))
_index_ += 1
def save_results(args):
json_path = os.path.join(args.output_folder,
'Features.txt')
json_file = open(json_path, 'w')
json_content = json.dumps(Feature_Table,
indent = 4)
json_file.writelines(json_content)
json_file.close()
csv_path = os.path.join(args.output_folder,
'Features.csv')
csv_file = open(csv_path, 'w')
writer = csv.writer(csv_file, dialect='excel')
headers = []
headers.append('Subject')
first_key = list(Feature_Table.keys())[0]
inner_keys = list(Feature_Table[first_key].keys())
for inner_key in inner_keys:
if inner_key == 'Features':
Feature_keys = list(Feature_Table[first_key][inner_key].keys())
for Feature_key in Feature_keys:
_features_ = list(Feature_Table[first_key][inner_key][Feature_key].keys())
for _feature_ in _features_:
headers.append(f'{Feature_key}: ' + _feature_)
else:
headers.append(inner_key)
writer.writerow(headers)
_line_ = []
print(f"We totally analyze {len(list(Feature_Table.keys()))} participants")
for key in sorted(list(Feature_Table.keys())):
_line_ = []
_line_.append(key)
inner_keys = list(Feature_Table[key].keys())
for inner_key in inner_keys:
if inner_key == 'Features':
Feature_keys = list(Feature_Table[key][inner_key].keys())
for Feature_key in Feature_keys:
_features_ = list(Feature_Table[first_key][inner_key][Feature_key].keys())
for _feature_ in _features_:
_line_.append(Feature_Table[key][inner_key][Feature_key][_feature_])
else:
_line_.append(Feature_Table[key][inner_key])
writer.writerow(_line_)
csv_file.close()
    # Transpose the CSV in place; materialize all rows first, since opening with "w" truncates the file.
    rows = list(zip(*csv.reader(open(csv_path, "r"))))
    csv.writer(open(csv_path, "w", newline='')).writerows(rows)
def main():
API_description = """
***** Radiomics Analysis Platform *****
API Name: Radiomics Feature Analysis
Version: 1.0
Developer: Alvin Li
Email: d05548014@ntu.edu.tw
****************************************
"""
parser = argparse.ArgumentParser(prog='Feature_Extraction.py',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=API_description)
parser.add_argument('-input_root',
action = 'store',
type = str,
help = 'The absolute path to input root.')
parser.add_argument('-Table',
action = 'store',
type = str,
help = 'The absolute path to the DATA TABLE (*.csv).')
parser.add_argument('-Volume',
nargs = '+',
help = 'ex: -Volume Vol1.tif Vol2.tif ...')
parser.add_argument('-Mask',
nargs = '+',
help = 'ex: -Mask Msk1.png Msk2.png ...')
parser.add_argument('-output_folder',
action = 'store',
help = 'The absolute path to the output folder used to store extracted Feature Table')
parser.add_argument('-width',
action = 'store',
type = str,
help = 'window width')
parser.add_argument('-level',
action = 'store',
type = str,
help = 'window level')
parser.add_argument('-normalize',
action = 'store',
type = str,
help = 'True/False')
args = parser.parse_args()
assert_paser_valid(args)
read_data_Table(args.Table)
read_data(args)
Extract_features(args)
if args.normalize == 'True':
normalization()
save_results(args)
if __name__ == '__main__':
main()
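# Example invocation (paths and file names below are illustrative only):
#   python Feature_Extraction.py -input_root /data -Table /data/table.csv \
#       -Volume Vol1.tif Vol2.tif -Mask Msk1.png Msk2.png \
#       -output_folder /data/output -width 400 -level 40 -normalize True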
|
from django.conf import settings
def sentry_dsn(request):
return {
'SENTRY_DSN': settings.SENTRY_DSN
}
def commit_sha(request):
return {
'COMMIT_SHA': settings.COMMIT_SHA
}
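# To expose these values in templates, the processors above must be registered
# in settings.TEMPLATES; the module path below is an assumption about where
# this file lives, not something defined here:
#
#   TEMPLATES = [{
#       ...
#       'OPTIONS': {
#           'context_processors': [
#               ...
#               'myproject.context_processors.sentry_dsn',
#               'myproject.context_processors.commit_sha',
#           ],
#       },
#   }]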
|
#Based on gerthvh's menu_8button.py
#BlockComPhone PitftGraphicLib v0.1 For Portrait Pitft Only
import sys, pygame
from pygame.locals import *
import time
import subprocess
import os
from subprocess import *
#os.system('adafruit-pitft-touch-cal -f')  # Confirm orientation, but too laggy
os.environ["SDL_FBDEV"] = "/dev/fb1"
os.environ["SDL_MOUSEDEV"] = "/dev/input/touchscreen"
os.environ["SDL_MOUSEDRV"] = "TSLIB"
#set size of the screen
size = width, height = 240, 320
#screen = pygame.display.set_mode(size)
os.system('sudo sh -c "echo 508 > /sys/class/gpio/export"')
#os.system('ls -l /sys/class/gpio')
os.system("sudo sh -c 'echo 'out' > /sys/class/gpio/gpio508/direction'")
os.system("sudo sh -c 'echo '1' > /sys/class/gpio/gpio508/value'")
def initdis():
global screen
screen = pygame.display.set_mode(size)
# Initialize pygame and hide mouse
pygame.init()
pygame.mouse.set_visible(0)
screen = pygame.display.set_mode(size)
# Background Color
screen.fill(black)
# Outer Border
pygame.draw.rect(screen, red, (0,0,240,320),10)
def disinitdis():
pygame.quit()
global touchlist
touchlist = []
def clear(xpos,ypos,length,height):
pygame.draw.rect(screen, black, (xpos,ypos,length,height),0)
def clearall():
pygame.draw.rect(screen, black, (0,0,240,320),0)
pygame.draw.rect(screen, red, (0,0,240,320),10)
global touchlist
touchlist = []
def backlight(onoff):
#print('Backlight')
if onoff: os.system("sudo sh -c 'echo '1' > /sys/class/gpio/gpio508/value'")
else: os.system("sudo sh -c 'echo '0' > /sys/class/gpio/gpio508/value'")
#colors R G B
white = (255, 255, 255)
red = (255, 0, 0)
green = ( 0, 255, 0)
blue = ( 0, 0, 255)
black = ( 0, 0, 0)
cyan = ( 50, 255, 255)
magenta = (255, 0, 255)
yellow = (255, 255, 0)
orange = (255, 127, 0)
touchlist = []
# define function for printing text in a specific place with a specific width and height with a specific colour and border
def make_button(text, xpo, ypo, height, width, recspace, colour, fontsize, function):
font=pygame.font.Font(None,fontsize)
label=font.render(str(text), 1, (colour))
screen.blit(label,(xpo,ypo))
#Space between rec and label
pygame.draw.rect(screen, colour, (xpo-recspace,ypo-recspace,width,height),3)
#Touchscreen
global touchlist
touchlist.append((xpo,width+xpo,ypo,height+ypo,function))
# define function for printing text in a specific place with a specific colour
def make_label(text, xpo, ypo, fontsize, colour):
font=pygame.font.Font(None,fontsize)
label=font.render(str(text), 1, (colour))
screen.blit(label,(xpo,ypo))
# define function that checks for touch location
def on_touch():
counterst = 0
# get the position that was touched
touch_pos = (pygame.mouse.get_pos() [0], pygame.mouse.get_pos() [1])
while counterst < len(touchlist):
if touchlist[counterst][0] <= touch_pos[0] <= touchlist[counterst][1] and touchlist[counterst][2] <= touch_pos[1] <= touchlist[counterst][3]:
touchlist[counterst][4]()
counterst = counterst + 1
def touchdisch():
    for event in pygame.event.get():
        if event.type == pygame.MOUSEBUTTONDOWN:
            print("screen pressed")  # for debugging purposes
            pos = (pygame.mouse.get_pos()[0], pygame.mouse.get_pos()[1])
            print(pos)  # for checking
            pygame.draw.circle(screen, white, pos, 2, 0)  # for debugging purposes - adds a small dot where the screen is pressed
            on_touch()
        # ensure there is always a safe way to end the program if the touch screen fails
        if event.type == KEYDOWN:
            if event.key == K_ESCAPE:
                sys.exit()
        if event.type == pygame.QUIT:
            sys.exit()
    pygame.display.update()
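# Usage sketch (button geometry, colours and the callback name are illustrative
# placeholders, not defined in this file):
#   initdis()
#   make_button("Hello", 30, 40, 40, 180, 5, white, 30, my_callback)
#   while True:
#       touchdisch()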
|
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras import backend as K
class LeNet:
@staticmethod
def build(width, height, depth, classes, last_active="softmax"):
# Initialize the model
model = Sequential()
input_shape = (height, width, depth)
# If we are using 'channels-first', update the input shape
if K.image_data_format() == 'channels_first':
input_shape = (depth, height, width)
# First set of CONV => RELU => POOL layers
model.add(Conv2D(20, (5, 5), padding='same', input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# Second set of CONV => RELU => POOL layers
model.add(Conv2D(50, (5, 5), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# First (and only) set of FC => RELU layers
model.add(Flatten())
model.add(Dense(500))
model.add(Activation('relu'))
model.add(Dense(classes))
model.add(Activation(last_active))
# return the constructed network architecture
return model
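# Minimal usage sketch (not part of the original class): the 28x28x1 input
# shape, 10 classes and the Adam / categorical-crossentropy setup are
# illustrative assumptions, e.g. for MNIST-sized data.
if __name__ == "__main__":
    model = LeNet.build(width=28, height=28, depth=1, classes=10)
    model.compile(optimizer="adam",
                  loss="categorical_crossentropy",
                  metrics=["accuracy"])
    model.summary()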
|
# -*- coding: utf-8 -*-
import os
import sys
from ..common.file import get_file_name
from ..common.log import logger
def add_sys_path(file_path: str, project_name: str):
if not os.path.exists(file_path):
raise FileNotFoundError("{} not found".format(file_path))
flag = False
parent_path = os.path.abspath(file_path)
parent_name = get_file_name(parent_path)
project_path = None
    while parent_name:
        parent_path = os.path.dirname(parent_path)
        for child_name in os.listdir(parent_path):
            if child_name == project_name:
                flag = True
                project_path = parent_path
                break
        if flag:
            break
        # Recompute the directory name so the walk stops at the filesystem root
        # instead of looping forever when the project is not found.
        parent_name = get_file_name(parent_path)
if flag:
sys.path.insert(0, project_path)
        logger.info("Successfully added {} to sys.path".format(project_path))
else:
raise FileNotFoundError("{} not found".format(project_name))
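# Usage sketch (illustrative): from inside any module of a project named
# "my_project", the call below walks upward from __file__ until it finds a
# directory that contains "my_project" and prepends that parent to sys.path.
# "my_project" is a placeholder, not something defined in this package.
#
#   add_sys_path(__file__, "my_project")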
|
"""
gof.py
gof stands for Graph Optimization Framework.
The gof submodule of theano implements a framework
for manipulating programs described as graphs. The
gof module defines basic theano graph concepts:
-Apply nodes, which represent the application
of an Op to Variables. Together these make up a
graph.
-The Type, needed for Variables to make sense.
-The FunctionGraph, which defines how a subgraph
should be interpreted to implement a function.
-The Thunk, a callable object that becomes part
of the executable emitted by theano.
-Linkers/VMs, the objects that call Thunks in
sequence in order to execute a theano program.
Conceptually, gof is intended to be sufficiently abstract
that it could be used to implement a language other than
theano. i.e., theano is a domain-specific language for
numerical computation, created by implementing
tensor Variables and Ops that perform mathematical functions.
A different kind of domain-specific language could be
made by using gof with different Variables and Ops.
In practice, gof and the rest of theano are somewhat more
tightly intertwined.
Currently, gof also contains much of the C compilation
functionality. Ideally this should be refactored into
a different submodule.
For more details and discussion, see the theano-dev
e-mail thread "What is gof?".
"""
from __future__ import absolute_import, print_function, division
from theano.gof.cc import \
CLinker, OpWiseCLinker, DualLinker, HideC
from theano.gof.fg import \
CachedConstantError, InconsistencyError, MissingInputError, FunctionGraph
from theano.gof.destroyhandler import \
DestroyHandler
from theano.gof.graph import \
Apply, Variable, Constant, view_roots
from theano.gof.link import \
Container, Linker, LocalLinker, PerformLinker, WrapLinker, WrapLinkerMany
from theano.gof.op import \
Op, OpenMPOp, PureOp, COp, ops_with_inner_function
from theano.gof.type import EnumType, EnumList, CEnumType
from theano.gof.opt import (
Optimizer,
optimizer, inplace_optimizer,
SeqOptimizer,
MergeOptimizer,
LocalOptimizer, local_optimizer, LocalOptGroup,
OpSub, OpRemove, PatternSub,
NavigatorOptimizer, TopoOptimizer, EquilibriumOptimizer,
OpKeyOptimizer, CheckStackTraceOptimization)
from theano.gof.optdb import \
DB, LocalGroupDB, Query, \
EquilibriumDB, SequenceDB, ProxyDB
from theano.gof.toolbox import \
Feature, \
Bookkeeper, History, Validator, ReplaceValidate, NodeFinder,\
PrintListener, ReplacementDidntRemovedError, NoOutputFromInplace
from theano.gof.type import \
Type, Generic, generic
from theano.gof.utils import \
hashtype, object2, MethodNotDefined
from theano.gof.params_type import ParamsType, Params
import theano
if theano.config.cmodule.preload_cache:
cc.get_module_cache()
|
import os
import enum
from typing import Counter
from sqlalchemy import Column, String, Integer, create_engine
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import backref, relationship
from sqlalchemy.sql.expression import false, null
from sqlalchemy.sql.schema import ForeignKey, PrimaryKeyConstraint, Table, MetaData
from sqlalchemy.sql.sqltypes import Boolean, Float
from config import init_env_vars
Base = declarative_base()
init_env_vars()
### UNCOMMENT the variables below to run against a local database instead of Heroku
# database_name = os.getenv('DB_NAME')
# database_username = os.getenv('DB_USER')
# database_password = os.getenv('DB_PASSWORD')
# database_path = "postgresql://{}:{}@{}/{}"\
# .format(database_username, database_password, 'localhost:5432', database_name)
### HEROKU REQUIREMENTS
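# Heroku supplies a 'postgres://' URL, while SQLAlchemy expects the
# 'postgresql://' scheme, hence the one-off scheme rewrite below.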
database_path = os.environ.get('DATABASE_URL').replace("://", "ql://", 1)
db = SQLAlchemy()
'''
setup_db(app)
binds a flask application and a SQLAlchemy service
'''
def setup_db(app, database_path=database_path):
app.config["SQLALCHEMY_DATABASE_URI"] = database_path
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
db.app = app
db.init_app(app)
db.create_all()
Migrate(app, db)
def session_revert():
db.session.rollback()
def session_close():
db.session.close()
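# Minimal usage sketch (the app construction is illustrative, not part of
# this module):
#
#   from flask import Flask
#   app = Flask(__name__)
#   setup_db(app)   # binds SQLAlchemy, creates the tables and wires Flask-Migrate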
'''
Schema Configuration
'''
class Reservation (db.Model):
__tablename__ = 'reservation'
id = Column(Integer, primary_key=True)
vehicle_id = Column(Integer, ForeignKey('vehicle.id'), nullable=False)
customer_id = Column(Integer, ForeignKey('customer.id'), nullable=False)
employee_id = Column(Integer, ForeignKey('employee.id'), nullable=False)
# implemented the time attrib, if time allows
# start_time =
# end_time =
cost = Column(Float, nullable=False)
reservation_open = Column(Boolean, nullable=False)
    vehicle = relationship('Vehicle', uselist=False, foreign_keys=[vehicle_id])
    customer = relationship('Customer', uselist=False, foreign_keys=[customer_id])
    employee = relationship('Employee', uselist=False, foreign_keys=[employee_id])
def __init__(self, vehicle_id, customer_id,
employee_id, cost, reservation_open):
self.vehicle_id = vehicle_id
self.customer_id = customer_id
self.employee_id = employee_id
self.cost = cost
self.reservation_open = reservation_open
def insert(self):
db.session.add(self)
db.session.commit()
def update(self):
db.session.commit()
def delete(self):
db.session.delete(self)
db.session.commit()
def get_cust_info(id):
return Customer.query.filter_by(id=id).first()
def get_emp_info(id):
return Employee.query.filter_by(id=id).first()
def get_veh_info(id):
return Vehicle.query.filter_by(id=id).first()
def format(self):
customer = Reservation.get_cust_info(self.customer_id)
employee = Reservation.get_emp_info(self.employee_id)
vehicle = Reservation.get_veh_info(self.vehicle_id)
return {
'id' : self.id,
'cost': self.cost,
'customer_name': customer.first_name + ' ' + customer.last_name,
'employee_name': employee.first_name + ' ' + employee.last_name,
'vehicle_id': self.vehicle_id,
'vehicle_make_and_model': vehicle.make + ' ' + vehicle.model,
'reservation_open' : self.reservation_open
}
class Vehicle(db.Model):
__tablename__= 'vehicle'
id = Column(Integer, primary_key=True)
make = Column(String, nullable=False)
model = Column(String, nullable=False)
year = Column(Integer, nullable=False)
body_style = Column(String)
color = Column(String)
currently_rented = Column(Boolean, nullable=False)
reservations = relationship('Reservation', back_populates='vehicle')
def __init__(self, make, model, year, body_style, color,
currently_rented):
self.make = make
self.model = model
self.year = year
self.body_style = body_style
self.color = color
self.currently_rented = currently_rented
def insert(self):
db.session.add(self)
db.session.commit()
def update(self):
db.session.commit()
def delete(self):
db.session.delete(self)
db.session.commit()
def format(self):
return {
'id' : self.id,
'make': self.make,
'model': self.model,
'year': self.year,
'body_style': self.body_style,
'color': self.color,
'currently_rented': self.currently_rented,
}
class Person(db.Model):
# __tablename__= 'person'
__abstract__ = True
# id = Column(Integer, primary_key=True)
first_name = Column(String, nullable=False)
last_name = Column(String, nullable=False)
address = Column(String, nullable=False)
type = Column(String(50))
__mapper_args__ = {
'polymorphic_on':type,
'polymorphic_identity':'person',
}
class Customer(Person):
__tablename__ = 'customer'
id = Column(Integer, primary_key=True)
reservations = relationship('Reservation', back_populates='customer')
__mapper_args__ = {
'polymorphic_identity':'customer'
}
def __init__(self, first_name, last_name, address, type):
self.first_name = first_name
self.last_name = last_name
self.address = address
self.type = type
def insert(self):
db.session.add(self)
db.session.commit()
def update(self):
db.session.commit()
def delete(self):
db.session.delete(self)
db.session.commit()
def format(self):
return {
'id' : self.id,
'first_name' : self.first_name,
'last_name' : self.last_name,
'address' : self.address,
'type' : self.type,
}
class Manager(Person):
__tablename__ = 'manager'
id = Column(Integer, primary_key=True)
employees = relationship('Employee', back_populates='manager')
__mapper_args__ = {
'polymorphic_identity':'manager'
}
def __init__(self, first_name, last_name, address, type):
self.first_name = first_name
self.last_name = last_name
self.address = address
self.type = type
def insert(self):
db.session.add(self)
db.session.commit()
def update(self):
db.session.commit()
def delete(self):
db.session.delete(self)
db.session.commit()
def format(self):
return {
'id' : self.id,
'first_name' : self.first_name,
'last_name' : self.last_name,
'address' : self.address,
'type' : self.type
}
class Employee(Person, db.Model):
__tablename__ = 'employee'
id = Column(Integer, primary_key=True)
manager_id = Column(Integer, ForeignKey('manager.id'))
manager = relationship('Manager', back_populates='employees')
reservations = relationship('Reservation', back_populates='employee')
__mapper_args__ = {
'polymorphic_identity':'employee'
}
def __init__(self, first_name, last_name, address, type, manager_id):
self.first_name = first_name
self.last_name = last_name
self.address = address
self.type = type
self.manager_id = manager_id
def insert(self):
db.session.add(self)
db.session.commit()
def update(self):
db.session.commit()
def delete(self):
db.session.delete(self)
db.session.commit()
def format(self):
return {
'id' : self.id,
'first_name' : self.first_name,
'last_name' : self.last_name,
'address' : self.address,
'type' : self.type,
'manager_id' : self.manager_id
}
'''
Helper functions
'''
def get_vehicle(id):
if id <= 0:
return Vehicle.query.all()
else:
return Vehicle.query.filter_by(id=id).first()
def get_customer(id):
if not id:
return Customer.query.all()
else:
return Customer.query.filter_by(id=id).first()
def get_employee(id):
if not id:
return Employee.query.all()
else:
return Employee.query.filter_by(id=id).first()
def get_manager(id):
if not id:
return Manager.query.all()
else:
return Manager.query.filter_by(id=id).first()
def get_reservation():
return Reservation.query.all()
|
#!/usr/bin/env python3
##############################################################################
# Copyright (c) 2016: Leonardo Cardoso
# https://github.com/LeoFCardoso/pdf2pdfocr
##############################################################################
# Emulate pdftk multibackground operator
# $1 - first file (foreground)
# $2 - second file (background)
# $3 - output file
# User should pass correct parameters. There is no parameter check.
####
# Depends on PyPDF2
#
import datetime
import sys
from PyPDF2 import PdfFileWriter, PdfFileReader
__author__ = 'Leonardo F. Cardoso'
#
verbose_mode = False # Used for debug
def debug(param):
try:
if verbose_mode:
tstamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
print("[{0}] [DEBUG]\t{1}".format(tstamp, param))
except:
pass
output = PdfFileWriter()
# First file (image)
imagepdf = PdfFileReader(open(sys.argv[1], 'rb'), strict=False)
# Second file (text)
textpdf = PdfFileReader(open(sys.argv[2], 'rb'), strict=False)
# Copy pages to output with text
scale_tolerance = 0.001
for i in range(imagepdf.getNumPages()):
debug("Page: {0}".format(i + 1))
imagepage = imagepdf.getPage(i)
textpage = textpdf.getPage(i)
debug("Img (original): {0}".format(imagepage.mediaBox.upperRight))
debug("Text: {0}".format(textpage.mediaBox.upperRight))
# Handle rotation
rotate_angle = imagepage.get('/Rotate')
debug("Image page rotate angle is {0}".format(rotate_angle))
debug("Text page rotate angle is {0}".format(textpage.get('/Rotate')))
if rotate_angle is None:
rotate_angle = 0
#
image_page_x = imagepage.mediaBox.upperRight[0]
image_page_y = imagepage.mediaBox.upperRight[1]
# With rotated pages (90 or 270 degress), we have to switch x and y, to avoid wrong scale operation
if rotate_angle == 90 or rotate_angle == 270:
image_page_x = imagepage.mediaBox.upperRight[1]
image_page_y = imagepage.mediaBox.upperRight[0]
#
debug("Img (dimensions after rotation): {0}, {1}".format(image_page_x, image_page_y))
factor_x = textpage.mediaBox.upperRight[0] / image_page_x
factor_y = textpage.mediaBox.upperRight[1] / image_page_y
debug("Factors: {0}, {1}".format(factor_x, factor_y))
debug("Corrected Factors: {0}, {1}".format(factor_x - 1, factor_y - 1))
# Try to avoid unnecessary scale operation
if abs(factor_x - 1) > scale_tolerance or abs(factor_y - 1) > scale_tolerance:
debug("Scaling...")
imagepage.scale(float(factor_x), float(factor_y))
# imagepage stay on top
if rotate_angle == 0 or rotate_angle == 360:
debug("Merge simple")
# TODO very slow in some PDFs
textpage.mergePage(imagepage)
else:
debug("Merge rotated")
# Tested values for translation with 90 degrees
if rotate_angle == 90:
textpage.mergeRotatedTranslatedPage(imagepage, (-1 * rotate_angle), image_page_y / 2,
image_page_y / 2, expand=False)
# Tested values for translation with 180 degrees
if rotate_angle == 180:
textpage.mergeRotatedTranslatedPage(imagepage, (-1 * rotate_angle), image_page_x / 2,
image_page_y / 2, expand=False)
# Tested values for translation with 270 degrees
if rotate_angle == 270:
textpage.mergeRotatedTranslatedPage(imagepage, (-1 * rotate_angle), image_page_x / 2,
image_page_x / 2, expand=False)
#
textpage.compressContentStreams()
output.addPage(textpage)
#
with open(sys.argv[3], 'wb') as f:
output.write(f)
#
|
# -*- coding: UTF-8 -*-
import sys
import logging
import argparse
import shutil
from typing import Dict, List
from echoscope.util import file_util, log_util
from echoscope.config import config
from echoscope.model import config_model
from echoscope.source import source, mysql_source, clickhouse_source
from echoscope.generate import generate, markdown_generate
from clickhouse_driver import Client, connect
# Map of data-source exporters, keyed by data-source type
__source_map: Dict[str, source.Source] = {}
# Map of output-file generators, keyed by export type
__generate_map: Dict[str, generate.Generate] = {}
__generate = None
def init():
    """Initialize logging and register the data-source exporters and output generators."""
    file_util.mkdirs(config.LogPath, False)
    log_util.log_init(config.LogPath)
    mysqlSource = mysql_source.MysqlSource()
    __source_map[config.DsMysql] = mysqlSource
    __source_map[config.DsMariaDB] = mysqlSource
    __source_map[config.DsClickHouse] = clickhouse_source.ClickhouseSource()
    mdGenerate = markdown_generate.MarkdownGenerate(config.TemplatePath, config.MarkdownExportPath)
    __generate_map[config.ExportTypeMarkdown] = mdGenerate
def _parse_option():
    """Parse command-line options.

    Returns:
        A tuple of (parsed options, the raw command-line arguments).
    """
    parser = argparse.ArgumentParser(description='Echoscope')
    parser.add_argument('-g', '--generate', type=str, default='markdown',
                        help='generate file type. support: markdown')
    options = parser.parse_args()
    return options, sys.argv[1:]
def main():
init()
options, args = _parse_option()
shutil.rmtree(path=config.MarkdownExportPath, ignore_errors=True)
confMap: Dict[str, List[config_model.DataSourceConfig]] = {}
    # Generate model documentation files for each configured data source
for dsConfig in config.exportDsConfig:
logging.info("start generate model file: %s" % dsConfig)
ds = __source_map[dsConfig.dsType].export_model(conf=dsConfig)
dsConfig.ds = ds
filePath = __generate_map[options.generate].generate_index_file(conf=dsConfig, ds=ds)
logging.info("generate model index file path: %s" % filePath)
filePath = __generate_map[options.generate].generate_file(conf=dsConfig, ds=ds)
if confMap.get(dsConfig.dsType, None) == None:
confMap[dsConfig.dsType] = [dsConfig]
else:
confMap[dsConfig.dsType].append(dsConfig)
logging.info("end generate model file path: %s" % filePath)
logging.info("start generate root index file ")
confss: List[List[config_model.DataSourceConfig]] = []
for dsType in config.SupportDsType:
print(dsType)
confs = confMap.get(dsType, None)
if confs == None:
continue
print(dsType)
confss.append(confs)
__generate_map[config.ExportTypeMarkdown].generate_root_file(confss)
logging.info("end generate root index file ")
main()
# conn = connect('clickhouse://default:123456@10.0.3.94:9000/system')
# # client = Client(host='10.0.3.94', port=8123, user='default', password='123456')
# cursor = conn.cursor()
# cursor.execute('select version() as ver;')
# yz = cursor.fetchall()
# print(yz)
|
# code source: https://blog.miguelgrinberg.com/post/the-flask-mega-tutorial-part-iv-database
from migrate.versioning import api
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
api.downgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, v - 1)
print('Current database version: ' + str(api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)))
# This script downgrades the database by one revision each time it runs.
# To downgrade by multiple revisions, run the script as many times as needed.
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1RollingUpdateStatefulSetStrategy(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'partition': 'int'
}
attribute_map = {
'partition': 'partition'
}
def __init__(self, partition=None):
"""
V1RollingUpdateStatefulSetStrategy - a model defined in Swagger
"""
self._partition = None
self.discriminator = None
if partition is not None:
self.partition = partition
@property
def partition(self):
"""
Gets the partition of this V1RollingUpdateStatefulSetStrategy.
Partition indicates the ordinal at which the StatefulSet should be partitioned. Default value is 0.
:return: The partition of this V1RollingUpdateStatefulSetStrategy.
:rtype: int
"""
return self._partition
@partition.setter
def partition(self, partition):
"""
Sets the partition of this V1RollingUpdateStatefulSetStrategy.
Partition indicates the ordinal at which the StatefulSet should be partitioned. Default value is 0.
:param partition: The partition of this V1RollingUpdateStatefulSetStrategy.
:type: int
"""
self._partition = partition
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1RollingUpdateStatefulSetStrategy):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
import numpy as np
import scipy
from optuna._experimental import experimental
from optuna.logging import get_logger
from optuna.study import Study
from optuna.study import StudyDirection
from optuna.trial import FrozenTrial
from optuna.trial import TrialState
from optuna.visualization._utils import _check_plot_args
from optuna.visualization._utils import _get_param_values
from optuna.visualization.matplotlib._matplotlib_imports import _imports
from optuna.visualization.matplotlib._utils import _is_log_scale
from optuna.visualization.matplotlib._utils import _is_numerical
if _imports.is_successful():
from optuna.visualization.matplotlib._matplotlib_imports import Axes
from optuna.visualization.matplotlib._matplotlib_imports import Colormap
from optuna.visualization.matplotlib._matplotlib_imports import ContourSet
from optuna.visualization.matplotlib._matplotlib_imports import plt
_logger = get_logger(__name__)
AXES_PADDING_RATIO = 5e-2
@experimental("2.2.0")
def plot_contour(
study: Study,
params: Optional[List[str]] = None,
*,
target: Optional[Callable[[FrozenTrial], float]] = None,
target_name: str = "Objective Value",
) -> "Axes":
"""Plot the parameter relationship as contour plot in a study with Matplotlib.
Note that, if a parameter contains missing values, a trial with missing values is not plotted.
.. seealso::
Please refer to :func:`optuna.visualization.plot_contour` for an example.
Warnings:
Output figures of this Matplotlib-based
:func:`~optuna.visualization.matplotlib.plot_contour` function would be different from
those of the Plotly-based :func:`~optuna.visualization.plot_contour`.
Example:
The following code snippet shows how to plot the parameter relationship as contour plot.
.. plot::
import optuna
def objective(trial):
x = trial.suggest_float("x", -100, 100)
y = trial.suggest_categorical("y", [-1, 0, 1])
return x ** 2 + y
sampler = optuna.samplers.TPESampler(seed=10)
study = optuna.create_study(sampler=sampler)
study.optimize(objective, n_trials=30)
optuna.visualization.matplotlib.plot_contour(study, params=["x", "y"])
Args:
study:
A :class:`~optuna.study.Study` object whose trials are plotted for their target values.
params:
Parameter list to visualize. The default is all parameters.
target:
A function to specify the value to display. If it is :obj:`None` and ``study`` is being
used for single-objective optimization, the objective values are plotted.
.. note::
Specify this argument if ``study`` is being used for multi-objective optimization.
target_name:
Target's name to display on the color bar.
Returns:
A :class:`matplotlib.axes.Axes` object.
Raises:
:exc:`ValueError`:
If ``target`` is :obj:`None` and ``study`` is being used for multi-objective
optimization.
"""
_imports.check()
_check_plot_args(study, target, target_name)
_logger.warning(
"Output figures of this Matplotlib-based `plot_contour` function would be different from "
"those of the Plotly-based `plot_contour`."
)
return _get_contour_plot(study, params, target, target_name)
def _get_contour_plot(
study: Study,
params: Optional[List[str]] = None,
target: Optional[Callable[[FrozenTrial], float]] = None,
target_name: str = "Objective Value",
) -> "Axes":
# Calculate basic numbers for plotting.
trials = [trial for trial in study.trials if trial.state == TrialState.COMPLETE]
if len(trials) == 0:
_logger.warning("Your study does not have any completed trials.")
_, ax = plt.subplots()
return ax
all_params = {p_name for t in trials for p_name in t.params.keys()}
if params is None:
sorted_params = sorted(all_params)
elif len(params) <= 1:
_logger.warning("The length of params must be greater than 1.")
_, ax = plt.subplots()
return ax
else:
for input_p_name in params:
if input_p_name not in all_params:
raise ValueError("Parameter {} does not exist in your study.".format(input_p_name))
sorted_params = sorted(set(params))
n_params = len(sorted_params)
plt.style.use("ggplot") # Use ggplot style sheet for similar outputs to plotly.
if n_params == 2:
# Set up the graph style.
fig, axs = plt.subplots()
axs.set_title("Contour Plot")
cmap = _set_cmap(study, target)
contour_point_num = 100
# Prepare data and draw contour plots.
if params:
x_param = params[0]
y_param = params[1]
else:
x_param = sorted_params[0]
y_param = sorted_params[1]
cs = _generate_contour_subplot(
trials, x_param, y_param, axs, cmap, contour_point_num, target
)
if isinstance(cs, ContourSet):
axcb = fig.colorbar(cs)
axcb.set_label(target_name)
else:
# Set up the graph style.
fig, axs = plt.subplots(n_params, n_params)
fig.suptitle("Contour Plot")
cmap = _set_cmap(study, target)
contour_point_num = 100
# Prepare data and draw contour plots.
cs_list = []
for x_i, x_param in enumerate(sorted_params):
for y_i, y_param in enumerate(sorted_params):
ax = axs[y_i, x_i]
cs = _generate_contour_subplot(
trials, x_param, y_param, ax, cmap, contour_point_num, target
)
if isinstance(cs, ContourSet):
cs_list.append(cs)
if cs_list:
axcb = fig.colorbar(cs_list[0], ax=axs)
axcb.set_label(target_name)
return axs
def _set_cmap(study: Study, target: Optional[Callable[[FrozenTrial], float]]) -> "Colormap":
cmap = "Blues_r" if target is None and study.direction == StudyDirection.MAXIMIZE else "Blues"
return plt.get_cmap(cmap)
class _LabelEncoder:
def __init__(self) -> None:
self.labels: List[str] = []
def fit(self, labels: List[str]) -> "_LabelEncoder":
self.labels = sorted(set(labels))
return self
def transform(self, labels: List[str]) -> List[int]:
return [self.labels.index(label) for label in labels]
def fit_transform(self, labels: List[str]) -> List[int]:
return self.fit(labels).transform(labels)
def get_labels(self) -> List[str]:
return self.labels
def get_indices(self) -> List[int]:
return list(range(len(self.labels)))
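# Minimal illustration of the encoder above (the labels are hypothetical):
#   enc = _LabelEncoder().fit(["b", "a", "b"])   # stored labels: ["a", "b"]
#   enc.transform(["a", "b"])                    # -> [0, 1]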
def _calculate_griddata(
trials: List[FrozenTrial],
x_param: str,
x_indices: List[Union[str, int, float]],
y_param: str,
y_indices: List[Union[str, int, float]],
contour_point_num: int,
target: Optional[Callable[[FrozenTrial], float]],
) -> Tuple[
np.ndarray,
np.ndarray,
np.ndarray,
List[Union[int, float]],
List[Union[int, float]],
List[Union[int, float]],
List[Union[int, float]],
List[int],
List[str],
List[int],
List[str],
int,
int,
]:
    # Extract values for x, y, z axes from each trial.
x_values = []
y_values = []
z_values = []
x_range_values = []
y_range_values = []
for trial in trials:
contains_x_param = x_param in trial.params
if contains_x_param:
x_range_values.append(trial.params[x_param])
contains_y_param = y_param in trial.params
if contains_y_param:
y_range_values.append(trial.params[y_param])
if not contains_x_param or not contains_y_param:
continue
x_values.append(trial.params[x_param])
y_values.append(trial.params[y_param])
if target is None:
value = trial.value
else:
value = target(trial)
if isinstance(value, int):
value = float(value)
elif not isinstance(value, float):
raise ValueError(
"Trial{} has COMPLETE state, but its target value is non-numeric.".format(
trial.number
)
)
z_values.append(value)
# Return empty values when x or y has no value.
if len(x_values) == 0 or len(y_values) == 0:
return (
np.array([]),
np.array([]),
np.array([]),
x_values,
y_values,
[],
[],
[],
[],
[],
[],
0,
0,
)
# Add dummy values for grid data calculation when a parameter has one unique value.
x_values_dummy = []
y_values_dummy = []
if len(set(x_values)) == 1:
x_values_dummy = [x for x in x_indices if x not in x_values]
x_values = x_values + x_values_dummy * len(x_values)
y_values = y_values + (y_values * len(x_values_dummy))
z_values = z_values + (z_values * len(x_values_dummy))
if len(set(y_values)) == 1:
y_values_dummy = [y for y in y_indices if y not in y_values]
y_values = y_values + y_values_dummy * len(y_values)
x_values = x_values + (x_values * len(y_values_dummy))
z_values = z_values + (z_values * len(y_values_dummy))
# Convert categorical values to int.
cat_param_labels_x = [] # type: List[str]
cat_param_pos_x = [] # type: List[int]
cat_param_labels_y = [] # type: List[str]
cat_param_pos_y = [] # type: List[int]
if not _is_numerical(trials, x_param):
enc = _LabelEncoder()
x_range_values = enc.fit_transform(list(map(str, x_range_values)))
x_values = enc.transform(list(map(str, x_values)))
cat_param_labels_x = enc.get_labels()
cat_param_pos_x = enc.get_indices()
if not _is_numerical(trials, y_param):
enc = _LabelEncoder()
y_range_values = enc.fit_transform(list(map(str, y_range_values)))
y_values = enc.transform(list(map(str, y_values)))
cat_param_labels_y = enc.get_labels()
cat_param_pos_y = enc.get_indices()
# Calculate min and max of x and y.
x_values_min = min(x_range_values)
x_values_max = max(x_range_values)
y_values_min = min(y_range_values)
y_values_max = max(y_range_values)
# Calculate grid data points.
# For x and y, create 1-D array of evenly spaced coordinates on linear or log scale.
xi = np.array([])
yi = np.array([])
zi = np.array([])
if _is_log_scale(trials, x_param):
padding_x = (np.log10(x_values_max) - np.log10(x_values_min)) * AXES_PADDING_RATIO
x_values_min = np.power(10, np.log10(x_values_min) - padding_x)
x_values_max = np.power(10, np.log10(x_values_max) + padding_x)
xi = np.logspace(np.log10(x_values_min), np.log10(x_values_max), contour_point_num)
else:
padding_x = (x_values_max - x_values_min) * AXES_PADDING_RATIO
x_values_min -= padding_x
x_values_max += padding_x
xi = np.linspace(x_values_min, x_values_max, contour_point_num)
if _is_log_scale(trials, y_param):
padding_y = (np.log10(y_values_max) - np.log10(y_values_min)) * AXES_PADDING_RATIO
y_values_min = np.power(10, np.log10(y_values_min) - padding_y)
y_values_max = np.power(10, np.log10(y_values_max) + padding_y)
yi = np.logspace(np.log10(y_values_min), np.log10(y_values_max), contour_point_num)
else:
padding_y = (y_values_max - y_values_min) * AXES_PADDING_RATIO
y_values_min -= padding_y
y_values_max += padding_y
yi = np.linspace(y_values_min, y_values_max, contour_point_num)
# create irregularly spaced map of trial values
# and interpolate it with Plotly's interpolation formulation
if x_param != y_param:
zmap = _create_zmap(x_values, y_values, z_values, xi, yi)
zi = _interpolate_zmap(zmap, contour_point_num)
return (
xi,
yi,
zi,
x_values,
y_values,
[x_values_min, x_values_max],
[y_values_min, y_values_max],
cat_param_pos_x,
cat_param_labels_x,
cat_param_pos_y,
cat_param_labels_y,
len(x_values_dummy),
len(y_values_dummy),
)
def _generate_contour_subplot(
trials: List[FrozenTrial],
x_param: str,
y_param: str,
ax: "Axes",
cmap: "Colormap",
contour_point_num: int,
target: Optional[Callable[[FrozenTrial], float]],
) -> "ContourSet":
x_indices = sorted(set(_get_param_values(trials, x_param)))
y_indices = sorted(set(_get_param_values(trials, y_param)))
if len(x_indices) < 2:
_logger.warning("Param {} unique value length is less than 2.".format(x_param))
return ax
if len(y_indices) < 2:
_logger.warning("Param {} unique value length is less than 2.".format(y_param))
return ax
(
xi,
yi,
zi,
x_values,
y_values,
x_values_range,
y_values_range,
x_cat_param_pos,
x_cat_param_label,
y_cat_param_pos,
y_cat_param_label,
x_values_dummy_count,
y_values_dummy_count,
) = _calculate_griddata(
trials, x_param, x_indices, y_param, y_indices, contour_point_num, target
)
cs = None
ax.set(xlabel=x_param, ylabel=y_param)
ax.set_xlim(x_values_range[0], x_values_range[1])
ax.set_ylim(y_values_range[0], y_values_range[1])
if len(zi) > 0:
if _is_log_scale(trials, x_param):
ax.set_xscale("log")
if _is_log_scale(trials, y_param):
ax.set_yscale("log")
if x_param != y_param:
# Contour the gridded data.
ax.contour(xi, yi, zi, 15, linewidths=0.5, colors="k")
cs = ax.contourf(xi, yi, zi, 15, cmap=cmap.reversed())
# Plot data points.
if x_values_dummy_count > 0:
x_org_len = int(len(x_values) / (x_values_dummy_count + 1))
y_org_len = int(len(y_values) / (x_values_dummy_count + 1))
elif y_values_dummy_count > 0:
x_org_len = int(len(x_values) / (y_values_dummy_count + 1))
y_org_len = int(len(y_values) / (y_values_dummy_count + 1))
else:
x_org_len = len(x_values)
y_org_len = len(x_values)
ax.scatter(
x_values[:x_org_len],
y_values[:y_org_len],
marker="o",
c="black",
s=20,
edgecolors="grey",
linewidth=2.0,
)
if x_cat_param_pos:
ax.set_xticks(x_cat_param_pos)
ax.set_xticklabels(x_cat_param_label)
if y_cat_param_pos:
ax.set_yticks(y_cat_param_pos)
ax.set_yticklabels(y_cat_param_label)
ax.label_outer()
return cs
def _create_zmap(
x_values: List[Union[int, float]],
y_values: List[Union[int, float]],
z_values: List[float],
xi: np.ndarray,
yi: np.ndarray,
) -> Dict[Tuple[int, int], float]:
# creates z-map from trial values and params.
# z-map is represented by hashmap of coordinate and trial value pairs
#
# coordinates are represented by tuple of integers, where the first item
# indicates x-axis index and the second item indicates y-axis index
# and refer to a position of trial value on irregular param grid
#
# since params were resampled either with linspace or logspace
# original params might not be on the x and y axes anymore
# so we are going with close approximations of trial value positions
zmap = dict()
for x, y, z in zip(x_values, y_values, z_values):
xindex = int(np.argmin(np.abs(xi - x)))
yindex = int(np.argmin(np.abs(yi - y)))
zmap[(xindex, yindex)] = z
return zmap
def _interpolate_zmap(zmap: Dict[Tuple[int, int], float], contour_plot_num: int) -> np.ndarray:
# implements interpolation formulation used in Plotly
# to interpolate heatmaps and contour plots
# https://github.com/plotly/plotly.js/blob/master/src/traces/heatmap/interp2d.js#L30
# citing their doc:
#
# > Fill in missing data from a 2D array using an iterative
# > poisson equation solver with zero-derivative BC at edges.
# > Amazingly, this just amounts to repeatedly averaging all the existing
# > nearest neighbors
#
# Plotly's algorithm is equivalent to solve the following linear simultaneous equation.
# It is discretization form of the Poisson equation.
#
# z[x, y] = zmap[(x, y)] (if zmap[(x, y)] is given)
# 4 * z[x, y] = z[x-1, y] + z[x+1, y] + z[x, y-1] + z[x, y+1] (if zmap[(x, y)] is not given)
a_data = []
a_row = []
a_col = []
b = np.zeros(contour_plot_num**2)
for x in range(contour_plot_num):
for y in range(contour_plot_num):
grid_index = y * contour_plot_num + x
if (x, y) in zmap:
a_data.append(1)
a_row.append(grid_index)
a_col.append(grid_index)
b[grid_index] = zmap[(x, y)]
else:
for dx, dy in ((-1, 0), (1, 0), (0, -1), (0, 1)):
if 0 <= x + dx < contour_plot_num and 0 <= y + dy < contour_plot_num:
a_data.append(1)
a_row.append(grid_index)
a_col.append(grid_index)
a_data.append(-1)
a_row.append(grid_index)
a_col.append(grid_index + dy * contour_plot_num + dx)
z = scipy.sparse.linalg.spsolve(scipy.sparse.csc_matrix((a_data, (a_row, a_col))), b)
return z.reshape((contour_plot_num, contour_plot_num))
|
import this
def hello_world():
print("Hello World")
|
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""System configuration library.
"""
from __future__ import print_function as _print_function
import sys as _sys
from tensorflow.python.framework.versions import CXX11_ABI_FLAG
from tensorflow.python.framework.versions import MONOLITHIC_BUILD
from tensorflow.python.platform.sysconfig import get_build_info
from tensorflow.python.platform.sysconfig import get_compile_flags
from tensorflow.python.platform.sysconfig import get_include
from tensorflow.python.platform.sysconfig import get_lib
from tensorflow.python.platform.sysconfig import get_link_flags
del _print_function
|
#
# PySNMP MIB module CISCO-MGX82XX-MODULE-RSRC-PART-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-MGX82XX-MODULE-RSRC-PART-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:50:29 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsUnion, ConstraintsIntersection, ValueSizeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueSizeConstraint", "SingleValueConstraint")
cardGeneric, = mibBuilder.importSymbols("BASIS-MIB", "cardGeneric")
ciscoWan, = mibBuilder.importSymbols("CISCOWAN-SMI", "ciscoWan")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
Counter32, Unsigned32, TimeTicks, Counter64, ModuleIdentity, Gauge32, Integer32, NotificationType, IpAddress, ObjectIdentity, iso, MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "Unsigned32", "TimeTicks", "Counter64", "ModuleIdentity", "Gauge32", "Integer32", "NotificationType", "IpAddress", "ObjectIdentity", "iso", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "Bits")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
ciscoMgx82xxModuleRsrcPartMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 351, 150, 73))
ciscoMgx82xxModuleRsrcPartMIB.setRevisions(('2003-04-18 00:00',))
if mibBuilder.loadTexts: ciscoMgx82xxModuleRsrcPartMIB.setLastUpdated('200304180000Z')
if mibBuilder.loadTexts: ciscoMgx82xxModuleRsrcPartMIB.setOrganization('Cisco Systems, Inc.')
cardResourcePartition = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 110, 2, 9))
cardLcnPartitionType = MibScalar((1, 3, 6, 1, 4, 1, 351, 110, 2, 9, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("noPartition", 1), ("controllerBased", 2), ("portControllerBased", 3))).clone('noPartition')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cardLcnPartitionType.setStatus('current')
cardResPartGrpTable = MibTable((1, 3, 6, 1, 4, 1, 351, 110, 2, 9, 2), )
if mibBuilder.loadTexts: cardResPartGrpTable.setStatus('current')
cardResPartGrpEntry = MibTableRow((1, 3, 6, 1, 4, 1, 351, 110, 2, 9, 2, 1), ).setIndexNames((0, "CISCO-MGX82XX-MODULE-RSRC-PART-MIB", "cardResPartCtrlrNum"))
if mibBuilder.loadTexts: cardResPartGrpEntry.setStatus('current')
cardResPartCtrlrNum = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 2, 9, 2, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("par", 1), ("pnni", 2), ("tag", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cardResPartCtrlrNum.setStatus('current')
cardResPartRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 2, 9, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("add", 1), ("del", 2), ("mod", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cardResPartRowStatus.setStatus('current')
cardResPartNumOfLcnAvail = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 2, 9, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cardResPartNumOfLcnAvail.setStatus('current')
cmmRsrcPartMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 150, 73, 2))
cmmRsrcPartMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 150, 73, 2, 1))
cmmRsrcPartMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 150, 73, 2, 2))
cmmRsrcPartCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 351, 150, 73, 2, 1, 1)).setObjects(("CISCO-MGX82XX-MODULE-RSRC-PART-MIB", "cmmRsrcPartGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cmmRsrcPartCompliance = cmmRsrcPartCompliance.setStatus('current')
cmmRsrcPartGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 351, 150, 73, 2, 2, 1)).setObjects(("CISCO-MGX82XX-MODULE-RSRC-PART-MIB", "cardLcnPartitionType"), ("CISCO-MGX82XX-MODULE-RSRC-PART-MIB", "cardResPartCtrlrNum"), ("CISCO-MGX82XX-MODULE-RSRC-PART-MIB", "cardResPartRowStatus"), ("CISCO-MGX82XX-MODULE-RSRC-PART-MIB", "cardResPartNumOfLcnAvail"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cmmRsrcPartGroup = cmmRsrcPartGroup.setStatus('current')
mibBuilder.exportSymbols("CISCO-MGX82XX-MODULE-RSRC-PART-MIB", cardResPartGrpTable=cardResPartGrpTable, ciscoMgx82xxModuleRsrcPartMIB=ciscoMgx82xxModuleRsrcPartMIB, cmmRsrcPartMIBConformance=cmmRsrcPartMIBConformance, cmmRsrcPartMIBCompliances=cmmRsrcPartMIBCompliances, cmmRsrcPartGroup=cmmRsrcPartGroup, cardResPartNumOfLcnAvail=cardResPartNumOfLcnAvail, cardResourcePartition=cardResourcePartition, cmmRsrcPartMIBGroups=cmmRsrcPartMIBGroups, cmmRsrcPartCompliance=cmmRsrcPartCompliance, cardResPartRowStatus=cardResPartRowStatus, cardResPartCtrlrNum=cardResPartCtrlrNum, cardLcnPartitionType=cardLcnPartitionType, PYSNMP_MODULE_ID=ciscoMgx82xxModuleRsrcPartMIB, cardResPartGrpEntry=cardResPartGrpEntry)
|
from diagrams import Cluster, Diagram
from graphviz import Digraph
from .PipelineNode import PipelineNode
import sklearn
from sklearn import *
import regex as re
import warnings
#warnings.filterwarnings("ignore")
class PipelineDiagram:
def __init__(self, pipeline, file_name='ml_pipeline.png'):
self.pipe = pipeline
self.title = 'Machine Learning Pipeline'
self.title_param = 'Machine Learning Parameters Pipeline'
self.view = True
self.file_name = file_name
self.cn = PipelineNode()
def show(self, title=None):
self.title = title if title else self.title
self.pipe_len = len(list(self.pipe))
return self.create_diagram()
def show_params(self, title=None):
self.title_param = title if title else self.title_param
return self.create_param_diagram()
@staticmethod
def parent_classes(level=0, base='sklearn'):
if level != 0:
base = 'sklearn.' + base
return list(filter(lambda x: not re.search(r'^_.*', x), dir(eval(base))))
def all_classes(self):
l = self.parent_classes()
for i in self.parent_classes():
try:
eval(i)
except:
l.remove(i)
class_list = {x: [eval('sklearn.' + x + '.' + y) for y in self.parent_classes(1, x)] for x in l}
return class_list
def get_link(self, path):
reg = re.findall(r"'(.*)'", str(path))[0]
link = 'https://scikit-learn.org/stable/modules/generated/{0}.html'.format(re.sub("".join(re.findall(r'\.(_.*\.)',reg)),'',reg))
return link
def find_category(self, obj):
temp = self.all_classes()
try:
comp = str(type(obj)).split('.')[1]
if type(obj) in temp[comp] and comp!='pipeline':
return (comp, obj, self.get_link(type(obj)))
if comp=='pipeline':
return list(map(self.find_category, [x[1] for x in obj.transformer_list]))
except:
return ('Custom Function', obj, 'Function')
def find_category_params(self, obj):
try:
comp = str(type(obj)).split('.')[1]
if comp!='pipeline':
return (obj, self.get_param(obj))
if comp=='pipeline':
return list(map(self.find_category_params, [x[1] for x in obj.transformer_list]))
except:
return (obj, 'Custom Function')
def get_param(self, obj):
try:
s = list(obj.get_params().items())
reg = re.sub(r"(,\s)\'","\l'",str(dict(filter(lambda x: '__' not in x[0] , s))))
return re.sub('(\(.*\))', '', str(obj))+'\n\n'+re.sub('{|}', '', reg)
except:
return str(obj)
def all_params(self):
return list(map(self.find_category_params, self.pipe))
def all_categories(self):
return list(map(self.find_category, self.pipe))
def create_diagram(self):
with Diagram(self.title, show=False, filename=self.file_name) as pipe_diag:
inputs = [("data","Train Data"), ("data", "Validation Data"), ("data","Test Data")]
start = self.create_cluster("Input Data", inputs) >> self.cn.create_node(("Data Stream","Data Stream"))
self.traverse_pipeline(start)
return pipe_diag
def create_param_diagram(self):
self.g = Digraph('G', filename='ml_pipeline_params.gv')
self.g.graph_attr["rankdir"] = "LR"
self.create_cluster_params('Inputs', ['Train Data', 'Validation Data', 'Test Data'])
#self.g.edge('input','streamin')
#self.g.edge('streamout','Model')
self.traverse_pipeline_params()
self.g.view()
return self
def traverse_pipeline(self, curr):
self.descriptions = list(self.all_categories())
for i in self.descriptions:
if type(i) == list:
curr = curr >> self.create_cluster("Transformers", i)
else:
curr = curr >> self.cn.create_node(i)
return curr
def traverse_pipeline_params(self):
self.params = self.all_params()
for i in self.params:
if type(i) == list:
self.create_cluster_params('Transformers', [x[1] for x in i])
else:
self.g.node(str(i[0]), label=i[1], shape='box')
self.g.edge(self.input, str(i[0]))
self.input = str(i[0])
return self
def create_cluster(self, cluster_name, node_names):
with Cluster(cluster_name):
return list(map(self.cn.create_node, node_names))
def create_cluster_params(self, cluster_name, node_names):
with self.g.subgraph(name='cluster_'+cluster_name) as c:
inlabel = 'streamin_' + cluster_name
outlabel = 'streamout_' + cluster_name
c.attr(style='filled', color='green', URL='https://stackoverflow.com')
c.node_attr.update(style='filled', color='white')
c.node(outlabel, label='Stream', shape='box')
if cluster_name != 'Inputs':
c.node(inlabel, label='Stream', shape='box')
self.g.edge(self.input, inlabel)
c.node(outlabel, label='Union', shape='box')
for i in range(len(node_names)):
c.node(cluster_name+str(i), label=node_names[i], shape='box')
if cluster_name!='Inputs':
c.edge(inlabel, str(cluster_name+str(i)))
c.edge(cluster_name+str(i), outlabel)
self.input = outlabel
c.attr(label=cluster_name, URL='https://stackoverflow.com')
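# Usage sketch (assumes `pipe` is an sklearn Pipeline, or a list of its steps;
# the file name is illustrative):
#   PipelineDiagram(pipe, file_name='ml_pipeline.png').show('My Pipeline')
#   PipelineDiagram(pipe).show_params()   # Graphviz view with hyperparameters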
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
from apache_beam.typehints import typehints
class RowTypeConstraint(typehints.TypeConstraint):
def __init__(self, fields):
self._fields = tuple(fields)
def _consistent_with_check_(self, sub):
return self == sub
def type_check(self, instance):
from apache_beam import Row
return isinstance(instance, Row)
def _inner_types(self):
"""Iterates over the inner types of the composite type."""
return [field[1] for field in self._fields]
def __eq__(self, other):
return type(self) == type(other) and self._fields == other._fields
def __hash__(self):
return hash(self._fields)
def __repr__(self):
return 'Row(%s)' % ', '.join(
'%s=%s' % (name, typehints._unified_repr(t)) for name,
t in self._fields)
def get_type_for(self, name):
return dict(self._fields)[name]
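# Minimal usage sketch (the field layout is illustrative):
#   constraint = RowTypeConstraint([('id', int), ('name', str)])
#   constraint.type_check(...) only verifies the instance is a beam.Row;
#   per-field types are exposed via constraint.get_type_for('id') -> int.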
|
# Copyright 2017-2020 Palantir Technologies, Inc.
# Copyright 2021- Python Language Server Contributors.
import math
import os
import sys
from pathlib import Path
from typing import NamedTuple, Dict
import pytest
from pylsp import uris, lsp
from pylsp.workspace import Document
from pylsp.plugins.jedi_completion import pylsp_completions as pylsp_jedi_completions
from pylsp.plugins.jedi_completion import pylsp_completion_item_resolve as pylsp_jedi_completion_item_resolve
from pylsp.plugins.rope_completion import pylsp_completions as pylsp_rope_completions
from pylsp._utils import JEDI_VERSION
PY2 = sys.version[0] == '2'
LINUX = sys.platform.startswith('linux')
CI = os.environ.get('CI')
LOCATION = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__))
)
DOC_URI = uris.from_fs_path(__file__)
DOC = """import os
print os.path.isabs("/tmp")
def hello():
pass
def _a_hello():
pass
class Hello():
@property
def world(self):
return None
def everyone(self, a, b, c=None, d=2):
pass
print Hello().world
print Hello().every
def documented_hello():
\"\"\"Sends a polite greeting\"\"\"
pass
"""
def test_rope_import_completion(config, workspace):
com_position = {'line': 0, 'character': 7}
doc = Document(DOC_URI, workspace, DOC)
items = pylsp_rope_completions(config, workspace, doc, com_position)
assert items is None
class TypeCase(NamedTuple):
document: str
position: dict
label: str
expected: lsp.CompletionItemKind
TYPE_CASES: Dict[str, TypeCase] = {
'variable': TypeCase(
document='test = 1\ntes',
position={'line': 1, 'character': 3},
label='test',
expected=lsp.CompletionItemKind.Variable
),
'function': TypeCase(
document='def test():\n pass\ntes',
position={'line': 2, 'character': 3},
label='test()',
expected=lsp.CompletionItemKind.Function
),
'keyword': TypeCase(
document='fro',
position={'line': 0, 'character': 3},
label='from',
expected=lsp.CompletionItemKind.Keyword
),
'file': TypeCase(
document='"' + __file__[:-2].replace('"', '\\"') + '"',
position={'line': 0, 'character': len(__file__) - 2},
label=Path(__file__).name + '"',
expected=lsp.CompletionItemKind.File
),
'module': TypeCase(
document='import statis',
position={'line': 0, 'character': 13},
label='statistics',
expected=lsp.CompletionItemKind.Module
),
'class': TypeCase(
document='KeyErr',
position={'line': 0, 'character': 6},
label='KeyError',
expected=lsp.CompletionItemKind.Class
),
'property': TypeCase(
document=(
'class A:\n'
' @property\n'
' def test(self):\n'
' pass\n'
'A().tes'
),
position={'line': 4, 'character': 5},
label='test',
expected=lsp.CompletionItemKind.Property
)
}
@pytest.mark.parametrize('case', list(TYPE_CASES.values()), ids=list(TYPE_CASES.keys()))
def test_jedi_completion_type(case, config, workspace):
# property support was introduced in 0.18
if case.expected == lsp.CompletionItemKind.Property and JEDI_VERSION.startswith('0.17'):
return
doc = Document(DOC_URI, workspace, case.document)
items = pylsp_jedi_completions(config, doc, case.position)
items = {i['label']: i for i in items}
assert items[case.label]['kind'] == case.expected
def test_jedi_completion(config, workspace):
# Over 'i' in os.path.isabs(...)
com_position = {'line': 1, 'character': 15}
doc = Document(DOC_URI, workspace, DOC)
items = pylsp_jedi_completions(config, doc, com_position)
assert items
labels = [i['label'] for i in items]
assert 'isfile(path)' in labels
# Test we don't throw with big character
pylsp_jedi_completions(config, doc, {'line': 1, 'character': 1000})
def test_jedi_completion_item_resolve(config, workspace):
# Over the blank line
com_position = {'line': 8, 'character': 0}
doc = Document(DOC_URI, workspace, DOC)
config.update({'plugins': {'jedi_completion': {'resolve_at_most': math.inf}}})
completions = pylsp_jedi_completions(config, doc, com_position)
items = {c['label']: c for c in completions}
documented_hello_item = items['documented_hello()']
assert 'documentation' not in documented_hello_item
assert 'detail' not in documented_hello_item
resolved_documented_hello = pylsp_jedi_completion_item_resolve(
completion_item=documented_hello_item,
document=doc
)
assert 'Sends a polite greeting' in resolved_documented_hello['documentation']
def test_jedi_completion_with_fuzzy_enabled(config, workspace):
# Over 'i' in os.path.isabs(...)
config.update({'plugins': {'jedi_completion': {'fuzzy': True}}})
com_position = {'line': 1, 'character': 15}
doc = Document(DOC_URI, workspace, DOC)
items = pylsp_jedi_completions(config, doc, com_position)
assert items
expected = 'commonprefix(m)'
if JEDI_VERSION == '0.18.0':
expected = 'commonprefix(list)'
assert items[0]['label'] == expected
# Test we don't throw with big character
pylsp_jedi_completions(config, doc, {'line': 1, 'character': 1000})
def test_jedi_completion_resolve_at_most(config, workspace):
# Over 'i' in os.path.isabs(...)
com_position = {'line': 1, 'character': 15}
doc = Document(DOC_URI, workspace, DOC)
# Do not resolve any labels
config.update({'plugins': {'jedi_completion': {'resolve_at_most': 0}}})
items = pylsp_jedi_completions(config, doc, com_position)
labels = {i['label'] for i in items}
assert 'isabs' in labels
# Resolve all items
config.update({'plugins': {'jedi_completion': {'resolve_at_most': math.inf}}})
items = pylsp_jedi_completions(config, doc, com_position)
labels = {i['label'] for i in items}
assert 'isfile(path)' in labels
def test_rope_completion(config, workspace):
# Over 'i' in os.path.isabs(...)
com_position = {'line': 1, 'character': 15}
workspace.put_document(DOC_URI, source=DOC)
doc = workspace.get_document(DOC_URI)
items = pylsp_rope_completions(config, workspace, doc, com_position)
assert items
assert items[0]['label'] == 'isabs'
def test_jedi_completion_ordering(config, workspace):
# Over the blank line
com_position = {'line': 8, 'character': 0}
doc = Document(DOC_URI, workspace, DOC)
config.update({'plugins': {'jedi_completion': {'resolve_at_most': math.inf}}})
completions = pylsp_jedi_completions(config, doc, com_position)
items = {c['label']: c['sortText'] for c in completions}
    # Ensure 'hidden' (underscore-prefixed) functions sort after unhidden ones
assert items['hello()'] < items['_a_hello()']
def test_jedi_property_completion(config, workspace):
# Over the 'w' in 'print Hello().world'
com_position = {'line': 18, 'character': 15}
doc = Document(DOC_URI, workspace, DOC)
completions = pylsp_jedi_completions(config, doc, com_position)
items = {c['label']: c['sortText'] for c in completions}
# Ensure we can complete the 'world' property
assert 'world' in list(items.keys())[0]
def test_jedi_method_completion(config, workspace):
# Over the 'y' in 'print Hello().every'
com_position = {'line': 20, 'character': 19}
doc = Document(DOC_URI, workspace, DOC)
config.capabilities['textDocument'] = {'completion': {'completionItem': {'snippetSupport': True}}}
config.update({'plugins': {'jedi_completion': {'include_params': True}}})
completions = pylsp_jedi_completions(config, doc, com_position)
everyone_method = [completion for completion in completions if completion['label'] == 'everyone(a, b, c, d)'][0]
# Ensure we only generate snippets for positional args
assert everyone_method['insertTextFormat'] == lsp.InsertTextFormat.Snippet
assert everyone_method['insertText'] == 'everyone(${1:a}, ${2:b})$0'
# Disable param snippets
config.update({'plugins': {'jedi_completion': {'include_params': False}}})
completions = pylsp_jedi_completions(config, doc, com_position)
everyone_method = [completion for completion in completions if completion['label'] == 'everyone(a, b, c, d)'][0]
assert 'insertTextFormat' not in everyone_method
assert everyone_method['insertText'] == 'everyone'
@pytest.mark.skipif(PY2 or (sys.platform.startswith('linux') and os.environ.get('CI') is not None),
reason="Test in Python 3 and not on CIs on Linux because wheels don't work on them.")
def test_pyqt_completion(config, workspace):
# Over 'QA' in 'from PyQt5.QtWidgets import QApplication'
doc_pyqt = "from PyQt5.QtWidgets import QA"
com_position = {'line': 0, 'character': len(doc_pyqt)}
doc = Document(DOC_URI, workspace, doc_pyqt)
completions = pylsp_jedi_completions(config, doc, com_position)
assert completions is not None
def test_numpy_completions(config, workspace):
doc_numpy = "import numpy as np; np."
com_position = {'line': 0, 'character': len(doc_numpy)}
doc = Document(DOC_URI, workspace, doc_numpy)
items = pylsp_jedi_completions(config, doc, com_position)
assert items
assert any('array' in i['label'] for i in items)
def test_pandas_completions(config, workspace):
doc_pandas = "import pandas as pd; pd."
com_position = {'line': 0, 'character': len(doc_pandas)}
doc = Document(DOC_URI, workspace, doc_pandas)
items = pylsp_jedi_completions(config, doc, com_position)
assert items
assert any('DataFrame' in i['label'] for i in items)
def test_matplotlib_completions(config, workspace):
doc_mpl = "import matplotlib.pyplot as plt; plt."
com_position = {'line': 0, 'character': len(doc_mpl)}
doc = Document(DOC_URI, workspace, doc_mpl)
items = pylsp_jedi_completions(config, doc, com_position)
assert items
assert any('plot' in i['label'] for i in items)
def test_snippets_completion(config, workspace):
doc_snippets = 'from collections import defaultdict \na=defaultdict'
com_position = {'line': 0, 'character': 35}
doc = Document(DOC_URI, workspace, doc_snippets)
config.capabilities['textDocument'] = {
'completion': {'completionItem': {'snippetSupport': True}}}
config.update({'plugins': {'jedi_completion': {'include_params': True}}})
completions = pylsp_jedi_completions(config, doc, com_position)
assert completions[0]['insertText'] == 'defaultdict'
com_position = {'line': 1, 'character': len(doc_snippets)}
completions = pylsp_jedi_completions(config, doc, com_position)
assert completions[0]['insertText'] == 'defaultdict($0)'
assert completions[0]['insertTextFormat'] == lsp.InsertTextFormat.Snippet
def test_snippets_completion_at_most(config, workspace):
doc_snippets = 'from collections import defaultdict \na=defaultdict'
doc = Document(DOC_URI, workspace, doc_snippets)
config.capabilities['textDocument'] = {
'completion': {'completionItem': {'snippetSupport': True}}}
config.update({'plugins': {'jedi_completion': {'include_params': True}}})
config.update({'plugins': {'jedi_completion': {'resolve_at_most': 0}}})
com_position = {'line': 1, 'character': len(doc_snippets)}
completions = pylsp_jedi_completions(config, doc, com_position)
assert completions[0]['insertText'] == 'defaultdict'
assert not completions[0].get('insertTextFormat', None)
def test_completion_with_class_objects(config, workspace):
doc_text = 'class FOOBAR(Object): pass\nFOOB'
com_position = {'line': 1, 'character': 4}
doc = Document(DOC_URI, workspace, doc_text)
config.capabilities['textDocument'] = {
'completion': {'completionItem': {'snippetSupport': True}}}
config.update({'plugins': {'jedi_completion': {
'include_params': True,
'include_class_objects': True,
}}})
completions = pylsp_jedi_completions(config, doc, com_position)
assert len(completions) == 2
assert completions[0]['label'] == 'FOOBAR'
assert completions[0]['kind'] == lsp.CompletionItemKind.Class
assert completions[1]['label'] == 'FOOBAR object'
assert completions[1]['kind'] == lsp.CompletionItemKind.TypeParameter
def test_snippet_parsing(config, workspace):
doc = 'divmod'
completion_position = {'line': 0, 'character': 6}
doc = Document(DOC_URI, workspace, doc)
config.capabilities['textDocument'] = {
'completion': {'completionItem': {'snippetSupport': True}}}
config.update({'plugins': {'jedi_completion': {'include_params': True}}})
completions = pylsp_jedi_completions(config, doc, completion_position)
out = 'divmod(${1:x}, ${2:y})$0'
if JEDI_VERSION == '0.18.0':
out = 'divmod(${1:a}, ${2:b})$0'
assert completions[0]['insertText'] == out
def test_multiline_import_snippets(config, workspace):
document = 'from datetime import(\n date,\n datetime)\na=date'
doc = Document(DOC_URI, workspace, document)
config.capabilities['textDocument'] = {
'completion': {'completionItem': {'snippetSupport': True}}}
config.update({'plugins': {'jedi_completion': {'include_params': True}}})
position = {'line': 1, 'character': 5}
completions = pylsp_jedi_completions(config, doc, position)
assert completions[0]['insertText'] == 'date'
position = {'line': 2, 'character': 9}
completions = pylsp_jedi_completions(config, doc, position)
assert completions[0]['insertText'] == 'datetime'
def test_multiline_snippets(config, workspace):
document = 'from datetime import\\\n date,\\\n datetime \na=date'
doc = Document(DOC_URI, workspace, document)
config.capabilities['textDocument'] = {
'completion': {'completionItem': {'snippetSupport': True}}}
config.update({'plugins': {'jedi_completion': {'include_params': True}}})
position = {'line': 1, 'character': 5}
completions = pylsp_jedi_completions(config, doc, position)
assert completions[0]['insertText'] == 'date'
position = {'line': 2, 'character': 9}
completions = pylsp_jedi_completions(config, doc, position)
assert completions[0]['insertText'] == 'datetime'
def test_multistatement_snippet(config, workspace):
config.capabilities['textDocument'] = {
'completion': {'completionItem': {'snippetSupport': True}}}
config.update({'plugins': {'jedi_completion': {'include_params': True}}})
document = 'a = 1; from datetime import date'
doc = Document(DOC_URI, workspace, document)
position = {'line': 0, 'character': len(document)}
completions = pylsp_jedi_completions(config, doc, position)
assert completions[0]['insertText'] == 'date'
document = 'from math import fmod; a = fmod'
doc = Document(DOC_URI, workspace, document)
position = {'line': 0, 'character': len(document)}
completions = pylsp_jedi_completions(config, doc, position)
assert completions[0]['insertText'] == 'fmod(${1:x}, ${2:y})$0'
def test_jedi_completion_extra_paths(tmpdir, workspace):
# Create a tempfile with some content and pass to extra_paths
temp_doc_content = '''
def spam():
pass
'''
p = tmpdir.mkdir("extra_path")
extra_paths = [str(p)]
p = p.join("foo.py")
p.write(temp_doc_content)
# Content of doc to test completion
doc_content = """import foo
foo.s"""
doc = Document(DOC_URI, workspace, doc_content)
# After 'foo.s' without extra paths
com_position = {'line': 1, 'character': 5}
completions = pylsp_jedi_completions(doc._config, doc, com_position)
assert completions is None
# Update config extra paths
settings = {'pylsp': {'plugins': {'jedi': {'extra_paths': extra_paths}}}}
doc.update_config(settings)
# After 'foo.s' with extra paths
com_position = {'line': 1, 'character': 5}
completions = pylsp_jedi_completions(doc._config, doc, com_position)
assert completions[0]['label'] == 'spam()'
@pytest.mark.skipif(PY2 or not LINUX or not CI, reason="tested on linux and python 3 only")
def test_jedi_completion_environment(workspace):
# Content of doc to test completion
doc_content = '''import logh
'''
doc = Document(DOC_URI, workspace, doc_content)
# After 'import logh' with default environment
com_position = {'line': 0, 'character': 11}
assert os.path.isdir('/tmp/pyenv/')
settings = {'pylsp': {'plugins': {'jedi': {'environment': None}}}}
doc.update_config(settings)
completions = pylsp_jedi_completions(doc._config, doc, com_position)
assert completions is None
# Update config extra environment
env_path = '/tmp/pyenv/bin/python'
settings = {'pylsp': {'plugins': {'jedi': {'environment': env_path}}}}
doc.update_config(settings)
# After 'import logh' with new environment
completions = pylsp_jedi_completions(doc._config, doc, com_position)
assert completions[0]['label'] == 'loghub'
resolved = pylsp_jedi_completion_item_resolve(completions[0], doc)
assert 'changelog generator' in resolved['documentation'].lower()
def test_document_path_completions(tmpdir, workspace_other_root_path):
# Create a dummy module out of the workspace's root_path and try to get
# completions for it in another file placed next to it.
module_content = '''
def foo():
pass
'''
p = tmpdir.join("mymodule.py")
p.write(module_content)
# Content of doc to test completion
doc_content = """import mymodule
mymodule.f"""
doc_path = str(tmpdir) + os.path.sep + 'myfile.py'
doc_uri = uris.from_fs_path(doc_path)
doc = Document(doc_uri, workspace_other_root_path, doc_content)
com_position = {'line': 1, 'character': 10}
completions = pylsp_jedi_completions(doc._config, doc, com_position)
assert completions[0]['label'] == 'foo()'
|
# ================================================================
# YouTube Downloader (.MP4 para .MP3) :: github.com/sliatecinos
# ================================================================
from pytube import YouTube
from pydub import AudioSegment
import os
import time
link = input('\nEnter the link: ')
yt = YouTube(link)
# Title of the video:
print('Title:\t', yt.title)
# Number of views:
print('Number of views:\t', yt.views)
# Length of the video (seconds):
print('Length:\t', yt.length)
# # Description of the video:
# print('Description:\t', yt.description)
# Rating:
print('Rating:\t', yt.rating)
# Author:
print('Published by:\t', yt.author)
# Getting only audio from video:
video = yt.streams.filter(only_audio=True).first()
res = input('Continue? (y/n):\t')
destino = 'C:\\Users\\sliatecinos\\Músicas\\'
if res.lower() == 'y':
# Starting download:
print('Time start:', time.strftime("%b %d %Y %H:%M:%S", time.localtime()))
    print('Download in progress....')
out_file = video.download(destino)
    print('Download complete!!')
mp4_audio = AudioSegment.from_file(out_file, format="mp4")
base, ext = os.path.splitext(out_file)
mp4_audio.export(base + '.mp3', format="mp3")
    print('Conversion to MP3 successful!!!')
    # Remove the leftover .mp4 files from the destination folder, keeping only the .mp3
    files_in_directory = os.listdir(destino)
filtered_files = [file for file in files_in_directory if file.endswith(".mp4")]
for file in filtered_files:
path_to_file = os.path.join(destino, file)
os.remove(path_to_file)
print('Time end:', time.strftime("%b %d %Y %H:%M:%S", time.localtime()))
|
"""
This script has a few examples of how to use the custom keras objects
that are defined in `keras_custom_objects`
"""
'''
1. Use a custom EarlyStopping criterion:
In our case it is RelativeEarlyStopping, which terminates training
when the monitored relative improvement between two epochs is less than 0.1%
'''
import keras_custom_objects as KO
patience = 10  # example value; set your own patience here
custom_earlystopping = KO.RelativeEarlyStopping(monitor='val_loss',
min_perc_delta=0.001, # perc means percentage
patience=patience,
verbose=2,
mode='min'
)
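# Hedged sketch (not the actual RelativeEarlyStopping implementation, whose code
# lives in keras_custom_objects): the relative-improvement rule described above
# boils down to a check like the following, where `previous` and `current` are
# the monitored values from two consecutive epochs and mode='min':
#
#     relative_improvement = (previous - current) / abs(previous)
#     stop_training = relative_improvement < min_perc_delta   # e.g. < 0.001 == 0.1%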
'''
2. Use a custom fitting function:
In our case, we want to extend the original fit_generator with extra functionality,
such as not using multiprocessing for validation (to avoid validation data duplication)
and being able to re-weight validation instances the same way the training instances are
weighted under a given scheme.
The way I created these custom keras functions is by no means the most accurate/elegant way
of achieving the goal. Feel free to modify it or do it your way, and do let me know if you
find a better way to do so. Thanks!
'''
import keras_custom_objects as KO
# because the custom functions are defined under the CustomModel class which is inherited
# from the Model class, we now must define our model using CustomModel
model = KO.CustomModel(inputs=some_layer.input, outputs=some_other_layer.output)  # some_layer/some_other_layer are placeholders for your own layers
# and then you can call custom fitting no different to the original case
model.fit_generator_custom(train_generator,
steps_per_epoch=train_steps,
epochs=epochs,
validation_data=val_generator,
validation_steps=val_steps,
class_weight=class_weighting, # this weight will now also apply to validation instances
verbose=1,
callbacks=[tensorboard, earlystopping, checkpoint],
max_queue_size=40,
workers=14,
use_multiprocessing=True) # in fact use_multiprocessing=False for validation set
|
import os
from shutil import copyfile
from logic_bank.util import prt
def setup_db():
    """Copy db/database-gold.db over db/database.db."""
print("\n" + prt("restoring database-gold\n"))
basedir = os.path.abspath(os.path.dirname(__file__))
basedir = os.path.dirname(basedir)
print("\n********************************\n"
" IMPORTANT - create database.db from database-gold.db in " + basedir + "/nw/db/\n" +
" - from -- " + prt("") +
"\n********************************")
nw_loc = os.path.join(basedir, "db/database.db")
nw_source = os.path.join(basedir, "db/database-gold.db")
copyfile(src=nw_source, dst=nw_loc)
|
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 24 21:46:41 2019
You are not expected to understand my code!
@Author: Kotori_Y
@Blog: blog.moyule.me
@Weibo: Kotori-Y
@Mail: yzjkid9@gmail.com
I love Megumi forever!
"""
print(__doc__)
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split,KFold
from sklearn.metrics import accuracy_score,precision_score,recall_score
import pandas as pd
import time
import os
from tqdm import tqdm
kf = KFold(n_splits=5)  # 5-fold cross-validation splitter
start = time.perf_counter()
#os.chdir(r'E:\student\yzy\Importance')
#files = os.listdir()
#os.makedirs('FeatureAna')
#df = df.sample(frac=1).reset_index(drop=True)
#df.drop('SMILES',axis=1,inplace=True)
#y = df.pop('Label')
#fold = 0
####################################### 5-Fold #######################################
#df_i = pd.DataFrame()#creat a dataframe for importance
#df_m = pd.DataFrame()#creat a dataframe for metrics
#for train_index, test_index in kf.split(df):
# col = list(df.columns)
# fold += 1
# X_train, x_test = df.iloc[train_index], df.iloc[test_index]
# Y_train, y_test = y.iloc[train_index], y.iloc[test_index]
# X = X_train.copy()
# x = x_test.copy()
#
# for _ in tqdm(range(len(df.columns))):
#
# rfc = RandomForestClassifier(n_estimators=500,n_jobs=-1)
## print('----------------Fitting----------------')
# rfc.fit(X,Y_train)
#
# fea = pd.DataFrame(
# {
# 'Feature':col,
# 'Importance':rfc.feature_importances_,
# 'Fold':'fold_{}'.format(fold),
# 'Class':len(col)
# }
# )
# fea.sort_values('Importance',ascending=False,inplace=True)
# df_i = pd.concat([df_i,fea],ignore_index=True)
#
# #cal correlate metrics
# acc = accuracy_score(y_test,rfc.predict(x))
# pre = precision_score(y_test,rfc.predict(x))
# rec = recall_score(y_test,rfc.predict(x))
#
# me = pd.DataFrame(
# {
# 'Precision':[pre],
# 'Recall':[rec],
# 'Accuracy':[acc],
# 'Fold':['fold_{}'.format(fold)],
# 'Class':[len(col)]
# }
# )
# df_m = pd.concat([df_m,me],ignore_index=True)
#
# #drop the most unimportant feature
# drop = list(fea['Feature'])[-1]
#
# X.drop(drop,axis=1,inplace=True)
# x.drop(drop,axis=1,inplace=True)
# col.remove(drop)
#
# del rfc,fea,me
#
#
#end = time.clock()
#
#print(end-start)
#
#df_i.to_csv('Importances.csv')
#df_m.to_csv('Metrics.csv')
###########################################################################################
####################################### ONCE #######################################
def Selection(file,filepath):
os.chdir(filepath)
print('-----{} start-----'.format(file.replace('.csv','')))
    df_i = pd.DataFrame()  # create a dataframe for importances
    df_m = pd.DataFrame()  # create a dataframe for metrics
#df_1 = pd.read_csv(r'E:\student\kotori\Lemon\backup\2C9_In_MACCS-1.csv')
#df_0 = pd.read_csv(r'E:\student\kotori\Lemon\backup\2C9_In_MACCS-0.csv')
#df_1 = df_1.sample(len(df_0),replace=True)
#df = pd.concat([df_1,df_0],ignore_index=True,sort=False)
df = pd.read_csv(file)
df = df.sample(frac=1).reset_index(drop=True)
# df = df.iloc[:,3:]
# try:
# df.drop('SMILES',axis=1,inplace=True)
# except:
# df.drop('Smiles',axis=1,inplace=True)
y = df.pop('grades')
col = list(df.columns)
X_train,x_test,Y_train,y_test = train_test_split(df,y,test_size=0.2)
X = X_train.copy()
x = x_test.copy()
for _ in tqdm(range(len(df.columns))):
rfc = RandomForestClassifier(n_estimators=500,n_jobs=-1)
# print('----------------Fitting----------------')
rfc.fit(X,Y_train)
fea = pd.DataFrame(
{
'Feature':col
,'Importance':rfc.feature_importances_
,'Class':len(col)
}
)
fea.sort_values('Importance',ascending=False,inplace=True)
df_i = pd.concat([df_i,fea],ignore_index=True,sort=False)
#cal correlate metrics
acc = accuracy_score(y_test,rfc.predict(x))
pre = precision_score(y_test,rfc.predict(x))
rec = recall_score(y_test,rfc.predict(x))
me = pd.DataFrame(
{
'Precision':[pre]
,'Recall':[rec]
,'Accuracy':[acc]
#,'Fold':['fold_{}'.format(fold)]
,'Class':[len(col)]
}
)
df_m = pd.concat([df_m,me],ignore_index=True,sort=False)
#drop the most unimportant feature
drop = list(fea['Feature'])[-1]
X.drop(drop,axis=1,inplace=True)
x.drop(drop,axis=1,inplace=True)
col.remove(drop)
del rfc,fea,me
#file = '2C9_In_MACCS'
#df_i.to_csv('FeatureAna/{}_Importances_oversampling.csv'.format(file),index=False)
#df_m.to_csv('FeatureAna/{}_Metrics_oversampling.csv'.format(file),index=False)
return df_i,df_m
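# Illustrative usage sketch (the file name and path below are hypothetical);
# it assumes the CSV holds a 'grades' label column plus the feature columns
# consumed above, as in the function body:
#
#     importances, metrics = Selection('example_features.csv', '/path/to/data')
#     importances.to_csv('example_features_Importances.csv', index=False)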
def main():
    print("Input the absolute path of the folder where your files are located and ensure each file only contains 'SMILES', 'Label' and the feature vectors\n")
filepath = input("The absolute path: ")
files = os.listdir(filepath)
for file in files:
df_i, df_m = Selection(file,filepath)
# os.chdir(r'E:\student\yzy\All')
#
# part_1_class = list(range(1000,1717))
#
# df_i_a = df_i[df_i['Class'].isin(part_1_class)]
# df_i_b = df_i[~df_i['Class'].isin(part_1_class)]
# df_i.iloc[:,:].to_csv(file.replace('.csv','') + '_Importances.csv',index=False)
# df_m.to_csv(file.replace('.csv','') + '_Metrics.csv',index=False)
df_i.to_csv('{}_Importances.csv'.format(file.replace('.csv','')))
if '__main__' == __name__:
main()
#,'Fold':'fold_{}'.format(fold)
|
from six import iteritems, itervalues
from collections import OrderedDict
try:  # Python 3.3+ moved the ABCs to collections.abc
    from collections.abc import MutableMapping, Iterable
except ImportError:  # Python 2 fallback
    from collections import MutableMapping, Iterable
from functools import wraps
import anvil.config as cfg
def to_list(query):
if isinstance(query, list):
return query
elif isinstance(query, str):
return [query]
elif isinstance(query, dict):
return [query]
elif not query:
return list()
try:
return list(query)
except TypeError:
return [query]
def to_size_list(query, desired_length):
query_list = to_list(query) if query else [None]
if len(query_list) > desired_length:
return query_list[:desired_length]
else:
return query_list + [query_list[-1]] * (desired_length - len(query_list))
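# Doctest-style illustration (not from the original module) of how to_size_list
# pads (by repeating the last element) or truncates to the requested length:
#
#     >>> to_size_list(['a', 'b'], 4)
#     ['a', 'b', 'b', 'b']
#     >>> to_size_list(['a', 'b', 'c'], 2)
#     ['a', 'b']
#     >>> to_size_list(None, 3)
#     [None, None, None]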
def to_camel_case(input_string):
tokens = input_string.split('_')
return tokens[0] + ''.join([token.capitalize() for token in tokens[1:]])
def gen_flatten_dict_depth_two(d):
"""Taken from:
https://stackoverflow.com/questions/3835192/flatten-a-dictionary-of-dictionaries-2-levels-deep-of-lists-in-python
    Given a dict d that is at most two levels deep, return an iterator over all the leaf nodes within it.
"""
for d_inner in itervalues(d):
if isinstance(d_inner, dict):
for nodes in itervalues(d_inner):
print('nodes ', nodes)
for node in to_list(nodes):
print(node)
yield node
else:
for node in to_list(d_inner):
print('node ', node)
yield node
def get_dict_depth(d=None, level=0):
"""Returns maximum depth of the hierarchy"""
if not isinstance(d, dict) or not d:
return level
return max(get_dict_depth(d[k], level=level + 1) for k in d)
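# Doctest-style illustration (not from the original module) of the depth count:
#
#     >>> get_dict_depth({'a': 1})
#     1
#     >>> get_dict_depth({'a': {'b': {'c': 1}}})
#     3
#     >>> get_dict_depth({})
#     0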
def get_dict_key_matches(key, dictionary):
for k, v in iteritems(dictionary):
if k == key:
return {k: v}
        elif isinstance(v, dict):
            nested = get_dict_key_matches(key, v)
            if nested is not None:
                return nested
def dict_to_keys_list(d, keys=None):
keys = keys if keys is not None else []
if isinstance(d, dict):
for k, v in iteritems(d):
keys.append(k)
dict_to_keys_list(v, keys)
else:
keys.append(d)
return keys
def dict_deep_sort(cls, obj):
    """Recursively sort a dict or list, including nested lists and dict keys.
Taken from: http://goo.gl/tQfDP6
"""
if isinstance(obj, dict):
_sorted = OrderedDict()
for key in sorted(list(obj)):
            _sorted[key] = dict_deep_sort(cls, obj[key])
elif isinstance(obj, list):
new_list = []
for val in obj:
            new_list.append(dict_deep_sort(cls, val))
_sorted = sorted(new_list)
else:
_sorted = obj
return _sorted
def to_str_dict(d):
data = {}
for k, v in iteritems(d):
try:
data.update({str(k): str(v)})
except TypeError:
pass
return data
def pop_dict_keys(d, keys):
popped = []
for key in keys:
try:
popped.append(d.pop(key))
except KeyError:
pass
return popped
def merge_dicts(*args, **kwargs):
    """Outputs a merged dictionary from the inputs. On key conflicts, later inputs overwrite earlier ones (args left to right, then kwargs).
:param args: (dict), tuple of input dictionaries
:param kwargs: dict, input kwargs to merge
:return: dict, combined data.
"""
data = {}
for input_dict in [arg for arg in args if isinstance(arg, dict)] + [kwargs]:
data.update(input_dict)
return data
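# Doctest-style illustration (not from the original module); later inputs win
# on key conflicts:
#
#     >>> merge_dicts({'a': 1, 'b': 2}, {'b': 3}, c=4)
#     {'a': 1, 'b': 3, 'c': 4}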
def dict_compare(d1, d2):
"""Taken from: https://stackoverflow.com/questions/4527942/comparing-two-dictionaries-in-python"""
d1_keys = set(list(d1))
d2_keys = set(list(d2))
intersect_keys = d1_keys.intersection(d2_keys)
added = d1_keys - d2_keys
removed = d2_keys - d1_keys
modified = {o: (d1[o], d2[o]) for o in intersect_keys if d1[o] != d2[o]}
same = set(o for o in intersect_keys if d1[o] == d2[o])
return added, removed, modified, same
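# Doctest-style illustration (not from the original module) of the four return
# values (added, removed, modified, same):
#
#     >>> dict_compare({'a': 1, 'b': 2}, {'b': 3, 'c': 4})
#     ({'a'}, {'c'}, {'b': (2, 3)}, set())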
def dict_to_flat_dict(d, full_path=True, parent_key='', sep='_'):
"""Got from https://stackoverflow.com/questions/6027558/flatten-nested-python-dictionaries-compressing-keys
:param d: dict, input dictionary
:param full_path: bool, whether to store the full path as the key or the final key for that dictionary item.
:param parent_key: str, keeps track of the dictionary path taken, do not set.
    :param sep: str, arbitrary separator to delineate path separation in the parent_key string.
:return: dict, flat dictionary with all keys as full path keys.
"""
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key and full_path else k
if isinstance(v, MutableMapping):
            items.extend(dict_to_flat_dict(v, full_path=full_path, parent_key=new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
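# Doctest-style illustration (not from the original module), assuming the
# recursive call forwards full_path/parent_key as written above:
#
#     >>> dict_to_flat_dict({'a': {'b': 1, 'c': {'d': 2}}, 'e': 3})
#     {'a_b': 1, 'a_c_d': 2, 'e': 3}
#     >>> dict_to_flat_dict({'a': {'b': 1}}, full_path=False)
#     {'b': 1}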
merge_value_LUT = {
    dict: lambda d1, d2: merge_dicts(d1, d2),
list: lambda l1, l2: l1 + to_list(l2),
str: lambda s1, s2: s1 + str(s2),
'replace': lambda e1, e2: e2,
}
class Map(dict):
"""A dot notation accessible dictionary class extension.
Taken from: https://stackoverflow.com/questions/2352181/how-to-use-a-dot-to-access-members-of-dictionary
Example:
m = Map({'first_name': 'Eduardo'}, last_name='Pool', age=24, sports=['Soccer'])
"""
def __init__(self, *args, **kwargs):
super(Map, self).__init__(*args, **kwargs)
for arg in args:
if isinstance(arg, dict):
for k, v in iteritems(arg):
self[k] = v
if kwargs:
for k, v in iteritems(kwargs):
self[k] = v
def deep_update(self, d, path=None):
if path is None:
path = []
for k, v in iteritems(d):
if isinstance(v, dict):
self.deep_update(v, path=path + [k])
else:
self._merge_value(path + [k], v)
def flatten(self):
return gen_flatten_dict_depth_two(self)
def to_flat_dict(self, full_path=False):
return dict_to_flat_dict(self, full_path=full_path)
    def to_value_list(self):
        result = []
        # explicit loop (rather than map) so the extend calls also run on Python 3, where map() is lazy
        for n in itervalues(self.to_flat_dict()):
            result.extend(n if isinstance(n, Iterable) else to_list(n))
        return result
def _merge_value(self, path, v):
"""Stably merge values without overwriting or messing up Map object.
This is used since we have a slightly customized way of adding entries and do not want the base Map object
to start getting stale data. If a path does not exist, we will add a default Map object in that place
unless it is the final path, in which case we merge with the existing (or not) value.
:param path: list, list of keys we will traverse down.
:param v: object, any type of object we are adding to that nested/base dict.
"""
current_map = self
for p in path[:-1]:
current_map = current_map.setdefault(p, self.__class__())
current_v = current_map.setdefault(path[-1], None)
current_map[path[-1]] = merge_value_LUT.get(type(current_v), merge_value_LUT['replace'])(current_v, v)
def __getattr__(self, attr):
"""Passthrough function for dictionary.get"""
return self.get(attr)
def __setattr__(self, key, value):
"""Passthrough function for dictionary item setter"""
self.__setitem__(key, value)
def __setitem__(self, key, value):
"""Updates both setitem and instance dictionary key value"""
super(Map, self).__setitem__(key, value)
self.__dict__[key] = value
def __delattr__(self, item):
"""Passthrough for dictionary delete item."""
self.__delitem__(item)
def __delitem__(self, key):
"""Deletes both the attribute and the instance dictionary"""
super(Map, self).__delitem__(key)
del self.__dict__[key]
    def __eq__(self, other):
        """Determines if the dictionary is equivalent to the other dictionary."""
        added, removed, modified, _ = dict_compare(self.__dict__, other)
        return not (added or removed or modified)
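# Illustrative usage of the Map class above (the values are hypothetical),
# showing dot access and deep_update:
#
#     >>> m = Map({'first_name': 'Eduardo'}, last_name='Pool', age=24)
#     >>> m.first_name
#     'Eduardo'
#     >>> m.deep_update({'address': {'city': 'Merida'}})
#     >>> m.address.city
#     'Merida'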
def extend_parent_kwarg(number_of_parents):
def inner(f):
@wraps(f)
def wrapper(abstract_grouping, *args, **kwargs):
kwargs[cfg.PARENT] = iter(to_size_list(kwargs.get(cfg.PARENT), number_of_parents))
return f(abstract_grouping, *args, **kwargs)
return wrapper
return inner
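# Hedged usage sketch of the decorator above (build_limbs is a hypothetical
# function; abstract_grouping stands for whatever grouping object the caller
# passes): cfg.PARENT in kwargs becomes an iterator sized to number_of_parents,
# so the wrapped function can consume one parent per limb.
#
#     @extend_parent_kwarg(2)
#     def build_limbs(abstract_grouping, **kwargs):
#         left_parent = next(kwargs[cfg.PARENT])
#         right_parent = next(kwargs[cfg.PARENT])
#         ...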
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects and functions that manage rights for various user actions."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import copy
import logging
from constants import constants
from core.domain import activity_services
from core.domain import change_domain
from core.domain import role_services
from core.domain import subscription_services
from core.domain import user_services
from core.platform import models
import feconf
import python_utils
import utils
current_user_services = models.Registry.import_current_user_services()
(collection_models, exp_models,) = models.Registry.import_models([
models.NAMES.collection, models.NAMES.exploration
])
# IMPORTANT: Ensure that all changes to how these cmds are interpreted preserve
# backward-compatibility with previous exploration snapshots in the datastore.
# Do not modify the definitions of CMD keys that already exist.
CMD_CREATE_NEW = 'create_new'
CMD_CHANGE_ROLE = 'change_role'
CMD_CHANGE_EXPLORATION_STATUS = 'change_exploration_status'
CMD_CHANGE_COLLECTION_STATUS = 'change_collection_status'
CMD_CHANGE_PRIVATE_VIEWABILITY = 'change_private_viewability'
CMD_RELEASE_OWNERSHIP = 'release_ownership'
CMD_UPDATE_FIRST_PUBLISHED_MSEC = 'update_first_published_msec'
ACTIVITY_STATUS_PRIVATE = constants.ACTIVITY_STATUS_PRIVATE
ACTIVITY_STATUS_PUBLIC = constants.ACTIVITY_STATUS_PUBLIC
ROLE_OWNER = 'owner'
ROLE_EDITOR = 'editor'
ROLE_VOICE_ARTIST = 'voice artist'
ROLE_VIEWER = 'viewer'
ROLE_NONE = 'none'
ROLE_ADMIN = 'admin'
ROLE_MODERATOR = 'moderator'
# The allowed list of roles which can be used in change_role command.
ALLOWED_ROLES = [ROLE_OWNER, ROLE_EDITOR, ROLE_VOICE_ARTIST, ROLE_VIEWER]
# The allowed list of status which can be used in change_exploration_status
# and change_collection_status commands.
ALLOWED_STATUS = [ACTIVITY_STATUS_PRIVATE, ACTIVITY_STATUS_PUBLIC]
COMMON_ALLOWED_COMMANDS = [{
'name': CMD_CREATE_NEW,
'required_attribute_names': [],
'optional_attribute_names': []
}, {
'name': CMD_CHANGE_ROLE,
'required_attribute_names': ['assignee_id', 'old_role', 'new_role'],
'optional_attribute_names': [],
'allowed_values': {'new_role': ALLOWED_ROLES, 'old_role': ALLOWED_ROLES}
}, {
'name': CMD_CHANGE_PRIVATE_VIEWABILITY,
'required_attribute_names': [
'old_viewable_if_private', 'new_viewable_if_private'],
'optional_attribute_names': []
}, {
'name': CMD_RELEASE_OWNERSHIP,
'required_attribute_names': [],
'optional_attribute_names': [],
}, {
'name': CMD_UPDATE_FIRST_PUBLISHED_MSEC,
'required_attribute_names': [
'old_first_published_msec', 'new_first_published_msec'],
'optional_attribute_names': [],
}]
class ActivityRights(python_utils.OBJECT):
"""Domain object for the rights/publication status of an activity (an
exploration or a collection).
"""
def __init__(
self, exploration_id, owner_ids, editor_ids, voice_artist_ids,
viewer_ids, community_owned=False, cloned_from=None,
status=ACTIVITY_STATUS_PRIVATE, viewable_if_private=False,
first_published_msec=None):
self.id = exploration_id
self.owner_ids = owner_ids
self.editor_ids = editor_ids
self.voice_artist_ids = voice_artist_ids
self.viewer_ids = viewer_ids
self.community_owned = community_owned
self.cloned_from = cloned_from
self.status = status
self.viewable_if_private = viewable_if_private
self.first_published_msec = first_published_msec
def validate(self):
"""Validates an ActivityRights object.
Raises:
utils.ValidationError: if any of the owners, editors, voice artists
and viewers lists overlap, or if a community-owned exploration
has owners, editors, voice artists or viewers specified.
"""
if self.community_owned:
if (self.owner_ids or self.editor_ids or self.voice_artist_ids or
self.viewer_ids):
raise utils.ValidationError(
'Community-owned explorations should have no owners, '
'editors, voice artists or viewers specified.')
if self.community_owned and self.status == ACTIVITY_STATUS_PRIVATE:
raise utils.ValidationError(
'Community-owned explorations cannot be private.')
if self.status != ACTIVITY_STATUS_PRIVATE and self.viewer_ids:
raise utils.ValidationError(
'Public explorations should have no viewers specified.')
owner_editor = set(self.owner_ids) & set(self.editor_ids)
owner_voice_artist = set(self.owner_ids) & set(self.voice_artist_ids)
owner_viewer = set(self.owner_ids) & set(self.viewer_ids)
editor_voice_artist = set(self.editor_ids) & set(self.voice_artist_ids)
editor_viewer = set(self.editor_ids) & set(self.viewer_ids)
voice_artist_viewer = set(self.voice_artist_ids) & set(self.viewer_ids)
if owner_editor:
raise utils.ValidationError(
'A user cannot be both an owner and an editor: %s' %
owner_editor)
if owner_voice_artist:
raise utils.ValidationError(
'A user cannot be both an owner and a voice artist: %s' %
owner_voice_artist)
if owner_viewer:
raise utils.ValidationError(
'A user cannot be both an owner and a viewer: %s' %
owner_viewer)
if editor_voice_artist:
raise utils.ValidationError(
'A user cannot be both an editor and a voice artist: %s' %
editor_voice_artist)
if editor_viewer:
raise utils.ValidationError(
'A user cannot be both an editor and a viewer: %s' %
editor_viewer)
if voice_artist_viewer:
raise utils.ValidationError(
'A user cannot be both a voice artist and a viewer: %s' %
voice_artist_viewer)
def to_dict(self):
"""Returns a dict suitable for use by the frontend.
Returns:
dict. A dict version of ActivityRights suitable for use by the
frontend.
"""
if self.community_owned:
return {
'cloned_from': self.cloned_from,
'status': self.status,
'community_owned': True,
'owner_names': [],
'editor_names': [],
'voice_artist_names': [],
'viewer_names': [],
'viewable_if_private': self.viewable_if_private,
}
else:
return {
'cloned_from': self.cloned_from,
'status': self.status,
'community_owned': False,
'owner_names': user_services.get_human_readable_user_ids(
self.owner_ids),
'editor_names': user_services.get_human_readable_user_ids(
self.editor_ids),
'voice_artist_names': user_services.get_human_readable_user_ids(
self.voice_artist_ids),
'viewer_names': user_services.get_human_readable_user_ids(
self.viewer_ids),
'viewable_if_private': self.viewable_if_private,
}
def is_owner(self, user_id):
"""Checks whether given user is owner of activity.
Args:
user_id: str or None. Id of the user.
Returns:
bool. Whether user is an activity owner.
"""
return bool(user_id in self.owner_ids)
def is_editor(self, user_id):
"""Checks whether given user is editor of activity.
Args:
user_id: str or None. Id of the user.
Returns:
bool. Whether user is an activity editor.
"""
return bool(user_id in self.editor_ids)
def is_voice_artist(self, user_id):
"""Checks whether given user is voice artist of activity.
Args:
user_id: str or None. Id of the user.
Returns:
bool. Whether user is an activity voice artist.
"""
return bool(user_id in self.voice_artist_ids)
def is_viewer(self, user_id):
"""Checks whether given user is viewer of activity.
Args:
user_id: str or None. Id of the user.
Returns:
bool. Whether user is an activity viewer.
"""
return bool(user_id in self.viewer_ids)
def is_published(self):
"""Checks whether activity is published.
Returns:
bool. Whether activity is published.
"""
return bool(self.status == ACTIVITY_STATUS_PUBLIC)
def is_private(self):
"""Checks whether activity is private.
Returns:
bool. Whether activity is private.
"""
return bool(self.status == ACTIVITY_STATUS_PRIVATE)
class ActivityRightsChange(change_domain.BaseChange):
"""Domain object class for an activity rights change.
The allowed commands, together with the attributes:
- 'create_new'
- 'change_role' (with assignee_id, old_role, new_role)
- 'change_exploration_status' (with old_status, new_status)
- 'change_collection_status' (with old_status, new_status)
- 'change_private_viewability' (with
old_viewable_if_private, new_viewable_if_private)
- 'release_ownership'
- 'update_first_published_msec' (with
old_first_published_msec, new_first_published_msec)
A role must be one of the ALLOWED_ROLES.
A status must be one of the ALLOWED_STATUS.
"""
ALLOWED_COMMANDS = COMMON_ALLOWED_COMMANDS
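# Illustrative sketch (not taken from the Oppia codebase; the assignee id is
# hypothetical): a change dict for the 'change_role' command described in the
# docstring above carries the required attributes like so:
#
#     {
#         'cmd': CMD_CHANGE_ROLE,
#         'assignee_id': 'user_id_abc',
#         'old_role': ROLE_VIEWER,
#         'new_role': ROLE_EDITOR,
#     }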
class ExplorationRightsChange(ActivityRightsChange):
"""Domain object class for an exploration rights change."""
ALLOWED_COMMANDS = copy.deepcopy(COMMON_ALLOWED_COMMANDS)
ALLOWED_COMMANDS.append({
'name': CMD_CHANGE_EXPLORATION_STATUS,
'required_attribute_names': ['old_status', 'new_status'],
'optional_attribute_names': [],
'allowed_values': {
'old_status': ALLOWED_STATUS, 'new_status': ALLOWED_STATUS}
})
class CollectionRightsChange(ActivityRightsChange):
    """Domain object class for a collection rights change."""
ALLOWED_COMMANDS = copy.deepcopy(COMMON_ALLOWED_COMMANDS)
ALLOWED_COMMANDS.append({
'name': CMD_CHANGE_COLLECTION_STATUS,
'required_attribute_names': ['old_status', 'new_status'],
'optional_attribute_names': [],
'allowed_values': {
'old_status': ALLOWED_STATUS, 'new_status': ALLOWED_STATUS}
})
def get_activity_rights_from_model(activity_rights_model, activity_type):
"""Constructs an ActivityRights object from the given activity rights model.
Args:
activity_rights_model: ActivityRightsModel. Activity rights from the
datastore.
activity_type: str. The type of activity. Possible values:
constants.ACTIVITY_TYPE_EXPLORATION
constants.ACTIVITY_TYPE_COLLECTION
Returns:
ActivityRights. The rights object created from the model.
"""
return ActivityRights(
activity_rights_model.id,
activity_rights_model.owner_ids,
activity_rights_model.editor_ids,
activity_rights_model.voice_artist_ids,
activity_rights_model.viewer_ids,
community_owned=activity_rights_model.community_owned,
cloned_from=(
activity_rights_model.cloned_from
if activity_type == constants.ACTIVITY_TYPE_EXPLORATION else None),
status=activity_rights_model.status,
viewable_if_private=activity_rights_model.viewable_if_private,
first_published_msec=activity_rights_model.first_published_msec
)
def _save_activity_rights(
committer_id, activity_rights, activity_type, commit_message,
commit_cmds):
"""Saves an ExplorationRights or CollectionRights domain object to the
datastore.
Args:
committer_id: str. ID of the committer.
activity_rights: ActivityRights. The rights object for the given
activity.
activity_type: str. The type of activity. Possible values:
constants.ACTIVITY_TYPE_EXPLORATION
constants.ACTIVITY_TYPE_COLLECTION
commit_message: str. Descriptive message for the commit.
commit_cmds: list(dict). A list of commands describing what kind of
commit was done.
"""
activity_rights.validate()
if activity_type == constants.ACTIVITY_TYPE_EXPLORATION:
model_cls = exp_models.ExplorationRightsModel
elif activity_type == constants.ACTIVITY_TYPE_COLLECTION:
model_cls = collection_models.CollectionRightsModel
model = model_cls.get(activity_rights.id, strict=False)
model.owner_ids = activity_rights.owner_ids
model.editor_ids = activity_rights.editor_ids
model.viewer_ids = activity_rights.viewer_ids
model.voice_artist_ids = activity_rights.voice_artist_ids
model.community_owned = activity_rights.community_owned
model.status = activity_rights.status
model.viewable_if_private = activity_rights.viewable_if_private
model.first_published_msec = activity_rights.first_published_msec
model.commit(committer_id, commit_message, commit_cmds)
def _update_exploration_summary(activity_rights):
"""Updates the exploration summary for the activity associated with the
given rights object.
    The ID of the rights object is the same as the ID of the associated activity.
Args:
activity_rights: ActivityRights. The rights object for the given
activity.
"""
# TODO(msl): Get rid of inline imports by refactoring code.
from core.domain import exp_services
exp_services.update_exploration_summary(
activity_rights.id, None)
def _update_collection_summary(activity_rights):
"""Updates the collection summary for the given activity associated with
the given rights object.
    The ID of the rights object is the same as the ID of the associated activity.
Args:
activity_rights: ActivityRights. The rights object for the given
activity.
"""
from core.domain import collection_services
collection_services.update_collection_summary(
activity_rights.id, None)
def _update_activity_summary(activity_type, activity_rights):
"""Updates the activity summary for the given activity associated with
the given rights object.
    The ID of the rights object is the same as the ID of the associated activity.
Args:
activity_type: str. The type of activity. Possible values:
constants.ACTIVITY_TYPE_EXPLORATION
constants.ACTIVITY_TYPE_COLLECTION
activity_rights: ActivityRights. The rights object for the given
activity.
"""
if activity_type == constants.ACTIVITY_TYPE_EXPLORATION:
_update_exploration_summary(activity_rights)
elif activity_type == constants.ACTIVITY_TYPE_COLLECTION:
_update_collection_summary(activity_rights)
def update_activity_first_published_msec(
activity_type, activity_id, first_published_msec):
"""Updates the first_published_msec field for the given activity.
The caller is responsible for ensuring that this value is not already
set before updating it.
Args:
activity_type: str. The type of activity. Possible values:
constants.ACTIVITY_TYPE_EXPLORATION
constants.ACTIVITY_TYPE_COLLECTION
activity_id: str. ID of the activity.
first_published_msec: float. First publication time in milliseconds
since the Epoch.
"""
activity_rights = _get_activity_rights(activity_type, activity_id)
commit_cmds = [{
'cmd': CMD_UPDATE_FIRST_PUBLISHED_MSEC,
'old_first_published_msec': activity_rights.first_published_msec,
'new_first_published_msec': first_published_msec
}]
activity_rights.first_published_msec = first_published_msec
_save_activity_rights(
feconf.SYSTEM_COMMITTER_ID, activity_rights, activity_type,
'set first published time in msec', commit_cmds)
def create_new_exploration_rights(exploration_id, committer_id):
"""Creates a new exploration rights object and saves it to the datastore.
Subscribes the committer to the new exploration.
Args:
exploration_id: str. ID of the exploration.
committer_id: str. ID of the committer.
"""
exploration_rights = ActivityRights(
exploration_id, [committer_id], [], [], [])
commit_cmds = [{'cmd': CMD_CREATE_NEW}]
exp_models.ExplorationRightsModel(
id=exploration_rights.id,
owner_ids=exploration_rights.owner_ids,
editor_ids=exploration_rights.editor_ids,
voice_artist_ids=exploration_rights.voice_artist_ids,
viewer_ids=exploration_rights.viewer_ids,
community_owned=exploration_rights.community_owned,
status=exploration_rights.status,
viewable_if_private=exploration_rights.viewable_if_private,
first_published_msec=exploration_rights.first_published_msec,
).commit(committer_id, 'Created new exploration', commit_cmds)
subscription_services.subscribe_to_exploration(
committer_id, exploration_id)
def get_exploration_rights(exploration_id, strict=True):
"""Retrieves the rights for this exploration from the datastore.
Args:
exploration_id: str. ID of the exploration.
strict: bool. Whether to raise an error if there is no exploration
matching the given ID.
Returns:
ActivityRights. The rights object for the given exploration.
Raises:
EntityNotFoundError. The exploration with ID exploration_id was not
found in the datastore.
"""
model = exp_models.ExplorationRightsModel.get(
exploration_id, strict=strict)
if model is None:
return None
return get_activity_rights_from_model(
model, constants.ACTIVITY_TYPE_EXPLORATION)
def get_multiple_exploration_rights_by_ids(exp_ids):
"""Returns a list of ActivityRights objects for given exploration ids.
Args:
exp_ids: list(str). List of exploration ids.
Returns:
        list(ActivityRights|None). A list containing an ActivityRights object
            for each existing exploration, or None for ids that were not found.
"""
exp_rights_models = exp_models.ExplorationRightsModel.get_multi(
exp_ids)
exp_models_list = []
for model in exp_rights_models:
if model is None:
exp_models_list.append(None)
else:
exp_models_list.append(
get_activity_rights_from_model(
model, constants.ACTIVITY_TYPE_EXPLORATION))
return exp_models_list
def is_exploration_private(exploration_id):
"""Returns whether exploration is private.
Args:
exploration_id: str. ID of the exploration.
Returns:
bool. Whether the exploration is private or not.
"""
exploration_rights = get_exploration_rights(exploration_id)
return exploration_rights.status == ACTIVITY_STATUS_PRIVATE
def is_exploration_public(exploration_id):
"""Returns whether exploration is public.
Args:
exploration_id: str. ID of the exploration.
Returns:
bool. Whether the exploration is public.
"""
exploration_rights = get_exploration_rights(exploration_id)
return exploration_rights.status == ACTIVITY_STATUS_PUBLIC
def is_exploration_cloned(exploration_id):
"""Returns whether the exploration is a clone of another exploration.
Args:
exploration_id: str. ID of the exploration.
Returns:
bool. Whether the exploration is a clone of another exploration.
"""
exploration_rights = get_exploration_rights(exploration_id)
return bool(exploration_rights.cloned_from)
def create_new_collection_rights(collection_id, committer_id):
"""Creates a new collection rights object and saves it to the datastore.
Subscribes the committer to the new collection.
Args:
collection_id: str. ID of the collection.
committer_id: str. ID of the committer.
"""
collection_rights = ActivityRights(
collection_id, [committer_id], [], [], [])
commit_cmds = [{'cmd': CMD_CREATE_NEW}]
collection_models.CollectionRightsModel(
id=collection_rights.id,
owner_ids=collection_rights.owner_ids,
editor_ids=collection_rights.editor_ids,
voice_artist_ids=collection_rights.voice_artist_ids,
viewer_ids=collection_rights.viewer_ids,
community_owned=collection_rights.community_owned,
status=collection_rights.status,
viewable_if_private=collection_rights.viewable_if_private,
first_published_msec=collection_rights.first_published_msec
).commit(committer_id, 'Created new collection', commit_cmds)
subscription_services.subscribe_to_collection(committer_id, collection_id)
def get_collection_rights(collection_id, strict=True):
"""Retrieves the rights for this collection from the datastore.
Args:
collection_id: str. ID of the collection.
strict: bool. Whether to raise an error if ID is not found.
Returns:
ActivityRights. The rights object for the collection.
Raises:
EntityNotFoundError. The collection with ID collection_id is not found
in the datastore.
"""
model = collection_models.CollectionRightsModel.get(
collection_id, strict=strict)
if model is None:
return None
return get_activity_rights_from_model(
model, constants.ACTIVITY_TYPE_COLLECTION)
def get_collection_owner_names(collection_id):
"""Retrieves the owners for this collection from the datastore.
Args:
collection_id: str. ID of the collection.
Returns:
list(str). Human-readable usernames (or truncated email addresses) of
owners for this collection.
"""
collection_rights = get_collection_rights(collection_id)
return user_services.get_human_readable_user_ids(
collection_rights.owner_ids)
def is_collection_private(collection_id):
"""Returns whether the collection is private.
Args:
collection_id: str. ID of the collection.
Returns:
bool. Whether the collection is private.
"""
collection_rights = get_collection_rights(collection_id)
return collection_rights.status == ACTIVITY_STATUS_PRIVATE
def is_collection_public(collection_id):
"""Returns whether the collection is public.
Args:
collection_id: str. ID of the collection.
Returns:
bool. Whether the collection is public.
"""
collection_rights = get_collection_rights(collection_id)
return collection_rights.status == ACTIVITY_STATUS_PUBLIC
def _get_activity_rights(activity_type, activity_id):
"""Retrieves the rights object for the given activity
based on its type.
Args:
activity_type: str. The type of activity. Possible values:
constants.ACTIVITY_TYPE_EXPLORATION
constants.ACTIVITY_TYPE_COLLECTION
activity_id: str. ID of the activity.
Returns:
ActivityRights. The rights object associated with the given activity.
Raises:
Exception. activity_type provided is unknown.
"""
if activity_type == constants.ACTIVITY_TYPE_EXPLORATION:
return get_exploration_rights(activity_id, strict=False)
elif activity_type == constants.ACTIVITY_TYPE_COLLECTION:
return get_collection_rights(activity_id, strict=False)
else:
raise Exception(
'Cannot get activity rights for unknown activity type: %s' % (
activity_type))
def check_can_access_activity(user, activity_rights):
"""Checks whether the user can access given activity.
Args:
user: UserActionsInfo. Object having user_id, role and actions for
given user.
        activity_rights: ActivityRights or None. Rights object for the given
activity.
Returns:
bool. Whether the given activity can be accessed by the given user.
"""
if activity_rights is None:
return False
elif activity_rights.is_published():
return bool(
role_services.ACTION_PLAY_ANY_PUBLIC_ACTIVITY in user.actions)
elif activity_rights.is_private():
return bool(
(role_services.ACTION_PLAY_ANY_PRIVATE_ACTIVITY in user.actions) or
activity_rights.is_viewer(user.user_id) or
activity_rights.is_owner(user.user_id) or
activity_rights.is_editor(user.user_id) or
activity_rights.is_voice_artist(user.user_id) or
activity_rights.viewable_if_private)
def check_can_edit_activity(user, activity_rights):
"""Checks whether the user can edit given activity.
Args:
user: UserActionsInfo. Object having user_id, role and actions for
given user.
activity_rights: ActivityRights or None. Rights object for the given
activity.
Returns:
bool. Whether the given user can edit this activity.
"""
if activity_rights is None:
return False
if role_services.ACTION_EDIT_OWNED_ACTIVITY not in user.actions:
return False
if (activity_rights.is_owner(user.user_id) or
activity_rights.is_editor(user.user_id)):
return True
if (activity_rights.community_owned or
(role_services.ACTION_EDIT_ANY_ACTIVITY in user.actions)):
return True
if (activity_rights.is_published() and
(role_services.ACTION_EDIT_ANY_PUBLIC_ACTIVITY in
user.actions)):
return True
return False
def check_can_voiceover_activity(user, activity_rights):
"""Checks whether the user can voiceover given activity.
Args:
user: UserActionsInfo. Object having user_id, role and actions for
given user.
activity_rights: ActivityRights or None. Rights object for the given
activity.
Returns:
bool. Whether the given user can voiceover this activity.
"""
if activity_rights is None:
return False
if role_services.ACTION_EDIT_OWNED_ACTIVITY not in user.actions:
return False
if (activity_rights.is_owner(user.user_id) or
activity_rights.is_editor(user.user_id) or
activity_rights.is_voice_artist(user.user_id)):
return True
if (activity_rights.community_owned or
(role_services.ACTION_EDIT_ANY_ACTIVITY in user.actions)):
return True
if (activity_rights.is_published() and
(role_services.ACTION_EDIT_ANY_PUBLIC_ACTIVITY in
user.actions)):
return True
return False
def check_can_save_activity(user, activity_rights):
"""Checks whether the user can save given activity.
Args:
user: UserActionsInfo. Object having user_id, role and actions for
given user.
activity_rights: ActivityRights or None. Rights object for the given
activity.
Returns:
bool. Whether the user can save given activity.
"""
return (check_can_edit_activity(user, activity_rights) or (
check_can_voiceover_activity(user, activity_rights)))
def check_can_delete_activity(user, activity_rights):
"""Checks whether the user can delete given activity.
Args:
user: UserActionsInfo. Object having user_id, role and actions for
given user.
activity_rights: ActivityRights or None. Rights object for the given
activity.
Returns:
bool. Whether the user can delete given activity.
"""
if activity_rights is None:
return False
if role_services.ACTION_DELETE_ANY_ACTIVITY in user.actions:
return True
elif (activity_rights.is_private() and
(role_services.ACTION_DELETE_OWNED_PRIVATE_ACTIVITY in user.actions)
and activity_rights.is_owner(user.user_id)):
return True
elif (activity_rights.is_published() and
(role_services.ACTION_DELETE_ANY_PUBLIC_ACTIVITY in user.actions)):
return True
return False
def check_can_modify_activity_roles(user, activity_rights):
"""Checks whether the user can modify roles for given activity.
Args:
user: UserActionsInfo. Object having user_id, role and actions for
given user.
activity_rights: ActivityRights or None. Rights object for the given
activity.
Returns:
bool. Whether the user can modify roles for given activity.
"""
if activity_rights is None:
return False
if (activity_rights.community_owned or
activity_rights.cloned_from):
return False
if (role_services.ACTION_MODIFY_ROLES_FOR_ANY_ACTIVITY in
user.actions):
return True
if (role_services.ACTION_MODIFY_ROLES_FOR_OWNED_ACTIVITY in
user.actions):
if activity_rights.is_owner(user.user_id):
return True
return False
def check_can_release_ownership(user, activity_rights):
"""Checks whether the user can release ownership for given activity.
Args:
user: UserActionsInfo. Object having user_id, role and actions for
given user.
activity_rights: ActivityRights or None. Rights object for the given
activity.
Returns:
bool. Whether the user can release ownership for given activity.
"""
if activity_rights is None:
return False
if activity_rights.is_private():
return False
return check_can_modify_activity_roles(
user, activity_rights)
def check_can_publish_activity(user, activity_rights):
"""Checks whether the user can publish given activity.
Args:
user: UserActionsInfo. Object having user_id, role and actions for
given user.
activity_rights: ActivityRights or None. Rights object for the given
activity.
Returns:
bool. Whether the user can publish given activity.
"""
if activity_rights is None:
return False
if activity_rights.cloned_from:
return False
if activity_rights.is_published():
return False
if role_services.ACTION_PUBLISH_ANY_ACTIVITY in user.actions:
return True
if role_services.ACTION_PUBLISH_OWNED_ACTIVITY in user.actions:
if activity_rights.is_owner(user.user_id):
return True
return False
def check_can_unpublish_activity(user, activity_rights):
"""Checks whether the user can unpublish given activity.
Args:
user: UserActionsInfo. Object having user_id, role and actions for
given user.
activity_rights: ActivityRights or None. Rights object for the given
activity.
Returns:
bool. Whether the user can unpublish given activity.
"""
if activity_rights is None:
return False
if activity_rights.community_owned:
return False
if activity_rights.is_published():
if role_services.ACTION_UNPUBLISH_ANY_PUBLIC_ACTIVITY in user.actions:
return True
return False
def _assign_role(
committer, assignee_id, new_role, activity_id, activity_type):
"""Assigns a new role to the user.
Args:
committer: UserActionsInfo. UserActionInfo object for the user
who is performing the action.
assignee_id: str. ID of the user whose role is being changed.
new_role: str. The name of the new role: One of
ROLE_OWNER
ROLE_EDITOR
ROLE_VOICE_ARTIST
ROLE_VIEWER
activity_id: str. ID of the activity.
activity_type: str. The type of activity. Possible values:
constants.ACTIVITY_TYPE_EXPLORATION
constants.ACTIVITY_TYPE_COLLECTION
Raises:
Exception. The committer does not have rights to modify a role.
Exception. The user already owns the activity.
Exception. The user can already edit the activity.
Exception. The user can already voiceover the activity.
Exception. The activity is already publicly editable.
Exception. The activity is already publicly translatable.
Exception. The user can already view the activity.
Exception. The activity is already publicly viewable.
Exception. The role is invalid.
"""
committer_id = committer.user_id
activity_rights = _get_activity_rights(activity_type, activity_id)
if not check_can_modify_activity_roles(committer, activity_rights):
logging.error(
'User %s tried to allow user %s to be a(n) %s of activity %s '
'but was refused permission.' % (
committer_id, assignee_id, new_role, activity_id))
raise Exception(
'UnauthorizedUserException: Could not assign new role.')
assignee_username = user_services.get_username(assignee_id)
old_role = ROLE_NONE
if new_role == ROLE_OWNER:
if activity_rights.is_owner(assignee_id):
raise Exception('This user already owns this %s.' % activity_type)
activity_rights.owner_ids.append(assignee_id)
if assignee_id in activity_rights.viewer_ids:
activity_rights.viewer_ids.remove(assignee_id)
old_role = ROLE_VIEWER
if assignee_id in activity_rights.editor_ids:
activity_rights.editor_ids.remove(assignee_id)
old_role = ROLE_EDITOR
if assignee_id in activity_rights.voice_artist_ids:
activity_rights.voice_artist_ids.remove(assignee_id)
old_role = ROLE_VOICE_ARTIST
elif new_role == ROLE_EDITOR:
if (activity_rights.is_editor(assignee_id) or
activity_rights.is_owner(assignee_id)):
raise Exception(
'This user already can edit this %s.' % activity_type)
activity_rights.editor_ids.append(assignee_id)
if assignee_id in activity_rights.voice_artist_ids:
activity_rights.voice_artist_ids.remove(assignee_id)
old_role = ROLE_VOICE_ARTIST
if assignee_id in activity_rights.viewer_ids:
activity_rights.viewer_ids.remove(assignee_id)
old_role = ROLE_VIEWER
elif new_role == ROLE_VOICE_ARTIST:
if (activity_rights.is_editor(assignee_id) or
activity_rights.is_voice_artist(assignee_id) or
activity_rights.is_owner(assignee_id)):
raise Exception(
'This user already can voiceover this %s.' % activity_type)
activity_rights.voice_artist_ids.append(assignee_id)
if assignee_id in activity_rights.viewer_ids:
activity_rights.viewer_ids.remove(assignee_id)
old_role = ROLE_VIEWER
elif new_role == ROLE_VIEWER:
if (activity_rights.is_owner(assignee_id) or
activity_rights.is_editor(assignee_id) or
activity_rights.is_viewer(assignee_id)):
raise Exception(
'This user already can view this %s.' % activity_type)
if activity_rights.status != ACTIVITY_STATUS_PRIVATE:
raise Exception(
'Public %ss can be viewed by anyone.' % activity_type)
activity_rights.viewer_ids.append(assignee_id)
else:
raise Exception('Invalid role: %s' % new_role)
commit_message = 'Changed role of %s from %s to %s' % (
assignee_username, old_role, new_role)
commit_cmds = [{
'cmd': CMD_CHANGE_ROLE,
'assignee_id': assignee_id,
'old_role': old_role,
'new_role': new_role
}]
_save_activity_rights(
committer_id, activity_rights, activity_type,
commit_message, commit_cmds)
_update_activity_summary(activity_type, activity_rights)
def _release_ownership_of_activity(committer, activity_id, activity_type):
"""Releases ownership of the given activity to the community.
Args:
committer: UserActionsInfo. UserActionsInfo object for the user who
is performing the action.
activity_id: str. ID of the activity.
activity_type: str. The type of activity. Possible values:
constants.ACTIVITY_TYPE_EXPLORATION
constants.ACTIVITY_TYPE_COLLECTION
Raises:
Exception. The committer does not have release rights.
"""
committer_id = committer.user_id
activity_rights = _get_activity_rights(activity_type, activity_id)
if not check_can_release_ownership(committer, activity_rights):
logging.error(
'User %s tried to release ownership of %s %s but was '
'refused permission.' % (committer_id, activity_type, activity_id))
raise Exception(
'The ownership of this %s cannot be released.' % activity_type)
activity_rights.community_owned = True
activity_rights.owner_ids = []
activity_rights.editor_ids = []
activity_rights.viewer_ids = []
commit_cmds = [{
'cmd': CMD_RELEASE_OWNERSHIP,
}]
_save_activity_rights(
committer_id, activity_rights, activity_type,
'%s ownership released to the community.' % activity_type, commit_cmds)
_update_activity_summary(activity_type, activity_rights)
def _change_activity_status(
committer_id, activity_id, activity_type, new_status, commit_message):
"""Changes the status of the given activity.
Args:
committer_id: str. ID of the user who is performing the update action.
activity_id: str. ID of the activity.
activity_type: str. The type of activity. Possible values:
constants.ACTIVITY_TYPE_EXPLORATION
constants.ACTIVITY_TYPE_COLLECTION
new_status: str. The new status of the activity.
commit_message: str. The human-written commit message for this change.
"""
activity_rights = _get_activity_rights(activity_type, activity_id)
old_status = activity_rights.status
activity_rights.status = new_status
if activity_type == constants.ACTIVITY_TYPE_EXPLORATION:
cmd_type = CMD_CHANGE_EXPLORATION_STATUS
elif activity_type == constants.ACTIVITY_TYPE_COLLECTION:
cmd_type = CMD_CHANGE_COLLECTION_STATUS
commit_cmds = [{
'cmd': cmd_type,
'old_status': old_status,
'new_status': new_status
}]
if new_status != ACTIVITY_STATUS_PRIVATE:
activity_rights.viewer_ids = []
if activity_rights.first_published_msec is None:
activity_rights.first_published_msec = (
utils.get_current_time_in_millisecs())
_save_activity_rights(
committer_id, activity_rights, activity_type, commit_message,
commit_cmds)
_update_activity_summary(activity_type, activity_rights)
def _publish_activity(committer, activity_id, activity_type):
"""Publishes the given activity.
Args:
committer: UserActionsInfo. UserActionsInfo object for the committer.
activity_id: str. ID of the activity.
activity_type: str. The type of activity. Possible values:
constants.ACTIVITY_TYPE_EXPLORATION
constants.ACTIVITY_TYPE_COLLECTION
Raises:
Exception. The committer does not have rights to publish the
activity.
"""
committer_id = committer.user_id
activity_rights = _get_activity_rights(activity_type, activity_id)
if not check_can_publish_activity(committer, activity_rights):
logging.error(
'User %s tried to publish %s %s but was refused '
'permission.' % (committer_id, activity_type, activity_id))
raise Exception('This %s cannot be published.' % activity_type)
_change_activity_status(
committer_id, activity_id, activity_type, ACTIVITY_STATUS_PUBLIC,
'%s published.' % activity_type)
def _unpublish_activity(committer, activity_id, activity_type):
"""Unpublishes the given activity.
Args:
committer: UserActionsInfo. UserActionsInfo object for the committer.
activity_id: str. ID of the activity.
activity_type: str. The type of activity. Possible values:
constants.ACTIVITY_TYPE_EXPLORATION
constants.ACTIVITY_TYPE_COLLECTION
Raises:
Exception. The committer does not have rights to unpublish the
activity.
"""
committer_id = committer.user_id
activity_rights = _get_activity_rights(activity_type, activity_id)
if not check_can_unpublish_activity(committer, activity_rights):
logging.error(
'User %s tried to unpublish %s %s but was refused '
'permission.' % (committer_id, activity_type, activity_id))
raise Exception('This %s cannot be unpublished.' % activity_type)
_change_activity_status(
committer_id, activity_id, activity_type, ACTIVITY_STATUS_PRIVATE,
'%s unpublished.' % activity_type)
activity_services.remove_featured_activity(activity_type, activity_id)
# Rights functions for activities.
def assign_role_for_exploration(
committer, exploration_id, assignee_id, new_role):
"""Assigns a user to the given role and subscribes the assignee to future
exploration updates.
The caller should ensure that assignee_id corresponds to a valid user in
the system.
Args:
committer: UserActionsInfo. The UserActionsInfo object for the
committer.
exploration_id: str. ID of the exploration.
assignee_id: str. ID of the user whose role is being changed.
new_role: str. The name of the new role: One of
ROLE_OWNER
ROLE_EDITOR
ROLE_VOICE_ARTIST
Raises:
Exception. This could potentially throw an exception from
_assign_role.
"""
_assign_role(
committer, assignee_id, new_role, exploration_id,
constants.ACTIVITY_TYPE_EXPLORATION)
if new_role in [ROLE_OWNER, ROLE_EDITOR, ROLE_VOICE_ARTIST]:
subscription_services.subscribe_to_exploration(
assignee_id, exploration_id)
def release_ownership_of_exploration(committer, exploration_id):
"""Releases ownership of the given exploration to the community.
Args:
committer: UserActionsInfo. UserActionsInfo object for the committer.
exploration_id: str. ID of the exploration.
Raises:
Exception. This could potentially throw an exception from
_release_ownership_of_activity.
"""
_release_ownership_of_activity(
committer, exploration_id, constants.ACTIVITY_TYPE_EXPLORATION)
def set_private_viewability_of_exploration(
committer, exploration_id, viewable_if_private):
"""Sets the viewable_if_private attribute for the given exploration's rights
object.
If viewable_if_private is True, this allows a private exploration
to be viewed by anyone with the link.
Args:
committer: UserActionsInfo. UserActionsInfo object for the committer.
exploration_id: str. ID of the exploration.
viewable_if_private: bool. Whether the exploration should be made
viewable (by anyone with the link).
Raises:
Exception. The committer does not have the permission to perform change
action.
Exception. If the viewable_if_private property is already as desired.
"""
committer_id = committer.user_id
exploration_rights = get_exploration_rights(exploration_id)
# A user who can publish an activity can also change its private viewability.
if not check_can_publish_activity(committer, exploration_rights):
logging.error(
'User %s tried to change private viewability of exploration %s '
'but was refused permission.' % (committer_id, exploration_id))
raise Exception(
'The viewability status of this exploration cannot be changed.')
old_viewable_if_private = exploration_rights.viewable_if_private
if old_viewable_if_private == viewable_if_private:
raise Exception(
'Trying to change viewability status of this exploration to %s, '
'but that is already the current value.' % viewable_if_private)
exploration_rights.viewable_if_private = viewable_if_private
commit_cmds = [{
'cmd': CMD_CHANGE_PRIVATE_VIEWABILITY,
'old_viewable_if_private': old_viewable_if_private,
'new_viewable_if_private': viewable_if_private,
}]
commit_message = (
'Made exploration viewable to anyone with the link.'
if viewable_if_private else
'Made exploration viewable only to invited playtesters.')
_save_activity_rights(
committer_id, exploration_rights, constants.ACTIVITY_TYPE_EXPLORATION,
commit_message, commit_cmds)
_update_exploration_summary(exploration_rights)
def publish_exploration(committer, exploration_id):
"""Publishes the given exploration.
It is the responsibility of the caller to check that the exploration is
valid prior to publication.
Args:
committer: UserActionsInfo. UserActionsInfo object for the committer.
exploration_id: str. ID of the exploration.
Raises:
Exception. This could potentially throw an exception from
_publish_activity.
"""
_publish_activity(
committer, exploration_id, constants.ACTIVITY_TYPE_EXPLORATION)
def unpublish_exploration(committer, exploration_id):
"""Unpublishes the given exploration.
Args:
committer: UserActionsInfo. UserActionsInfo object for the committer.
exploration_id: str. ID of the exploration.
Raises:
Exception. This could potentially throw an exception from
_unpublish_activity.
"""
_unpublish_activity(
committer, exploration_id, constants.ACTIVITY_TYPE_EXPLORATION)
# Rights functions for collections.
def assign_role_for_collection(
committer, collection_id, assignee_id, new_role):
"""Assign the given user to the given role and subscribes the assignee
to future collection updates.
The caller should ensure that assignee_id corresponds to a valid user in
the system.
Args:
committer: UserActionsInfo. UserActionsInfo object for the committer.
collection_id: str. ID of the collection.
assignee_id: str. ID of the user whose role is being changed.
new_role: str. The name of the new role: One of
ROLE_OWNER
ROLE_EDITOR
Raises:
Exception. This could potentially throw an exception from
_assign_role.
"""
_assign_role(
committer, assignee_id, new_role, collection_id,
constants.ACTIVITY_TYPE_COLLECTION)
if new_role in [ROLE_OWNER, ROLE_EDITOR]:
subscription_services.subscribe_to_collection(
assignee_id, collection_id)
def release_ownership_of_collection(committer, collection_id):
"""Releases ownership of the given collection to the community.
Args:
committer: UserActionsInfo. UserActionsInfo object for the committer.
collection_id: str. ID of the collection.
Raises:
Exception. This could potentially throw an exception from
_release_ownership_of_activity.
"""
_release_ownership_of_activity(
committer, collection_id, constants.ACTIVITY_TYPE_COLLECTION)
def publish_collection(committer, collection_id):
"""Publishes the given collection.
It is the responsibility of the caller to check that the collection is
valid prior to publication.
Args:
committer: UserActionsInfo. UserActionsInfo object for the committer.
collection_id: str. ID of the collection.
Raises:
Exception. This could potentially throw an exception from
_publish_activity.
"""
_publish_activity(
committer, collection_id, constants.ACTIVITY_TYPE_COLLECTION)
def unpublish_collection(committer, collection_id):
"""Unpublishes the given collection.
Args:
committer: UserActionsInfo. UserActionsInfo object for the committer.
collection_id: str. ID of the collection.
Raises:
Exception. This could potentially throw an exception from
_unpublish_activity.
"""
_unpublish_activity(
committer, collection_id, constants.ACTIVITY_TYPE_COLLECTION)
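# Illustrative sketch only (not Oppia's API): the bookkeeping in _assign_role
# above removes an assignee from their previous id list before appending them
# to the new one, so a user ends up in at most one of the owner/editor/viewer
# lists. The toy model below, with a hypothetical user id and plain sets,
# demonstrates that invariant in isolation.
def _demo_role_reassignment():
    rights = {'owner_ids': set(), 'editor_ids': set(), 'viewer_ids': {'user_a'}}
    # Promote 'user_a' from viewer to editor: drop the old role, add the new one.
    rights['viewer_ids'].discard('user_a')
    rights['editor_ids'].add('user_a')
    assert 'user_a' in rights['editor_ids'] and 'user_a' not in rights['viewer_ids']
    return rights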
|
# -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
Line visual implementing Agg- and GL-based drawing modes.
"""
from __future__ import division
import numpy as np
from ... import gloo, glsl
from ...color import Color, ColorArray, get_colormap
from ...ext.six import string_types
from ..shaders import Function
from ..visual import Visual, CompoundVisual
from ...util.profiler import Profiler
from .dash_atlas import DashAtlas
vec2to4 = Function("""
vec4 vec2to4(vec2 inp) {
return vec4(inp, 0, 1);
}
""")
vec3to4 = Function("""
vec4 vec3to4(vec3 inp) {
return vec4(inp, 1);
}
""")
"""
TODO:
* Agg support is very minimal; needs attention.
* Optimization--avoid creating new buffers, avoid triggering program
recompile.
"""
joins = {'miter': 0, 'round': 1, 'bevel': 2}
caps = {'': 0, 'none': 0, '.': 0,
'round': 1, ')': 1, '(': 1, 'o': 1,
'triangle in': 2, '<': 2,
'triangle out': 3, '>': 3,
'square': 4, '=': 4, 'butt': 4,
'|': 5}
class LineVisual(CompoundVisual):
"""Line visual
Parameters
----------
pos : array
Array of shape (..., 2) or (..., 3) specifying vertex coordinates.
color : Color, tuple, or array
The color to use when drawing the line. If an array is given, it
must be of shape (..., 4) and provide one rgba color per vertex.
Can also be a colormap name, or appropriate `Function`.
width : float
The width of the line in px. Line widths > 1px are only
guaranteed to work when using 'agg' method.
connect : str or array
Determines which vertices are connected by lines.
* "strip" causes the line to be drawn with each vertex
connected to the next.
* "segments" causes each pair of vertices to draw an
independent line segment
* numpy arrays specify the exact set of segment pairs to
connect.
method : str
Mode to use for drawing.
* "agg" uses anti-grain geometry to draw nicely antialiased lines
with proper joins and endcaps.
* "gl" uses OpenGL's built-in line rendering. This is much faster,
but produces much lower-quality results and is not guaranteed to
obey the requested line width or join/endcap styles.
antialias : bool
Enables or disables antialiasing.
For method='gl', this specifies whether to use GL's line smoothing,
which may be unavailable or inconsistent on some platforms.
"""
def __init__(self, pos=None, color=(0.5, 0.5, 0.5, 1), width=1,
connect='strip', method='gl', antialias=False):
self._line_visual = None
self._changed = {'pos': False, 'color': False, 'width': False,
'connect': False}
self._pos = None
self._color = None
self._width = None
self._connect = None
self._bounds = None
self._antialias = None
self._method = 'none'
CompoundVisual.__init__(self, [])
# don't call subclass set_data; these often have different
# signatures.
LineVisual.set_data(self, pos=pos, color=color, width=width,
connect=connect)
self.antialias = antialias
self.method = method
@property
def antialias(self):
return self._antialias
@antialias.setter
def antialias(self, aa):
self._antialias = bool(aa)
self.update()
@property
def method(self):
"""The current drawing method"""
return self._method
@method.setter
def method(self, method):
if method not in ('agg', 'gl'):
raise ValueError('method argument must be "agg" or "gl".')
if method == self._method:
return
self._method = method
if self._line_visual is not None:
self.remove_subvisual(self._line_visual)
if method == 'gl':
self._line_visual = _GLLineVisual(self)
elif method == 'agg':
self._line_visual = _AggLineVisual(self)
self.add_subvisual(self._line_visual)
for k in self._changed:
self._changed[k] = True
def set_data(self, pos=None, color=None, width=None, connect=None):
"""Set the data used to draw this visual.
Parameters
----------
pos : array
Array of shape (..., 2) or (..., 3) specifying vertex coordinates.
color : Color, tuple, or array
The color to use when drawing the line. If an array is given, it
must be of shape (..., 4) and provide one rgba color per vertex.
width : float
The width of the line in px. Line widths < 1 px will be rounded up
to 1 px when using the 'gl' method.
connect : str or array
Determines which vertices are connected by lines.
* "strip" causes the line to be drawn with each vertex
connected to the next.
* "segments" causes each pair of vertices to draw an
independent line segment
* int numpy arrays specify the exact set of segment pairs to
connect.
* bool numpy arrays specify which _adjacent_ pairs to connect.
"""
if pos is not None:
self._bounds = None
self._pos = pos
self._changed['pos'] = True
if color is not None:
self._color = color
self._changed['color'] = True
if width is not None:
self._width = width
self._changed['width'] = True
if connect is not None:
self._connect = connect
self._changed['connect'] = True
self.update()
@property
def color(self):
return self._color
@property
def width(self):
return self._width
@property
def connect(self):
return self._connect
@property
def pos(self):
return self._pos
def _interpret_connect(self):
if isinstance(self._connect, np.ndarray):
# Convert a boolean connection array to a vertex index array
if self._connect.ndim == 1 and self._connect.dtype == bool:
index = np.empty((len(self._connect), 2), dtype=np.uint32)
index[:] = np.arange(len(self._connect))[:, np.newaxis]
index[:, 1] += 1
return index[self._connect]
elif self._connect.ndim == 2 and self._connect.shape[1] == 2:
return self._connect.astype(np.uint32)
else:
raise TypeError("Got invalid connect array of shape %r and "
"dtype %r" % (self._connect.shape,
self._connect.dtype))
else:
return self._connect
def _interpret_color(self, color_in=None):
color_in = self._color if color_in is None else color_in
colormap = None
if isinstance(color_in, string_types):
try:
colormap = get_colormap(color_in)
color = Function(colormap.glsl_map)
except KeyError:
color = Color(color_in).rgba
elif isinstance(color_in, Function):
color = Function(color_in)
else:
color = ColorArray(color_in).rgba
if len(color) == 1:
color = color[0]
return color, colormap
def _compute_bounds(self, axis, view):
"""Get the bounds
Parameters
----------
view : instance of VisualView
The view for which bounds are requested (unused in this implementation).
axis : 0, 1, 2
The axis along which to measure the bounding values, in
x-y-z order.
"""
# Can and should we calculate bounds?
if (self._bounds is None) and self._pos is not None:
pos = self._pos
self._bounds = [(pos[:, d].min(), pos[:, d].max())
for d in range(pos.shape[1])]
# Return what we can
if self._bounds is None:
return
else:
if axis < len(self._bounds):
return self._bounds[axis]
else:
return (0, 0)
def _prepare_draw(self, view):
if self._width == 0:
return False
CompoundVisual._prepare_draw(self, view)
class _GLLineVisual(Visual):
VERTEX_SHADER = """
varying vec4 v_color;
void main(void) {
gl_Position = $transform($to_vec4($position));
v_color = $color;
}
"""
FRAGMENT_SHADER = """
varying vec4 v_color;
void main() {
gl_FragColor = v_color;
}
"""
def __init__(self, parent):
self._parent = parent
self._pos_vbo = gloo.VertexBuffer()
self._color_vbo = gloo.VertexBuffer()
self._connect_ibo = gloo.IndexBuffer()
self._connect = None
Visual.__init__(self, vcode=self.VERTEX_SHADER,
fcode=self.FRAGMENT_SHADER)
self.set_gl_state('translucent')
def _prepare_transforms(self, view):
xform = view.transforms.get_transform()
view.view_program.vert['transform'] = xform
def _prepare_draw(self, view):
prof = Profiler()
if self._parent._changed['pos']:
if self._parent._pos is None:
return False
# todo: does this result in unnecessary copies?
pos = np.ascontiguousarray(self._parent._pos.astype(np.float32))
self._pos_vbo.set_data(pos)
self._program.vert['position'] = self._pos_vbo
if pos.shape[-1] == 2:
self._program.vert['to_vec4'] = vec2to4
elif pos.shape[-1] == 3:
self._program.vert['to_vec4'] = vec3to4
else:
raise TypeError("Got bad position array shape: %r"
% (pos.shape,))
if self._parent._changed['color']:
color, cmap = self._parent._interpret_color()
# If color is not visible, just quit now
if isinstance(color, Color) and color.is_blank:
return False
if isinstance(color, Function):
# TODO: Change to the parametric coordinate once that is done
self._program.vert['color'] = color(
'(gl_Position.x + 1.0) / 2.0')
else:
if color.ndim == 1:
self._program.vert['color'] = color
else:
self._color_vbo.set_data(color)
self._program.vert['color'] = self._color_vbo
self.shared_program['texture2D_LUT'] = cmap.texture_lut() \
if (hasattr(cmap, 'texture_lut')) else None
# Do we want to use OpenGL, and can we?
GL = None
from ...app._default_app import default_app
if default_app is not None and \
default_app.backend_name != 'ipynb_webgl':
try:
import OpenGL.GL as GL
except Exception: # can be other than ImportError sometimes
pass
# Turn on line smooth and/or line width
if GL:
if self._parent._antialias:
GL.glEnable(GL.GL_LINE_SMOOTH)
else:
GL.glDisable(GL.GL_LINE_SMOOTH)
px_scale = self.transforms.pixel_scale
width = px_scale * self._parent._width
GL.glLineWidth(max(width, 1.))
if self._parent._changed['connect']:
self._connect = self._parent._interpret_connect()
if isinstance(self._connect, np.ndarray):
self._connect_ibo.set_data(self._connect)
if self._connect is None:
return False
prof('prepare')
# Draw
if isinstance(self._connect, string_types) and \
self._connect == 'strip':
self._draw_mode = 'line_strip'
self._index_buffer = None
elif isinstance(self._connect, string_types) and \
self._connect == 'segments':
self._draw_mode = 'lines'
self._index_buffer = None
elif isinstance(self._connect, np.ndarray):
self._draw_mode = 'lines'
self._index_buffer = self._connect_ibo
else:
raise ValueError("Invalid line connect mode: %r" % self._connect)
prof('draw')
class _AggLineVisual(Visual):
_agg_vtype = np.dtype([('a_position', np.float32, (2,)),
('a_tangents', np.float32, (4,)),
('a_segment', np.float32, (2,)),
('a_angles', np.float32, (2,)),
('a_texcoord', np.float32, (2,)),
('alength', np.float32),
('color', np.float32, (4,))])
VERTEX_SHADER = glsl.get('lines/agg.vert')
FRAGMENT_SHADER = glsl.get('lines/agg.frag')
def __init__(self, parent):
self._parent = parent
self._vbo = gloo.VertexBuffer()
self._pos = None
self._color = None
self._da = DashAtlas()
dash_index, dash_period = self._da['solid']
self._U = dict(dash_index=dash_index, dash_period=dash_period,
linejoin=joins['round'],
linecaps=(caps['round'], caps['round']),
dash_caps=(caps['round'], caps['round']),
antialias=1.0)
self._dash_atlas = gloo.Texture2D(self._da._data)
Visual.__init__(self, vcode=self.VERTEX_SHADER,
fcode=self.FRAGMENT_SHADER)
self._index_buffer = gloo.IndexBuffer()
self.set_gl_state('translucent', depth_test=False)
self._draw_mode = 'triangles'
def _prepare_transforms(self, view):
data_doc = view.get_transform('visual', 'document')
doc_px = view.get_transform('document', 'framebuffer')
px_ndc = view.get_transform('framebuffer', 'render')
vert = view.view_program.vert
vert['transform'] = data_doc
vert['doc_px_transform'] = doc_px
vert['px_ndc_transform'] = px_ndc
def _prepare_draw(self, view):
bake = False
if self._parent._changed['pos']:
if self._parent._pos is None:
return False
# todo: does this result in unnecessary copies?
self._pos = np.ascontiguousarray(
self._parent._pos.astype(np.float32))
bake = True
if self._parent._changed['color']:
color, cmap = self._parent._interpret_color()
self._color = color
bake = True
if self._parent._changed['connect']:
if self._parent._connect not in [None, 'strip']:
raise NotImplementedError("Only 'strip' connection mode "
"allowed for agg-method lines.")
if bake:
V, idxs = self._agg_bake(self._pos, self._color)
self._vbo.set_data(V)
self._index_buffer.set_data(idxs)
# self._program.prepare()
self.shared_program.bind(self._vbo)
uniforms = dict(closed=False, miter_limit=4.0, dash_phase=0.0,
linewidth=self._parent._width)
for n, v in uniforms.items():
self.shared_program[n] = v
for n, v in self._U.items():
self.shared_program[n] = v
self.shared_program['u_dash_atlas'] = self._dash_atlas
@classmethod
def _agg_bake(cls, vertices, color, closed=False):
"""
Bake a list of 2D vertices for rendering them as a thick line. Each line
segment must have its own vertices because of antialiasing (this means no
vertex sharing between two adjacent line segments).
"""
n = len(vertices)
P = np.array(vertices).reshape(n, 2).astype(float)
idx = np.arange(n) # used to eventually tile the color array
dx, dy = P[0] - P[-1]
d = np.sqrt(dx*dx+dy*dy)
# If closed, make sure first vertex = last vertex (+/- epsilon=1e-10)
if closed and d > 1e-10:
P = np.append(P, P[0]).reshape(n+1, 2)
idx = np.append(idx, idx[-1])
n += 1
V = np.zeros(len(P), dtype=cls._agg_vtype)
V['a_position'] = P
# Tangents & norms
T = P[1:] - P[:-1]
N = np.sqrt(T[:, 0]**2 + T[:, 1]**2)
# T /= N.reshape(len(T),1)
V['a_tangents'][+1:, :2] = T
V['a_tangents'][0, :2] = T[-1] if closed else T[0]
V['a_tangents'][:-1, 2:] = T
V['a_tangents'][-1, 2:] = T[0] if closed else T[-1]
# Angles
T1 = V['a_tangents'][:, :2]
T2 = V['a_tangents'][:, 2:]
A = np.arctan2(T1[:, 0]*T2[:, 1]-T1[:, 1]*T2[:, 0],
T1[:, 0]*T2[:, 0]+T1[:, 1]*T2[:, 1])
V['a_angles'][:-1, 0] = A[:-1]
V['a_angles'][:-1, 1] = A[+1:]
# Segment
L = np.cumsum(N)
V['a_segment'][+1:, 0] = L
V['a_segment'][:-1, 1] = L
# V['a_lengths'][:,2] = L[-1]
# Step 1: A -- B -- C => A -- B, B' -- C
V = np.repeat(V, 2, axis=0)[1:-1]
V['a_segment'][1:] = V['a_segment'][:-1]
V['a_angles'][1:] = V['a_angles'][:-1]
V['a_texcoord'][0::2] = -1
V['a_texcoord'][1::2] = +1
idx = np.repeat(idx, 2)[1:-1]
# Step 2: A -- B, B' -- C -> A0/A1 -- B0/B1, B'0/B'1 -- C0/C1
V = np.repeat(V, 2, axis=0)
V['a_texcoord'][0::2, 1] = -1
V['a_texcoord'][1::2, 1] = +1
idx = np.repeat(idx, 2)
idxs = np.resize(np.array([0, 1, 2, 1, 2, 3], dtype=np.uint32),
(n-1)*(2*3))
idxs += np.repeat(4*np.arange(n-1, dtype=np.uint32), 6)
# Length
V['alength'] = L[-1] * np.ones(len(V))
# Color
if color.ndim == 1:
color = np.tile(color, (len(V), 1))
elif color.ndim == 2 and len(color) == n:
color = color[idx]
else:
raise ValueError('Color length %s does not match number of '
'vertices %s' % (len(color), n))
V['color'] = color
return V, idxs
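# Minimal standalone sketch (plain NumPy only, hypothetical helper name; not
# part of vispy's public API): it mirrors the boolean branch of
# LineVisual._interpret_connect above, turning a per-adjacent-pair boolean
# mask into the (N, 2) uint32 vertex-index pairs consumed by the 'lines'
# draw mode.
def _demo_bool_connect_to_index(connect_mask):
    connect_mask = np.asarray(connect_mask, dtype=bool)
    index = np.empty((len(connect_mask), 2), dtype=np.uint32)
    index[:] = np.arange(len(connect_mask))[:, np.newaxis]
    index[:, 1] += 1
    return index[connect_mask]
# Example: _demo_bool_connect_to_index([True, False, True]) -> [[0, 1], [2, 3]],
# i.e. vertices 0-1 and 2-3 are connected while the 1-2 pair is left as a gap.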
|
import tautulli
import config
import time
from config import client_id
from pypresence import Presence
RPC = Presence(client_id)
def main():
RPC.connect()
print("Check discord")
while True:
current_activity = tautulli.get_my_activity()
if current_activity is not None:
to_send = dict(state=current_activity['title'])
if current_activity['grandparent_title'] != "":
to_send['details'] = current_activity['grandparent_title']
RPC.update(**to_send)
else:
RPC.clear()
time.sleep(15) # rich presence is limited to once per 15 seconds
if __name__ == "__main__":
main()
# print(get_data("get_server_friendly_name"))
|
# -*- mode:python; coding:utf-8 -*-
# Copyright (c) 2020 IBM Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trestle remote tests."""
|
#!/usr/bin/env python
import diffpy.pdffit2.tests
assert diffpy.pdffit2.tests.test().wasSuccessful()
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.inception."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import nets_factory
class NetworksTest(tf.test.TestCase):
def testGetNetworkFnFirstHalf(self):
batch_size = 5
num_classes = 1000
for net in list(nets_factory.networks_map.keys())[:10]:
with tf.Graph().as_default() as g, self.test_session(g):
net_fn = nets_factory.get_network_fn(net, num_classes)
# Most networks use 224 as their default_image_size
image_size = getattr(net_fn, 'default_image_size', 224)
inputs = tf.random_uniform((batch_size, image_size, image_size, 3))
logits, end_points = net_fn(inputs)
self.assertTrue(isinstance(logits, tf.Tensor))
self.assertTrue(isinstance(end_points, dict))
self.assertEqual(logits.get_shape().as_list()[0], batch_size)
self.assertEqual(logits.get_shape().as_list()[-1], num_classes)
def testGetNetworkFnSecondHalf(self):
batch_size = 5
num_classes = 1000
for net in list(nets_factory.networks_map.keys())[10:]:
with tf.Graph().as_default() as g, self.test_session(g):
net_fn = nets_factory.get_network_fn(net, num_classes)
# Most networks use 224 as their default_image_size
image_size = getattr(net_fn, 'default_image_size', 224)
inputs = tf.random_uniform((batch_size, image_size, image_size, 3))
logits, end_points = net_fn(inputs)
self.assertTrue(isinstance(logits, tf.Tensor))
self.assertTrue(isinstance(end_points, dict))
self.assertEqual(logits.get_shape().as_list()[0], batch_size)
self.assertEqual(logits.get_shape().as_list()[-1], num_classes)
if __name__ == '__main__':
tf.test.main()
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/04b_classification.model.meta_arch.common.ipynb (unless otherwise specified).
__all__ = ['GeneralizedImageClassifier']
# Cell
import logging
from collections import namedtuple
from typing import *
import torch
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning.core.memory import get_human_readable_count
from torch.nn import Module
from ..backbones import ImageClassificationBackbone
from ..build import build_backbone, build_head
from ..heads import ImageClassificationHead
from ....core_classes import BasicModule
from ....utils.shape_spec import ShapeSpec
_logger = logging.getLogger(__name__)
# Cell
class GeneralizedImageClassifier(BasicModule):
"""
A general image classifier. Any model that contains the following 2 components:
1. Feature extractor (aka backbone)
2. Image Classification head (Pooling + Classifier)
"""
_hypers = namedtuple("hypers", field_names=["lr", "wd"])
def __init__(
self,
backbone: ImageClassificationBackbone,
head: ImageClassificationHead,
):
"""
Arguments:
1. `backbone`: an `ImageClassificationBackbone` module; must follow gale's backbone interface
2. `head`: a head containing the classifier and the pooling layer; must be an instance of
`ImageClassificationHead`.
"""
super(GeneralizedImageClassifier, self).__init__()
self.backbone = backbone
assert isinstance(backbone, ImageClassificationBackbone)
self.head = head
assert isinstance(head, ImageClassificationHead)
def forward(self, batched_inputs: torch.Tensor) -> torch.Tensor:
"""
Runs the batched_inputs through `backbone` followed by the `head`.
Returns a Tensor which contains the logits for the batched_inputs.
"""
# forward pass through the backbone
out = self.backbone(batched_inputs)
# pass through the classification layer
out = self.head(out)
return out
@classmethod
def from_config_dict(cls, cfg: DictConfig):
"""
Instantiate the Meta Architecture from gale config
"""
if not hasattr(cfg.model, "backbone"):
raise ValueError("Configuration for model backbone not found")
if not hasattr(cfg.model, "head"):
raise ValueError("Configuration for model head not found")
input_shape = ShapeSpec(cfg.input.channels, cfg.input.height, cfg.input.width)
_logger.debug(f"Inputs: {input_shape}")
backbone = build_backbone(cfg, input_shape=input_shape)
param_count = get_human_readable_count(
sum([m.numel() for m in backbone.parameters()])
)
_logger.debug(
"Backbone {} created, param count: {}.".format(
cfg.model.backbone.name, param_count
)
)
head = build_head(cfg, backbone.output_shape())
param_count = get_human_readable_count(
sum([m.numel() for m in head.parameters()])
)
_logger.debug(
"Head {} created, param count: {}.".format(cfg.model.head.name, param_count)
)
kwds = {"backbone": backbone, "head": head}
instance = cls(**kwds)
instance.input_shape = input_shape
param_count = get_human_readable_count(
sum([m.numel() for m in instance.parameters()])
)
_logger.info("Model created, param count: {}.".format(param_count))
return instance
def build_param_dicts(self):
"""
Builds up the parameter dicts for optimization
"""
backbone_params = self.backbone.build_param_dicts()
head_params = self.head.build_param_dicts()
return backbone_params + head_params
@property
def hypers(self) -> Tuple:
"""
Returns the lists of hyperparameters (`lr` and `wd`)
for each param group
"""
lrs = []
wds = []
for p in self.build_param_dicts():
lrs.append(p["lr"])
wds.append(p["weight_decay"])
return self._hypers(lrs, wds)
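# Cell
# Hedged illustration (plain torch.nn modules with made-up names; not gale's
# backbone/head classes): it shows the forward contract GeneralizedImageClassifier
# relies on above -- a backbone that produces a feature map, and a head that
# pools it and maps the pooled features to class logits.
class _ToyBackbone(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.features = torch.nn.Sequential(
            torch.nn.Conv2d(3, 16, kernel_size=3, padding=1), torch.nn.ReLU())
    def forward(self, x):
        return self.features(x)
class _ToyHead(torch.nn.Module):
    def __init__(self, in_channels=16, num_classes=10):
        super().__init__()
        self.pool = torch.nn.AdaptiveAvgPool2d(1)
        self.fc = torch.nn.Linear(in_channels, num_classes)
    def forward(self, x):
        return self.fc(self.pool(x).flatten(1))
# Usage sketch: _ToyHead()(_ToyBackbone()(torch.randn(2, 3, 32, 32))).shape == (2, 10)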
|
from util import getPrime, inv, gcd
from random import randrange
from time import time
from datetime import timedelta
def gen_keys():
p = getPrime(512)
q = getPrime(512)
p_s = p ** 2
n = p_s * q
phi = (p_s - p) * (q - 1)
e = randrange(1, phi)
g = gcd(e, phi)
while g != 1:
e = randrange(1, phi)
g = gcd(e, phi)
e = 41  # fixed public exponent; overrides the random e chosen above
d = inv(e, phi)
dp = d % (p - 1)
dq = d % (q - 1)
p2_inv_q = inv(p_s, q)
e_inv_p = inv(e, p)
#public, private
return [(n, e), (p, q, dp, dq, p2_inv_q, e_inv_p), d]
def encrypt(public, m):
return pow(m, public[1], public[0])
def hensel(cp, dp, p, e_inv_p, e, c):
p_s = p**2
m_p = pow(cp, dp-1, p)
K0 = m_p * cp % p
A = -pow(K0, e, p_s)
A = (A + c) % p_s
m_p = m_p * A % p_s
m_p = m_p * e_inv_p % p_s
m_p = (m_p + K0) % p_s
return m_p
def decrypt(c, privk, pub):
p, q, dp, dq, p2_inv_q, e_inv_p = privk
n, e = pub
p_s = p**2
c_p = c % p_s
c_q = c % q
m_p = hensel(c_p, dp, p, e_inv_p, e, c)
m_q = pow(c_q, dq, q)
V = (m_q - m_p) % q
V = V * p2_inv_q % q
M = V * p_s % n
M = (M + m_p) % n
return M
def classic_decrypt(c, d, n):
return pow(c, d, n)
if __name__ == '__main__':
m_ = 65
public, private, d = gen_keys()
#print(public)
c = encrypt(public, m_)
start_hensel = time()
dec = decrypt(c, private, public)
elapsed = time() - start_hensel
print(str(timedelta(seconds=elapsed)))
delta1 = timedelta(seconds=elapsed)
print(dec)
start_normal = time()
dec_ = classic_decrypt(c, d, public[0])
elapsed_ = time() - start_normal
print(str(timedelta(seconds=elapsed_)))
delta2 = timedelta(seconds=elapsed_)
print(dec_)
print(delta2/delta1)
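# Hand-checked toy example (insecure textbook-sized parameters, for illustration
# only; reuses encrypt/decrypt/classic_decrypt above). With p = 11 and q = 7 we
# get n = p^2 * q = 847 and phi = (p^2 - p) * (q - 1) = 660; e = 41 gives
# d = 161 (41 * 161 = 6601 = 10 * 660 + 1), dp = d % (p - 1) = 1,
# dq = d % (q - 1) = 5, p2_inv_q = 121^-1 mod 7 = 4, e_inv_p = 41^-1 mod 11 = 7.
def toy_example():
    public = (847, 41)
    private = (11, 7, 1, 5, 4, 7)
    d_toy = 161
    m = 65
    c = encrypt(public, m)  # 65^41 mod 847 == 648
    assert decrypt(c, private, public) == m
    assert classic_decrypt(c, d_toy, public[0]) == m
    return c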
|
"""
'toc' sub-command of the 'handbook' command.
This module composes a TOC for the Handbook from configuration files.
"""
import os
import sys
from urllib.request import pathname2url
from handbook_tools.lib.command_base import CommandBase
from handbook_tools.lib.navigation_tree import NavigationTree
__version__ = '0.6.8'
class Toc(CommandBase):
"""
Compose a TOC of the Handbook from configuration.
Usage:
toc [options]
Options:
-h, --help Show this help message and exit
--version Show the version and exit
-o, --output=FILE Specify output TOC file relative to site root
-d, --depth=LEVEL Max depth of the generated TOC tree [default: 8]
--no-stop Ignore 'stop' tags to scan the entire tree
--no-prefix Do not include item prefix for the TOC items
--no-index Do not include index numbers for the TOC items
--no-link Do not include links for the TOC items
--header Include HTML header for the TOC file
Examples:
handbook toc -h
handbook toc --version
handbook toc
handbook --root=tests/fixtures/site toc
handbook toc -d 3
handbook toc --depth=3 --no-index
handbook toc -d 2 --no-index --no-link -o toc2.md
handbook toc --no-stop -o toc.md
"""
def __init__(self, command_args=None, global_args=None):
""""""
super().__init__(command_args, global_args, version=__version__)
# kill bullets of unordered list (not supported by GitHub)
self.toc_header = '<style>ul { list-style-type: none; }</style>\n\n'
self.toc_title = '# Table of Contents\n\n'
self.markdown_ul = '-'
self._process_args()
self.toc_file = self._init_output_file(self.output_filename)
try:
if self.include_toc_header:
self.toc_file.write(self.toc_header)
self.toc_file.write(self.toc_title)
except IOError as err:
print('Error: Operation failed: {}'.format(err.strerror))
self.depth = 0
self.index = []
self.navigation_tree = None
def execute(self):
"""Entry point for the execution of this sub-command"""
self.navigation_tree = NavigationTree(self.site_root, self.verbose, self.no_stop)
self.navigation_tree.scan(self.node_performer)
if self.toc_file is not sys.stdout:
self.toc_file.close()
def node_performer(self, root_path, *_):
"""Custom performer executed for each visited node"""
name = os.path.basename(root_path)
link = root_path.replace(self.site_root, '')
self._update_index_counter(link)
# skip handbook root and too deep TOC items
if self.depth > 1 and (self.depth - 1) <= self.max_depth:
self.toc_file.write(self._format_toc(name, link))
def _process_args(self):
"""Process command_args"""
# default values not set by docopt were set in CommandBase
self.output_filename = self.args['--output']
self.max_depth = int(self.args['--depth'])
self.no_stop = self.args['--no-stop']
self.include_prefix = not self.args['--no-prefix']
self.include_index = not self.args['--no-index']
self.include_link = not self.args['--no-link']
self.include_toc_header = self.args['--header']
def _update_index_counter(self, link):
""""""
depth = len(link.split(os.sep)) - 1
if depth > len(self.index):
self.index += [1]
if depth <= self.depth:
self.index[depth-1] += 1
self.index = self.index[:depth]
self.depth = depth
def _format_toc(self, name, link):
""""""
# compose indent string
indent = ' ' * 2 * (self.depth - 2)
# compose optional item prefix string
prefix = ''
if self.include_prefix:
prefix = self.markdown_ul + ' '
# compose optional index string
index_string = ''
if self.include_index:
index_string = '.'.join(str(e) for e in self.index[1:self.depth])
index_string += ' '
# compose item string with optional link
toc_item = name
if self.include_link:
link_url = pathname2url(link)
toc_item = '[' + name + '](' + link_url + ')'
return '{}{}{}{}\n'.format(indent, prefix, index_string, toc_item)
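# Standalone sketch with hypothetical values (not part of the Toc class): it
# reproduces the string layout built by _format_toc above -- two spaces of
# indent per level beyond the second, an optional '- ' list prefix, a dotted
# index, and a Markdown link whose URL comes from pathname2url.
def _demo_format_toc_line(name, link, depth, index):
    indent = ' ' * 2 * (depth - 2)
    prefix = '- '
    index_string = '.'.join(str(e) for e in index) + ' '
    toc_item = '[' + name + '](' + pathname2url(link) + ')'
    return '{}{}{}{}\n'.format(indent, prefix, index_string, toc_item)
# _demo_format_toc_line('Guides', '/guides', 3, [1, 2]) -> '  - 1.2 [Guides](/guides)\n'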
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
import uuid
import beta_snippets
from google.cloud import storage
PROJECT_ID = os.environ['GCLOUD_PROJECT']
@pytest.fixture(scope='function')
def bucket():
"""Create a temporary bucket to store annotation output."""
bucket_name = str(uuid.uuid1())
storage_client = storage.Client()
bucket = storage_client.create_bucket(bucket_name)
yield bucket
bucket.delete(force=True)
@pytest.fixture(scope='session')
def glossary():
"""Get the ID of a glossary available to session (do not mutate/delete)."""
glossary_id = 'must-start-with-letters-' + str(uuid.uuid1())
beta_snippets.create_glossary(PROJECT_ID, glossary_id)
yield glossary_id
try:
beta_snippets.delete_glossary(PROJECT_ID, glossary_id)
except Exception:
pass
@pytest.fixture(scope='function')
def unique_glossary_id():
"""Get a unique ID. Attempts to delete glossary with this ID after test."""
glossary_id = 'must-start-with-letters-' + str(uuid.uuid1())
yield glossary_id
try:
beta_snippets.delete_glossary(PROJECT_ID, glossary_id)
except Exception:
pass
def test_translate_text(capsys):
beta_snippets.translate_text(PROJECT_ID, 'Hello world')
out, _ = capsys.readouterr()
assert 'Zdravo svet' in out
def test_batch_translate_text(capsys, bucket):
beta_snippets.batch_translate_text(
PROJECT_ID,
'gs://cloud-samples-data/translation/text.txt',
'gs://{}/translation/BATCH_TRANSLATION_OUTPUT/'.format(bucket.name))
out, _ = capsys.readouterr()
assert 'Total Characters: 13' in out
assert 'Translated Characters: 13' in out
def test_detect_language(capsys):
beta_snippets.detect_language(PROJECT_ID, 'Hæ sæta')
out, _ = capsys.readouterr()
assert 'is' in out
def test_list_languages(capsys):
beta_snippets.list_languages(PROJECT_ID)
out, _ = capsys.readouterr()
assert 'zh-CN' in out
def test_list_languages_with_target(capsys):
beta_snippets.list_languages_with_target(PROJECT_ID, 'is')
out, _ = capsys.readouterr()
assert u'Language Code: sq' in out
assert u'Display Name: albanska' in out
def test_create_glossary(capsys, unique_glossary_id):
beta_snippets.create_glossary(PROJECT_ID, unique_glossary_id)
out, _ = capsys.readouterr()
assert 'Created' in out
assert PROJECT_ID in out
assert unique_glossary_id in out
assert 'gs://cloud-samples-data/translation/glossary.csv' in out
def test_get_glossary(capsys, glossary):
beta_snippets.get_glossary(PROJECT_ID, glossary)
out, _ = capsys.readouterr()
assert glossary in out
assert 'gs://cloud-samples-data/translation/glossary.csv' in out
def test_list_glossary(capsys, glossary):
beta_snippets.list_glossaries(PROJECT_ID)
out, _ = capsys.readouterr()
assert glossary in out
assert 'gs://cloud-samples-data/translation/glossary.csv' in out
def test_translate_text_with_glossary(capsys, glossary):
beta_snippets.translate_text_with_glossary(
PROJECT_ID, glossary, 'directions')
out, _ = capsys.readouterr()
assert 'direcciones' in out
def test_delete_glossary(capsys, unique_glossary_id):
beta_snippets.create_glossary(PROJECT_ID, unique_glossary_id)
beta_snippets.delete_glossary(PROJECT_ID, unique_glossary_id)
out, _ = capsys.readouterr()
assert PROJECT_ID in out
assert 'us-central1' in out
assert unique_glossary_id in out
|
import pygame # imports the Pygame library
import random # imports the Random library
from audioplayer import AudioPlayer
inicio = False
source = "/home/joao/Arquivos/jogoCobrinha/"
# Start the match
def iniciar(inicio, tela, fonte, texto):
texto = fonte.render("Pressione T para iniciar: ", True, cor_pontos)
tela.blit(imagem, [0, 263])
tela.blit(texto, [150, 150])
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_t:
inicio = True
if event.type == pygame.QUIT:
raise Exception
return inicio
while True:
status = True
pygame.init()
player = AudioPlayer(source+"supermario.mp3")
comer = AudioPlayer(source+"comer.mp3")
erro = AudioPlayer(source+"Erro.mp3")
player.play()
# pygame.mixer.init()
# pygame.mixer.music.load('supermario.mp3')
# pygame.mixer.music.play()
# Define colors
cor_inicio = (64, 193, 255)
cor_fundo = (150, 255, 159) # Defines the background color
cor_cobra = (255, 0, 0) # Defines the snake color
cor_comida = (138, 0, 0) # Defines the food color 128,60,60
cor_pontos = (0, 0, 0) # Defines the score color
cor_inicio = (64, 193, 255)
cor_fim = (255, 255, 110)
#########
dimensoes = (600, 600)
fim = ""
# Initial values
pontuação = ""
texto = ""
tempo = 9.0
direcao_x = "Liberado"
direcao_y = "Liberado"
x = 300
y = 300
d = 20
dx = 0
dy = 0
x_comida = round(random.randrange(0, 600 - d)/20)*20
y_comida = round(random.randrange(0, 600 - d)/20)*20
fonte = pygame.font.SysFont("hack", 35)
fonte2 = pygame.font.SysFont("hack", 100)
lista_cobra = [[x, y]]
tela = pygame.display.set_mode((dimensoes))
pygame.display.set_caption("Snake")
tela.fill(cor_inicio)
imagem = pygame.image.load(source+"cobrinha.png")
estatico = imagem.get_rect()
clock = pygame.time.Clock()
if inicio == False:
while inicio == False:
pygame.display.update()
inicio = iniciar(inicio, tela, fonte, texto)
def desenha_cobra(lista_cobra):
tela.fill(cor_fundo)
for unidade in lista_cobra:
pygame.draw.rect(tela, cor_cobra, [unidade[0], unidade[1], d, d])
tela.fill(cor_fundo)
def mover_cobra(dx, dy, lista_cobra, direcao_x, direcao_y):
delta_x = 0
delta_y = 0
for event in pygame.event.get():
if event.type == pygame.QUIT:
raise Exception
if event.type == pygame.KEYDOWN:
if direcao_x == "Liberado":
if event.key == pygame.K_LEFT or event.key == pygame.K_a:
dx = -d
dy = 0
direcao_x = "Ocupado"
direcao_y = "Liberado"
elif event.key == pygame.K_RIGHT or event.key == pygame.K_d:
dx = d
dy = 0
direcao_x = "Ocupado"
direcao_y = "Liberado"
if direcao_y == "Liberado":
if event.key == pygame.K_UP or event.key == pygame.K_w:
dx = 0
dy = -d
direcao_y = "Ocupado"
direcao_x = "Liberado"
elif event.key == pygame.K_DOWN or event.key == pygame.K_s:
dx = 0
dy = d
direcao_y = "Ocupado"
direcao_x = "Liberado"
if event.key == pygame.K_ESCAPE:
raise Exception
x_novo = lista_cobra[-1][0] + dx
y_novo = lista_cobra[-1][1] + dy
lista_cobra.append([x_novo, y_novo])
del lista_cobra[0]
# x = x + delta_x
# y = y + delta_y
return dx, dy, lista_cobra, direcao_x, direcao_y
def verifica_comida(dx, dy, x_comida, y_comida, lista_cobra, tempo):
head = lista_cobra[-1]
x_novo = head[0] + dx
y_novo = head[1] + dy
if head[0] == x_comida and head[1] == y_comida:
comer.play()
lista_cobra.append([x_novo, y_novo])
tempo = tempo + 0.5
x_comida = round(random.randrange(0, 600 - d)/20)*20
y_comida = round(random.randrange(0, 600 - d)/20)*20
pygame.draw.rect(tela, cor_comida, [x_comida, y_comida, d, d])
return x_comida, y_comida, lista_cobra, tempo
def verifica_parede(lista_cobra, status):
head = lista_cobra[-1]
x = head[0]
y = head[1]
if x not in range(600) or y not in range(600):
status = False
return status
def verifica_mordeu_cobra(lista_cobra, status):
head = lista_cobra[-1]
corpo = lista_cobra.copy()
del corpo[-1]
for x, y in corpo:
if x == head[0] and y == head[1]:
status = False
return status
def atualizar_pontos(lista_cobra):
pontos = str(len(lista_cobra))
score = fonte.render("Scores: " + pontos, True, cor_pontos)
tela.blit(score, [0, 0])
return pontos
while status == True:
pygame.display.update()
desenha_cobra(lista_cobra)
dx, dy, lista_cobra, direcao_x, direcao_y = mover_cobra(
dx, dy, lista_cobra, direcao_x, direcao_y)
x_comida, y_comida, lista_cobra, tempo = verifica_comida(
dx, dy, x_comida, y_comida, lista_cobra, tempo)
# print(lista_cobra)
status = verifica_parede(lista_cobra, status)
status = verifica_mordeu_cobra(lista_cobra, status)
pontuação = atualizar_pontos(lista_cobra)
clock.tick(tempo)
erro.play()
pygame.display.update()
tela.fill(cor_fim)
fim = fonte2.render("Gamer Over: ", True, cor_pontos)
tela.blit(fim, [100, 50])
pontuação = fonte2.render("Pontos: " + pontuação, True, cor_pontos)
tela.blit(pontuação, [100, 200])
pygame.display.update()
clock.tick(0.3)
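# Illustrative helper (not called by the loop above): the food coordinates use
# round(random.randrange(0, 600 - d) / 20) * 20, which snaps a random pixel
# position onto the same 20 px grid the snake moves on, so the head can land
# exactly on the food cell.
def snap_to_grid(value, cell=20):
    return round(value / cell) * cell
# snap_to_grid(287) -> 280, snap_to_grid(293) -> 300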
|
#from app import db
from datetime import datetime, timedelta
#class User(db.Model):
# id = db.Column(db.Integer, primary_key=True)
# user_key = db.Column(db.String(32), index=True, unique=True)
# join_date = db.Column(db.String())
# last_active_date = db.Column(db.String())
# def __init__(self, user_key):
# self.user_key = user_key
# self.join_date = datetime.strftime(
# datetime.utcnow() + timedelta(hours=9),
# "%Y.%m.%d %H:%M:%S")
# self.last_active_date = self.join_date
# def __repr__(self):
# return "<User %r>" % (self.user_key)
|
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
from __future__ import annotations
import logging
from enum import Enum
from pathlib import Path
from typing import Any, Dict, Optional
import numpy as np
import torch
from radio import CTImagesMaskedBatch
from radio.batchflow import Dataset, action, inbatch_parallel
from InnerEye.Common.type_annotations import TupleFloat3
from InnerEye.ML import config
from InnerEye.ML.common import ModelExecutionMode
from InnerEye.ML.config import SegmentationModelBase
from InnerEye.ML.lightning_helpers import load_from_checkpoint_and_adjust_for_inference
from InnerEye.ML.lightning_models import SegmentationLightning
from InnerEye.ML.model_config_base import ModelConfigBase
from InnerEye.ML.models.architectures.base_model import BaseSegmentationModel
from InnerEye.ML.utils import image_util, ml_util
from InnerEye.ML.utils.image_util import compute_uncertainty_map_from_posteriors, gaussian_smooth_posteriors, \
posteriors_to_segmentation
class InferencePipelineBase:
"""Base class for all inference pipelines."""
def __init__(self, model_config: ModelConfigBase):
self.model_config = model_config
class FullImageInferencePipelineBase(InferencePipelineBase):
"""
Base Class for full image inference intended to be inherited by inference pipelines
that can perform full image prediction
"""
def __init__(self, model_config: SegmentationModelBase):
super().__init__(model_config)
def predict_and_post_process_whole_image(self, image_channels: np.ndarray,
voxel_spacing_mm: TupleFloat3,
mask: np.ndarray = None,
patient_id: int = 0) -> InferencePipeline.Result:
return self.post_process(self.predict_whole_image(image_channels, voxel_spacing_mm, mask, patient_id))
def predict_whole_image(self, image_channels: np.ndarray,
voxel_spacing_mm: TupleFloat3,
mask: np.ndarray = None,
patient_id: int = 0) -> InferencePipeline.Result:
raise NotImplementedError("Full image inference capability must be implemented by concrete classes")
def post_process(self, results: InferencePipeline.Result) -> InferencePipeline.Result:
"""
Perform connected component analysis to update segmentation with largest
connected component based on the configurations
:param results: inference results to post-process
:return: post-processed version of results
"""
if self.model_config.posterior_smoothing_mm:
posteriors = gaussian_smooth_posteriors(
posteriors=results.posteriors,
kernel_size_mm=self.model_config.posterior_smoothing_mm,
voxel_spacing_mm=results.voxel_spacing_mm
)
results = InferencePipeline.Result(
patient_id=results.patient_id,
posteriors=posteriors,
segmentation=posteriors_to_segmentation(posteriors),
voxel_spacing_mm=results.voxel_spacing_mm
)
if self.model_config.summed_probability_rules and not self.model_config.disable_extra_postprocessing:
assert isinstance(self.model_config, SegmentationModelBase)
results = results.with_new_segmentation(
image_util.apply_summed_probability_rules(self.model_config, results.posteriors, results.segmentation))
if self.model_config.largest_connected_component_foreground_classes is not None:
# get indices for classes to restrict
restrict_class_indices_and_thresholds = []
for name, idx in self.model_config.class_and_index_with_background().items():
for name2, threshold in self.model_config.largest_connected_component_foreground_classes:
if name2 == name:
restrict_class_indices_and_thresholds.append((idx, threshold))
results = results.with_new_segmentation(
image_util.extract_largest_foreground_connected_component(
multi_label_array=results.segmentation,
# mypy gets confused below because List is invariant. Sequence is covariant
# but does not allow "append".
restrictions=restrict_class_indices_and_thresholds)) # type: ignore
if self.model_config.slice_exclusion_rules and not self.model_config.disable_extra_postprocessing:
results = results.with_new_segmentation(
image_util.apply_slice_exclusion_rules(self.model_config, results.segmentation))
return results
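# Hedged standalone sketch (plain NumPy; hypothetical helper, not InnerEye's
# image_util API): the segmentation carried by InferencePipeline.Result below
# is the argmax of the Class x Z x Y x X posteriors over the class dimension,
# which is why Result checks that the two arrays agree on their last three
# dimensions.
def _demo_posteriors_to_segmentation(posteriors: np.ndarray) -> np.ndarray:
    # posteriors: Class x Z x Y x X with values in [0, 1]; returns Z x Y x X labels.
    return np.argmax(posteriors, axis=0)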
class InferencePipeline(FullImageInferencePipelineBase):
"""
Pipeline class for model for whole image inference on ct-images.
"""
# the model output is expected to be a valid probability distribution
MODEL_OUTPUT_POSTERIOR_RANGE = (0, 1)
class Variables(Enum):
"""
Variables associated with the inference pipeline
"""
# an instantiated model to use for inference.
Model = 'model'
# the configuration associated with the model.
ModelConfig = 'model_config'
# the shape of the image required as output from the pipeline.
OutputImageShape = 'output_image_shape'
# A Tuple[int,int,int] with the crop size that should be used. For large images, this will be
# the test_crop_size from the model config, but for smaller images, it will be the componentwise
# minimum of test_crop_size and image_size
CropSize = 'crop_size'
# The stride size to use, possibly adjusted for small images (see above for crop_size)
Stride = 'stride'
# The size of the output tensor that the model will produce when fed with an input tensor that
# has the given crop_size.
OutputSize = 'output_size'
class Result:
"""
Contains the inference results from a single pass of the inference pipeline
"""
def __init__(self,
patient_id: int,
segmentation: np.ndarray,
posteriors: np.ndarray,
voxel_spacing_mm: TupleFloat3):
"""
:param patient_id: The id of the patient instance for which inference is being performed.
:param segmentation: Z x Y x X (argmaxed over the posteriors in the class dimension)
:param voxel_spacing_mm: Voxel spacing to use for each dimension in (Z x Y x X) order
:param posteriors: Class x Z x Y x X
"""
self.patient_id = patient_id
self.segmentation = segmentation
self.posteriors = posteriors
self.voxel_spacing_mm = voxel_spacing_mm
if len(self.voxel_spacing_mm) != 3:
raise ValueError(f"voxel_spacing_mm must have length 3, found: {voxel_spacing_mm}")
if any(np.array(self.voxel_spacing_mm) <= 0):
raise ValueError(f"voxel_spacing_mm must have values > 0 in each dimension, found: {voxel_spacing_mm}")
ml_util.check_size_matches(self.segmentation,
self.posteriors,
dim1=3,
dim2=4,
matching_dimensions=[-3, -2, -1],
arg1_name="segmentation",
arg2_name="posteriors")
segmentation_value_range = np.unique(self.segmentation)
if not np.all([x in range(self.posteriors.shape[0]) for x in segmentation_value_range]):
raise Exception("values in the segmentation map must be in range [0, classes), "
"found classes:{}, segmentation range:{}"
.format(self.posteriors.shape[0], segmentation_value_range))
self._uncertainty = compute_uncertainty_map_from_posteriors(self.posteriors)
@property
def uncertainty(self) -> np.ndarray:
return self._uncertainty
def with_new_segmentation(self, segmentation: np.ndarray) -> InferencePipeline.Result:
if segmentation.shape != self.segmentation.shape:
raise ValueError(f"Attempt to replace segmentation of shape {self.segmentation.shape} "
f"with one of shape {segmentation.shape}")
return InferencePipeline.Result(
patient_id=self.patient_id,
segmentation=segmentation,
posteriors=self.posteriors,
voxel_spacing_mm=self.voxel_spacing_mm)
def __init__(self, model: SegmentationLightning, model_config: config.SegmentationModelBase,
pipeline_id: int = 0):
super().__init__(model_config)
self.model = model
self.model.model.eval()
self.pipeline_id = pipeline_id
@staticmethod
def create_from_checkpoint(path_to_checkpoint: Path,
model_config: SegmentationModelBase,
pipeline_id: int = 0) -> Optional[InferencePipeline]:
"""
Creates an instance of the inference pipeline for a given epoch from a stored checkpoint.
After loading, the model parameters are checked for NaN and Infinity values.
If there is no checkpoint file for the given epoch, return None.
:param path_to_checkpoint: The path to the checkpoint that we want to load
model_config.checkpoint_folder
:param model_config: Model related configurations.
:param pipeline_id: Numeric identifier for the pipeline (useful for logging when ensembling)
:return InferencePipeline: an instantiated inference pipeline instance, or None if there was no checkpoint
file for this epoch.
"""
if not path_to_checkpoint.is_file():
# not raising a value error here: This is used to create individual pipelines for ensembles,
# possibly one model cannot be created but others can
logging.warning(f"Could not recover model from checkpoint path {path_to_checkpoint}")
return None
lightning_model = load_from_checkpoint_and_adjust_for_inference(model_config, path_to_checkpoint)
assert isinstance(lightning_model, SegmentationLightning)
return InferencePipeline(model=lightning_model, model_config=model_config, pipeline_id=pipeline_id)
def predict_whole_image(self, image_channels: np.ndarray,
voxel_spacing_mm: TupleFloat3,
mask: np.ndarray = None,
patient_id: int = 0) -> InferencePipeline.Result:
"""
Performs a single inference pass through the pipeline for the provided image
:param image_channels: The input image channels to perform inference on in format: Channels x Z x Y x X.
:param voxel_spacing_mm: Voxel spacing to use for each dimension in (Z x Y x X) order
:param mask: A binary image used to ignore results outside it in format: Z x Y x X.
:param patient_id: The identifier of the patient this image belongs to (defaults to 0 if None provided).
:return: InferencePipeline.Result containing the segmentation and the per-class posterior probabilities.
"""
if image_channels is None:
raise Exception("image_channels cannot be None")
if image_channels.ndim != 4:
raise NotImplementedError("image_channels must be in shape: Channels x Z x Y x X"
"found image_channels shape: {}".format(image_channels.shape))
if mask is not None:
ml_util.check_size_matches(image_channels, mask, 4, 3, [-1, -2, -3])
self.model.eval()
# create the dataset for the batch
batch_dataset = Dataset(index=[patient_id], batch_class=InferenceBatch)
# setup the pipeline
pipeline = (batch_dataset.p
# define pipeline variables
.init_variables([InferencePipeline.Variables.Model,
InferencePipeline.Variables.ModelConfig,
InferencePipeline.Variables.CropSize,
InferencePipeline.Variables.OutputSize,
InferencePipeline.Variables.OutputImageShape,
InferencePipeline.Variables.Stride])
# update the variables for the batch actions
.update_variable(name=InferencePipeline.Variables.Model, value=self.model)
.update_variable(name=InferencePipeline.Variables.ModelConfig, value=self.model_config)
# perform cascaded batch actions
.load(image_channels=image_channels, mask=mask)
.pre_process()
.predict()
.post_process()
)
# run the batch through the pipeline
logging.info(f"Inference pipeline ({self.pipeline_id}), Predicting patient: {patient_id}")
processed_batch: InferenceBatch = pipeline.next_batch(batch_size=1)
posteriors = processed_batch.get_component(InferenceBatch.Components.Posteriors)
image_util.check_array_range(posteriors, error_prefix="Whole image posteriors")
# prepare pipeline results from the processed batch
return InferencePipeline.Result(
patient_id=patient_id,
segmentation=processed_batch.get_component(InferenceBatch.Components.Segmentation),
posteriors=posteriors,
voxel_spacing_mm=voxel_spacing_mm
)
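# Illustrative usage sketch (not part of the pipeline itself): assuming a trained SegmentationModelBase
# config and a checkpoint file exist, a whole-image prediction could look roughly like this.
# The names `my_config`, `checkpoint_path`, `image` and `mask` are placeholders, not defined here.
#
#     pipeline = InferencePipeline.create_from_checkpoint(path_to_checkpoint=checkpoint_path,
#                                                         model_config=my_config)
#     if pipeline is not None:
#         result = pipeline.predict_whole_image(image_channels=image,  # Channels x Z x Y x X
#                                               voxel_spacing_mm=(3.0, 1.0, 1.0),
#                                               mask=mask,
#                                               patient_id=42)
#         segmentation = result.segmentation  # Z x Y x X, argmax over classes
#         posteriors = result.posteriors      # Class x Z x Y x X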
class InferenceBatch(CTImagesMaskedBatch):
"""
Batch class for IO with the inference pipeline. One instance of a batch will load the image
into the 'images' component of the pipeline, and store the results of the full pass
of the pipeline into the 'segmentation' and 'posteriors' components.
"""
class Components(Enum):
"""
Components associated with the inference batch class
"""
# the input image channels in Channels x Z x Y x X format.
ImageChannels = 'channels'
# a set of 2D image slices (ie: a 3D image channel), stacked in Z x Y x X format.
Images = 'images'
# a binary mask used to ignore predictions in Z x Y x X format.
Mask = 'mask'
# a numpy.ndarray in Z x Y x X format with class labels for each voxel in the original image.
Segmentation = 'segmentation'
# a numpy.ndarray with the first dimension indexing each class in C x Z x Y x X format
# with each Z x Y x X being the same shape as the Images component, and consisting of
# [0, 1] values representing the model confidence for each voxel.
Posteriors = 'posteriors'
def __init__(self, index: int, *args: Any, **kwargs: Any):
super().__init__(index, *args, **kwargs)
self.components = [x.value for x in InferenceBatch.Components]
@action
def load(self, image_channels: np.ndarray, mask: np.ndarray) -> InferenceBatch:
"""
Load image channels and mask into their respective pipeline components.
"""
self.set_component(component=InferenceBatch.Components.ImageChannels, data=image_channels)
model_config = self.get_configs()
if model_config is None:
raise ValueError("model_config is None")
if model_config.test_crop_size is None:
raise ValueError("model_config.test_crop_size is None")
if model_config.inference_stride_size is None:
raise ValueError("model_config.inference_stride_size is None")
# fetch the image channels from the batch
image_channels = self.get_component(InferenceBatch.Components.ImageChannels)
self.pipeline.set_variable(name=InferencePipeline.Variables.OutputImageShape, value=image_channels[0].shape)
# There may be cases where the test image is smaller than the test_crop_size. Adjust crop_size
# to always fit into image. If test_crop_size is smaller than the image, crop will remain unchanged.
image_size = image_channels.shape[1:]
model: BaseSegmentationModel = self.pipeline.get_variable(InferencePipeline.Variables.Model).model
effective_crop, effective_stride = \
model.crop_size_constraints.restrict_crop_size_to_image(image_size,
model_config.test_crop_size,
model_config.inference_stride_size)
self.pipeline.set_variable(name=InferencePipeline.Variables.CropSize, value=effective_crop)
self.pipeline.set_variable(name=InferencePipeline.Variables.Stride, value=effective_stride)
logging.debug(
f"Inference on image size {image_size} will run "
f"with crop size {effective_crop} and stride {effective_stride}")
# In most cases, we will be able to read the output size from the pre-computed values
# via get_output_size. Only if we have a non-standard (smaller) crop size do we re-compute the output size.
output_size = model_config.get_output_size(execution_mode=ModelExecutionMode.TEST)
if effective_crop != model_config.test_crop_size:
output_size = model.get_output_shape(input_shape=effective_crop) # type: ignore
self.pipeline.set_variable(name=InferencePipeline.Variables.OutputSize, value=output_size)
if mask is not None:
self.set_component(component=InferenceBatch.Components.Mask, data=mask)
return self
@action
def pre_process(self) -> InferenceBatch:
"""
Prepare the input components of the batch for further processing.
"""
model_config = self.get_configs()
# fetch the image channels from the batch
image_channels = self.get_component(InferenceBatch.Components.ImageChannels)
crop_size = self.pipeline.get_variable(InferencePipeline.Variables.CropSize)
output_size = self.pipeline.get_variable(InferencePipeline.Variables.OutputSize)
image_channels = image_util.pad_images_for_inference(
images=image_channels,
crop_size=crop_size,
output_size=output_size,
padding_mode=model_config.padding_mode
)
# update the post-processed components
self.set_component(component=InferenceBatch.Components.ImageChannels, data=image_channels)
return self
@action
def predict(self) -> InferenceBatch:
"""
Perform a forward pass of the model on the provided image, this generates
a set of posterior maps for each class, as well as a segmentation output
stored in the respective 'posteriors' and 'segmentation' components.
"""
model_config = self.get_configs()
# extract patches for each image channel: Num patches x Channels x Z x Y x X
patches = self._extract_patches_for_image_channels()
# split the generated patches into batches and perform forward passes
predictions = []
batch_size = model_config.inference_batch_size
for batch_idx in range(0, len(patches), batch_size):
# slice over the batches to prepare batch
batch = torch.tensor(patches[batch_idx: batch_idx + batch_size, ...]).float()
if model_config.use_gpu:
batch = batch.cuda()
# perform the forward pass
batch_predictions = self._model_fn(batch).detach().cpu().numpy()
# collect the predictions over each of the batches
predictions.append(batch_predictions)
# map the batched predictions to the original batch shape
# of shape but with an added class dimension: Num patches x Class x Z x Y x X
predictions = np.concatenate(predictions, axis=0)
# create posterior output for each class with the shape: Class x Z x Y x X. We use float32 as these
# arrays can be big.
output_image_shape = self.pipeline.get_variable(InferencePipeline.Variables.OutputImageShape)
posteriors = np.zeros(shape=[model_config.number_of_classes] + list(output_image_shape), dtype=np.float32)
stride = self.pipeline.get_variable(InferencePipeline.Variables.Stride)
for c in range(len(posteriors)):
# stitch the patches for each posterior class
self.load_from_patches(predictions[:, c, ...], # type: ignore
stride=stride,
scan_shape=output_image_shape,
data_attr=InferenceBatch.Components.Posteriors.value)
# extract computed output from the component so the pipeline buffer can be reused
posteriors[c] = self.get_component(InferenceBatch.Components.Posteriors)
# store the stitched up results for the batch
self.set_component(component=InferenceBatch.Components.Posteriors, data=posteriors)
return self
@action
def post_process(self) -> InferenceBatch:
"""
Perform post processing on the computed outputs of a single pass of the pipeline.
Currently the following operations are performed:
-------------------------------------------------------------------------------------
1) the mask is applied to the posteriors (if required).
2) the final posteriors are used to perform an argmax to generate a multi-label segmentation.
3) extract the largest foreground connected component in the segmentation if required
"""
mask = self.get_component(InferenceBatch.Components.Mask)
posteriors = self.get_component(InferenceBatch.Components.Posteriors)
if mask is not None:
posteriors = image_util.apply_mask_to_posteriors(posteriors=posteriors, mask=mask)
# create segmentation using an argmax over the posterior probabilities
segmentation = image_util.posteriors_to_segmentation(posteriors)
# update the post-processed posteriors and save the segmentation
self.set_component(component=InferenceBatch.Components.Posteriors, data=posteriors)
self.set_component(component=InferenceBatch.Components.Segmentation, data=segmentation)
return self
def get_configs(self) -> config.SegmentationModelBase:
return self.pipeline.get_variable(InferencePipeline.Variables.ModelConfig)
def get_component(self, component: InferenceBatch.Components) -> np.ndarray:
return getattr(self, component.value) if hasattr(self, component.value) else None
@inbatch_parallel(init='indices', post='_post_custom_components', target='threads')
def set_component(self, batch_idx: int, component: InferenceBatch.Components, data: np.ndarray) \
-> Dict[str, Any]:
logging.debug("Updated data in pipeline component: {}, for batch: {}.".format(component.value, batch_idx))
return {
component.value: {'type': component.value, 'data': data}
}
def _extract_patches_for_image_channels(self) -> np.ndarray:
"""
Deterministically extracts patches from each image channel.
:return: Patches for each image channel in format: Num patches x Channels x Z x Y x X
"""
model_config = self.get_configs()
image_channels = self.get_component(InferenceBatch.Components.ImageChannels)
# There may be cases where the test image is smaller than the test_crop_size. Adjust crop_size
# to always fit into image, and adjust stride accordingly. If test_crop_size is smaller than the
# image, crop and stride will remain unchanged.
crop_size = self.pipeline.get_variable(InferencePipeline.Variables.CropSize)
stride = self.pipeline.get_variable(InferencePipeline.Variables.Stride)
patches = []
for channel_index, channel in enumerate(image_channels):
# set the current image channel component to process
self.set_component(component=InferenceBatch.Components.Images, data=channel)
channel_patches = self.get_patches(patch_shape=crop_size,
stride=stride,
padding=model_config.padding_mode.value,
data_attr=InferenceBatch.Components.Images.value)
logging.debug(
f"Image channel {channel_index}: Tensor with extracted patches has size {channel_patches.shape}")
patches.append(channel_patches)
# reset the images component
self.set_component(component=InferenceBatch.Components.Images, data=[])
return np.stack(patches, axis=1)
def _model_fn(self, patches: torch.Tensor) -> torch.Tensor:
"""
Wrapper function to handle the model forward pass
:param patches: Image patches to be passed to the model in format Patches x Channels x Z x Y x X
:return posteriors: Confidence maps [0,1] for each patch per class
in format: Patches x Class x Z x Y x X
"""
model = self.pipeline.get_variable(InferencePipeline.Variables.Model)
# Model forward pass returns posteriors
with torch.no_grad():
return model(patches)
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_dialog_report_compare_coder_file.ui'
#
# Created by: PyQt5 UI code generator 5.14.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog_reportCompareCoderFile(object):
def setupUi(self, Dialog_reportCompareCoderFile):
Dialog_reportCompareCoderFile.setObjectName("Dialog_reportCompareCoderFile")
Dialog_reportCompareCoderFile.setWindowModality(QtCore.Qt.NonModal)
Dialog_reportCompareCoderFile.resize(989, 580)
self.verticalLayout = QtWidgets.QVBoxLayout(Dialog_reportCompareCoderFile)
self.verticalLayout.setContentsMargins(1, 1, 1, 1)
self.verticalLayout.setSpacing(1)
self.verticalLayout.setObjectName("verticalLayout")
self.groupBox = QtWidgets.QGroupBox(Dialog_reportCompareCoderFile)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox.sizePolicy().hasHeightForWidth())
self.groupBox.setSizePolicy(sizePolicy)
self.groupBox.setMinimumSize(QtCore.QSize(0, 120))
self.groupBox.setMaximumSize(QtCore.QSize(16777215, 120))
self.groupBox.setTitle("")
self.groupBox.setObjectName("groupBox")
self.label_2 = QtWidgets.QLabel(self.groupBox)
self.label_2.setGeometry(QtCore.QRect(10, 20, 101, 22))
self.label_2.setObjectName("label_2")
self.comboBox_coders = QtWidgets.QComboBox(self.groupBox)
self.comboBox_coders.setGeometry(QtCore.QRect(112, 20, 211, 28))
self.comboBox_coders.setObjectName("comboBox_coders")
self.label_title = QtWidgets.QLabel(self.groupBox)
self.label_title.setGeometry(QtCore.QRect(10, -2, 291, 22))
self.label_title.setObjectName("label_title")
self.label_matrix = QtWidgets.QLabel(self.groupBox)
self.label_matrix.setGeometry(QtCore.QRect(600, 20, 30, 30))
self.label_matrix.setText("")
self.label_matrix.setObjectName("label_matrix")
self.label_memos = QtWidgets.QLabel(self.groupBox)
self.label_memos.setGeometry(QtCore.QRect(600, 70, 30, 30))
self.label_memos.setText("")
self.label_memos.setObjectName("label_memos")
self.label_selections = QtWidgets.QLabel(self.groupBox)
self.label_selections.setGeometry(QtCore.QRect(330, 20, 611, 28))
self.label_selections.setObjectName("label_selections")
self.pushButton_clear = QtWidgets.QPushButton(self.groupBox)
self.pushButton_clear.setGeometry(QtCore.QRect(50, 60, 32, 32))
self.pushButton_clear.setText("")
self.pushButton_clear.setObjectName("pushButton_clear")
self.pushButton_export_odt = QtWidgets.QPushButton(self.groupBox)
self.pushButton_export_odt.setGeometry(QtCore.QRect(90, 60, 32, 32))
self.pushButton_export_odt.setText("")
self.pushButton_export_odt.setObjectName("pushButton_export_odt")
self.pushButton_run = QtWidgets.QPushButton(self.groupBox)
self.pushButton_run.setGeometry(QtCore.QRect(10, 60, 32, 32))
self.pushButton_run.setText("")
self.pushButton_run.setObjectName("pushButton_run")
self.pushButton_help1 = QtWidgets.QPushButton(self.groupBox)
self.pushButton_help1.setGeometry(QtCore.QRect(130, 60, 32, 32))
self.pushButton_help1.setText("")
self.pushButton_help1.setObjectName("pushButton_help1")
self.verticalLayout.addWidget(self.groupBox)
self.groupBox_2 = QtWidgets.QGroupBox(Dialog_reportCompareCoderFile)
self.groupBox_2.setTitle("")
self.groupBox_2.setObjectName("groupBox_2")
self.gridLayout = QtWidgets.QGridLayout(self.groupBox_2)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setSpacing(0)
self.gridLayout.setObjectName("gridLayout")
self.splitter = QtWidgets.QSplitter(self.groupBox_2)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setObjectName("splitter")
self.splitter_vert = QtWidgets.QSplitter(self.splitter)
self.splitter_vert.setOrientation(QtCore.Qt.Vertical)
self.splitter_vert.setObjectName("splitter_vert")
self.listWidget_files = QtWidgets.QListWidget(self.splitter_vert)
self.listWidget_files.setObjectName("listWidget_files")
self.treeWidget = QtWidgets.QTreeWidget(self.splitter_vert)
self.treeWidget.setObjectName("treeWidget")
self.treeWidget.headerItem().setText(0, "Code Tree")
self.textEdit = QtWidgets.QTextEdit(self.splitter)
self.textEdit.setObjectName("textEdit")
self.gridLayout.addWidget(self.splitter, 0, 0, 1, 1)
self.verticalLayout.addWidget(self.groupBox_2)
self.retranslateUi(Dialog_reportCompareCoderFile)
QtCore.QMetaObject.connectSlotsByName(Dialog_reportCompareCoderFile)
Dialog_reportCompareCoderFile.setTabOrder(self.comboBox_coders, self.treeWidget)
Dialog_reportCompareCoderFile.setTabOrder(self.treeWidget, self.textEdit)
def retranslateUi(self, Dialog_reportCompareCoderFile):
_translate = QtCore.QCoreApplication.translate
Dialog_reportCompareCoderFile.setWindowTitle(_translate("Dialog_reportCompareCoderFile", "Reports"))
self.label_2.setText(_translate("Dialog_reportCompareCoderFile", "Coders:"))
self.label_title.setToolTip(_translate("Dialog_reportCompareCoderFile", "To compare coding.\n"
"Select two coders, one file, one code."))
self.label_title.setText(_translate("Dialog_reportCompareCoderFile", "Coder comparisons by file"))
self.label_matrix.setToolTip(_translate("Dialog_reportCompareCoderFile", "<html><head/><body><p>Matrix options</p></body></html>"))
self.label_memos.setToolTip(_translate("Dialog_reportCompareCoderFile", "Memo reporting options"))
self.label_selections.setText(_translate("Dialog_reportCompareCoderFile", "Coders selected"))
self.pushButton_clear.setToolTip(_translate("Dialog_reportCompareCoderFile", "<html><head/><body><p>Clear selection</p></body></html>"))
self.pushButton_export_odt.setToolTip(_translate("Dialog_reportCompareCoderFile", "Export ODT file"))
self.pushButton_run.setToolTip(_translate("Dialog_reportCompareCoderFile", "<html><head/><body><p>Run comparison</p></body></html>"))
self.pushButton_help1.setToolTip(_translate("Dialog_reportCompareCoderFile", "Statistics explanation"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Dialog_reportCompareCoderFile = QtWidgets.QDialog()
ui = Ui_Dialog_reportCompareCoderFile()
ui.setupUi(Dialog_reportCompareCoderFile)
Dialog_reportCompareCoderFile.show()
sys.exit(app.exec_())
|
import glob
import logging
from importlib import import_module
from os.path import basename, isdir, isfile
from pathlib import Path
from aiogram import Dispatcher
class ModuleManager:
def __init__(self, dp: Dispatcher):
self.dp = dp
self.root = Path(__file__).parent.parent
def load_path(self, path: str):
mod_paths = glob.glob(f"{self.root}/{path}/*.py")
all_modules = [
basename(module)[:-3]
for module in mod_paths
if isfile(module) and module.endswith(".py")
]
for module in all_modules:
self.load(path.replace("/", ".") + f".{module}")
def load(self, module: str):
try:
imp_module = import_module("app." + module)
except ModuleNotFoundError:
logging.error(f"Module <{module}> was not found.")
raise SystemExit()
if not hasattr(imp_module, "setup"):
logging.error(f"Module <{module}> doesn't have <setup>.")
raise SystemExit()
if not callable(imp_module.setup):
logging.error(f"Module <{module}> doesn't have callable <setup>.")
raise SystemExit()
try:
imp_module.setup(self.dp)
except Exception as error:
logging.exception(f"An error occured in <{module}>: {error}")
raise SystemExit()
logging.debug(f"Module <{module}> was loaded.")
return module
def load_all(self, modules: list):
"""
Iterates through modules and loads them.
"""
for module in modules:
# Shortcut for %module%.__init__
if module.startswith("$"):
self.load(f"{module[1:]}.__init__")
elif isdir(f"{self.root}/{module}/"):
self.load_path(module)
else:
self.load(module)
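# Illustrative usage sketch (the `bot` object and the module paths below are placeholders,
# not defined in this file):
#
#     dp = Dispatcher(bot)
#     manager = ModuleManager(dp)
#     # loads every .py under app/handlers/, one single module, and a package __init__
#     manager.load_all(["handlers", "filters.chat_admin", "$middlewares"])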
|
import xml.dom.minidom
import sys
# this uses 658 MB
document = xml.dom.minidom.parse(sys.stdin)
sets = []
entities = {}
for group in document.getElementsByTagName('group'):
if (group.getAttribute('name') == 'html5' or group.getAttribute('name') == 'mathml'):
for set in group.getElementsByTagName('set'):
sets.append(set.getAttribute('name'))
for entity in document.getElementsByTagName('entity'):
assert entity.parentNode.tagName == 'character'
assert entity.hasAttribute('set')
set = entity.getAttribute('set')
if (set in sets):
assert entity.hasAttribute('id')
name = entity.getAttribute('id')
assert len(name) > 0
assert entity.parentNode.hasAttribute('id')
value = entity.parentNode.getAttribute('id')
assert name not in entities or entities[name] == value, '(name: ' + name + ' old value: ' + entities[name] + ' new value: ' + value + ')'
if (name not in entities):
entities[name] = value
if ('-' in value):
value1 = value[1:6];
value2 = value[7:];
glyph = '<span data-x="" class="glyph compound">&#x' + value1 + ';&#x' + value2 + ';</span>'
print(' <tr id="entity-' + name + '"> <td> <code data-x="">' + name + ';</code> </td> <td> U+' + value1 + ' U+' + value2 + ' </td> <td> ' + glyph + ' </td> </tr>');
else:
if (value[1:] in ['020DC', '00311', '020DB', '020DB']):
glyph = '<span data-x="" class="glyph composition">◌' + '&#x' + value[1:] + ';</span>'
elif ('00000' < value[1:] < '00020'):
glyph = '<span data-x="" class="glyph control">$' + value[4:] + ';</span>'
else:
glyph = '<span data-x="" class="glyph">&#x' + value[1:] + ';</span>'
print(' <tr id="entity-' + name + '"> <td> <code data-x="">' + name + ';</code> </td> <td> U+' + value[1:] + ' </td> <td> ' + glyph + ' </td> </tr>');
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Spaghetti: Web Server Security Scanner
#
# @url: https://github.com/m4ll0k/Spaghetti
# @author: Momo Outaadi (M4ll0k)
# @license: See the file 'doc/LICENSE'
import re
from lib.net import http
from lib.net import utils
from lib.utils import printer
class ModStatus():
def __init__(self,url,agent,proxy,redirect):
self.url = url
self.printer = printer.Printer()
self.http = http.Http(agent=agent,proxy=proxy,redirect=redirect)
self.checker = utils.Checker()
def Run(self):
info = {
'name' : 'ModStatus',
'author' : 'Momo Outaadi (@M4ll0k)',
'description' : 'Apache mod_status'
}
try:
for x in ['/server-status/', '/server_status/', '/serverstatus/', '/mod-status/', '/mod_status/', '/modstatus', 'status']:
resp = self.http.Send(self.checker.Path(self.url,x))
if resp.status_code == 200 and re.search(r'Apache Server Status for', resp.text, re.I):
self.printer.plus('Apache (mod_status) information disclosure at: %s'%resp.url)
break
except Exception:
pass
|
from collections import Counter
import numpy as np
from sklearn.metrics import accuracy_score, confusion_matrix
def __majority(arr):
counter = Counter(arr)
value, _ = counter.most_common(1)[0]
return value
def clustering_accuracy(y_true, y_clustering):
clustering_labels = list(set(y_clustering))
new_labels = np.zeros_like(y_clustering)
for clustering_label in clustering_labels:
locator = y_clustering == clustering_label
locations = np.argwhere(locator)
real_labels = y_true[locations].ravel()
major_label = __majority(real_labels)
new_labels[locator] = major_label
return accuracy_score(y_true, new_labels)
def confusion_matrix_majority(y_true, y_clustering):
clustering_labels = list(set(y_clustering))
new_labels = np.zeros_like(y_clustering)
for clustering_label in clustering_labels:
locator = y_clustering == clustering_label
locations = np.argwhere(locator)
real_labels = y_true[locations].ravel()
major_label = __majority(real_labels)
new_labels[locator] = major_label
return confusion_matrix(y_true, new_labels)
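# Minimal usage sketch (illustrative): each cluster is relabelled with the majority true label
# of its members, then scored against the ground truth.
#
#     y_true = np.array([0, 0, 0, 1, 1, 1])
#     y_clustering = np.array([2, 2, 0, 0, 0, 0])  # arbitrary cluster ids
#     clustering_accuracy(y_true, y_clustering)        # -> 0.8333...
#     confusion_matrix_majority(y_true, y_clustering)  # 2x2 matrix over the relabelled clusters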
|
# Generated by Django 3.0.5 on 2020-04-14 14:07
import core.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0005_recipe'),
]
operations = [
migrations.AddField(
model_name='recipe',
name='image',
field=models.ImageField(null=True, upload_to=core.models.recipe_image_file_path),
),
]
|
"""Codec for currency property inside an XRPL issued currency amount json."""
from __future__ import annotations # Requires Python 3.7+
from typing import Optional, Type
from typing_extensions import Final
from xrpl.constants import HEX_CURRENCY_REGEX, ISO_CURRENCY_REGEX
from xrpl.core.binarycodec.exceptions import XRPLBinaryCodecException
from xrpl.core.binarycodec.types.hash160 import Hash160
_CURRENCY_CODE_LENGTH: Final[int] = 20 # bytes
def _is_iso_code(value: str) -> bool:
"""Tests if value is a valid 3-char iso code."""
return bool(ISO_CURRENCY_REGEX.fullmatch(value))
def _iso_code_from_hex(value: bytes) -> Optional[str]:
candidate_iso = value.decode("ascii")
if candidate_iso == "XRP":
raise XRPLBinaryCodecException(
"Disallowed currency code: to indicate the currency "
"XRP you must use 20 bytes of 0s"
)
if _is_iso_code(candidate_iso):
return candidate_iso
return None
def _is_hex(value: str) -> bool:
"""Tests if value is a valid 40-char hex string."""
return bool(HEX_CURRENCY_REGEX.fullmatch(value))
def _iso_to_bytes(iso: str) -> bytes:
"""
Convert an ISO code to a 160-bit (20 byte) encoded representation.
See "Currency codes" subheading in
`Amount Fields <https://xrpl.org/serialization.html#amount-fields>`_
"""
if not _is_iso_code(iso):
raise XRPLBinaryCodecException(f"Invalid ISO code: {iso}")
if iso == "XRP":
# This code (160 bit all zeroes) is used to indicate XRP in
# rare cases where a field must specify a currency code for XRP.
return bytes(_CURRENCY_CODE_LENGTH)
iso_bytes = iso.encode("ASCII")
# Currency Codes: https://xrpl.org/currency-formats.html#standard-currency-codes
# 160 total bits:
# 8 bits type code (0x00)
# 88 bits reserved (0's)
# 24 bits ASCII
# 16 bits version (0x00)
# 24 bits reserved (0's)
return bytes(12) + iso_bytes + bytes(5)
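# Worked example (illustrative): _iso_to_bytes("USD") yields 12 zero bytes, the ASCII bytes
# b"USD" (0x55 0x53 0x44), and 5 trailing zero bytes, i.e. the 20-byte value
# 0000000000000000000000005553440000000000 in hex.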
class Currency(Hash160):
"""
Codec for serializing and deserializing currency codes in issued currency amounts.
`Amount fields <https://xrpl.org/serialization.html#amount-fields>`_
Attributes:
buffer: The byte encoding of this currency.
_iso: The three-character ISO currency code if standard format, else None.
"""
LENGTH: Final[int] = 20
_iso: Optional[str] = None
def __init__(self: Currency, buffer: Optional[bytes] = None) -> None:
"""Construct a Currency."""
if buffer is not None:
super().__init__(buffer)
else:
super().__init__(bytes(self.LENGTH))
code_bytes = self.buffer[12:15]
# Determine whether this currency code is in standard or nonstandard format:
# https://xrpl.org/currency-formats.html#nonstandard-currency-codes
if self.buffer[0] != 0:
# non-standard currency
self._iso = None
elif self.buffer.hex() == "0" * 40: # all 0s
# the special case for literal XRP
self._iso = "XRP"
else:
self._iso = _iso_code_from_hex(code_bytes)
@classmethod
def from_value(cls: Type[Currency], value: str) -> Currency:
"""
Construct a Currency object from a string representation of a currency.
Args:
value: The string to construct a Currency object from.
Returns:
A Currency object constructed from value.
Raises:
XRPLBinaryCodecException: If the Currency representation is invalid.
"""
if not isinstance(value, str):
raise XRPLBinaryCodecException(
"Invalid type to construct a Currency: expected str,"
f" received {value.__class__.__name__}."
)
if _is_iso_code(value):
return Currency(_iso_to_bytes(value))
if _is_hex(value):
return cls(bytes.fromhex(value))
raise XRPLBinaryCodecException("Unsupported Currency representation: {value}")
def to_json(self: Currency) -> str:
"""
Returns the JSON representation of a currency.
Returns:
The JSON representation of a Currency.
"""
if self._iso is not None:
return self._iso
return self.buffer.hex().upper()
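# Illustrative round-trips (not part of the codec itself):
#     Currency.from_value("USD").to_json()  # -> "USD"
#     Currency.from_value("XRP").to_json()  # -> "XRP" (encoded internally as 20 zero bytes)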
|
#We’ve already seen a few python functions such as print and input, but now we’re going to dive into writing our own functions. To get started, we’ll write a function that takes in a number and squares it:
def square(x):
return x * x
#Notice how we use the def keyword to indicate we’re defining a function, that we’re taking in a single input called x and that we use the return keyword to indicate what the function’s output should be.
#We can then “call” this function just as we’ve called other ones: using parentheses:
for i in range(10):
print(f"The square of {i} is {square(i)}")
""" Output:
The square of 0 is 0
The square of 1 is 1
The square of 2 is 4
The square of 3 is 9
The square of 4 is 16
The square of 5 is 25
The square of 6 is 36
The square of 7 is 49
The square of 8 is 64
The square of 9 is 81
"""
|
class SomeClass(object):
_name = "some.model.name"
|
import logging
import json
from django.shortcuts import render,HttpResponse
from django.http import Http404
from django.views import View
from django.core.paginator import Paginator,EmptyPage,PageNotAnInteger
from . import models
from . import constants
from utils.json_fun import to_json_data
from utils.res_code import Code,error_map
from myblog import settings
# Create your views here.
# Get the logger
logger = logging.getLogger('django')
# def index(request):
# """
# index page
# :param request:
# :return:
# """
# return render(request,'news/index.html')
# def detail(request):
# return render(request,'news/news_detail.html')
# def search(request):
# return render(request,'news/search.html')
class IndexView(View):
"""
create news view
render tags hot_news
"""
def get(self, request):
"""
create index page view
"""
tags = models.Tag.objects.only('id', 'name').filter(is_delete=False)
hot_news = models.HotNews.objects.select_related('news').only('news__title', 'news__image_url',
'news__id').filter(is_delete=False).order_by(
'priority', '-news__clicks')[0:constants.SHOW_HOTNEWS_COUNT]
context = {
'tags':tags,
'hot_news':hot_news,
'navId' : 0
}
navId = 0
return render(request, 'news/index.html', locals())
# 1. Create the class-based view
# 2. Validate the parameters
# 3. Query the news list data from the database
# 4. Serialize the data
# 5. Return the result to the frontend
class NewsListView(View):
"""
create news list view
route :/news/
"""
def get(self, request):
print(request)
try:
tag_id = int(request.GET.get('tag_id', 0))
except Exception as e:
logger.error("标签错误:\n{}".format(e))
tag_id = 0
try:
page = int(request.GET.get('page', 1))
except Exception as e:
logger.error("当前页数错误:\n{}".format(e))
page = 1
news_queryset = models.News.objects.select_related('tag', 'author'). \
only('id','title', 'digest', 'image_url', 'update_time', 'tag__name', 'author__username')
# if models.Tag.objects.only('id').filter(is_delete=False, id=tag_id).exists():
# news = news_queryset.filter(is_delete=False, tag_id=tag_id)
# else:
# news = news_queryset.filter(is_delete=False)
news = news_queryset.filter(is_delete=False, tag_id=tag_id) or \
news_queryset.filter(is_delete=False)
paginator = Paginator(news, constants.PER_PAGE_NEWS_COUNT)
try:
news_info = paginator.page(page)
except EmptyPage:
# If the requested page exceeds the number of pages, return the last page of data
logging.info("Requested page number exceeds the total number of pages.")
news_info = paginator.page(paginator.num_pages)
# 4. Serialize the output
news_info_list = []
for n in news_info:
news_info_list.append({
'id': n.id,
'title': n.title,
'digest': n.digest,
'image_url': n.image_url,
'tag_name': n.tag.name,
'author': n.author.username,
'update_time': n.update_time.strftime('%Y年%m月%d日 %H:%M'),
})
# 5. Build the data returned to the frontend
data = {
'total_pages': paginator.num_pages,
'news': news_info_list
}
# print(data)
return to_json_data(data=data)
class NewsBanner(View):
"""
create news banner model
router:/news/banners/
"""
def get(self, request):
banners = models.Banner.objects.select_related('news').only('image_url', 'news__id', 'news__title').\
filter(is_delete=False)[0:constants.SHOW_BANNER_COUNT]
# Serialize the output
banners_info_list = []
for b in banners:
banners_info_list.append({
'image_url': b.image_url,
'news_id': b.news.id,
'news_title': b.news.title,
})
# Build the data returned to the frontend
data = {
'banners': banners_info_list
}
return to_json_data(data=data)
class NewsDetailView(View):
"""
create news detail view
router:/news/<int:news_id>/
"""
# /* Style the article content */
# The following needs to be added to templates/news1/news_detail.html:
# .news-content p {
# font-size: 16px;
# line-height: 26px;
# text-align: justify;
# word-wrap: break-word;
# padding: 3px 0
# }
def get(self, request, news_id):
news = models.News.objects.select_related('tag', 'author'). \
only('title', 'content', 'update_time', 'tag__name', 'author__username').\
filter(is_delete=False, id=news_id).first()
if news:
comments = models.Comments.objects.select_related('author', 'parents').\
only('content', 'author__username', 'update_time',
'parents__author__username', 'parents__content', 'parents__update_time').\
filter(is_delete=False, news_id=news_id)
# Serialize the output
comments_list = []
# Iterating over the queryset triggers the database query
for comm in comments:
comments_list.append(comm.to_dict_data())
comments_count = len(comments_list)
return render(request, 'news/news_detail.html', locals())
else:
raise Http404("<新闻{}>不存在😢".format(news_id))
# return Http404('<h1>Page not found</h1>')
#return HttpResponseNotFound('<h1>Page not found</h1>')
class NewsCommentView(View):
"""
create newscomments detail view
router:news/<int:news_id>/comments/
"""
# print('2222')
def post(self, request, news_id):
# print('111111', request)
if not request.user.is_authenticated:
return to_json_data(errno=Code.SESSIONERR, errmsg=error_map[Code.SESSIONERR])
if not models.News.objects.only('id').filter(is_delete=False, id=news_id).exists():
return to_json_data(errno=Code.PARAMERR, errmsg="The news item does not exist!")
# Get parameters from the frontend
try:
json_data = request.body
# print('111111',json_data)
if not json_data:
return to_json_data(errno=Code.PARAMERR, errmsg="Empty parameters, please try again!")
# Convert the JSON into a dict
dict_data = json.loads(json_data.decode('utf8'))
except Exception as e:
logger.info('Error message:\n{}'.format(e))
return to_json_data(errno=Code.UNKOWNERR,errmsg=error_map[Code.UNKOWNERR])
content = dict_data.get('content')
if not content:
return to_json_data(errno=Code.PARAMERR, errmsg="Comment content cannot be empty!")
parents_id = dict_data.get('parents_id')
try:
if parents_id:
parents_id = int(parents_id)
if not models.Comments.objects.only('id'). \
filter(is_delete=False, id=parents_id, news_id=news_id).exists():
return to_json_data(errno=Code.PARAMERR, errmsg=error_map[Code.PARAMERR])
except Exception as e:
logging.info("前端传过来的parents_id异常:\n{}".format(e))
return to_json_data(errno=Code.PARAMERR, errmsg="未知异常")
# 保存到数据库
new_content = models.Comments()
new_content.content = content
new_content.news_id = news_id
new_content.author = request.user
new_content.parents_id = parents_id if parents_id else None
new_content.save()
return to_json_data(data=new_content.to_dict_data())
from haystack.views import SearchView as _SearchView
class SearchView(_SearchView):
# Template file
template = 'news/search.html'
# Override the response: if the query parameter q is empty, return hot news from the News model, otherwise search for data matching q
def create_response(self):
kw = self.request.GET.get('q', '')
if not kw:
show_all = True
hot_news = models.HotNews.objects.select_related('news'). \
only('news__title', 'news__image_url', 'news__id'). \
filter(is_delete=False).order_by('priority', '-news__clicks')
paginator = Paginator(hot_news, settings.HAYSTACK_SEARCH_RESULTS_PER_PAGE)
try:
page = paginator.page(int(self.request.GET.get('page', 1)))
except PageNotAnInteger:
# If the page parameter is not an integer, return the first page of data
page = paginator.page(1)
except EmptyPage:
# If the requested page exceeds the number of pages, return the last page of data
page = paginator.page(paginator.num_pages)
navId = 3
return render(self.request, self.template, locals())
else:
show_all = False
qs = super(SearchView, self).create_response()
return qs
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class ContributedFeature(Model):
"""ContributedFeature.
:param _links: Named links describing the feature
:type _links: :class:`ReferenceLinks <feature-management.v4_0.models.ReferenceLinks>`
:param default_state: If true, the feature is enabled unless overridden at some scope
:type default_state: bool
:param default_value_rules: Rules for setting the default value if not specified by any setting/scope. Evaluated in order until a rule returns an Enabled or Disabled state (not Undefined)
:type default_value_rules: list of :class:`ContributedFeatureValueRule <feature-management.v4_0.models.ContributedFeatureValueRule>`
:param description: The description of the feature
:type description: str
:param id: The full contribution id of the feature
:type id: str
:param name: The friendly name of the feature
:type name: str
:param override_rules: Rules for overriding a feature value. These rules are run before explicit user/host state values are checked. They are evaluated in order until a rule returns an Enabled or Disabled state (not Undefined)
:type override_rules: list of :class:`ContributedFeatureValueRule <feature-management.v4_0.models.ContributedFeatureValueRule>`
:param scopes: The scopes/levels at which settings can set the enabled/disabled state of this feature
:type scopes: list of :class:`ContributedFeatureSettingScope <feature-management.v4_0.models.ContributedFeatureSettingScope>`
:param service_instance_type: The service instance id of the service that owns this feature
:type service_instance_type: str
"""
_attribute_map = {
'_links': {'key': '_links', 'type': 'ReferenceLinks'},
'default_state': {'key': 'defaultState', 'type': 'bool'},
'default_value_rules': {'key': 'defaultValueRules', 'type': '[ContributedFeatureValueRule]'},
'description': {'key': 'description', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'override_rules': {'key': 'overrideRules', 'type': '[ContributedFeatureValueRule]'},
'scopes': {'key': 'scopes', 'type': '[ContributedFeatureSettingScope]'},
'service_instance_type': {'key': 'serviceInstanceType', 'type': 'str'}
}
def __init__(self, _links=None, default_state=None, default_value_rules=None, description=None, id=None, name=None, override_rules=None, scopes=None, service_instance_type=None):
super(ContributedFeature, self).__init__()
self._links = _links
self.default_state = default_state
self.default_value_rules = default_value_rules
self.description = description
self.id = id
self.name = name
self.override_rules = override_rules
self.scopes = scopes
self.service_instance_type = service_instance_type
|
# --------------
# Importing header files
import numpy as np
# Path of the file has been stored in variable called 'path'
#New record
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
data = np.genfromtxt(path, delimiter=",", skip_header=1)
print("\nData: \n\n", data)
print("\nType of data: \n\n", type(data))
census = np.concatenate((data, new_record), axis=0)
print(census)
#Code starts here
# --------------
#Code starts here
age=census[:,0]
print(age)
max_age = np.max(age)
min_age = np.min(age)
age_mean = np.mean(age)
age_std = np.std(age)
print("max of age : ", max_age)
print("min of age : ", min_age)
print("mean of age : ", age_mean)
print("standard deviation of age : ", age_std)
# --------------
#Code starts here
race_0 = census[census[:,2] == 0]
race_1 = census[census[:,2] == 1]
race_2 = census[census[:,2] == 2]
race_3 = census[census[:,2] == 3]
race_4 = census[census[:,2] == 4]
len_0 = len(race_0)
len_1 = len(race_1)
len_2 = len(race_2)
len_3 = len(race_3)
len_4 = len(race_4)
minority_race = 3
print(race_0)
# --------------
#Code starts here
senior_citizens = census[census[:, 0] > 60]
working_hours = senior_citizens[:,6]
working_hours_sum = working_hours.sum()
senior_citizens_len = len(senior_citizens)
avg_working_hours = working_hours_sum / senior_citizens_len
print(avg_working_hours)
# --------------
#Code starts here
high = census[census[:,1] > 10]
low = census[census[:,1] <= 10]
avg_pay_high = high[:, 7].mean()
avg_pay_low = low[:, 7].mean()
if avg_pay_high > avg_pay_low:
print("Better education leads to better pay")
else:
print("Better education does not lead to better pay")
|
import FWCore.ParameterSet.Config as cms
import FWCore.ParameterSet.VarParsing as VarParsing
from Configuration.Eras.Era_Run2_2016_cff import Run2_2016
process = cms.Process('RECO2',Run2_2016)
options = VarParsing.VarParsing('analysis')
options.register('globalTag',
"auto:run2_mc", # default value
VarParsing.VarParsing.multiplicity.singleton, # singleton or list
VarParsing.VarParsing.varType.string, # string, int, or float
"input file name")
options.parseArguments()
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('Configuration.StandardSequences.Reconstruction_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(options.inputFiles),
secondaryFileNames = cms.untracked.vstring()
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(options.maxEvents)
)
process.options = cms.untracked.PSet()
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('step1 nevts:1'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.RECOSIMoutput = cms.OutputModule("PoolOutputModule",
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string(''),
filterName = cms.untracked.string('')
),
fileName = cms.untracked.string('step1_RECO.root'),
outputCommands = process.RECOSIMEventContent.outputCommands,
splitLevel = cms.untracked.int32(0)
)
# Additional output definition
process.RECOSIMoutput.outputCommands = cms.untracked.vstring("keep *_myRefittedTracks_*_*")
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, options.globalTag, '')
import RecoTracker.TrackProducer.trackProducerFromPatMuons_cfi
process.tracksFromMuons = RecoTracker.TrackProducer.trackProducerFromPatMuons_cfi.trackProducerFromPatMuons.clone(
src = "slimmedMuons",
innerTrackOnly = True
)
import RecoTracker.TrackProducer.TrackRefitter_cfi
process.myRefittedTracks = RecoTracker.TrackProducer.TrackRefitter_cfi.TrackRefitter.clone(
src = 'tracksFromMuons',
NavigationSchool = '',
Fitter = 'FlexibleKFFittingSmoother'
)
# Path and EndPath definitions
process.reconstruction_step = cms.Path(process.tracksFromMuons*process.myRefittedTracks)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.RECOSIMoutput_step = cms.EndPath(process.RECOSIMoutput)
# Schedule definition
process.schedule = cms.Schedule(process.reconstruction_step,process.endjob_step,process.RECOSIMoutput_step)
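# Illustrative only: with VarParsing the options above can be overridden on the cmsRun command
# line, e.g. (the config file name is a placeholder):
#   cmsRun thisConfig.py inputFiles=file:step0.root maxEvents=10 globalTag=auto:run2_mc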
|
__author__ = 'patras'
from domain_exploreEnv import *
from timer import DURATION
from state import state, rv
DURATION.TIME = {
'survey': 5,
'monitor': 5,
'screen': 5,
'sample': 5,
'process': 5,
'fly': 3,
'deposit': 1,
'transferData': 1,
'take': 2,
'put': 2,
'move': 10,
'charge': 5,
'negotiate': 5,
'handleAlien': 5,
}
DURATION.COUNTER = {
'survey': 5,
'monitor': 5,
'screen': 5,
'sample': 5,
'process': 5,
'fly': 3,
'deposit': 1,
'transferData': 1,
'take': 2,
'put': 2,
'move': 10,
'charge': 5,
'negotiate': 5,
'handleAlien': 5,
}
rv.TYPE = {'e1': 'survey', 'e2': 'monitor', 'e3': 'screen', 'e4': 'sample', 'e5':'process'}
rv.EQUIPMENT = {'survey': 'e1', 'monitor': 'e2', 'screen': 'e3', 'sample': 'e4', 'process': 'e5'}
rv.EQUIPMENTTYPE = {'e1': 'survey', 'e2': 'monitor', 'e3': 'screen', 'e4': 'sample', 'e5':'process'}
rv.LOCATIONS = ['base', 'z1', 'z2', 'z3', 'z4']
rv.EDGES = {'base': {'z1': 20, 'z2': 50, 'z3': 20, 'z4': 50}, 'z1': {'base': 20, 'z2': 30, 'z4': 50}, 'z2': {'base': 50, 'z1': 30, 'z3': 30}, 'z3': {'base': 20, 'z2': 30, 'z4': 30}, 'z4': {'base': 50, 'z3': 30, 'z1': 50}}
def ResetState():
state.loc = {'r1': 'base', 'r2': 'base', 'UAV': 'base'}
state.charge = { 'UAV': 80, 'r1': 50, 'r2': 50}
state.data = { 'UAV': 1, 'r1': 3, 'r2': 3}
state.pos = {'c1': 'base', 'e1': 'base', 'e2': 'base', 'e3': 'base', 'e4': 'base', 'e5': 'base', 'o1': 'UAV'}
state.load = {'r1': NIL, 'r2': NIL, 'UAV': 'o1'}
state.storm = {'active': False}
tasks = {
5: [['doActivities', 'UAV', [['survey', 'z2'], ['survey', 'z4'], ['survey', 'base']]]],
}
eventsEnv = {
}
|
from . import util
from .source import util as source_util
import gc
import decimal
import imp
import importlib
import sys
import timeit
def bench_cache(import_, repeat, number):
"""Measure the time it takes to pull from sys.modules."""
name = '<benchmark import>'
with util.uncache(name):
module = imp.new_module(name)
sys.modules[name] = module
runs = []
for x in range(repeat):
start_time = timeit.default_timer()
for y in range(number):
import_(name)
end_time = timeit.default_timer()
runs.append(end_time - start_time)
return min(runs)
def bench_importing_source(import_, repeat, number, loc=100000):
"""Measure importing source from disk.
For worst-case scenario, the line endings are \\r\\n and thus require
universal newline translation.
"""
name = '__benchmark'
with source_util.create_modules(name) as mapping:
with open(mapping[name], 'w') as file:
for x in range(loc):
file.write("{0}\r\n".format(x))
with util.import_state(path=[mapping['.root']]):
runs = []
for x in range(repeat):
start_time = timeit.default_timer()
for y in range(number):
try:
import_(name)
finally:
del sys.modules[name]
end_time = timeit.default_timer()
runs.append(end_time - start_time)
return min(runs)
def main(import_):
args = [('sys.modules', bench_cache, 5, 500000),
('source', bench_importing_source, 5, 10000)]
test_msg = "{test}, {number} times (best of {repeat}):"
result_msg = "{result:.2f} secs"
gc.disable()
try:
for name, meth, repeat, number in args:
result = meth(import_, repeat, number)
print(test_msg.format(test=name, repeat=repeat,
number=number).ljust(40),
result_msg.format(result=result).rjust(10))
finally:
gc.enable()
if __name__ == '__main__':
import optparse
parser = optparse.OptionParser()
parser.add_option('-b', '--builtin', dest='builtin', action='store_true',
default=False, help="use the built-in __import__")
options, args = parser.parse_args()
if args:
raise RuntimeError("unrecognized args: {0}".format(args))
import_ = __import__
if not options.builtin:
import_ = importlib.__import__
main(import_)
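# Illustrative invocations (the file name is a placeholder):
#   python import_benchmark.py       # times importlib.__import__
#   python import_benchmark.py -b    # times the built-in __import__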
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/core/framework/tensor.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from tensorflow.core.framework import resource_handle_pb2 as tensorflow_dot_core_dot_framework_dot_resource__handle__pb2
from tensorflow.core.framework import tensor_shape_pb2 as tensorflow_dot_core_dot_framework_dot_tensor__shape__pb2
from tensorflow.core.framework import types_pb2 as tensorflow_dot_core_dot_framework_dot_types__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow/core/framework/tensor.proto',
package='tensorflow',
syntax='proto3',
serialized_pb=_b('\n&tensorflow/core/framework/tensor.proto\x12\ntensorflow\x1a/tensorflow/core/framework/resource_handle.proto\x1a,tensorflow/core/framework/tensor_shape.proto\x1a%tensorflow/core/framework/types.proto\"\xdc\x03\n\x0bTensorProto\x12#\n\x05\x64type\x18\x01 \x01(\x0e\x32\x14.tensorflow.DataType\x12\x32\n\x0ctensor_shape\x18\x02 \x01(\x0b\x32\x1c.tensorflow.TensorShapeProto\x12\x16\n\x0eversion_number\x18\x03 \x01(\x05\x12\x16\n\x0etensor_content\x18\x04 \x01(\x0c\x12\x14\n\x08half_val\x18\r \x03(\x05\x42\x02\x10\x01\x12\x15\n\tfloat_val\x18\x05 \x03(\x02\x42\x02\x10\x01\x12\x16\n\ndouble_val\x18\x06 \x03(\x01\x42\x02\x10\x01\x12\x13\n\x07int_val\x18\x07 \x03(\x05\x42\x02\x10\x01\x12\x12\n\nstring_val\x18\x08 \x03(\x0c\x12\x18\n\x0cscomplex_val\x18\t \x03(\x02\x42\x02\x10\x01\x12\x15\n\tint64_val\x18\n \x03(\x03\x42\x02\x10\x01\x12\x14\n\x08\x62ool_val\x18\x0b \x03(\x08\x42\x02\x10\x01\x12\x18\n\x0c\x64\x63omplex_val\x18\x0c \x03(\x01\x42\x02\x10\x01\x12<\n\x13resource_handle_val\x18\x0e \x03(\x0b\x32\x1f.tensorflow.ResourceHandleProto\x12\x37\n\x0bvariant_val\x18\x0f \x03(\x0b\x32\".tensorflow.VariantTensorDataProto\"g\n\x16VariantTensorDataProto\x12\x11\n\ttype_name\x18\x01 \x01(\t\x12\x10\n\x08metadata\x18\x02 \x01(\x0c\x12(\n\x07tensors\x18\x03 \x03(\x0b\x32\x17.tensorflow.TensorProtoB-\n\x18org.tensorflow.frameworkB\x0cTensorProtosP\x01\xf8\x01\x01\x62\x06proto3')
,
dependencies=[tensorflow_dot_core_dot_framework_dot_resource__handle__pb2.DESCRIPTOR,tensorflow_dot_core_dot_framework_dot_tensor__shape__pb2.DESCRIPTOR,tensorflow_dot_core_dot_framework_dot_types__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_TENSORPROTO = _descriptor.Descriptor(
name='TensorProto',
full_name='tensorflow.TensorProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dtype', full_name='tensorflow.TensorProto.dtype', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tensor_shape', full_name='tensorflow.TensorProto.tensor_shape', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='version_number', full_name='tensorflow.TensorProto.version_number', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tensor_content', full_name='tensorflow.TensorProto.tensor_content', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='half_val', full_name='tensorflow.TensorProto.half_val', index=4,
number=13, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
_descriptor.FieldDescriptor(
name='float_val', full_name='tensorflow.TensorProto.float_val', index=5,
number=5, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
_descriptor.FieldDescriptor(
name='double_val', full_name='tensorflow.TensorProto.double_val', index=6,
number=6, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
_descriptor.FieldDescriptor(
name='int_val', full_name='tensorflow.TensorProto.int_val', index=7,
number=7, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
_descriptor.FieldDescriptor(
name='string_val', full_name='tensorflow.TensorProto.string_val', index=8,
number=8, type=12, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='scomplex_val', full_name='tensorflow.TensorProto.scomplex_val', index=9,
number=9, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
_descriptor.FieldDescriptor(
name='int64_val', full_name='tensorflow.TensorProto.int64_val', index=10,
number=10, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
_descriptor.FieldDescriptor(
name='bool_val', full_name='tensorflow.TensorProto.bool_val', index=11,
number=11, type=8, cpp_type=7, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
_descriptor.FieldDescriptor(
name='dcomplex_val', full_name='tensorflow.TensorProto.dcomplex_val', index=12,
number=12, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
_descriptor.FieldDescriptor(
name='resource_handle_val', full_name='tensorflow.TensorProto.resource_handle_val', index=13,
number=14, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='variant_val', full_name='tensorflow.TensorProto.variant_val', index=14,
number=15, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=189,
serialized_end=665,
)
_VARIANTTENSORDATAPROTO = _descriptor.Descriptor(
name='VariantTensorDataProto',
full_name='tensorflow.VariantTensorDataProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type_name', full_name='tensorflow.VariantTensorDataProto.type_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='metadata', full_name='tensorflow.VariantTensorDataProto.metadata', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tensors', full_name='tensorflow.VariantTensorDataProto.tensors', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=667,
serialized_end=770,
)
_TENSORPROTO.fields_by_name['dtype'].enum_type = tensorflow_dot_core_dot_framework_dot_types__pb2._DATATYPE
_TENSORPROTO.fields_by_name['tensor_shape'].message_type = tensorflow_dot_core_dot_framework_dot_tensor__shape__pb2._TENSORSHAPEPROTO
_TENSORPROTO.fields_by_name['resource_handle_val'].message_type = tensorflow_dot_core_dot_framework_dot_resource__handle__pb2._RESOURCEHANDLEPROTO
_TENSORPROTO.fields_by_name['variant_val'].message_type = _VARIANTTENSORDATAPROTO
_VARIANTTENSORDATAPROTO.fields_by_name['tensors'].message_type = _TENSORPROTO
DESCRIPTOR.message_types_by_name['TensorProto'] = _TENSORPROTO
DESCRIPTOR.message_types_by_name['VariantTensorDataProto'] = _VARIANTTENSORDATAPROTO
TensorProto = _reflection.GeneratedProtocolMessageType('TensorProto', (_message.Message,), dict(
DESCRIPTOR = _TENSORPROTO,
__module__ = 'tensorflow.core.framework.tensor_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.TensorProto)
))
_sym_db.RegisterMessage(TensorProto)
VariantTensorDataProto = _reflection.GeneratedProtocolMessageType('VariantTensorDataProto', (_message.Message,), dict(
DESCRIPTOR = _VARIANTTENSORDATAPROTO,
__module__ = 'tensorflow.core.framework.tensor_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.VariantTensorDataProto)
))
_sym_db.RegisterMessage(VariantTensorDataProto)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\030org.tensorflow.frameworkB\014TensorProtosP\001\370\001\001'))
_TENSORPROTO.fields_by_name['half_val'].has_options = True
_TENSORPROTO.fields_by_name['half_val']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
_TENSORPROTO.fields_by_name['float_val'].has_options = True
_TENSORPROTO.fields_by_name['float_val']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
_TENSORPROTO.fields_by_name['double_val'].has_options = True
_TENSORPROTO.fields_by_name['double_val']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
_TENSORPROTO.fields_by_name['int_val'].has_options = True
_TENSORPROTO.fields_by_name['int_val']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
_TENSORPROTO.fields_by_name['scomplex_val'].has_options = True
_TENSORPROTO.fields_by_name['scomplex_val']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
_TENSORPROTO.fields_by_name['int64_val'].has_options = True
_TENSORPROTO.fields_by_name['int64_val']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
_TENSORPROTO.fields_by_name['bool_val'].has_options = True
_TENSORPROTO.fields_by_name['bool_val']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
_TENSORPROTO.fields_by_name['dcomplex_val'].has_options = True
_TENSORPROTO.fields_by_name['dcomplex_val']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
# @@protoc_insertion_point(module_scope)
|