"""
Tests for functions in class SolveDiffusion2D
"""
import numpy as np
#import pytest
from diffusion2d import SolveDiffusion2D
from unittest import TestCase
class TestOperations(TestCase):
"""
    Test suite for the SolveDiffusion2D solver methods.
"""
def setUp(self):
# Fixture
self.w = 12.
self.h = 20.
self.dx = 0.4
self.dy = 0.2
self.D = 0.5
self.T_cold = 300.
self.T_hot = 700.
def test_initialize_domain(self):
"""
Check function SolveDiffusion2D.initialize_domain
"""
solver = SolveDiffusion2D()
expected_nx = 30 #int(self.w / self.dx)
expected_ny = 100 #int(self.h / self.dy)
solver.initialize_domain(self.w,self.h,self.dx,self.dy)
self.assertEqual(solver.nx, expected_nx)
self.assertEqual(solver.ny, expected_ny)
def test_initialize_physical_parameters(self):
"""
        Checks function SolveDiffusion2D.initialize_physical_parameters
"""
solver = SolveDiffusion2D()
solver.dx = self.dx
solver.dy = self.dy
#dx**2 * dy**2 / (2 * d * (dx**2 + dy**2))
expected_dt = 0.032
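        # The hard-coded value follows from the fixture and the formula above:
        # 0.4**2 * 0.2**2 / (2 * 0.5 * (0.4**2 + 0.2**2)) = 0.0064 / 0.2 = 0.032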
solver.initialize_physical_parameters(self.D)
self.assertAlmostEqual(solver.dt, expected_dt, 6)
def test_get_initial_condition(self):
"""
        Checks function SolveDiffusion2D.get_initial_condition
"""
solver = SolveDiffusion2D()
solver.T_cold = self.T_cold
solver.T_hot = self.T_hot
solver.initialize_domain(self.w,self.h,self.dx,self.dy)
expected_u = self.T_cold * np.ones((solver.nx, solver.ny))
# Initial conditions - circle of radius r centred at (cx,cy) (mm)
r, cx, cy = 2, 5, 5
r2 = r ** 2
for i in range(solver.nx):
for j in range(solver.ny):
p2 = (i * solver.dx - cx) ** 2 + (j * solver.dy - cy) ** 2
if p2 < r2:
expected_u[i, j] = self.T_hot
actual_u = solver.get_initial_condition()
for i in range(solver.nx):
for j in range(solver.ny):
self.assertEqual(actual_u[i,j], expected_u[i,j])
# def test_initialize_domain():
# """
# Check function SolveDiffusion2D.initialize_domain
# """
# solver = SolveDiffusion2D()
#
# w = 12.
# h = 20.
# dx = 0.4
# dy = 0.2
# expected_nx = 30 #int(w / dx)
# expected_ny = 100 #int(h / dy)
#
# solver.initialize_domain(w,h,dx,dy)
#
# assert solver.nx == expected_nx
# assert solver.ny == expected_ny
#
# def test_initialize_physical_parameters():
# """
# Checks function SolveDiffusion2D.initialize_domain
# """
# solver = SolveDiffusion2D()
# solver.dx = 0.2
# solver.dy = 0.4
# d=5.
#
# #dx**2 * dy**2 / (2 * d * (dx**2 + dy**2))
# expected_dt = pytest.approx(0.0032, abs=0.000001)
#
# solver.initialize_physical_parameters(d)
#
# assert solver.dt == expected_dt
#
# def test_get_initial_condition():
# """
# Checks function SolveDiffusion2D.get_initial_function
# """
# solver = SolveDiffusion2D()
# solver.T_cold = 300.
# solver.T_hot = 700.
# solver.dx = 0.1
# solver.dy = 0.2
# solver.nx = 100
# solver.ny = 50
#
# expected_u = solver.T_cold * np.ones((solver.nx, solver.ny))
#
# # Initial conditions - circle of radius r centred at (cx,cy) (mm)
# r, cx, cy = 2, 5, 5
# r2 = r ** 2
# for i in range(solver.nx):
# for j in range(solver.ny):
# p2 = (i * solver.dx - cx) ** 2 + (j * solver.dy - cy) ** 2
# if p2 < r2:
# expected_u[i, j] = solver.T_hot
#
# actual_u = solver.get_initial_condition()
#
# assert np.all(actual_u == expected_u)
|
#!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Access AO integrals
Mole.intor and Mole.intor_by_shell functions can generate AO integrals.
Calling Mole.intor with the integral function name returns an integral matrix
for all basis functions defined in Mole. If the integral operator has multiple
components, e.g. gradients, the keyword argument comp=* needs to be specified to
tell the function how many components the integrals have.
Mole.intor_by_shell function generates the integrals for the given shell
indices. Keyword argument comp=* is also required when the integral operator
has multiple components.
See pyscf/gto/moleintor.py file for the complete list of supported integrals.
'''
import numpy
from pyscf import gto, scf
mol = gto.M(
verbose = 0,
atom = 'C 0 0 0; O 0 0 1.5',
basis = 'ccpvdz'
)
mf = scf.RHF(mol)
mf.kernel()
dm = mf.make_rdm1()
# Overlap, kinetic, nuclear attraction
s = mol.intor('cint1e_ovlp_sph')
t = mol.intor('cint1e_kin_sph')
v = mol.intor('cint1e_nuc_sph')
# Overlap, kinetic, nuclear attraction gradients (against electron coordinates)
s1 = mol.intor('cint1e_ipovlp_sph', comp=3)
t1 = mol.intor('cint1e_ipkin_sph' , comp=3)
v1 = mol.intor('cint1e_ipnuc_sph' , comp=3)
print('Dipole %s' % numpy.einsum('xij,ij->x',
mol.intor('cint1e_r_sph', comp=3), dm))
#
# AO overlap between two molecules
#
mol1 = gto.M(
verbose = 0,
atom = 'H 0 1 0; H 1 0 0',
basis = 'ccpvdz'
)
s = gto.intor_cross('cint1e_ovlp_sph', mol, mol1)
print('overlap shape (%d, %d)' % s.shape)
#
# 2e integrals. Keyword aosym is to specify the permutation symmetry in the
# AO integral matrix. s8 means 8-fold symmetry, s2kl means 2-fold symmetry
# for the symmetry between kl in (ij|kl)
#
eri = mol.intor('cint2e_sph', aosym='s8')
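# Rough sanity check (a sketch, not part of the original example): with 8-fold
# symmetry the integrals come back as a flat 1D array over the unique (ij|kl)
# combinations, so the length should be npair*(npair+1)//2 with
# npair = nao*(nao+1)//2.
npair = mol.nao_nr() * (mol.nao_nr() + 1) // 2
print('s8-packed ERI length %d, expected %d' % (eri.size, npair * (npair + 1) // 2))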
#
# 2e gradient integrals on first atom only
#
eri = mol.intor('cint2e_ip1_sph', aosym='s2kl')
#
# 2e integral gradients on certain atom
#
atm_id = 1 # second atom
bas_start, bas_end, ao_start, ao_end = mol.aoslice_by_atom()[atm_id]
tot_bra = ao_end - ao_start
nao = mol.nao_nr()
eri1 = numpy.empty((3,tot_bra,nao,nao,nao))
pi = 0
for i in range(mol.nbas):
if mol.bas_atom(i) == atm_id:
pj = 0
for j in range(mol.nbas):
pk = 0
for k in range(mol.nbas):
pl = 0
for l in range(mol.nbas):
shls = (i, j, k, l)
buf = mol.intor_by_shell('cint2e_ip1_sph', shls, comp=3)
di, dj, dk, dl = buf.shape[1:]
eri1[:,pi:pi+di,pj:pj+dj,pk:pk+dk,pl:pl+dl] = buf
pl += dl
pk += dk
pj += dj
pi += di
print('integral shape %s' % str(eri1.shape))
#
# Generate a sub-block of AO integrals. The sub-block (ij|kl) contains the
# shells 2:5 for basis i, 0:2 for j, 0:4 for k and 1:3 for l
#
sub_eri = mol.intor('int2e_sph', shls_slice=(2,5,0,2,0,4,1,3))
# This statement is equivalent to
dims = []
for i in range(mol.nbas):
l = mol.bas_angular(i)
nc = mol.bas_nctr(i)
dims.append((l * 2 + 1) * nc)
nao_i = sum(dims[2:5])
nao_j = sum(dims[0:2])
nao_k = sum(dims[0:4])
nao_l = sum(dims[1:3])
sub_eri = numpy.empty((nao_i,nao_j,nao_k,nao_l))
pi = 0
for i in range(2,5):
pj = 0
for j in range(0,2):
pk = 0
for k in range(0,4):
pl = 0
for l in range(1,3):
shls = (i, j, k, l)
buf = mol.intor_by_shell('int2e_sph', shls)
di, dj, dk, dl = buf.shape
sub_eri[pi:pi+di,pj:pj+dj,pk:pk+dk,pl:pl+dl] = buf
pl += dl
pk += dk
pj += dj
pi += di
sub_eri = sub_eri.reshape(nao_i*nao_j,nao_k*nao_l)
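# Optional cross-check (a sketch): the block assembled shell-by-shell above
# should match the direct shls_slice call, up to the reshape applied here.
direct = mol.intor('int2e_sph', shls_slice=(2,5,0,2,0,4,1,3))
print('sub-block consistent: %s'
      % numpy.allclose(sub_eri, direct.reshape(nao_i*nao_j, nao_k*nao_l)))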
#
# Generate all AO integrals for a sub-system.
#
mol = gto.M(atom=[['H', 0,0,i] for i in range(10)])
atom_idx = [0,2,4] # The disjoint atoms
sub_mol = mol.copy()
sub_mol._bas = mol._bas[atom_idx]
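# Note: indexing _bas by atom indices works here only because each H atom in the
# default (minimal) basis carries exactly one shell, so shell and atom indices
# coincide; in general the rows of _bas would have to be selected by shell.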
sub_eri = sub_mol.intor('int2e_sph', aosym='s1')
# This statement is equivalent to
sub_nao = 0
for i in range(mol.nbas):
if mol.bas_atom(i) in atom_idx:
l = mol.bas_angular(i)
nc = mol.bas_nctr(i)
sub_nao += (l * 2 + 1) * nc
sub_eri = numpy.empty((sub_nao,sub_nao,sub_nao,sub_nao))
pi = 0
for i in range(mol.nbas):
if mol.bas_atom(i) in atom_idx:
pj = 0
for j in range(mol.nbas):
if mol.bas_atom(j) in atom_idx:
pk = 0
for k in range(mol.nbas):
if mol.bas_atom(k) in atom_idx:
pl = 0
for l in range(mol.nbas):
if mol.bas_atom(l) in atom_idx:
shls = (i, j, k, l)
buf = mol.intor_by_shell('int2e_sph', shls)
di, dj, dk, dl = buf.shape
sub_eri[pi:pi+di,pj:pj+dj,pk:pk+dk,pl:pl+dl] = buf
pl += dl
pk += dk
pj += dj
pi += di
sub_eri = sub_eri.reshape(sub_nao**2,sub_nao**2)
|
from setuptools import setup, find_packages
from distutils.extension import Extension
import numpy as np
import cython_gsl
import versioneer
def read_requirements():
import os
path = os.path.dirname(os.path.abspath(__file__))
requirements_file = os.path.join(path, 'requirements.txt')
try:
with open(requirements_file, 'r') as req_fp:
requires = req_fp.read().split()
except IOError:
return []
else:
        return requires
setup(name='plume',
version=versioneer.get_version(),
description='A hypopycnal sediment-carrying plume entering the ocean',
author='Eric Hutton',
author_email='huttone@colorado.edu',
url='http://csdms.colorado.edu',
install_requires=read_requirements(),
setup_requires=['setuptools', ],
packages=find_packages(),
include_dirs = [np.get_include(), cython_gsl.get_include()],
entry_points={
'console_scripts': [
'plume=plume.cli:main',
],
},
ext_modules = [
Extension('plume.ext.centerline',
['plume/ext/centerline.pyx'],
extra_compile_args=['-O3'],
libraries=cython_gsl.get_libraries(),
library_dirs=[cython_gsl.get_library_dir()],
include_dirs=[cython_gsl.get_cython_include_dir()])],
cmdclass=versioneer.get_cmdclass(),
)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-03-17 03:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='SemesterModules',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('module_tile', models.CharField(max_length=256, verbose_name='Title')),
('module_icon', models.CharField(max_length=128, verbose_name='Font Awesome Icon')),
('module_description', models.TextField(max_length=1024, verbose_name='Description')),
],
),
]
|
import pickle
# =============================================================================
# THE __str__ METHOD IS WHAT ALLOWS PRINTING THE OBJECT'S INFO AS A STRING;
# OTHERWISE THE showp() METHOD WOULD ONLY SHOW THE OBJECTS CREATED IN MEMORY
# BUT NOT THEIR INFO: (<__main__.Person object at 0x00000218F088B9C8>)
# =============================================================================
class Person:
def __init__(self , name , nac , age):
self.name = name
self.nac = nac
self.age = age
print("\nIt's been created:" , self.name)
def __str__(self):
return "{} {} {}".format(self.name , self.nac , self.age)
# =============================================================================
# THE CLASS ORIGINALLY HAD NO __init__ EVEN THOUGH IT USED THIS LIST; I ADDED IT
# TO MAKE CLEAR THAT WHAT IS BEING CREATED IS AN ATTRIBUTE OF THE PEOPLELIST
# OBJECT CONSISTING OF A LIST, AND THAT, LIKE ANY OTHER ATTRIBUTE, IT MUST BE
# REFERENCED EXPLICITLY IN ORDER TO USE OR MODIFY IT
# =============================================================================
class Peoplelist:
def __init__(self):
self.persons = []
def addp(self , p):
self.persons.append(p)
def showp(self):
for i in self.persons:
print(i)
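# Note: pickle stores only the instances' data; the Person class defined above
# must be importable (here it lives in this same script) when the file is loaded
# back, otherwise pickle.load would fail.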
# =============================================================================
x = input("Would you like to add (a) or read (r)?: \n>> ")
while True:
if x == "q":
print("\t--Process finished by user--")
del x
break
if x== "r":
try:
with open("People_info" , "rb") as pickledfile:
unpickled = pickle.load(pickledfile)
for i in unpickled:
print(i)
del unpickled
x = input("Would you like to add (a), read (r) or quit (q)?: \n>> ")
        except FileNotFoundError:
print("\t--File doesn't exist, you should create one first--")
x = input("Would you like to add (a), read (r) or quit (q)?: \n>> ")
elif x== "a":
lst = Peoplelist()
p = Person(input("Name: "), input("Country: "), int(input("Age: ")))
try:
with open("People_info" , "rb") as reading2update:
lst.persons = pickle.load(reading2update)
lst.addp(p)
        except (FileNotFoundError, EOFError):
lst.addp(p)
finally:
lst.showp()
with open("People_info" , "wb") as file:
pickle.dump(lst.persons, file)
# del lst
print("Pickling process succesfully finished")
x = input("Would you like to add (a), read (r) or quit (q)?: \n>> ")
else:
print("\t--You must select a valid option (a, r or q--")
x = input("Would you like to add (a), read (r) or quit (q)?: \n>> ")
print("\n** THIS IS THE END **")
|
# Trie Tree Node
from typing import Optional
class TrieNode:
def __init__(self, char: Optional[str] = None):
self.char = char
self.children = []
self.counter = 0
self.end = False
def add(self, word: str):
node = self
for char in word:
found_in_children = False
for child in node.children:
if child.char == char:
found_in_children = True
child.counter += 1
node = child
break
            if not found_in_children:
                new_node = TrieNode(char)
                new_node.counter += 1  # count this word for the new node too, matching the found-child branch
                node.children.append(new_node)
                node = new_node
node.end = True
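

# A small usage sketch (not part of the original module); `find_prefix` is a
# hypothetical helper added here purely for illustration.
def find_prefix(root: TrieNode, prefix: str) -> tuple:
    """Return (found, count): whether `prefix` exists and how many added words share it."""
    node = root
    if not root.children:
        return False, 0
    for char in prefix:
        for child in node.children:
            if child.char == char:
                node = child
                break
        else:
            # No child matched this character, so the prefix is absent.
            return False, 0
    return True, node.counter


if __name__ == "__main__":
    root = TrieNode()
    for word in ("hack", "hackathon", "hat"):
        root.add(word)
    print(find_prefix(root, "ha"))      # True plus the count of words sharing "ha"
    print(find_prefix(root, "hammer"))  # (False, 0)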
|
from moto.ec2.utils import add_tag_specification
from ._base_response import EC2BaseResponse
class ElasticIPAddresses(EC2BaseResponse):
def allocate_address(self):
domain = self._get_param("Domain", if_none="standard")
reallocate_address = self._get_param("Address", if_none=None)
tags = self._get_multi_param("TagSpecification")
tags = add_tag_specification(tags)
if self.is_not_dryrun("AllocateAddress"):
if reallocate_address:
address = self.ec2_backend.allocate_address(
domain, address=reallocate_address, tags=tags
)
else:
address = self.ec2_backend.allocate_address(domain, tags=tags)
template = self.response_template(ALLOCATE_ADDRESS_RESPONSE)
return template.render(address=address)
def associate_address(self):
instance = eni = None
if "InstanceId" in self.querystring:
instance = self.ec2_backend.get_instance(self._get_param("InstanceId"))
elif "NetworkInterfaceId" in self.querystring:
eni = self.ec2_backend.get_network_interface(
self._get_param("NetworkInterfaceId")
)
else:
self.ec2_backend.raise_error(
"MissingParameter",
"Invalid request, expect InstanceId/NetworkId parameter.",
)
reassociate = False
if "AllowReassociation" in self.querystring:
reassociate = self._get_param("AllowReassociation") == "true"
if self.is_not_dryrun("AssociateAddress"):
if instance or eni:
if "PublicIp" in self.querystring:
eip = self.ec2_backend.associate_address(
instance=instance,
eni=eni,
address=self._get_param("PublicIp"),
reassociate=reassociate,
)
elif "AllocationId" in self.querystring:
eip = self.ec2_backend.associate_address(
instance=instance,
eni=eni,
allocation_id=self._get_param("AllocationId"),
reassociate=reassociate,
)
else:
self.ec2_backend.raise_error(
"MissingParameter",
"Invalid request, expect PublicIp/AllocationId parameter.",
)
else:
self.ec2_backend.raise_error(
"MissingParameter",
"Invalid request, expect either instance or ENI.",
)
template = self.response_template(ASSOCIATE_ADDRESS_RESPONSE)
return template.render(address=eip)
def describe_addresses(self):
self.error_on_dryrun()
allocation_ids = self._get_multi_param("AllocationId")
public_ips = self._get_multi_param("PublicIp")
filters = self._filters_from_querystring()
addresses = self.ec2_backend.describe_addresses(
allocation_ids, public_ips, filters
)
template = self.response_template(DESCRIBE_ADDRESS_RESPONSE)
return template.render(addresses=addresses)
def disassociate_address(self):
if self.is_not_dryrun("DisAssociateAddress"):
if "PublicIp" in self.querystring:
self.ec2_backend.disassociate_address(
address=self._get_param("PublicIp")
)
elif "AssociationId" in self.querystring:
self.ec2_backend.disassociate_address(
association_id=self._get_param("AssociationId")
)
else:
self.ec2_backend.raise_error(
"MissingParameter",
"Invalid request, expect PublicIp/AssociationId parameter.",
)
return self.response_template(DISASSOCIATE_ADDRESS_RESPONSE).render()
def release_address(self):
if self.is_not_dryrun("ReleaseAddress"):
if "PublicIp" in self.querystring:
self.ec2_backend.release_address(address=self._get_param("PublicIp"))
elif "AllocationId" in self.querystring:
self.ec2_backend.release_address(
allocation_id=self._get_param("AllocationId")
)
else:
self.ec2_backend.raise_error(
"MissingParameter",
"Invalid request, expect PublicIp/AllocationId parameter.",
)
return self.response_template(RELEASE_ADDRESS_RESPONSE).render()
ALLOCATE_ADDRESS_RESPONSE = """<AllocateAddressResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<publicIp>{{ address.public_ip }}</publicIp>
<domain>{{ address.domain }}</domain>
{% if address.allocation_id %}
<allocationId>{{ address.allocation_id }}</allocationId>
{% endif %}
</AllocateAddressResponse>"""
ASSOCIATE_ADDRESS_RESPONSE = """<AssociateAddressResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
{% if address.association_id %}
<associationId>{{ address.association_id }}</associationId>
{% endif %}
</AssociateAddressResponse>"""
DESCRIBE_ADDRESS_RESPONSE = """<DescribeAddressesResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<addressesSet>
{% for address in addresses %}
<item>
<publicIp>{{ address.public_ip }}</publicIp>
<domain>{{ address.domain }}</domain>
{% if address.instance %}
<instanceId>{{ address.instance.id }}</instanceId>
{% else %}
<instanceId/>
{% endif %}
{% if address.eni %}
<networkInterfaceId>{{ address.eni.id }}</networkInterfaceId>
{% else %}
<networkInterfaceId/>
{% endif %}
{% if address.allocation_id %}
<allocationId>{{ address.allocation_id }}</allocationId>
{% endif %}
{% if address.association_id %}
<associationId>{{ address.association_id }}</associationId>
{% endif %}
<tagSet>
{% for tag in address.get_tags() %}
<item>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
</item>
{% endfor %}
</addressesSet>
</DescribeAddressesResponse>"""
DISASSOCIATE_ADDRESS_RESPONSE = """<DisassociateAddressResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</DisassociateAddressResponse>"""
RELEASE_ADDRESS_RESPONSE = """<ReleaseAddressResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</ReleaseAddressResponse>"""
|
#!/usr/bin/python
"""
HeaderID Extension for Python-Markdown
======================================
Adds ability to set HTML IDs for headers.
Basic usage:
>>> import markdown
>>> text = "# Some Header # {#some_id}"
>>> md = markdown.markdown(text, ['headerid'])
>>> md
u'<h1 id="some_id">Some Header</h1>'
All header IDs are unique:
>>> text = '''
... #Header
... #Another Header {#header}
... #Third Header {#header}'''
>>> md = markdown.markdown(text, ['headerid'])
>>> md
u'<h1 id="header">Header</h1>\\n<h1 id="header_1">Another Header</h1>\\n<h1 id="header_2">Third Header</h1>'
To fit within a html template's hierarchy, set the header base level:
>>> text = '''
... #Some Header
... ## Next Level'''
>>> md = markdown.markdown(text, ['headerid(level=3)'])
>>> md
u'<h3 id="some_header">Some Header</h3>\\n<h4 id="next_level">Next Level</h4>'
Turn off auto generated IDs:
>>> text = '''
... # Some Header
... # Header with ID # { #foo }'''
>>> md = markdown.markdown(text, ['headerid(forceid=False)'])
>>> md
u'<h1>Some Header</h1>\\n<h1 id="foo">Header with ID</h1>'
Use with MetaData extension:
>>> text = '''header_level: 2
... header_forceid: Off
...
... # A Header'''
>>> md = markdown.markdown(text, ['headerid', 'meta'])
>>> md
u'<h2>A Header</h2>'
Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/).
Project website: <http://www.freewisdom.org/project/python-markdown/HeaderId>
Contact: markdown@freewisdom.org
License: BSD (see ../docs/LICENSE for details)
Dependencies:
* [Python 2.3+](http://python.org)
* [Markdown 2.0+](http://www.freewisdom.org/projects/python-markdown/)
"""
import markdown
from markdown import etree
import re
import logging
from string import ascii_lowercase, digits, punctuation
ID_CHARS = ascii_lowercase + digits + '-_'
IDCOUNT_RE = re.compile(r'^(.*)_([0-9]+)$')
class HeaderIdProcessor(markdown.blockprocessors.BlockProcessor):
""" Replacement BlockProcessor for Header IDs. """
# Detect a header at start of any line in block
RE = re.compile(r"""(^|\n)
(?P<level>\#{1,6}) # group('level') = string of hashes
(?P<header>.*?) # group('header') = Header text
\#* # optional closing hashes
(?:[ \t]*\{[ \t]*\#(?P<id>[-_:a-zA-Z0-9]+)[ \t]*\})?
(\n|$) # ^^ group('id') = id attribute
""",
re.VERBOSE)
IDs = []
def test(self, parent, block):
return bool(self.RE.search(block))
def run(self, parent, blocks):
block = blocks.pop(0)
m = self.RE.search(block)
if m:
before = block[:m.start()] # All lines before header
after = block[m.end():] # All lines after header
if before:
# As the header was not the first line of the block and the
# lines before the header must be parsed first,
                # recursively parse these lines as a block.
self.parser.parseBlocks(parent, [before])
# Create header using named groups from RE
start_level, force_id = self._get_meta()
level = len(m.group('level')) + start_level
if level > 6:
level = 6
h = markdown.etree.SubElement(parent, 'h%d' % level)
h.text = m.group('header').strip()
if m.group('id'):
h.set('id', self._unique_id(m.group('id')))
elif force_id:
h.set('id', self._create_id(m.group('header').strip()))
if after:
# Insert remaining lines as first block for future parsing.
blocks.insert(0, after)
else:
# This should never happen, but just in case...
            logging.getLogger('MARKDOWN').critical("We've got a problem header!")
def _get_meta(self):
""" Return meta data suported by this ext as a tuple """
level = int(self.config['level'][0]) - 1
force = self._str2bool(self.config['forceid'][0])
if hasattr(self.md, 'Meta'):
if self.md.Meta.has_key('header_level'):
level = int(self.md.Meta['header_level'][0]) - 1
if self.md.Meta.has_key('header_forceid'):
force = self._str2bool(self.md.Meta['header_forceid'][0])
return level, force
def _str2bool(self, s, default=False):
""" Convert a string to a booleen value. """
s = str(s)
if s.lower() in ['0', 'f', 'false', 'off', 'no', 'n']:
return False
elif s.lower() in ['1', 't', 'true', 'on', 'yes', 'y']:
return True
return default
def _unique_id(self, id):
""" Ensure ID is unique. Append '_1', '_2'... if not """
while id in self.IDs:
m = IDCOUNT_RE.match(id)
if m:
id = '%s_%d'% (m.group(1), int(m.group(2))+1)
else:
id = '%s_%d'% (id, 1)
self.IDs.append(id)
return id
def _create_id(self, header):
""" Return ID from Header text. """
h = ''
for c in header.lower().replace(' ', '_'):
if c in ID_CHARS:
h += c
elif c not in punctuation:
h += '+'
return self._unique_id(h)
class HeaderIdExtension (markdown.Extension):
def __init__(self, configs):
# set defaults
self.config = {
'level' : ['1', 'Base level for headers.'],
'forceid' : ['True', 'Force all headers to have an id.']
}
for key, value in configs:
self.setConfig(key, value)
def extendMarkdown(self, md, md_globals):
md.registerExtension(self)
self.processor = HeaderIdProcessor(md.parser)
self.processor.md = md
self.processor.config = self.config
        # Replace existing hashheader in place.
md.parser.blockprocessors['hashheader'] = self.processor
def reset(self):
self.processor.IDs = []
def makeExtension(configs=None):
return HeaderIdExtension(configs=configs)
if __name__ == "__main__":
import doctest
doctest.testmod()
|
# Copyright (C) 2016-2018 Alibaba Group Holding Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""RNN helpers for TensorFlow models.
@@bidirectional_dynamic_rnn
@@dynamic_rnn
@@raw_rnn
@@static_rnn
@@static_state_saving_rnn
@@static_bidirectional_rnn
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.util import nest
# pylint: disable=protected-access
_concat = rnn_cell_impl._concat
assert_like_rnncell = rnn_cell_impl.assert_like_rnncell
# pylint: enable=protected-access
def _transpose_batch_time(x):
"""Transpose the batch and time dimensions of a Tensor.
Retains as much of the static shape information as possible.
Args:
x: A tensor of rank 2 or higher.
Returns:
x transposed along the first two dimensions.
Raises:
ValueError: if `x` is rank 1 or lower.
"""
x_static_shape = x.get_shape()
if x_static_shape.ndims is not None and x_static_shape.ndims < 2:
raise ValueError(
"Expected input tensor %s to have rank at least 2, but saw shape: %s" %
(x, x_static_shape))
x_rank = array_ops.rank(x)
x_t = array_ops.transpose(
x, array_ops.concat(
([1, 0], math_ops.range(2, x_rank)), axis=0))
x_t.set_shape(
tensor_shape.TensorShape([
x_static_shape[1].value, x_static_shape[0].value
]).concatenate(x_static_shape[2:]))
return x_t
def _best_effort_input_batch_size(flat_input):
"""Get static input batch size if available, with fallback to the dynamic one.
Args:
flat_input: An iterable of time major input Tensors of shape [max_time,
batch_size, ...]. All inputs should have compatible batch sizes.
Returns:
The batch size in Python integer if available, or a scalar Tensor otherwise.
Raises:
ValueError: if there is any input with an invalid shape.
"""
for input_ in flat_input:
shape = input_.shape
if shape.ndims is None:
continue
if shape.ndims < 2:
raise ValueError(
"Expected input tensor %s to have rank at least 2" % input_)
batch_size = shape[1].value
if batch_size is not None:
return batch_size
# Fallback to the dynamic batch size of the first input.
return array_ops.shape(flat_input[0])[1]
def _infer_state_dtype(explicit_dtype, state):
"""Infer the dtype of an RNN state.
Args:
explicit_dtype: explicitly declared dtype or None.
state: RNN's hidden state. Must be a Tensor or a nested iterable containing
Tensors.
Returns:
dtype: inferred dtype of hidden state.
Raises:
ValueError: if `state` has heterogeneous dtypes or is empty.
"""
if explicit_dtype is not None:
return explicit_dtype
elif nest.is_sequence(state):
inferred_dtypes = [element.dtype for element in nest.flatten(state)]
if not inferred_dtypes:
raise ValueError("Unable to infer dtype from empty state.")
all_same = all([x == inferred_dtypes[0] for x in inferred_dtypes])
if not all_same:
raise ValueError(
"State has tensors of different inferred_dtypes. Unable to infer a "
"single representative dtype.")
return inferred_dtypes[0]
else:
return state.dtype
# pylint: disable=unused-argument
def _rnn_step(
time, sequence_length, min_sequence_length, max_sequence_length,
zero_output, state, call_cell, state_size, skip_conditionals=False):
"""Calculate one step of a dynamic RNN minibatch.
Returns an (output, state) pair conditioned on the sequence_lengths.
When skip_conditionals=False, the pseudocode is something like:
if t >= max_sequence_length:
return (zero_output, state)
if t < min_sequence_length:
return call_cell()
# Selectively output zeros or output, old state or new state depending
# on if we've finished calculating each row.
new_output, new_state = call_cell()
final_output = np.vstack([
zero_output if time >= sequence_lengths[r] else new_output_r
for r, new_output_r in enumerate(new_output)
])
final_state = np.vstack([
state[r] if time >= sequence_lengths[r] else new_state_r
for r, new_state_r in enumerate(new_state)
])
return (final_output, final_state)
Args:
time: Python int, the current time step
sequence_length: int32 `Tensor` vector of size [batch_size]
min_sequence_length: int32 `Tensor` scalar, min of sequence_length
max_sequence_length: int32 `Tensor` scalar, max of sequence_length
zero_output: `Tensor` vector of shape [output_size]
state: Either a single `Tensor` matrix of shape `[batch_size, state_size]`,
or a list/tuple of such tensors.
call_cell: lambda returning tuple of (new_output, new_state) where
new_output is a `Tensor` matrix of shape `[batch_size, output_size]`.
new_state is a `Tensor` matrix of shape `[batch_size, state_size]`.
state_size: The `cell.state_size` associated with the state.
skip_conditionals: Python bool, whether to skip using the conditional
calculations. This is useful for `dynamic_rnn`, where the input tensor
matches `max_sequence_length`, and using conditionals just slows
everything down.
Returns:
A tuple of (`final_output`, `final_state`) as given by the pseudocode above:
final_output is a `Tensor` matrix of shape [batch_size, output_size]
final_state is either a single `Tensor` matrix, or a tuple of such
matrices (matching length and shapes of input `state`).
Raises:
ValueError: If the cell returns a state tuple whose length does not match
that returned by `state_size`.
"""
# Convert state to a list for ease of use
flat_state = nest.flatten(state)
flat_zero_output = nest.flatten(zero_output)
def _copy_one_through(output, new_output):
# If the state contains a scalar value we simply pass it through.
if output.shape.ndims == 0:
return new_output
copy_cond = (time >= sequence_length)
with ops.colocate_with(new_output):
return array_ops.where(copy_cond, output, new_output)
def _copy_some_through(flat_new_output, flat_new_state):
# Use broadcasting select to determine which values should get
# the previous state & zero output, and which values should get
# a calculated state & output.
flat_new_output = [
_copy_one_through(zero_output, new_output)
for zero_output, new_output in zip(flat_zero_output, flat_new_output)]
flat_new_state = [
_copy_one_through(state, new_state)
for state, new_state in zip(flat_state, flat_new_state)]
return flat_new_output + flat_new_state
def _maybe_copy_some_through():
"""Run RNN step. Pass through either no or some past state."""
new_output, new_state = call_cell()
nest.assert_same_structure(state, new_state)
flat_new_state = nest.flatten(new_state)
flat_new_output = nest.flatten(new_output)
return control_flow_ops.cond(
# if t < min_seq_len: calculate and return everything
time < min_sequence_length, lambda: flat_new_output + flat_new_state,
# else copy some of it through
lambda: _copy_some_through(flat_new_output, flat_new_state))
# TODO(ebrevdo): skipping these conditionals may cause a slowdown,
# but benefits from removing cond() and its gradient. We should
# profile with and without this switch here.
if skip_conditionals:
# Instead of using conditionals, perform the selective copy at all time
# steps. This is faster when max_seq_len is equal to the number of unrolls
# (which is typical for dynamic_rnn).
new_output, new_state = call_cell()
nest.assert_same_structure(state, new_state)
new_state = nest.flatten(new_state)
new_output = nest.flatten(new_output)
final_output_and_state = _copy_some_through(new_output, new_state)
else:
empty_update = lambda: flat_zero_output + flat_state
final_output_and_state = control_flow_ops.cond(
# if t >= max_seq_len: copy all state through, output zeros
time >= max_sequence_length, empty_update,
# otherwise calculation is required: copy some or all of it through
_maybe_copy_some_through)
if len(final_output_and_state) != len(flat_zero_output) + len(flat_state):
raise ValueError("Internal error: state and output were not concatenated "
"correctly.")
final_output = final_output_and_state[:len(flat_zero_output)]
final_state = final_output_and_state[len(flat_zero_output):]
for output, flat_output in zip(final_output, flat_zero_output):
output.set_shape(flat_output.get_shape())
for substate, flat_substate in zip(final_state, flat_state):
substate.set_shape(flat_substate.get_shape())
final_output = nest.pack_sequence_as(
structure=zero_output, flat_sequence=final_output)
final_state = nest.pack_sequence_as(
structure=state, flat_sequence=final_state)
return final_output, final_state
def _reverse_seq(input_seq, lengths):
"""Reverse a list of Tensors up to specified lengths.
Args:
input_seq: Sequence of seq_len tensors of dimension (batch_size, n_features)
or nested tuples of tensors.
lengths: A `Tensor` of dimension batch_size, containing lengths for each
sequence in the batch. If "None" is specified, simply reverses
the list.
Returns:
time-reversed sequence
"""
if lengths is None:
return list(reversed(input_seq))
flat_input_seq = tuple(nest.flatten(input_) for input_ in input_seq)
flat_results = [[] for _ in range(len(input_seq))]
for sequence in zip(*flat_input_seq):
input_shape = tensor_shape.unknown_shape(
ndims=sequence[0].get_shape().ndims)
for input_ in sequence:
input_shape.merge_with(input_.get_shape())
input_.set_shape(input_shape)
# Join into (time, batch_size, depth)
s_joined = array_ops.stack(sequence)
# Reverse along dimension 0
s_reversed = array_ops.reverse_sequence(s_joined, lengths, 0, 1)
# Split again into list
result = array_ops.unstack(s_reversed)
for r, flat_result in zip(result, flat_results):
r.set_shape(input_shape)
flat_result.append(r)
results = [nest.pack_sequence_as(structure=input_, flat_sequence=flat_result)
for input_, flat_result in zip(input_seq, flat_results)]
return results
def bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, sequence_length=None,
initial_state_fw=None, initial_state_bw=None,
dtype=None, parallel_iterations=None,
swap_memory=False, time_major=False, scope=None):
"""Creates a dynamic version of bidirectional recurrent neural network.
Takes input and builds independent forward and backward RNNs. The input_size
of forward and backward cell must match. The initial state for both directions
is zero by default (but can be set optionally) and no intermediate states are
ever returned -- the network is fully unrolled for the given (passed in)
length(s) of the sequence(s) or completely unrolled if length(s) is not
given.
Args:
cell_fw: An instance of RNNCell, to be used for forward direction.
cell_bw: An instance of RNNCell, to be used for backward direction.
inputs: The RNN inputs.
If time_major == False (default), this must be a tensor of shape:
`[batch_size, max_time, ...]`, or a nested tuple of such elements.
If time_major == True, this must be a tensor of shape:
`[max_time, batch_size, ...]`, or a nested tuple of such elements.
sequence_length: (optional) An int32/int64 vector, size `[batch_size]`,
containing the actual lengths for each of the sequences in the batch.
If not provided, all batch entries are assumed to be full sequences; and
time reversal is applied from time `0` to `max_time` for each sequence.
initial_state_fw: (optional) An initial state for the forward RNN.
This must be a tensor of appropriate type and shape
`[batch_size, cell_fw.state_size]`.
If `cell_fw.state_size` is a tuple, this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell_fw.state_size`.
initial_state_bw: (optional) Same as for `initial_state_fw`, but using
the corresponding properties of `cell_bw`.
dtype: (optional) The data type for the initial states and expected output.
Required if initial_states are not provided or RNN states have a
heterogeneous dtype.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency
and can be run in parallel, will be. This parameter trades off
time for space. Values >> 1 use more memory but take less time,
while smaller values use less memory but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs
which would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
time_major: The shape format of the `inputs` and `outputs` Tensors.
If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.
If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.
Using `time_major = True` is a bit more efficient because it avoids
transposes at the beginning and end of the RNN calculation. However,
most TensorFlow data is batch-major, so by default this function
accepts input and emits output in batch-major form.
scope: VariableScope for the created subgraph; defaults to
"bidirectional_rnn"
Returns:
A tuple (outputs, output_states) where:
outputs: A tuple (output_fw, output_bw) containing the forward and
the backward rnn output `Tensor`.
If time_major == False (default),
output_fw will be a `Tensor` shaped:
`[batch_size, max_time, cell_fw.output_size]`
and output_bw will be a `Tensor` shaped:
`[batch_size, max_time, cell_bw.output_size]`.
If time_major == True,
output_fw will be a `Tensor` shaped:
`[max_time, batch_size, cell_fw.output_size]`
and output_bw will be a `Tensor` shaped:
`[max_time, batch_size, cell_bw.output_size]`.
It returns a tuple instead of a single concatenated `Tensor`, unlike
in the `bidirectional_rnn`. If the concatenated one is preferred,
the forward and backward outputs can be concatenated as
`tf.concat(outputs, 2)`.
output_states: A tuple (output_state_fw, output_state_bw) containing
the forward and the backward final states of bidirectional rnn.
Raises:
TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`.
"""
assert_like_rnncell("cell_fw", cell_fw)
assert_like_rnncell("cell_bw", cell_bw)
with vs.variable_scope(scope or "bidirectional_rnn"):
# Forward direction
with vs.variable_scope("fw") as fw_scope:
output_fw, output_state_fw = dynamic_rnn(
cell=cell_fw, inputs=inputs, sequence_length=sequence_length,
initial_state=initial_state_fw, dtype=dtype,
parallel_iterations=parallel_iterations, swap_memory=swap_memory,
time_major=time_major, scope=fw_scope)
# Backward direction
if not time_major:
time_dim = 1
batch_dim = 0
else:
time_dim = 0
batch_dim = 1
def _reverse(input_, seq_lengths, seq_dim, batch_dim):
if seq_lengths is not None:
return array_ops.reverse_sequence(
input=input_, seq_lengths=seq_lengths,
seq_dim=seq_dim, batch_dim=batch_dim)
else:
return array_ops.reverse(input_, axis=[seq_dim])
with vs.variable_scope("bw") as bw_scope:
inputs_reverse = _reverse(
inputs, seq_lengths=sequence_length,
seq_dim=time_dim, batch_dim=batch_dim)
tmp, output_state_bw = dynamic_rnn(
cell=cell_bw, inputs=inputs_reverse, sequence_length=sequence_length,
initial_state=initial_state_bw, dtype=dtype,
parallel_iterations=parallel_iterations, swap_memory=swap_memory,
time_major=time_major, scope=bw_scope)
output_bw = _reverse(
tmp, seq_lengths=sequence_length,
seq_dim=time_dim, batch_dim=batch_dim)
outputs = (output_fw, output_bw)
output_states = (output_state_fw, output_state_bw)
return (outputs, output_states)
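

# A minimal usage sketch for bidirectional_dynamic_rnn (illustrative only; the
# cell sizes, dtype and the final concat are assumptions, not taken from this file):
#
#   cell_fw = rnn_cell_impl.BasicLSTMCell(64)
#   cell_bw = rnn_cell_impl.BasicLSTMCell(64)
#   (out_fw, out_bw), _ = bidirectional_dynamic_rnn(
#       cell_fw, cell_bw, inputs, sequence_length=seq_len, dtype=dtypes.float32)
#   outputs = array_ops.concat([out_fw, out_bw], 2)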
def dynamic_rnn(cell, inputs, att_scores=None, sequence_length=None, initial_state=None,
dtype=None, parallel_iterations=None, swap_memory=False,
time_major=False, scope=None):
"""Creates a recurrent neural network specified by RNNCell `cell`.
Performs fully dynamic unrolling of `inputs`.
Example:
```python
# create a BasicRNNCell
rnn_cell = tf.nn.rnn_cell.BasicRNNCell(hidden_size)
# 'outputs' is a tensor of shape [batch_size, max_time, cell_state_size]
# defining initial state
initial_state = rnn_cell.zero_state(batch_size, dtype=tf.float32)
# 'state' is a tensor of shape [batch_size, cell_state_size]
outputs, state = tf.nn.dynamic_rnn(rnn_cell, input_data,
initial_state=initial_state,
dtype=tf.float32)
```
```python
# create 2 LSTMCells
rnn_layers = [tf.nn.rnn_cell.LSTMCell(size) for size in [128, 256]]
# create a RNN cell composed sequentially of a number of RNNCells
multi_rnn_cell = tf.nn.rnn_cell.MultiRNNCell(rnn_layers)
# 'outputs' is a tensor of shape [batch_size, max_time, 256]
# 'state' is a N-tuple where N is the number of LSTMCells containing a
# tf.contrib.rnn.LSTMStateTuple for each cell
outputs, state = tf.nn.dynamic_rnn(cell=multi_rnn_cell,
inputs=data,
dtype=tf.float32)
```
Args:
cell: An instance of RNNCell.
inputs: The RNN inputs.
If `time_major == False` (default), this must be a `Tensor` of shape:
`[batch_size, max_time, ...]`, or a nested tuple of such
elements.
If `time_major == True`, this must be a `Tensor` of shape:
`[max_time, batch_size, ...]`, or a nested tuple of such
elements.
This may also be a (possibly nested) tuple of Tensors satisfying
this property. The first two dimensions must match across all the inputs,
but otherwise the ranks and other shape components may differ.
In this case, input to `cell` at each time-step will replicate the
structure of these tuples, except for the time dimension (from which the
time is taken).
The input to `cell` at each time step will be a `Tensor` or (possibly
nested) tuple of Tensors each with dimensions `[batch_size, ...]`.
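    att_scores: (optional) A `Tensor` of attention scores shaped
      `[batch_size, max_time, ...]`. This is a non-standard addition in this
      file; when provided, the slice for the current time step is passed to
      `cell` as a third argument.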
sequence_length: (optional) An int32/int64 vector sized `[batch_size]`.
Used to copy-through state and zero-out outputs when past a batch
element's sequence length. So it's more for correctness than performance.
initial_state: (optional) An initial state for the RNN.
If `cell.state_size` is an integer, this must be
a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.
If `cell.state_size` is a tuple, this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell.state_size`.
dtype: (optional) The data type for the initial state and expected output.
Required if initial_state is not provided or RNN state has a heterogeneous
dtype.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency
and can be run in parallel, will be. This parameter trades off
time for space. Values >> 1 use more memory but take less time,
while smaller values use less memory but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs
which would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
time_major: The shape format of the `inputs` and `outputs` Tensors.
If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.
If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.
Using `time_major = True` is a bit more efficient because it avoids
transposes at the beginning and end of the RNN calculation. However,
most TensorFlow data is batch-major, so by default this function
accepts input and emits output in batch-major form.
scope: VariableScope for the created subgraph; defaults to "rnn".
Returns:
A pair (outputs, state) where:
outputs: The RNN output `Tensor`.
If time_major == False (default), this will be a `Tensor` shaped:
`[batch_size, max_time, cell.output_size]`.
If time_major == True, this will be a `Tensor` shaped:
`[max_time, batch_size, cell.output_size]`.
Note, if `cell.output_size` is a (possibly nested) tuple of integers
or `TensorShape` objects, then `outputs` will be a tuple having the
same structure as `cell.output_size`, containing Tensors having shapes
corresponding to the shape data in `cell.output_size`.
state: The final state. If `cell.state_size` is an int, this
will be shaped `[batch_size, cell.state_size]`. If it is a
`TensorShape`, this will be shaped `[batch_size] + cell.state_size`.
If it is a (possibly nested) tuple of ints or `TensorShape`, this will
be a tuple having the corresponding shapes. If cells are `LSTMCells`
`state` will be a tuple containing a `LSTMStateTuple` for each cell.
Raises:
TypeError: If `cell` is not an instance of RNNCell.
ValueError: If inputs is None or an empty list.
"""
assert_like_rnncell("cell", cell)
# By default, time_major==False and inputs are batch-major: shaped
# [batch, time, depth]
# For internal calculations, we transpose to [time, batch, depth]
flat_input = nest.flatten(inputs)
if not time_major:
# (B,T,D) => (T,B,D)
flat_input = [ops.convert_to_tensor(input_) for input_ in flat_input]
flat_input = tuple(_transpose_batch_time(input_) for input_ in flat_input)
parallel_iterations = parallel_iterations or 32
if sequence_length is not None:
sequence_length = math_ops.to_int32(sequence_length)
if sequence_length.get_shape().ndims not in (None, 1):
raise ValueError(
"sequence_length must be a vector of length batch_size, "
"but saw shape: %s" % sequence_length.get_shape())
sequence_length = array_ops.identity( # Just to find it in the graph.
sequence_length, name="sequence_length")
# Create a new scope in which the caching device is either
# determined by the parent scope, or is set to place the cached
# Variable using the same placement as for the rest of the RNN.
with vs.variable_scope(scope or "rnn") as varscope:
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
batch_size = _best_effort_input_batch_size(flat_input)
if initial_state is not None:
state = initial_state
else:
if not dtype:
raise ValueError("If there is no initial_state, you must give a dtype.")
state = cell.zero_state(batch_size, dtype)
def _assert_has_shape(x, shape):
x_shape = array_ops.shape(x)
packed_shape = array_ops.stack(shape)
return control_flow_ops.Assert(
math_ops.reduce_all(math_ops.equal(x_shape, packed_shape)),
["Expected shape for Tensor %s is " % x.name,
packed_shape, " but saw shape: ", x_shape])
if sequence_length is not None:
# Perform some shape validation
with ops.control_dependencies(
[_assert_has_shape(sequence_length, [batch_size])]):
sequence_length = array_ops.identity(
sequence_length, name="CheckSeqLen")
inputs = nest.pack_sequence_as(structure=inputs, flat_sequence=flat_input)
(outputs, final_state) = _dynamic_rnn_loop(
cell,
inputs,
state,
parallel_iterations=parallel_iterations,
swap_memory=swap_memory,
att_scores = att_scores,
sequence_length=sequence_length,
dtype=dtype)
# Outputs of _dynamic_rnn_loop are always shaped [time, batch, depth].
# If we are performing batch-major calculations, transpose output back
# to shape [batch, time, depth]
if not time_major:
# (T,B,D) => (B,T,D)
outputs = nest.map_structure(_transpose_batch_time, outputs)
return (outputs, final_state)
def _dynamic_rnn_loop(cell,
inputs,
initial_state,
parallel_iterations,
swap_memory,
att_scores = None,
sequence_length=None,
dtype=None):
"""Internal implementation of Dynamic RNN.
Args:
cell: An instance of RNNCell.
inputs: A `Tensor` of shape [time, batch_size, input_size], or a nested
tuple of such elements.
initial_state: A `Tensor` of shape `[batch_size, state_size]`, or if
`cell.state_size` is a tuple, then this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell.state_size`.
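    att_scores: (optional, non-standard) Attention scores shaped
      `[batch_size, time, ...]`, forwarded to the cell one time step at a time.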
parallel_iterations: Positive Python int.
swap_memory: A Python boolean
sequence_length: (optional) An `int32` `Tensor` of shape [batch_size].
dtype: (optional) Expected dtype of output. If not specified, inferred from
initial_state.
Returns:
Tuple `(final_outputs, final_state)`.
final_outputs:
A `Tensor` of shape `[time, batch_size, cell.output_size]`. If
`cell.output_size` is a (possibly nested) tuple of ints or `TensorShape`
      objects, then this returns a (possibly nested) tuple of Tensors matching
the corresponding shapes.
final_state:
A `Tensor`, or possibly nested tuple of Tensors, matching in length
and shapes to `initial_state`.
Raises:
ValueError: If the input depth cannot be inferred via shape inference
from the inputs.
"""
state = initial_state
assert isinstance(parallel_iterations, int), "parallel_iterations must be int"
state_size = cell.state_size
flat_input = nest.flatten(inputs)
flat_output_size = nest.flatten(cell.output_size)
# Construct an initial output
input_shape = array_ops.shape(flat_input[0])
time_steps = input_shape[0]
batch_size = _best_effort_input_batch_size(flat_input)
inputs_got_shape = tuple(input_.get_shape().with_rank_at_least(3)
for input_ in flat_input)
const_time_steps, const_batch_size = inputs_got_shape[0].as_list()[:2]
for shape in inputs_got_shape:
if not shape[2:].is_fully_defined():
raise ValueError(
"Input size (depth of inputs) must be accessible via shape inference,"
" but saw value None.")
got_time_steps = shape[0].value
got_batch_size = shape[1].value
if const_time_steps != got_time_steps:
raise ValueError(
"Time steps is not the same for all the elements in the input in a "
"batch.")
if const_batch_size != got_batch_size:
raise ValueError(
"Batch_size is not the same for all the elements in the input.")
# Prepare dynamic conditional copying of state & output
def _create_zero_arrays(size):
size = _concat(batch_size, size)
return array_ops.zeros(
array_ops.stack(size), _infer_state_dtype(dtype, state))
flat_zero_output = tuple(_create_zero_arrays(output)
for output in flat_output_size)
zero_output = nest.pack_sequence_as(structure=cell.output_size,
flat_sequence=flat_zero_output)
if sequence_length is not None:
min_sequence_length = math_ops.reduce_min(sequence_length)
max_sequence_length = math_ops.reduce_max(sequence_length)
time = array_ops.constant(0, dtype=dtypes.int32, name="time")
with ops.name_scope("dynamic_rnn") as scope:
base_name = scope
def _create_ta(name, dtype):
return tensor_array_ops.TensorArray(dtype=dtype,
size=time_steps,
tensor_array_name=base_name + name)
output_ta = tuple(_create_ta("output_%d" % i,
_infer_state_dtype(dtype, state))
for i in range(len(flat_output_size)))
input_ta = tuple(_create_ta("input_%d" % i, flat_input[i].dtype)
for i in range(len(flat_input)))
input_ta = tuple(ta.unstack(input_)
for ta, input_ in zip(input_ta, flat_input))
def _time_step(time, output_ta_t, state, att_scores=None):
"""Take a time step of the dynamic RNN.
Args:
time: int32 scalar Tensor.
output_ta_t: List of `TensorArray`s that represent the output.
state: nested tuple of vector tensors that represent the state.
Returns:
The tuple (time + 1, output_ta_t with updated flow, new_state).
"""
input_t = tuple(ta.read(time) for ta in input_ta)
# Restore some shape information
for input_, shape in zip(input_t, inputs_got_shape):
input_.set_shape(shape[1:])
input_t = nest.pack_sequence_as(structure=inputs, flat_sequence=input_t)
if att_scores is not None:
att_score = att_scores[:, time, :]
call_cell = lambda: cell(input_t, state, att_score)
else:
call_cell = lambda: cell(input_t, state)
if sequence_length is not None:
(output, new_state) = _rnn_step(
time=time,
sequence_length=sequence_length,
min_sequence_length=min_sequence_length,
max_sequence_length=max_sequence_length,
zero_output=zero_output,
state=state,
call_cell=call_cell,
state_size=state_size,
skip_conditionals=True)
else:
(output, new_state) = call_cell()
# Pack state if using state tuples
output = nest.flatten(output)
output_ta_t = tuple(
ta.write(time, out) for ta, out in zip(output_ta_t, output))
if att_scores is not None:
return (time + 1, output_ta_t, new_state, att_scores)
else:
return (time + 1, output_ta_t, new_state)
if att_scores is not None:
_, output_final_ta, final_state, _ = control_flow_ops.while_loop(
cond=lambda time, *_: time < time_steps,
body=_time_step,
loop_vars=(time, output_ta, state, att_scores),
parallel_iterations=parallel_iterations,
swap_memory=swap_memory)
else:
_, output_final_ta, final_state = control_flow_ops.while_loop(
cond=lambda time, *_: time < time_steps,
body=_time_step,
loop_vars=(time, output_ta, state),
parallel_iterations=parallel_iterations,
swap_memory=swap_memory)
# Unpack final output if not using output tuples.
final_outputs = tuple(ta.stack() for ta in output_final_ta)
# Restore some shape information
for output, output_size in zip(final_outputs, flat_output_size):
shape = _concat(
[const_time_steps, const_batch_size], output_size, static=True)
output.set_shape(shape)
final_outputs = nest.pack_sequence_as(
structure=cell.output_size, flat_sequence=final_outputs)
return (final_outputs, final_state)
def raw_rnn(cell, loop_fn,
parallel_iterations=None, swap_memory=False, scope=None):
"""Creates an `RNN` specified by RNNCell `cell` and loop function `loop_fn`.
**NOTE: This method is still in testing, and the API may change.**
This function is a more primitive version of `dynamic_rnn` that provides
more direct access to the inputs each iteration. It also provides more
control over when to start and finish reading the sequence, and
what to emit for the output.
For example, it can be used to implement the dynamic decoder of a seq2seq
model.
Instead of working with `Tensor` objects, most operations work with
`TensorArray` objects directly.
The operation of `raw_rnn`, in pseudo-code, is basically the following:
```python
time = tf.constant(0, dtype=tf.int32)
(finished, next_input, initial_state, _, loop_state) = loop_fn(
time=time, cell_output=None, cell_state=None, loop_state=None)
emit_ta = TensorArray(dynamic_size=True, dtype=initial_state.dtype)
state = initial_state
while not all(finished):
(output, cell_state) = cell(next_input, state)
(next_finished, next_input, next_state, emit, loop_state) = loop_fn(
time=time + 1, cell_output=output, cell_state=cell_state,
loop_state=loop_state)
# Emit zeros and copy forward state for minibatch entries that are finished.
state = tf.where(finished, state, next_state)
emit = tf.where(finished, tf.zeros_like(emit), emit)
emit_ta = emit_ta.write(time, emit)
# If any new minibatch entries are marked as finished, mark these.
finished = tf.logical_or(finished, next_finished)
time += 1
return (emit_ta, state, loop_state)
```
with the additional properties that output and state may be (possibly nested)
tuples, as determined by `cell.output_size` and `cell.state_size`, and
as a result the final `state` and `emit_ta` may themselves be tuples.
A simple implementation of `dynamic_rnn` via `raw_rnn` looks like this:
```python
inputs = tf.placeholder(shape=(max_time, batch_size, input_depth),
dtype=tf.float32)
sequence_length = tf.placeholder(shape=(batch_size,), dtype=tf.int32)
inputs_ta = tf.TensorArray(dtype=tf.float32, size=max_time)
inputs_ta = inputs_ta.unstack(inputs)
cell = tf.contrib.rnn.LSTMCell(num_units)
def loop_fn(time, cell_output, cell_state, loop_state):
emit_output = cell_output # == None for time == 0
if cell_output is None: # time == 0
next_cell_state = cell.zero_state(batch_size, tf.float32)
else:
next_cell_state = cell_state
elements_finished = (time >= sequence_length)
finished = tf.reduce_all(elements_finished)
next_input = tf.cond(
finished,
lambda: tf.zeros([batch_size, input_depth], dtype=tf.float32),
lambda: inputs_ta.read(time))
next_loop_state = None
return (elements_finished, next_input, next_cell_state,
emit_output, next_loop_state)
outputs_ta, final_state, _ = raw_rnn(cell, loop_fn)
outputs = outputs_ta.stack()
```
Args:
cell: An instance of RNNCell.
loop_fn: A callable that takes inputs
`(time, cell_output, cell_state, loop_state)`
and returns the tuple
`(finished, next_input, next_cell_state, emit_output, next_loop_state)`.
Here `time` is an int32 scalar `Tensor`, `cell_output` is a
`Tensor` or (possibly nested) tuple of tensors as determined by
`cell.output_size`, and `cell_state` is a `Tensor`
or (possibly nested) tuple of tensors, as determined by the `loop_fn`
on its first call (and should match `cell.state_size`).
The outputs are: `finished`, a boolean `Tensor` of
shape `[batch_size]`, `next_input`: the next input to feed to `cell`,
`next_cell_state`: the next state to feed to `cell`,
and `emit_output`: the output to store for this iteration.
Note that `emit_output` should be a `Tensor` or (possibly nested)
tuple of tensors with shapes and structure matching `cell.output_size`
and `cell_output` above. The parameter `cell_state` and output
`next_cell_state` may be either a single or (possibly nested) tuple
of tensors. The parameter `loop_state` and
output `next_loop_state` may be either a single or (possibly nested) tuple
of `Tensor` and `TensorArray` objects. This last parameter
may be ignored by `loop_fn` and the return value may be `None`. If it
is not `None`, then the `loop_state` will be propagated through the RNN
loop, for use purely by `loop_fn` to keep track of its own state.
The `next_loop_state` parameter returned may be `None`.
The first call to `loop_fn` will be `time = 0`, `cell_output = None`,
`cell_state = None`, and `loop_state = None`. For this call:
The `next_cell_state` value should be the value with which to initialize
the cell's state. It may be a final state from a previous RNN or it
may be the output of `cell.zero_state()`. It should be a
(possibly nested) tuple structure of tensors.
If `cell.state_size` is an integer, this must be
a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.
If `cell.state_size` is a `TensorShape`, this must be a `Tensor` of
appropriate type and shape `[batch_size] + cell.state_size`.
If `cell.state_size` is a (possibly nested) tuple of ints or
`TensorShape`, this will be a tuple having the corresponding shapes.
The `emit_output` value may be either `None` or a (possibly nested)
tuple structure of tensors, e.g.,
`(tf.zeros(shape_0, dtype=dtype_0), tf.zeros(shape_1, dtype=dtype_1))`.
If this first `emit_output` return value is `None`,
then the `emit_ta` result of `raw_rnn` will have the same structure and
dtypes as `cell.output_size`. Otherwise `emit_ta` will have the same
structure, shapes (prepended with a `batch_size` dimension), and dtypes
as `emit_output`. The actual values returned for `emit_output` at this
initializing call are ignored. Note, this emit structure must be
consistent across all time steps.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency
and can be run in parallel, will be. This parameter trades off
time for space. Values >> 1 use more memory but take less time,
while smaller values use less memory but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs
which would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
scope: VariableScope for the created subgraph; defaults to "rnn".
Returns:
A tuple `(emit_ta, final_state, final_loop_state)` where:
`emit_ta`: The RNN output `TensorArray`.
If `loop_fn` returns a (possibly nested) set of Tensors for
`emit_output` during initialization, (inputs `time = 0`,
`cell_output = None`, and `loop_state = None`), then `emit_ta` will
have the same structure, dtypes, and shapes as `emit_output` instead.
If `loop_fn` returns `emit_output = None` during this call,
the structure of `cell.output_size` is used:
If `cell.output_size` is a (possibly nested) tuple of integers
or `TensorShape` objects, then `emit_ta` will be a tuple having the
same structure as `cell.output_size`, containing TensorArrays whose
elements' shapes correspond to the shape data in `cell.output_size`.
`final_state`: The final cell state. If `cell.state_size` is an int, this
will be shaped `[batch_size, cell.state_size]`. If it is a
`TensorShape`, this will be shaped `[batch_size] + cell.state_size`.
If it is a (possibly nested) tuple of ints or `TensorShape`, this will
be a tuple having the corresponding shapes.
`final_loop_state`: The final loop state as returned by `loop_fn`.
Raises:
TypeError: If `cell` is not an instance of RNNCell, or `loop_fn` is not
a `callable`.
"""
assert_like_rnncell("cell", cell)
if not callable(loop_fn):
raise TypeError("loop_fn must be a callable")
parallel_iterations = parallel_iterations or 32
# Create a new scope in which the caching device is either
# determined by the parent scope, or is set to place the cached
# Variable using the same placement as for the rest of the RNN.
with vs.variable_scope(scope or "rnn") as varscope:
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
time = constant_op.constant(0, dtype=dtypes.int32)
(elements_finished, next_input, initial_state, emit_structure,
init_loop_state) = loop_fn(
time, None, None, None) # time, cell_output, cell_state, loop_state
flat_input = nest.flatten(next_input)
# Need a surrogate loop state for the while_loop if none is available.
loop_state = (init_loop_state if init_loop_state is not None
else constant_op.constant(0, dtype=dtypes.int32))
input_shape = [input_.get_shape() for input_ in flat_input]
static_batch_size = input_shape[0][0]
for input_shape_i in input_shape:
# Static verification that batch sizes all match
static_batch_size.merge_with(input_shape_i[0])
batch_size = static_batch_size.value
if batch_size is None:
batch_size = array_ops.shape(flat_input[0])[0]
nest.assert_same_structure(initial_state, cell.state_size)
state = initial_state
flat_state = nest.flatten(state)
flat_state = [ops.convert_to_tensor(s) for s in flat_state]
state = nest.pack_sequence_as(structure=state,
flat_sequence=flat_state)
if emit_structure is not None:
flat_emit_structure = nest.flatten(emit_structure)
flat_emit_size = [emit.shape if emit.shape.is_fully_defined() else
array_ops.shape(emit) for emit in flat_emit_structure]
flat_emit_dtypes = [emit.dtype for emit in flat_emit_structure]
else:
emit_structure = cell.output_size
flat_emit_size = nest.flatten(emit_structure)
flat_emit_dtypes = [flat_state[0].dtype] * len(flat_emit_size)
flat_emit_ta = [
tensor_array_ops.TensorArray(
dtype=dtype_i, dynamic_size=True, size=0, name="rnn_output_%d" % i)
for i, dtype_i in enumerate(flat_emit_dtypes)]
emit_ta = nest.pack_sequence_as(structure=emit_structure,
flat_sequence=flat_emit_ta)
flat_zero_emit = [
array_ops.zeros(_concat(batch_size, size_i), dtype_i)
for size_i, dtype_i in zip(flat_emit_size, flat_emit_dtypes)]
zero_emit = nest.pack_sequence_as(structure=emit_structure,
flat_sequence=flat_zero_emit)
def condition(unused_time, elements_finished, *_):
return math_ops.logical_not(math_ops.reduce_all(elements_finished))
def body(time, elements_finished, current_input,
emit_ta, state, loop_state):
"""Internal while loop body for raw_rnn.
Args:
time: time scalar.
elements_finished: batch-size vector.
current_input: possibly nested tuple of input tensors.
emit_ta: possibly nested tuple of output TensorArrays.
state: possibly nested tuple of state tensors.
loop_state: possibly nested tuple of loop state tensors.
Returns:
Tuple having the same size as Args but with updated values.
"""
(next_output, cell_state) = cell(current_input, state)
nest.assert_same_structure(state, cell_state)
nest.assert_same_structure(cell.output_size, next_output)
next_time = time + 1
(next_finished, next_input, next_state, emit_output,
next_loop_state) = loop_fn(
next_time, next_output, cell_state, loop_state)
nest.assert_same_structure(state, next_state)
nest.assert_same_structure(current_input, next_input)
nest.assert_same_structure(emit_ta, emit_output)
# If loop_fn returns None for next_loop_state, just reuse the
# previous one.
loop_state = loop_state if next_loop_state is None else next_loop_state
def _copy_some_through(current, candidate):
"""Copy some tensors through via array_ops.where."""
def copy_fn(cur_i, cand_i):
with ops.colocate_with(cand_i):
return array_ops.where(elements_finished, cur_i, cand_i)
return nest.map_structure(copy_fn, current, candidate)
emit_output = _copy_some_through(zero_emit, emit_output)
next_state = _copy_some_through(state, next_state)
emit_ta = nest.map_structure(
lambda ta, emit: ta.write(time, emit), emit_ta, emit_output)
elements_finished = math_ops.logical_or(elements_finished, next_finished)
return (next_time, elements_finished, next_input,
emit_ta, next_state, loop_state)
returned = control_flow_ops.while_loop(
condition, body, loop_vars=[
time, elements_finished, next_input,
emit_ta, state, loop_state],
parallel_iterations=parallel_iterations,
swap_memory=swap_memory)
(emit_ta, final_state, final_loop_state) = returned[-3:]
if init_loop_state is None:
final_loop_state = None
return (emit_ta, final_state, final_loop_state)
def static_rnn(cell,
inputs,
initial_state=None,
dtype=None,
sequence_length=None,
scope=None):
"""Creates a recurrent neural network specified by RNNCell `cell`.
The simplest form of RNN network generated is:
```python
state = cell.zero_state(...)
outputs = []
for input_ in inputs:
output, state = cell(input_, state)
outputs.append(output)
return (outputs, state)
```
However, a few other options are available:
An initial state can be provided.
If the sequence_length vector is provided, dynamic calculation is performed.
This method of calculation does not compute the RNN steps past the maximum
sequence length of the minibatch (thus saving computational time),
and properly propagates the state at an example's sequence length
to the final state output.
The dynamic calculation performed is, at time `t` for batch row `b`,
```python
(output, state)(b, t) =
(t >= sequence_length(b))
? (zeros(cell.output_size), states(b, sequence_length(b) - 1))
: cell(input(b, t), state(b, t - 1))
```
Args:
cell: An instance of RNNCell.
inputs: A length T list of inputs, each a `Tensor` of shape
`[batch_size, input_size]`, or a nested tuple of such elements.
initial_state: (optional) An initial state for the RNN.
If `cell.state_size` is an integer, this must be
a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.
If `cell.state_size` is a tuple, this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell.state_size`.
dtype: (optional) The data type for the initial state and expected output.
Required if initial_state is not provided or RNN state has a heterogeneous
dtype.
sequence_length: Specifies the length of each sequence in inputs.
An int32 or int64 vector (tensor) size `[batch_size]`, values in `[0, T)`.
scope: VariableScope for the created subgraph; defaults to "rnn".
Returns:
A pair (outputs, state) where:
- outputs is a length T list of outputs (one for each input), or a nested
tuple of such elements.
- state is the final state
Raises:
TypeError: If `cell` is not an instance of RNNCell.
ValueError: If `inputs` is `None` or an empty list, or if the input depth
(column size) cannot be inferred from inputs via shape inference.
"""
assert_like_rnncell("cell", cell)
if not nest.is_sequence(inputs):
raise TypeError("inputs must be a sequence")
if not inputs:
raise ValueError("inputs must not be empty")
outputs = []
# Create a new scope in which the caching device is either
# determined by the parent scope, or is set to place the cached
# Variable using the same placement as for the rest of the RNN.
with vs.variable_scope(scope or "rnn") as varscope:
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
# Obtain the first sequence of the input
first_input = inputs
while nest.is_sequence(first_input):
first_input = first_input[0]
# Temporarily avoid EmbeddingWrapper and seq2seq badness
# TODO(lukaszkaiser): remove EmbeddingWrapper
if first_input.get_shape().ndims != 1:
input_shape = first_input.get_shape().with_rank_at_least(2)
fixed_batch_size = input_shape[0]
flat_inputs = nest.flatten(inputs)
for flat_input in flat_inputs:
input_shape = flat_input.get_shape().with_rank_at_least(2)
batch_size, input_size = input_shape[0], input_shape[1:]
fixed_batch_size.merge_with(batch_size)
for i, size in enumerate(input_size):
if size.value is None:
raise ValueError(
"Input size (dimension %d of inputs) must be accessible via "
"shape inference, but saw value None." % i)
else:
fixed_batch_size = first_input.get_shape().with_rank_at_least(1)[0]
if fixed_batch_size.value:
batch_size = fixed_batch_size.value
else:
batch_size = array_ops.shape(first_input)[0]
if initial_state is not None:
state = initial_state
else:
if not dtype:
raise ValueError("If no initial_state is provided, "
"dtype must be specified")
state = cell.zero_state(batch_size, dtype)
if sequence_length is not None: # Prepare variables
sequence_length = ops.convert_to_tensor(
sequence_length, name="sequence_length")
if sequence_length.get_shape().ndims not in (None, 1):
raise ValueError(
"sequence_length must be a vector of length batch_size")
def _create_zero_output(output_size):
# convert int to TensorShape if necessary
size = _concat(batch_size, output_size)
output = array_ops.zeros(
array_ops.stack(size), _infer_state_dtype(dtype, state))
shape = _concat(fixed_batch_size.value, output_size, static=True)
output.set_shape(tensor_shape.TensorShape(shape))
return output
output_size = cell.output_size
flat_output_size = nest.flatten(output_size)
flat_zero_output = tuple(
_create_zero_output(size) for size in flat_output_size)
zero_output = nest.pack_sequence_as(
structure=output_size, flat_sequence=flat_zero_output)
sequence_length = math_ops.to_int32(sequence_length)
min_sequence_length = math_ops.reduce_min(sequence_length)
max_sequence_length = math_ops.reduce_max(sequence_length)
for time, input_ in enumerate(inputs):
if time > 0:
varscope.reuse_variables()
# pylint: disable=cell-var-from-loop
call_cell = lambda: cell(input_, state)
# pylint: enable=cell-var-from-loop
if sequence_length is not None:
(output, state) = _rnn_step(
time=time,
sequence_length=sequence_length,
min_sequence_length=min_sequence_length,
max_sequence_length=max_sequence_length,
zero_output=zero_output,
state=state,
call_cell=call_cell,
state_size=cell.state_size)
else:
(output, state) = call_cell()
outputs.append(output)
return (outputs, state)
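# A minimal usage sketch for `static_rnn` (not part of this module's public
# API): it assumes TF 1.x graph mode, and the unit counts and placeholder
# shapes below are illustrative only.
def _static_rnn_usage_example(max_time=10, batch_size=4, input_depth=8,
                              num_units=16):
  """Builds a toy static RNN graph and returns (outputs, final_state)."""
  import tensorflow as tf  # local import keeps the sketch self-contained
  inputs = [tf.placeholder(tf.float32, shape=(batch_size, input_depth))
            for _ in range(max_time)]
  sequence_length = tf.placeholder(tf.int32, shape=(batch_size,))
  cell = tf.nn.rnn_cell.LSTMCell(num_units)
  # With `sequence_length`, steps past each row's length emit zeros and the
  # state at that length is propagated to the final state.
  outputs, final_state = static_rnn(
      cell, inputs, dtype=tf.float32, sequence_length=sequence_length)
  return outputs, final_state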
def static_state_saving_rnn(cell,
inputs,
state_saver,
state_name,
sequence_length=None,
scope=None):
"""RNN that accepts a state saver for time-truncated RNN calculation.
Args:
cell: An instance of `RNNCell`.
inputs: A length T list of inputs, each a `Tensor` of shape
`[batch_size, input_size]`.
state_saver: A state saver object with methods `state` and `save_state`.
state_name: Python string or tuple of strings. The name to use with the
state_saver. If the cell returns tuples of states (i.e.,
`cell.state_size` is a tuple) then `state_name` should be a tuple of
strings having the same length as `cell.state_size`. Otherwise it should
be a single string.
sequence_length: (optional) An int32/int64 vector size [batch_size].
See the documentation for rnn() for more details about sequence_length.
scope: VariableScope for the created subgraph; defaults to "rnn".
Returns:
A pair (outputs, state) where:
outputs is a length T list of outputs (one for each input)
states is the final state
Raises:
TypeError: If `cell` is not an instance of RNNCell.
ValueError: If `inputs` is `None` or an empty list, or if the arity and
type of `state_name` does not match that of `cell.state_size`.
"""
state_size = cell.state_size
state_is_tuple = nest.is_sequence(state_size)
state_name_tuple = nest.is_sequence(state_name)
if state_is_tuple != state_name_tuple:
raise ValueError("state_name should be the same type as cell.state_size. "
"state_name: %s, cell.state_size: %s" % (str(state_name),
str(state_size)))
if state_is_tuple:
state_name_flat = nest.flatten(state_name)
state_size_flat = nest.flatten(state_size)
if len(state_name_flat) != len(state_size_flat):
raise ValueError("#elems(state_name) != #elems(state_size): %d vs. %d" %
(len(state_name_flat), len(state_size_flat)))
initial_state = nest.pack_sequence_as(
structure=state_size,
flat_sequence=[state_saver.state(s) for s in state_name_flat])
else:
initial_state = state_saver.state(state_name)
(outputs, state) = static_rnn(
cell,
inputs,
initial_state=initial_state,
sequence_length=sequence_length,
scope=scope)
if state_is_tuple:
flat_state = nest.flatten(state)
state_name = nest.flatten(state_name)
save_state = [
state_saver.save_state(name, substate)
for name, substate in zip(state_name, flat_state)
]
else:
save_state = [state_saver.save_state(state_name, state)]
with ops.control_dependencies(save_state):
last_output = outputs[-1]
flat_last_output = nest.flatten(last_output)
flat_last_output = [
array_ops.identity(output) for output in flat_last_output
]
outputs[-1] = nest.pack_sequence_as(
structure=last_output, flat_sequence=flat_last_output)
return (outputs, state)
def static_bidirectional_rnn(cell_fw,
cell_bw,
inputs,
initial_state_fw=None,
initial_state_bw=None,
dtype=None,
sequence_length=None,
scope=None):
"""Creates a bidirectional recurrent neural network.
Similar to the unidirectional case above (rnn) but takes input and builds
independent forward and backward RNNs with the final forward and backward
outputs depth-concatenated, such that the output will have the format
[time][batch][cell_fw.output_size + cell_bw.output_size]. The input_size of
forward and backward cell must match. The initial state for both directions
is zero by default (but can be set optionally) and no intermediate states are
ever returned -- the network is fully unrolled for the given (passed in)
length(s) of the sequence(s) or completely unrolled if length(s) is not given.
Args:
cell_fw: An instance of RNNCell, to be used for forward direction.
cell_bw: An instance of RNNCell, to be used for backward direction.
inputs: A length T list of inputs, each a tensor of shape
[batch_size, input_size], or a nested tuple of such elements.
initial_state_fw: (optional) An initial state for the forward RNN.
This must be a tensor of appropriate type and shape
`[batch_size, cell_fw.state_size]`.
If `cell_fw.state_size` is a tuple, this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell_fw.state_size`.
initial_state_bw: (optional) Same as for `initial_state_fw`, but using
the corresponding properties of `cell_bw`.
dtype: (optional) The data type for the initial state. Required if
either of the initial states are not provided.
sequence_length: (optional) An int32/int64 vector, size `[batch_size]`,
containing the actual lengths for each of the sequences.
scope: VariableScope for the created subgraph; defaults to
"bidirectional_rnn"
Returns:
A tuple (outputs, output_state_fw, output_state_bw) where:
outputs is a length `T` list of outputs (one for each input), which
are depth-concatenated forward and backward outputs.
output_state_fw is the final state of the forward rnn.
output_state_bw is the final state of the backward rnn.
Raises:
TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`.
ValueError: If inputs is None or an empty list.
"""
if not _like_rnncell(cell_fw):
raise TypeError("cell_fw must be an instance of RNNCell")
if not _like_rnncell(cell_bw):
raise TypeError("cell_bw must be an instance of RNNCell")
if not nest.is_sequence(inputs):
raise TypeError("inputs must be a sequence")
if not inputs:
raise ValueError("inputs must not be empty")
with vs.variable_scope(scope or "bidirectional_rnn"):
# Forward direction
with vs.variable_scope("fw") as fw_scope:
output_fw, output_state_fw = static_rnn(
cell_fw,
inputs,
initial_state_fw,
dtype,
sequence_length,
scope=fw_scope)
# Backward direction
with vs.variable_scope("bw") as bw_scope:
reversed_inputs = _reverse_seq(inputs, sequence_length)
tmp, output_state_bw = static_rnn(
cell_bw,
reversed_inputs,
initial_state_bw,
dtype,
sequence_length,
scope=bw_scope)
output_bw = _reverse_seq(tmp, sequence_length)
# Concat each of the forward/backward outputs
flat_output_fw = nest.flatten(output_fw)
flat_output_bw = nest.flatten(output_bw)
flat_outputs = tuple(
array_ops.concat([fw, bw], 1)
for fw, bw in zip(flat_output_fw, flat_output_bw))
outputs = nest.pack_sequence_as(
structure=output_fw, flat_sequence=flat_outputs)
return (outputs, output_state_fw, output_state_bw)
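# A minimal usage sketch for `static_bidirectional_rnn` (not part of this
# module's public API): TF 1.x graph mode is assumed and all sizes below are
# illustrative only. Each output concatenates the forward and backward outputs.
def _static_bidirectional_rnn_usage_example(max_time=10, batch_size=4,
                                            input_depth=8, num_units=16):
  """Builds a toy bidirectional RNN graph; outputs have depth 2 * num_units."""
  import tensorflow as tf  # local import keeps the sketch self-contained
  inputs = [tf.placeholder(tf.float32, shape=(batch_size, input_depth))
            for _ in range(max_time)]
  cell_fw = tf.nn.rnn_cell.LSTMCell(num_units)
  cell_bw = tf.nn.rnn_cell.LSTMCell(num_units)
  outputs, state_fw, state_bw = static_bidirectional_rnn(
      cell_fw, cell_bw, inputs, dtype=tf.float32)
  return outputs, state_fw, state_bw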
|
# --------------------------------------------------------
# Written by: Romuald FOTSO
# Licensed: MIT License
# Copyright (c) 2017
# Based on 'dandxy89' github repository:
# https://github.com/dandxy89/ImageModels/blob/master/KerasLayers/Custom_layers.py
# --------------------------------------------------------
from keras.engine import Layer
from keras import backend as K
class LRN2D(Layer):
def __init__(self, alpha=1e-4, k=2, beta=0.75, n=5, **kwargs):
if n % 2 == 0:
raise NotImplementedError(
"LRN2D only works with odd n. n provided: " + str(n))
super(LRN2D, self).__init__(**kwargs)
self.alpha = alpha
self.k = k
self.beta = beta
self.n = n
    def get_output(self, train):
        # NOTE: this keeps the legacy Keras 1 Layer API (get_input/get_output)
        # used by the referenced repository; it also unpacks a symbolic shape
        # tensor, which assumes a backend/mode where that is allowed.
        X = self.get_input(train)
        b, ch, r, c = K.shape(X)
half_n = self.n // 2
input_sqr = K.square(X)
extra_channels = K.zeros((b, ch + 2 * half_n, r, c))
input_sqr = K.concatenate([extra_channels[:, :half_n, :, :],
input_sqr,
extra_channels[:, half_n + ch:, :, :]],
axis=1)
scale = self.k
for i in range(self.n):
scale += self.alpha * input_sqr[:, i:i + ch, :, :]
scale = scale ** self.beta
return X / scale
def get_config(self):
config = {"name": self.__class__.__name__,
"alpha": self.alpha,
"k": self.k,
"beta": self.beta,
"n": self.n}
base_config = super(LRN2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
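# A minimal usage sketch (assumption: the legacy Keras 1 style
# get_output()/get_config() interface kept above; the parameter values below
# are illustrative only).
if __name__ == '__main__':
    lrn = LRN2D(alpha=1e-4, k=2, beta=0.75, n=5)
    print(lrn.get_config())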
|
from .timer import Timer
from .simple import Counter
from .heartbeat import HeartBeat
from .collector import Collector
|
# Copyright (c) 2020, Manfred Moitzi
# License: MIT License
from pathlib import Path
from time import perf_counter
import ezdxf
from ezdxf.render.forms import sphere
from ezdxf.addons import MengerSponge
from ezdxf.addons.pycsg import CSG
DIR = Path('~/Desktop/Outbox').expanduser()
doc = ezdxf.new()
doc.layers.new('sponge', dxfattribs={'color': 5})
doc.layers.new('sphere', dxfattribs={'color': 6})
doc.set_modelspace_vport(6, center=(5, 0))
msp = doc.modelspace()
sponge1 = MengerSponge(level=3).mesh()
sphere1 = sphere(count=32, stacks=16, radius=.5, quads=True).translate(.25, .25, 1)
t0 = perf_counter()
subtract = (CSG(sponge1, meshid=1) - CSG(sphere1, meshid=2))
t1 = perf_counter()
# get mesh result by id
subtract.mesh(1).render(msp, dxfattribs={'layer': 'sponge'})
subtract.mesh(2).render(msp, dxfattribs={'layer': 'sphere'})
print(f'runtime: {t1-t0:.3f}s')
doc.saveas(DIR / 'csg_sphere_vs_menger_sponge.dxf')
|
from __future__ import division
import six
import keras
from keras.models import Model
from keras.layers import (
Input,
Activation,
Dense,
Flatten
)
from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D
from keras.layers import add
from keras.layers import BatchNormalization
from keras.regularizers import l2
from keras import backend as K
import tensorflow as tf
def _bn_relu(input):
"""Helper to build a BN -> relu block
"""
norm = BatchNormalization(axis=CHANNEL_AXIS)(input)
return Activation("relu")(norm)
def _conv_bn_relu(**conv_params):
"""Helper to build a conv -> BN -> relu block
"""
filters = conv_params["filters"]
kernel_size = conv_params["kernel_size"]
strides = conv_params.setdefault("strides", (1, 1))
kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal")
padding = conv_params.setdefault("padding", "same")
kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1.e-4))
def f(input):
conv = Conv2D(filters=filters, kernel_size=kernel_size,
strides=strides, padding=padding,
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer)(input)
return _bn_relu(conv)
return f
def _bn_relu_conv(**conv_params):
"""Helper to build a BN -> relu -> conv block.
This is an improved scheme proposed in http://arxiv.org/pdf/1603.05027v2.pdf
"""
filters = conv_params["filters"]
kernel_size = conv_params["kernel_size"]
strides = conv_params.setdefault("strides", (1, 1))
kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal")
padding = conv_params.setdefault("padding", "same")
kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1.e-4))
def f(input):
activation = _bn_relu(input)
return Conv2D(filters=filters, kernel_size=kernel_size,
strides=strides, padding=padding,
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer)(activation)
return f
def _shortcut(input, residual):
"""Adds a shortcut between input and residual block and merges them with "sum"
"""
# Expand channels of shortcut to match residual.
# Stride appropriately to match residual (width, height)
# Should be int if network architecture is correctly configured.
input_shape = K.int_shape(input)
residual_shape = K.int_shape(residual)
stride_width = int(round(input_shape[ROW_AXIS] / residual_shape[ROW_AXIS]))
stride_height = int(round(input_shape[COL_AXIS] / residual_shape[COL_AXIS]))
equal_channels = input_shape[CHANNEL_AXIS] == residual_shape[CHANNEL_AXIS]
shortcut = input
# 1 X 1 conv if shape is different. Else identity.
if stride_width > 1 or stride_height > 1 or not equal_channels:
shortcut = Conv2D(filters=residual_shape[CHANNEL_AXIS],
kernel_size=(1, 1),
strides=(stride_width, stride_height),
padding="valid",
kernel_initializer="he_normal",
kernel_regularizer=l2(0.0001))(input)
return add([shortcut, residual])
def _residual_block(block_function, filters, repetitions, is_first_layer=False):
"""Builds a residual block with repeating bottleneck blocks.
"""
def f(input):
for i in range(repetitions):
init_strides = (1, 1)
if i == 0 and not is_first_layer:
init_strides = (2, 2)
input = block_function(filters=filters, init_strides=init_strides,
is_first_block_of_first_layer=(is_first_layer and i == 0))(input)
return input
return f
def basic_block(filters, init_strides=(1, 1), is_first_block_of_first_layer=False):
"""Basic 3 X 3 convolution blocks for use on resnets with layers <= 34.
Follows improved proposed scheme in http://arxiv.org/pdf/1603.05027v2.pdf
"""
def f(input):
if is_first_block_of_first_layer:
# don't repeat bn->relu since we just did bn->relu->maxpool
conv1 = Conv2D(filters=filters, kernel_size=(3, 3),
strides=init_strides,
padding="same",
kernel_initializer="he_normal",
kernel_regularizer=l2(1e-4))(input)
else:
conv1 = _bn_relu_conv(filters=filters, kernel_size=(3, 3),
strides=init_strides)(input)
residual = _bn_relu_conv(filters=filters, kernel_size=(3, 3))(conv1)
return _shortcut(input, residual)
return f
def bottleneck(filters, init_strides=(1, 1), is_first_block_of_first_layer=False):
"""Bottleneck architecture for > 34 layer resnet.
Follows improved proposed scheme in http://arxiv.org/pdf/1603.05027v2.pdf
Returns:
A final conv layer of filters * 4
"""
def f(input):
if is_first_block_of_first_layer:
# don't repeat bn->relu since we just did bn->relu->maxpool
conv_1_1 = Conv2D(filters=filters, kernel_size=(1, 1),
strides=init_strides,
padding="same",
kernel_initializer="he_normal",
kernel_regularizer=l2(1e-4))(input)
else:
conv_1_1 = _bn_relu_conv(filters=filters, kernel_size=(1, 1),
strides=init_strides)(input)
conv_3_3 = _bn_relu_conv(filters=filters, kernel_size=(3, 3))(conv_1_1)
residual = _bn_relu_conv(filters=filters * 4, kernel_size=(1, 1))(conv_3_3)
return _shortcut(input, residual)
return f
def _handle_dim_ordering():
global ROW_AXIS
global COL_AXIS
global CHANNEL_AXIS
if K.image_dim_ordering() == 'tf':
ROW_AXIS = 1
COL_AXIS = 2
CHANNEL_AXIS = 3
else:
CHANNEL_AXIS = 1
ROW_AXIS = 2
COL_AXIS = 3
def _get_block(identifier):
if isinstance(identifier, six.string_types):
res = globals().get(identifier)
if not res:
raise ValueError('Invalid {}'.format(identifier))
return res
return identifier
class ResnetBuilder(object):
@staticmethod
def build(input_shape, num_outputs, block_fn, repetitions, input):
"""Builds a custom ResNet like architecture.
Args:
input_shape: The input shape in the form (nb_channels, nb_rows, nb_cols)
num_outputs: The number of outputs at final softmax layer
block_fn: The block function to use. This is either `basic_block` or `bottleneck`.
The original paper used basic_block for layers < 50
repetitions: Number of repetitions of various block units.
                At each block unit, the number of filters is doubled and the input size is halved
Returns:
The keras `Model`.
"""
_handle_dim_ordering()
if len(input_shape) != 3:
raise Exception("Input shape should be a tuple (nb_channels, nb_rows, nb_cols)")
# Permute dimension order if necessary
#if K.image_dim_ordering() == 'tf':
# input_shape = (input_shape[1], input_shape[2], input_shape[0])#???
# Load function from str if needed.
block_fn = _get_block(block_fn)
# input = Input(shape=input_shape)
conv1 = _conv_bn_relu(filters=64, kernel_size=(7, 7), strides=(2, 2))(input)
pool1 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="same")(conv1)
block = pool1
filters = 64
for i, r in enumerate(repetitions):
block = _residual_block(block_fn, filters=filters, repetitions=r, is_first_layer=(i == 0))(block)
filters *= 2
# Last activation
block = _bn_relu(block)
# Classifier block
block_shape = K.int_shape(block)
pool2 = AveragePooling2D(pool_size=(block_shape[ROW_AXIS], block_shape[COL_AXIS]),
strides=(1, 1))(block)
flatten1 = Flatten()(pool2)
# dense = Dense(units=num_outputs, kernel_initializer="he_normal",
# activation="softmax")(flatten1)
# model = Model(inputs=input, outputs=flatten1)
return flatten1
@staticmethod
def build_resnet_18(input_shape, num_outputs, input):
return ResnetBuilder.build(input_shape, num_outputs, basic_block, [2, 2, 2, 2], input)
@staticmethod
    def build_resnet_34(input_shape, num_outputs, input):
        return ResnetBuilder.build(input_shape, num_outputs, basic_block, [3, 4, 6, 3], input)
    @staticmethod
    def build_resnet_50(input_shape, num_outputs, input):
        return ResnetBuilder.build(input_shape, num_outputs, bottleneck, [3, 4, 6, 3], input)
    @staticmethod
    def build_resnet_101(input_shape, num_outputs, input):
        return ResnetBuilder.build(input_shape, num_outputs, bottleneck, [3, 4, 23, 3], input)
    @staticmethod
    def build_resnet_152(input_shape, num_outputs, input):
        return ResnetBuilder.build(input_shape, num_outputs, bottleneck, [3, 8, 36, 3], input)
def resnet_builder(shape_list, nb_class):
input_layers = list()
resnet_layers = list()
for input_shape in shape_list:
input_layer = keras.layers.Input(shape=input_shape)
input_layers.append(input_layer)
resnet_layers.append(ResnetBuilder.build_resnet_18(input_shape, nb_class, input_layer))
merged_layer = keras.layers.concatenate(resnet_layers)
merged_dense = keras.layers.Dense(units=1000, activation='relu')(merged_layer)
merged_batchnorm = keras.layers.BatchNormalization()(merged_dense)
merged_dropout = keras.layers.Dropout(0.7)(merged_batchnorm)
merged_class_layer = keras.layers.Dense(units=nb_class, activation='softmax')(merged_dropout)
    model = keras.models.Model(inputs=input_layers, outputs=merged_class_layer)
# model.compile(optimizer=keras.optimizers.Adam(lr=0.0001),
# loss=keras.losses.categorical_crossentropy, metrics=['accuracy'])
model.compile(optimizer=keras.optimizers.Adam(lr=0.0001),
loss='categorical_crossentropy', metrics=['accuracy'])
return model
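# A minimal usage sketch (assumptions: a Keras version where
# K.image_dim_ordering() is still available and reports 'tf', so each input
# shape is (rows, cols, channels); the shapes and class count below are
# illustrative only).
if __name__ == '__main__':
    demo_model = resnet_builder(shape_list=[(64, 64, 3), (64, 64, 3)], nb_class=10)
    demo_model.summary()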
|
import math
class CustomType(type):
def __new__(mcls, name, bases, class_dict):
print(f'Using custom metaclass {mcls} to create class {name}...')
cls_obj = super().__new__(mcls, name, bases, class_dict)
cls_obj.circ = lambda self: 2 * math.pi * self.r
return cls_obj
class Circle(metaclass=CustomType):
def __init__(self, x, y, r):
self.x = x
self.y = y
self.r = r
def area(self):
return math.pi * self.r ** 2
# Using custom metaclass <class '__main__.CustomType'> to create class Circle...
c = Circle(0, 0, 1)
print(c.area())
print(c.circ())
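# A short follow-up sketch: any class created through CustomType gets a circ()
# method injected, provided its instances expose an `r` attribute (the Wheel
# class below is illustrative only).
class Wheel(metaclass=CustomType):
    def __init__(self, r):
        self.r = r
w = Wheel(2)
print(w.circ())  # circumference = 2 * pi * 2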
|
from time import sleep
import numpy as np
import matplotlib.pyplot as plt
def get_initial_state(size):
return np.random.choice([0, 1], size)
def compute_next_state(state):
new_state = np.zeros(state.shape, dtype=int)
for i in range(state.shape[0]):
for j in range(state.shape[1]):
low_x, high_x = max(0, i-1), min(i+2, state.shape[0])
low_y, high_y = max(0, j-1), min(j+2, state.shape[1])
n_live = np.sum(state[low_x: high_x, low_y: high_y]) - state[i, j]
if (state[i, j] == 1) and (n_live < 2):
new_state[i, j] = 0
elif (state[i, j] == 1) and (2 <= n_live <= 3):
new_state[i, j] = 1
elif (state[i, j] == 1) and (n_live > 3):
new_state[i, j] = 0
elif (state[i, j] == 0) and (n_live == 3):
new_state[i, j] = 1
else:
new_state[i, j] = state[i, j]
return new_state
def start(initial_state=None, loop_delay=1, size=(200, 200)):
if initial_state is None:
state = get_initial_state(size)
else:
state = initial_state
size = state.shape
age = np.zeros(size, dtype=int)
counter = 0
while True:
new_state = compute_next_state(state)
age += new_state
age = age * new_state
counter += 1
plt.imshow(age, cmap='Greys')
plt.xlim(right=size[1], left=0)
plt.ylim(top=0, bottom=size[0])
plt.pause(loop_delay)
if (np.sum(new_state) == 0) or (new_state == state).all():
print(counter)
state = get_initial_state(size)
age = np.zeros(size, dtype=int)
counter = 0
else:
state = new_state
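# A short follow-up sketch: seeding start() with a classic glider instead of a
# random grid (the grid size and the commented-out call are illustrative only).
def make_glider(size=(20, 20)):
    state = np.zeros(size, dtype=int)
    state[1, 2] = state[2, 3] = 1
    state[3, 1] = state[3, 2] = state[3, 3] = 1
    return state
# Example: start(initial_state=make_glider(), loop_delay=0.1)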
if __name__ == "__main__":
start()
|
import datetime
import re
from unittest import mock
from django import forms
from django.contrib.auth.forms import (
AdminPasswordChangeForm, AuthenticationForm, PasswordChangeForm,
PasswordResetForm, ReadOnlyPasswordHashField, ReadOnlyPasswordHashWidget,
SetPasswordForm, UserChangeForm, UserCreationForm,
)
from django.contrib.auth.models import User
from django.contrib.auth.signals import user_login_failed
from django.contrib.sites.models import Site
from django.core import mail
from django.core.mail import EmailMultiAlternatives
from django.forms.fields import CharField, Field, IntegerField
from django.test import SimpleTestCase, TestCase, override_settings
from django.utils import translation
from django.utils.text import capfirst
from django.utils.translation import gettext as _
from .models.custom_user import (
CustomUser, CustomUserWithoutIsActiveField, ExtensionUser,
)
from .models.with_custom_email_field import CustomEmailField
from .models.with_integer_username import IntegerUsernameUser
from .settings import AUTH_TEMPLATES
class TestDataMixin:
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create_user(username='testclient', password='password', email='testclient@example.com')
cls.u2 = User.objects.create_user(username='inactive', password='password', is_active=False)
cls.u3 = User.objects.create_user(username='staff', password='password')
cls.u4 = User.objects.create(username='empty_password', password='')
cls.u5 = User.objects.create(username='unmanageable_password', password='$')
cls.u6 = User.objects.create(username='unknown_password', password='foo$bar')
class UserCreationFormTest(TestDataMixin, TestCase):
def test_user_already_exists(self):
data = {
'username': 'testclient',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form["username"].errors,
[str(User._meta.get_field('username').error_messages['unique'])])
def test_invalid_data(self):
data = {
'username': 'jsmith!',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
validator = next(v for v in User._meta.get_field('username').validators if v.code == 'invalid')
self.assertEqual(form["username"].errors, [str(validator.message)])
def test_password_verification(self):
# The verification password is incorrect.
data = {
'username': 'jsmith',
'password1': 'test123',
'password2': 'test',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form["password2"].errors,
[str(form.error_messages['password_mismatch'])])
def test_both_passwords(self):
# One (or both) passwords weren't given
data = {'username': 'jsmith'}
form = UserCreationForm(data)
required_error = [str(Field.default_error_messages['required'])]
self.assertFalse(form.is_valid())
self.assertEqual(form['password1'].errors, required_error)
self.assertEqual(form['password2'].errors, required_error)
data['password2'] = 'test123'
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form['password1'].errors, required_error)
self.assertEqual(form['password2'].errors, [])
@mock.patch('django.contrib.auth.password_validation.password_changed')
def test_success(self, password_changed):
# The success case.
data = {
'username': 'jsmith@example.com',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertTrue(form.is_valid())
form.save(commit=False)
self.assertEqual(password_changed.call_count, 0)
u = form.save()
self.assertEqual(password_changed.call_count, 1)
self.assertEqual(repr(u), '<User: jsmith@example.com>')
def test_unicode_username(self):
data = {
'username': '宝',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertTrue(form.is_valid())
u = form.save()
self.assertEqual(u.username, '宝')
def test_normalize_username(self):
# The normalization happens in AbstractBaseUser.clean() and ModelForm
# validation calls Model.clean().
ohm_username = 'testΩ' # U+2126 OHM SIGN
data = {
'username': ohm_username,
'password1': 'pwd2',
'password2': 'pwd2',
}
form = UserCreationForm(data)
self.assertTrue(form.is_valid())
user = form.save()
self.assertNotEqual(user.username, ohm_username)
self.assertEqual(user.username, 'testΩ') # U+03A9 GREEK CAPITAL LETTER OMEGA
def test_duplicate_normalized_unicode(self):
"""
        To prevent almost identical usernames that are visually identical but
        differ only in their Unicode code points, Unicode NFKC normalization
        should make them appear equal to Django.
"""
omega_username = 'iamtheΩ' # U+03A9 GREEK CAPITAL LETTER OMEGA
ohm_username = 'iamtheΩ' # U+2126 OHM SIGN
self.assertNotEqual(omega_username, ohm_username)
User.objects.create_user(username=omega_username, password='pwd')
data = {
'username': ohm_username,
'password1': 'pwd2',
'password2': 'pwd2',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['username'], ["A user with that username already exists."]
)
@override_settings(AUTH_PASSWORD_VALIDATORS=[
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 'OPTIONS': {
'min_length': 12,
}},
])
def test_validates_password(self):
data = {
'username': 'testclient',
'password1': 'testclient',
'password2': 'testclient',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(len(form['password2'].errors), 2)
self.assertIn('The password is too similar to the username.', form['password2'].errors)
self.assertIn(
'This password is too short. It must contain at least 12 characters.',
form['password2'].errors
)
def test_custom_form(self):
class CustomUserCreationForm(UserCreationForm):
class Meta(UserCreationForm.Meta):
model = ExtensionUser
fields = UserCreationForm.Meta.fields + ('date_of_birth',)
data = {
'username': 'testclient',
'password1': 'testclient',
'password2': 'testclient',
'date_of_birth': '1988-02-24',
}
form = CustomUserCreationForm(data)
self.assertTrue(form.is_valid())
def test_custom_form_with_different_username_field(self):
class CustomUserCreationForm(UserCreationForm):
class Meta(UserCreationForm.Meta):
model = CustomUser
fields = ('email', 'date_of_birth')
data = {
'email': 'test@client222.com',
'password1': 'testclient',
'password2': 'testclient',
'date_of_birth': '1988-02-24',
}
form = CustomUserCreationForm(data)
self.assertTrue(form.is_valid())
def test_custom_form_hidden_username_field(self):
class CustomUserCreationForm(UserCreationForm):
class Meta(UserCreationForm.Meta):
model = CustomUserWithoutIsActiveField
fields = ('email',) # without USERNAME_FIELD
data = {
'email': 'testclient@example.com',
'password1': 'testclient',
'password2': 'testclient',
}
form = CustomUserCreationForm(data)
self.assertTrue(form.is_valid())
def test_password_whitespace_not_stripped(self):
data = {
'username': 'testuser',
'password1': ' testpassword ',
'password2': ' testpassword ',
}
form = UserCreationForm(data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['password1'], data['password1'])
self.assertEqual(form.cleaned_data['password2'], data['password2'])
@override_settings(AUTH_PASSWORD_VALIDATORS=[
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
])
def test_password_help_text(self):
form = UserCreationForm()
self.assertEqual(
form.fields['password1'].help_text,
            "<ul><li>Your password can't be too similar to your other personal information.</li></ul>"
)
@override_settings(AUTH_PASSWORD_VALIDATORS=[
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
])
def test_user_create_form_validates_password_with_all_data(self):
"""UserCreationForm password validation uses all of the form's data."""
class CustomUserCreationForm(UserCreationForm):
class Meta(UserCreationForm.Meta):
model = User
fields = ('username', 'email', 'first_name', 'last_name')
form = CustomUserCreationForm({
'username': 'testuser',
'password1': 'testpassword',
'password2': 'testpassword',
'first_name': 'testpassword',
'last_name': 'lastname',
})
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['password2'],
['The password is too similar to the first name.'],
)
def test_username_field_autocapitalize_none(self):
form = UserCreationForm()
self.assertEqual(form.fields['username'].widget.attrs.get('autocapitalize'), 'none')
def test_html_autocomplete_attributes(self):
form = UserCreationForm()
tests = (
('username', 'username'),
('password1', 'new-password'),
('password2', 'new-password'),
)
for field_name, autocomplete in tests:
with self.subTest(field_name=field_name, autocomplete=autocomplete):
self.assertEqual(form.fields[field_name].widget.attrs['autocomplete'], autocomplete)
# To verify that the login form rejects inactive users, use an authentication
# backend that allows them.
@override_settings(AUTHENTICATION_BACKENDS=['django.contrib.auth.backends.AllowAllUsersModelBackend'])
class AuthenticationFormTest(TestDataMixin, TestCase):
def test_invalid_username(self):
# The user submits an invalid username.
data = {
'username': 'jsmith_does_not_exist',
'password': 'test123',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(
form.non_field_errors(), [
form.error_messages['invalid_login'] % {
'username': User._meta.get_field('username').verbose_name
}
]
)
def test_inactive_user(self):
# The user is inactive.
data = {
'username': 'inactive',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(), [str(form.error_messages['inactive'])])
# Use an authentication backend that rejects inactive users.
@override_settings(AUTHENTICATION_BACKENDS=['django.contrib.auth.backends.ModelBackend'])
def test_inactive_user_incorrect_password(self):
"""An invalid login doesn't leak the inactive status of a user."""
data = {
'username': 'inactive',
'password': 'incorrect',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(
form.non_field_errors(), [
form.error_messages['invalid_login'] % {
'username': User._meta.get_field('username').verbose_name
}
]
)
def test_login_failed(self):
signal_calls = []
def signal_handler(**kwargs):
signal_calls.append(kwargs)
user_login_failed.connect(signal_handler)
fake_request = object()
try:
form = AuthenticationForm(fake_request, {
'username': 'testclient',
'password': 'incorrect',
})
self.assertFalse(form.is_valid())
self.assertIs(signal_calls[0]['request'], fake_request)
finally:
user_login_failed.disconnect(signal_handler)
def test_inactive_user_i18n(self):
with self.settings(USE_I18N=True), translation.override('pt-br', deactivate=True):
# The user is inactive.
data = {
'username': 'inactive',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(), [str(form.error_messages['inactive'])])
# Use an authentication backend that allows inactive users.
@override_settings(AUTHENTICATION_BACKENDS=['django.contrib.auth.backends.AllowAllUsersModelBackend'])
def test_custom_login_allowed_policy(self):
# The user is inactive, but our custom form policy allows them to log in.
data = {
'username': 'inactive',
'password': 'password',
}
class AuthenticationFormWithInactiveUsersOkay(AuthenticationForm):
def confirm_login_allowed(self, user):
pass
form = AuthenticationFormWithInactiveUsersOkay(None, data)
self.assertTrue(form.is_valid())
# If we want to disallow some logins according to custom logic,
# we should raise a django.forms.ValidationError in the form.
class PickyAuthenticationForm(AuthenticationForm):
def confirm_login_allowed(self, user):
if user.username == "inactive":
raise forms.ValidationError("This user is disallowed.")
raise forms.ValidationError("Sorry, nobody's allowed in.")
form = PickyAuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(), ['This user is disallowed.'])
data = {
'username': 'testclient',
'password': 'password',
}
form = PickyAuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(), ["Sorry, nobody's allowed in."])
def test_success(self):
# The success case
data = {
'username': 'testclient',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.non_field_errors(), [])
def test_unicode_username(self):
User.objects.create_user(username='Σαρα', password='pwd')
data = {
'username': 'Σαρα',
'password': 'pwd',
}
form = AuthenticationForm(None, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.non_field_errors(), [])
@override_settings(AUTH_USER_MODEL='auth_tests.CustomEmailField')
def test_username_field_max_length_matches_user_model(self):
self.assertEqual(CustomEmailField._meta.get_field('username').max_length, 255)
data = {
'username': 'u' * 255,
'password': 'pwd',
'email': 'test@example.com',
}
CustomEmailField.objects.create_user(**data)
form = AuthenticationForm(None, data)
self.assertEqual(form.fields['username'].max_length, 255)
self.assertEqual(form.errors, {})
@override_settings(AUTH_USER_MODEL='auth_tests.IntegerUsernameUser')
def test_username_field_max_length_defaults_to_254(self):
self.assertIsNone(IntegerUsernameUser._meta.get_field('username').max_length)
data = {
'username': '0123456',
'password': 'password',
}
IntegerUsernameUser.objects.create_user(**data)
form = AuthenticationForm(None, data)
self.assertEqual(form.fields['username'].max_length, 254)
self.assertEqual(form.errors, {})
def test_username_field_label(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField(label="Name", max_length=75)
form = CustomAuthenticationForm()
self.assertEqual(form['username'].label, "Name")
def test_username_field_label_not_set(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField()
form = CustomAuthenticationForm()
username_field = User._meta.get_field(User.USERNAME_FIELD)
self.assertEqual(form.fields['username'].label, capfirst(username_field.verbose_name))
def test_username_field_autocapitalize_none(self):
form = AuthenticationForm()
self.assertEqual(form.fields['username'].widget.attrs.get('autocapitalize'), 'none')
def test_username_field_label_empty_string(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField(label='')
form = CustomAuthenticationForm()
self.assertEqual(form.fields['username'].label, "")
def test_password_whitespace_not_stripped(self):
data = {
'username': 'testuser',
'password': ' pass ',
}
form = AuthenticationForm(None, data)
        form.is_valid()  # Not necessary to have valid credentials for the test.
self.assertEqual(form.cleaned_data['password'], data['password'])
@override_settings(AUTH_USER_MODEL='auth_tests.IntegerUsernameUser')
def test_integer_username(self):
class CustomAuthenticationForm(AuthenticationForm):
username = IntegerField()
user = IntegerUsernameUser.objects.create_user(username=0, password='pwd')
data = {
'username': 0,
'password': 'pwd',
}
form = CustomAuthenticationForm(None, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['username'], data['username'])
self.assertEqual(form.cleaned_data['password'], data['password'])
self.assertEqual(form.errors, {})
self.assertEqual(form.user_cache, user)
def test_get_invalid_login_error(self):
error = AuthenticationForm().get_invalid_login_error()
self.assertIsInstance(error, forms.ValidationError)
self.assertEqual(
error.message,
'Please enter a correct %(username)s and password. Note that both '
'fields may be case-sensitive.',
)
self.assertEqual(error.code, 'invalid_login')
self.assertEqual(error.params, {'username': 'username'})
def test_html_autocomplete_attributes(self):
form = AuthenticationForm()
tests = (
('username', 'username'),
('password', 'current-password'),
)
for field_name, autocomplete in tests:
with self.subTest(field_name=field_name, autocomplete=autocomplete):
self.assertEqual(form.fields[field_name].widget.attrs['autocomplete'], autocomplete)
class SetPasswordFormTest(TestDataMixin, TestCase):
def test_password_verification(self):
# The two new passwords do not match.
user = User.objects.get(username='testclient')
data = {
'new_password1': 'abc123',
'new_password2': 'abc',
}
form = SetPasswordForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(
form["new_password2"].errors,
[str(form.error_messages['password_mismatch'])]
)
@mock.patch('django.contrib.auth.password_validation.password_changed')
def test_success(self, password_changed):
user = User.objects.get(username='testclient')
data = {
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = SetPasswordForm(user, data)
self.assertTrue(form.is_valid())
form.save(commit=False)
self.assertEqual(password_changed.call_count, 0)
form.save()
self.assertEqual(password_changed.call_count, 1)
@override_settings(AUTH_PASSWORD_VALIDATORS=[
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 'OPTIONS': {
'min_length': 12,
}},
])
def test_validates_password(self):
user = User.objects.get(username='testclient')
data = {
'new_password1': 'testclient',
'new_password2': 'testclient',
}
form = SetPasswordForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(len(form["new_password2"].errors), 2)
self.assertIn('The password is too similar to the username.', form["new_password2"].errors)
self.assertIn(
'This password is too short. It must contain at least 12 characters.',
form["new_password2"].errors
)
def test_password_whitespace_not_stripped(self):
user = User.objects.get(username='testclient')
data = {
'new_password1': ' password ',
'new_password2': ' password ',
}
form = SetPasswordForm(user, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['new_password1'], data['new_password1'])
self.assertEqual(form.cleaned_data['new_password2'], data['new_password2'])
@override_settings(AUTH_PASSWORD_VALIDATORS=[
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 'OPTIONS': {
'min_length': 12,
}},
])
def test_help_text_translation(self):
french_help_texts = [
'Votre mot de passe ne peut pas trop ressembler à vos autres informations personnelles.',
'Votre mot de passe doit contenir au minimum 12 caractères.',
]
form = SetPasswordForm(self.u1)
with translation.override('fr'):
html = form.as_p()
for french_text in french_help_texts:
self.assertIn(french_text, html)
def test_html_autocomplete_attributes(self):
form = SetPasswordForm(self.u1)
tests = (
('new_password1', 'new-password'),
('new_password2', 'new-password'),
)
for field_name, autocomplete in tests:
with self.subTest(field_name=field_name, autocomplete=autocomplete):
self.assertEqual(form.fields[field_name].widget.attrs['autocomplete'], autocomplete)
class PasswordChangeFormTest(TestDataMixin, TestCase):
def test_incorrect_password(self):
user = User.objects.get(username='testclient')
data = {
'old_password': 'test',
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = PasswordChangeForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(form["old_password"].errors, [str(form.error_messages['password_incorrect'])])
def test_password_verification(self):
# The two new passwords do not match.
user = User.objects.get(username='testclient')
data = {
'old_password': 'password',
'new_password1': 'abc123',
'new_password2': 'abc',
}
form = PasswordChangeForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(form["new_password2"].errors, [str(form.error_messages['password_mismatch'])])
@mock.patch('django.contrib.auth.password_validation.password_changed')
def test_success(self, password_changed):
# The success case.
user = User.objects.get(username='testclient')
data = {
'old_password': 'password',
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = PasswordChangeForm(user, data)
self.assertTrue(form.is_valid())
form.save(commit=False)
self.assertEqual(password_changed.call_count, 0)
form.save()
self.assertEqual(password_changed.call_count, 1)
def test_field_order(self):
# Regression test - check the order of fields:
user = User.objects.get(username='testclient')
self.assertEqual(list(PasswordChangeForm(user, {}).fields), ['old_password', 'new_password1', 'new_password2'])
def test_password_whitespace_not_stripped(self):
user = User.objects.get(username='testclient')
user.set_password(' oldpassword ')
data = {
'old_password': ' oldpassword ',
'new_password1': ' pass ',
'new_password2': ' pass ',
}
form = PasswordChangeForm(user, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['old_password'], data['old_password'])
self.assertEqual(form.cleaned_data['new_password1'], data['new_password1'])
self.assertEqual(form.cleaned_data['new_password2'], data['new_password2'])
def test_html_autocomplete_attributes(self):
user = User.objects.get(username='testclient')
form = PasswordChangeForm(user)
self.assertEqual(form.fields['old_password'].widget.attrs['autocomplete'], 'current-password')
class UserChangeFormTest(TestDataMixin, TestCase):
def test_username_validity(self):
user = User.objects.get(username='testclient')
data = {'username': 'not valid'}
form = UserChangeForm(data, instance=user)
self.assertFalse(form.is_valid())
validator = next(v for v in User._meta.get_field('username').validators if v.code == 'invalid')
self.assertEqual(form["username"].errors, [str(validator.message)])
def test_bug_14242(self):
        # A regression test, introduced by adding an optimization for the
        # UserChangeForm.
class MyUserForm(UserChangeForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['groups'].help_text = 'These groups give users different permissions'
class Meta(UserChangeForm.Meta):
fields = ('groups',)
# Just check we can create it
MyUserForm({})
def test_unusable_password(self):
user = User.objects.get(username='empty_password')
user.set_unusable_password()
user.save()
form = UserChangeForm(instance=user)
self.assertIn(_("No password set."), form.as_table())
def test_bug_17944_empty_password(self):
user = User.objects.get(username='empty_password')
form = UserChangeForm(instance=user)
self.assertIn(_("No password set."), form.as_table())
def test_bug_17944_unmanageable_password(self):
user = User.objects.get(username='unmanageable_password')
form = UserChangeForm(instance=user)
self.assertIn(_("Invalid password format or unknown hashing algorithm."), form.as_table())
def test_bug_17944_unknown_password_algorithm(self):
user = User.objects.get(username='unknown_password')
form = UserChangeForm(instance=user)
self.assertIn(_("Invalid password format or unknown hashing algorithm."), form.as_table())
def test_bug_19133(self):
"The change form does not return the password value"
# Use the form to construct the POST data
user = User.objects.get(username='testclient')
form_for_data = UserChangeForm(instance=user)
post_data = form_for_data.initial
# The password field should be readonly, so anything
# posted here should be ignored; the form will be
# valid, and give back the 'initial' value for the
# password field.
post_data['password'] = 'new password'
form = UserChangeForm(instance=user, data=post_data)
self.assertTrue(form.is_valid())
# original hashed password contains $
self.assertIn('$', form.cleaned_data['password'])
def test_bug_19349_bound_password_field(self):
user = User.objects.get(username='testclient')
form = UserChangeForm(data={}, instance=user)
# When rendering the bound password field,
# ReadOnlyPasswordHashWidget needs the initial
# value to render correctly
self.assertEqual(form.initial['password'], form['password'].value())
def test_custom_form(self):
class CustomUserChangeForm(UserChangeForm):
class Meta(UserChangeForm.Meta):
model = ExtensionUser
fields = ('username', 'password', 'date_of_birth',)
user = User.objects.get(username='testclient')
data = {
'username': 'testclient',
'password': 'testclient',
'date_of_birth': '1998-02-24',
}
form = CustomUserChangeForm(data, instance=user)
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(form.cleaned_data['username'], 'testclient')
self.assertEqual(form.cleaned_data['date_of_birth'], datetime.date(1998, 2, 24))
def test_password_excluded(self):
class UserChangeFormWithoutPassword(UserChangeForm):
password = None
class Meta:
model = User
exclude = ['password']
form = UserChangeFormWithoutPassword()
self.assertNotIn('password', form.fields)
def test_username_field_autocapitalize_none(self):
form = UserChangeForm()
self.assertEqual(form.fields['username'].widget.attrs.get('autocapitalize'), 'none')
@override_settings(TEMPLATES=AUTH_TEMPLATES)
class PasswordResetFormTest(TestDataMixin, TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
# This cleanup is necessary because contrib.sites cache
# makes tests interfere with each other, see #11505
Site.objects.clear_cache()
def create_dummy_user(self):
"""
Create a user and return a tuple (user_object, username, email).
"""
username = 'jsmith'
email = 'jsmith@example.com'
user = User.objects.create_user(username, email, 'test123')
return (user, username, email)
def test_invalid_email(self):
data = {'email': 'not valid'}
form = PasswordResetForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form['email'].errors, [_('Enter a valid email address.')])
def test_nonexistent_email(self):
"""
Test nonexistent email address. This should not fail because it would
expose information about registered users.
"""
data = {'email': 'foo@bar.com'}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
self.assertEqual(len(mail.outbox), 0)
def test_cleaned_data(self):
(user, username, email) = self.create_dummy_user()
data = {'email': email}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
form.save(domain_override='example.com')
self.assertEqual(form.cleaned_data['email'], email)
self.assertEqual(len(mail.outbox), 1)
def test_custom_email_subject(self):
data = {'email': 'testclient@example.com'}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
# Since we're not providing a request object, we must provide a
# domain_override to prevent the save operation from failing in the
# potential case where contrib.sites is not installed. Refs #16412.
form.save(domain_override='example.com')
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Custom password reset on example.com')
def test_custom_email_constructor(self):
data = {'email': 'testclient@example.com'}
class CustomEmailPasswordResetForm(PasswordResetForm):
def send_mail(self, subject_template_name, email_template_name,
context, from_email, to_email,
html_email_template_name=None):
EmailMultiAlternatives(
"Forgot your password?",
"Sorry to hear you forgot your password.",
None, [to_email],
['site_monitor@example.com'],
headers={'Reply-To': 'webmaster@example.com'},
alternatives=[
("Really sorry to hear you forgot your password.", "text/html")
],
).send()
form = CustomEmailPasswordResetForm(data)
self.assertTrue(form.is_valid())
# Since we're not providing a request object, we must provide a
# domain_override to prevent the save operation from failing in the
# potential case where contrib.sites is not installed. Refs #16412.
form.save(domain_override='example.com')
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Forgot your password?')
self.assertEqual(mail.outbox[0].bcc, ['site_monitor@example.com'])
self.assertEqual(mail.outbox[0].content_subtype, "plain")
def test_preserve_username_case(self):
"""
Preserve the case of the user name (before the @ in the email address)
when creating a user (#5605).
"""
user = User.objects.create_user('forms_test2', 'tesT@EXAMple.com', 'test')
self.assertEqual(user.email, 'tesT@example.com')
user = User.objects.create_user('forms_test3', 'tesT', 'test')
self.assertEqual(user.email, 'tesT')
def test_inactive_user(self):
"""
Inactive user cannot receive password reset email.
"""
(user, username, email) = self.create_dummy_user()
user.is_active = False
user.save()
form = PasswordResetForm({'email': email})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(mail.outbox), 0)
def test_unusable_password(self):
user = User.objects.create_user('testuser', 'test@example.com', 'test')
data = {"email": "test@example.com"}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
user.set_unusable_password()
user.save()
form = PasswordResetForm(data)
# The form itself is valid, but no email is sent
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(mail.outbox), 0)
def test_save_plaintext_email(self):
"""
Test the PasswordResetForm.save() method with no html_email_template_name
parameter passed in.
Test to ensure original behavior is unchanged after the parameter was added.
"""
(user, username, email) = self.create_dummy_user()
form = PasswordResetForm({"email": email})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0].message()
self.assertFalse(message.is_multipart())
self.assertEqual(message.get_content_type(), 'text/plain')
self.assertEqual(message.get('subject'), 'Custom password reset on example.com')
self.assertEqual(len(mail.outbox[0].alternatives), 0)
self.assertEqual(message.get_all('to'), [email])
self.assertTrue(re.match(r'^http://example.com/reset/[\w+/-]', message.get_payload()))
def test_save_html_email_template_name(self):
"""
Test the PasswordResetForm.save() method with html_email_template_name
parameter specified.
Test to ensure that a multipart email is sent with both text/plain
and text/html parts.
"""
(user, username, email) = self.create_dummy_user()
form = PasswordResetForm({"email": email})
self.assertTrue(form.is_valid())
form.save(html_email_template_name='registration/html_password_reset_email.html')
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(len(mail.outbox[0].alternatives), 1)
message = mail.outbox[0].message()
self.assertEqual(message.get('subject'), 'Custom password reset on example.com')
self.assertEqual(len(message.get_payload()), 2)
self.assertTrue(message.is_multipart())
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
self.assertEqual(message.get_all('to'), [email])
self.assertTrue(re.match(r'^http://example.com/reset/[\w/-]+', message.get_payload(0).get_payload()))
self.assertTrue(re.match(
r'^<html><a href="http://example.com/reset/[\w/-]+/">Link</a></html>$',
message.get_payload(1).get_payload()
))
@override_settings(AUTH_USER_MODEL='auth_tests.CustomEmailField')
def test_custom_email_field(self):
email = 'test@mail.com'
CustomEmailField.objects.create_user('test name', 'test password', email)
form = PasswordResetForm({'email': email})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(form.cleaned_data['email'], email)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, [email])
def test_html_autocomplete_attributes(self):
form = PasswordResetForm()
self.assertEqual(form.fields['email'].widget.attrs['autocomplete'], 'email')
class ReadOnlyPasswordHashTest(SimpleTestCase):
def test_bug_19349_render_with_none_value(self):
# Rendering the widget with value set to None
# mustn't raise an exception.
widget = ReadOnlyPasswordHashWidget()
html = widget.render(name='password', value=None, attrs={})
self.assertIn(_("No password set."), html)
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.PBKDF2PasswordHasher'])
def test_render(self):
widget = ReadOnlyPasswordHashWidget()
value = 'pbkdf2_sha256$100000$a6Pucb1qSFcD$WmCkn9Hqidj48NVe5x0FEM6A9YiOqQcl/83m2Z5udm0='
self.assertHTMLEqual(
widget.render('name', value, {'id': 'id_password'}),
"""
<div id="id_password">
<strong>algorithm</strong>: pbkdf2_sha256
<strong>iterations</strong>: 100000
<strong>salt</strong>: a6Pucb******
<strong>hash</strong>: WmCkn9**************************************
</div>
"""
)
def test_readonly_field_has_changed(self):
field = ReadOnlyPasswordHashField()
self.assertFalse(field.has_changed('aaa', 'bbb'))
class AdminPasswordChangeFormTest(TestDataMixin, TestCase):
@mock.patch('django.contrib.auth.password_validation.password_changed')
def test_success(self, password_changed):
user = User.objects.get(username='testclient')
data = {
'password1': 'test123',
'password2': 'test123',
}
form = AdminPasswordChangeForm(user, data)
self.assertTrue(form.is_valid())
form.save(commit=False)
self.assertEqual(password_changed.call_count, 0)
form.save()
self.assertEqual(password_changed.call_count, 1)
def test_password_whitespace_not_stripped(self):
user = User.objects.get(username='testclient')
data = {
'password1': ' pass ',
'password2': ' pass ',
}
form = AdminPasswordChangeForm(user, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['password1'], data['password1'])
self.assertEqual(form.cleaned_data['password2'], data['password2'])
def test_non_matching_passwords(self):
user = User.objects.get(username='testclient')
data = {'password1': 'password1', 'password2': 'password2'}
form = AdminPasswordChangeForm(user, data)
self.assertEqual(form.errors['password2'], [form.error_messages['password_mismatch']])
def test_missing_passwords(self):
user = User.objects.get(username='testclient')
data = {'password1': '', 'password2': ''}
form = AdminPasswordChangeForm(user, data)
required_error = [Field.default_error_messages['required']]
self.assertEqual(form.errors['password1'], required_error)
self.assertEqual(form.errors['password2'], required_error)
def test_one_password(self):
user = User.objects.get(username='testclient')
form1 = AdminPasswordChangeForm(user, {'password1': '', 'password2': 'test'})
required_error = [Field.default_error_messages['required']]
self.assertEqual(form1.errors['password1'], required_error)
self.assertNotIn('password2', form1.errors)
form2 = AdminPasswordChangeForm(user, {'password1': 'test', 'password2': ''})
self.assertEqual(form2.errors['password2'], required_error)
self.assertNotIn('password1', form2.errors)
def test_html_autocomplete_attributes(self):
user = User.objects.get(username='testclient')
form = AdminPasswordChangeForm(user)
tests = (
('password1', 'new-password'),
('password2', 'new-password'),
)
for field_name, autocomplete in tests:
with self.subTest(field_name=field_name, autocomplete=autocomplete):
self.assertEqual(form.fields[field_name].widget.attrs['autocomplete'], autocomplete)
|
# encoding=utf8
# pylint: disable=anomalous-backslash-in-string, old-style-class
import math
__all__ = ['ChungReynolds']
class ChungReynolds:
r"""Implementation of Chung Reynolds functions.
Date: 2018
Authors: Lucija Brezočnik
License: MIT
Function: **Chung Reynolds function**
:math:`f(\mathbf{x}) = \left(\sum_{i=1}^D x_i^2\right)^2`
**Input domain:**
The function can be defined on any input domain but it is usually
evaluated on the hypercube :math:`x_i ∈ [-100, 100]`, for all :math:`i = 1, 2,..., D`
**Global minimum:** :math:`f(x^*) = 0`, at :math:`x^* = (0,...,0)`
LaTeX formats:
Inline:
$f(\mathbf{x}) = \left(\sum_{i=1}^D x_i^2\right)^2$
Equation:
\begin{equation} f(\mathbf{x}) = \left(\sum_{i=1}^D x_i^2\right)^2 \end{equation}
Domain:
$-100 \leq x_i \leq 100$
Reference paper:
Jamil, M., and Yang, X. S. (2013).
A literature survey of benchmark functions for global optimisation problems.
International Journal of Mathematical Modelling and Numerical Optimisation,
4(2), 150-194.
"""
def __init__(self, Lower=-100.0, Upper=100.0):
self.Lower = Lower
self.Upper = Upper
@classmethod
def function(cls):
def evaluate(D, sol):
val = 0.0
for i in range(D):
val += math.pow(sol[i], 2)
return math.pow(val, 2)
return evaluate
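# Hedged usage sketch (not part of the original module): evaluating the
# benchmark at one sample point. The variable names below are illustrative
# assumptions, not part of the published API.
if __name__ == "__main__":
    dimension = 3
    solution = [1.0, 2.0, 3.0]  # any point inside the [-100, 100]^D hypercube
    evaluate = ChungReynolds.function()
    # (1^2 + 2^2 + 3^2)^2 = 14^2 = 196
    print(evaluate(dimension, solution))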
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# DO NOT EDIT -- GENERATED BY CMake -- Change the CMakeLists.txt file if needed
"""Automatically generated source lists for bigtable_client - DO NOT EDIT."""
bigtable_client_hdrs = [
"admin_client.h",
"app_profile_config.h",
"async_row_reader.h",
"cell.h",
"client_options.h",
"cluster_config.h",
"cluster_list_responses.h",
"column_family.h",
"completion_queue.h",
"data_client.h",
"expr.h",
"filters.h",
"iam_binding.h",
"iam_policy.h",
"idempotent_mutation_policy.h",
"instance_admin.h",
"instance_admin_client.h",
"instance_config.h",
"instance_list_responses.h",
"instance_update_config.h",
"internal/async_bulk_apply.h",
"internal/async_longrunning_op.h",
"internal/async_poll_op.h",
"internal/async_retry_multi_page.h",
"internal/async_retry_op.h",
"internal/async_retry_unary_rpc_and_poll.h",
"internal/bulk_mutator.h",
"internal/client_options_defaults.h",
"internal/common_client.h",
"internal/conjunction.h",
"internal/google_bytes_traits.h",
"internal/prefix_range_end.h",
"internal/readrowsparser.h",
"internal/rowreaderiterator.h",
"internal/rpc_policy_parameters.h",
"internal/rpc_policy_parameters.inc",
"internal/unary_client_utils.h",
"metadata_update_policy.h",
"mutation_batcher.h",
"mutations.h",
"polling_policy.h",
"read_modify_write_rule.h",
"row.h",
"row_key.h",
"row_key_sample.h",
"row_range.h",
"row_reader.h",
"row_set.h",
"rpc_backoff_policy.h",
"rpc_retry_policy.h",
"table.h",
"table_admin.h",
"table_config.h",
"version.h",
"version_info.h",
]
bigtable_client_srcs = [
"admin_client.cc",
"app_profile_config.cc",
"client_options.cc",
"cluster_config.cc",
"data_client.cc",
"expr.cc",
"iam_binding.cc",
"iam_policy.cc",
"idempotent_mutation_policy.cc",
"instance_admin.cc",
"instance_admin_client.cc",
"instance_config.cc",
"instance_update_config.cc",
"internal/async_bulk_apply.cc",
"internal/bulk_mutator.cc",
"internal/common_client.cc",
"internal/google_bytes_traits.cc",
"internal/prefix_range_end.cc",
"internal/readrowsparser.cc",
"internal/rowreaderiterator.cc",
"metadata_update_policy.cc",
"mutation_batcher.cc",
"mutations.cc",
"polling_policy.cc",
"row_range.cc",
"row_reader.cc",
"row_set.cc",
"rpc_backoff_policy.cc",
"rpc_retry_policy.cc",
"table.cc",
"table_admin.cc",
"table_config.cc",
"version.cc",
]
|
class Device:
def __init__(self, id=None, token=None, platform=None, endpoint=None, created_at=None, updated_at=None):
self.id = id
self.token = token
self.platform = platform
self.endpoint = endpoint
self.created_at = created_at
self.updated_at = updated_at
|
from distutils.version import LooseVersion
import os
import importlib
import logging
import sys
from django.core.management.base import BaseCommand
from django.utils.version import get_version
from django_rq.queues import get_queues
from django_rq.workers import get_exception_handlers
from redis.exceptions import ConnectionError
from rq import use_connection
from rq.utils import ColorizingStreamHandler
# Setup logging for RQWorker if not already configured
logger = logging.getLogger('rq.worker')
if not logger.handlers:
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(fmt='%(asctime)s %(message)s',
datefmt='%H:%M:%S')
handler = ColorizingStreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
# Copied from rq.utils
def import_attribute(name):
"""Return an attribute from a dotted path name (e.g. "path.to.func")."""
module_name, attribute = name.rsplit('.', 1)
module = importlib.import_module(module_name)
return getattr(module, attribute)
class Command(BaseCommand):
"""
Runs RQ workers on specified queues. Note that all queues passed into a
single rqworker command must share the same connection.
Example usage:
python manage.py rqworker high medium low
"""
args = '<queue queue ...>'
def add_arguments(self, parser):
parser.add_argument('--worker-class', action='store', dest='worker_class',
default='rq.Worker', help='RQ Worker class to use')
parser.add_argument('--pid', action='store', dest='pid',
default=None, help='PID file to write the worker`s pid into')
parser.add_argument('--burst', action='store_true', dest='burst',
default=False, help='Run worker in burst mode')
parser.add_argument('--name', action='store', dest='name',
default=None, help='Name of the worker')
parser.add_argument('--queue-class', action='store', dest='queue_class',
default='django_rq.queues.DjangoRQ', help='Queues class to use')
parser.add_argument('--worker-ttl', action='store', type=int,
dest='worker_ttl', default=420,
help='Default worker timeout to be used')
if LooseVersion(get_version()) >= LooseVersion('1.10'):
parser.add_argument('args', nargs='*', type=str,
help='The queues to work on, separated by space')
def handle(self, *args, **options):
pid = options.get('pid')
if pid:
with open(os.path.expanduser(pid), "w") as fp:
fp.write(str(os.getpid()))
try:
# Instantiate a worker
worker_class = import_attribute(options['worker_class'])
queues = get_queues(*args, queue_class=import_attribute(options['queue_class']))
w = worker_class(
queues,
connection=queues[0].connection,
name=options['name'],
exception_handlers=get_exception_handlers() or None,
default_worker_ttl=options['worker_ttl']
)
# Call use_connection to push the redis connection into LocalStack
# without this, jobs using RQ's get_current_job() will fail
use_connection(w.connection)
w.work(burst=options.get('burst', False))
except ConnectionError as e:
print(e)
sys.exit(1)
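# Hedged usage sketch (an assumption, not part of this module): django_rq reads
# queue connection settings from the project's settings.py, typically e.g.
#
#   RQ_QUEUES = {
#       'default': {'HOST': 'localhost', 'PORT': 6379, 'DB': 0},
#       'high': {'HOST': 'localhost', 'PORT': 6379, 'DB': 0},
#   }
#
# after which `python manage.py rqworker high default` starts one worker that
# listens on both queues over the same Redis connection, as required above.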
|
import torch
import argparse
# ----- Parser -----
def parser():
PARSER = argparse.ArgumentParser(description='Training parameters.')
# Dataset
PARSER.add_argument('--dataset', default='CIFAR10', type=str,
choices=['CIFAR10', 'CelebA', 'Imagenette', 'ImageNet32', 'ImageNet64'],
help="Data to be used.")
PARSER.add_argument('--img_resize', default=32, type=int,
help='Change image resolution.')
# Model
PARSER.add_argument('--model', default='VAE', type=str,
choices=['VAE', 'srVAE'],
help="Model to be used.")
PARSER.add_argument('--network', default='densenet32', type=str,
choices=['densenet32', 'densenet16x32'],
help="Neural Network architecture to be used.")
# Prior
PARSER.add_argument('--prior', default='MixtureOfGaussians', type=str,
choices=['StandardNormal', 'MixtureOfGaussians', 'RealNVP'],
help='Prior type.')
PARSER.add_argument('--z_dim', default=1024, type=int,
help='Dimensionality of z latent space.')
PARSER.add_argument('--u_dim', default=1024, type=int,
                        help='Dimensionality of u latent space.')
# data likelihood
PARSER.add_argument('--likelihood', default='dmol', type=str,
choices=['dmol'],
help="Type of likelihood.")
PARSER.add_argument('--iw_test', default=512, type=int,
help="Number of Importance Weighting samples used for approximating the test log-likelihood.")
# Training Parameters
PARSER.add_argument('--batch_size', default=32, type=int,
help='Batch size.')
PARSER.add_argument('--epochs', default=2000, type=int,
help='Number of training epochs.')
# General Configs
PARSER.add_argument('--seed', default=None, type=int,
help='Fix random seed.')
PARSER.add_argument('--n_samples', default=8, type=int,
help='Number of generated samples.')
PARSER.add_argument('--log_interval', default=True, type=bool,
help='Print progress on every batch.')
PARSER.add_argument('--device', default=None, type=str,
choices=['cpu', 'cuda'],
help='Device to run the experiment.')
PARSER.add_argument('--use_tb', default=True, type=bool,
help='Use TensorBoard.')
PARSER.add_argument('--tags', default='logs', type=str,
help='Run tags.')
ARGS = PARSER.parse_args()
# Check device
if ARGS.device is None:
ARGS.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
return ARGS
args = parser()
if __name__ == "__main__":
pass
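# Hedged usage sketch (an assumption, not the project's documented CLI): a
# training script importing this parser might be invoked as
#   python train.py --model srVAE --dataset CIFAR10 --batch_size 64
# When --device is omitted, args.device falls back to torch.device("cuda")
# if a GPU is available and to the CPU otherwise, as implemented above.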
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import sys
from dashboard.common import testing_common
from dashboard.common import utils
from dashboard.models import histogram
from tracing.value.diagnostics import reserved_infos
class SparseDiagnosticTest(testing_common.TestCase):
"""Test case for functions in SparseDiagnostic."""
def setUp(self):
super(SparseDiagnosticTest, self).setUp()
self.SetCurrentUser('foo@bar.com', is_admin=True)
def _AddMockData(self, test_key):
data_samples = {
'owners': [
{
'type': 'GenericSet',
'guid': '1',
'values': ['1']
},
{
'type': 'GenericSet',
'guid': '1',
'values': ['2']
},
{
'type': 'GenericSet',
'guid': '1',
'values': ['3']
},
],
'bugs': [
{
'type': 'GenericSet',
'guid': '1',
'values': ['a']
},
{
'type': 'GenericSet',
'guid': '1',
'values': ['b']
},
{
'type': 'GenericSet',
'guid': '1',
'values': ['c']
},
]
}
for k, diagnostic_samples in data_samples.iteritems():
for i in xrange(len(diagnostic_samples)):
start_revision = i * 10
end_revision = (i + 1) * 10 - 1
if i == len(diagnostic_samples) - 1:
end_revision = sys.maxint
e = histogram.SparseDiagnostic(
data=diagnostic_samples[i], test=test_key,
start_revision=start_revision, end_revision=end_revision,
name=k, internal_only=False)
e.put()
def testFixupDiagnostics_Middle_FixesRange(self):
test_key = utils.TestKey('Chromium/win7/foo')
self._AddMockData(test_key)
data = {
'type': 'GenericSet',
'guid': '1',
'values': ['10']
}
e = histogram.SparseDiagnostic(
data=data, test=test_key,
start_revision=5, end_revision=sys.maxint,
name='owners', internal_only=False)
e.put()
histogram.SparseDiagnostic.FixDiagnostics(test_key).get_result()
expected = {
'owners': [(0, 4), (5, 9), (10, 19), (20, sys.maxint)],
'bugs': [(0, 9), (10, 19), (20, sys.maxint)],
}
diags = histogram.SparseDiagnostic.query().fetch()
for d in diags:
self.assertIn((d.start_revision, d.end_revision), expected[d.name])
expected[d.name].remove((d.start_revision, d.end_revision))
self.assertEqual(0, len(expected['owners']))
self.assertEqual(0, len(expected['bugs']))
def testFixupDiagnostics_End_FixesRange(self):
test_key = utils.TestKey('Chromium/win7/foo')
self._AddMockData(test_key)
data = {
'type': 'GenericSet',
'guid': '1',
'values': ['10']
}
e = histogram.SparseDiagnostic(
data=data, test=test_key,
start_revision=100, end_revision=sys.maxint,
name='owners', internal_only=False)
e.put()
histogram.SparseDiagnostic.FixDiagnostics(test_key).get_result()
expected = {
'owners': [(0, 9), (10, 19), (20, 99), (100, sys.maxint)],
'bugs': [(0, 9), (10, 19), (20, sys.maxint)],
}
diags = histogram.SparseDiagnostic.query().fetch()
for d in diags:
self.assertIn((d.start_revision, d.end_revision), expected[d.name])
expected[d.name].remove((d.start_revision, d.end_revision))
self.assertEqual(0, len(expected['owners']))
self.assertEqual(0, len(expected['bugs']))
def testFixupDiagnostics_DifferentTestPath_NoChange(self):
test_key1 = utils.TestKey('Chromium/win7/1')
test_key2 = utils.TestKey('Chromium/win7/2')
self._AddMockData(test_key1)
self._AddMockData(test_key2)
data = {
'type': 'GenericSet',
'guid': '1',
'values': ['10']
}
e = histogram.SparseDiagnostic(
data=data, test=test_key1,
start_revision=5, end_revision=sys.maxint,
name='owners', internal_only=False)
e.put()
histogram.SparseDiagnostic.FixDiagnostics(test_key2).get_result()
expected = {
'owners': [(0, 9), (10, 19), (20, sys.maxint)],
'bugs': [(0, 9), (10, 19), (20, sys.maxint)],
}
diags = histogram.SparseDiagnostic.query(
histogram.SparseDiagnostic.test == test_key2).fetch()
for d in diags:
self.assertIn((d.start_revision, d.end_revision), expected[d.name])
expected[d.name].remove((d.start_revision, d.end_revision))
self.assertEqual(0, len(expected['owners']))
self.assertEqual(0, len(expected['bugs']))
def testFixupDiagnostics_NotUnique_NoChange(self):
test_key = utils.TestKey('Chromium/win7/foo')
self._AddMockData(test_key)
data = {
'type': 'GenericSet',
'guid': '1',
'values': ['1']
}
e = histogram.SparseDiagnostic(
data=data, test=test_key,
start_revision=5, end_revision=sys.maxint,
name='owners', internal_only=False)
e.put()
histogram.SparseDiagnostic.FixDiagnostics(test_key).get_result()
expected = {
'owners': [(0, 9), (10, 19), (20, sys.maxint)],
'bugs': [(0, 9), (10, 19), (20, sys.maxint)],
}
diags = histogram.SparseDiagnostic.query(
histogram.SparseDiagnostic.test == test_key).fetch()
for d in diags:
self.assertIn((d.start_revision, d.end_revision), expected[d.name])
expected[d.name].remove((d.start_revision, d.end_revision))
self.assertEqual(0, len(expected['owners']))
self.assertEqual(0, len(expected['bugs']))
def testGetMostRecentValuesByNames_ReturnAllData(self):
data_samples = [
{
'type': 'GenericSet',
'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb826',
'values': ['alice@chromium.org']
},
{
'type': 'GenericSet',
'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb827',
'values': ['abc']
}]
test_key = utils.TestKey('Chromium/win7/foo')
entity = histogram.SparseDiagnostic(
data=json.dumps(data_samples[0]), test=test_key, start_revision=1,
end_revision=sys.maxint, id=data_samples[0]['guid'],
name=reserved_infos.OWNERS.name)
entity.put()
entity = histogram.SparseDiagnostic(
data=json.dumps(data_samples[1]), test=test_key, start_revision=1,
end_revision=sys.maxint, id=data_samples[1]['guid'],
name=reserved_infos.BUG_COMPONENTS.name)
entity.put()
lookup_result = histogram.SparseDiagnostic.GetMostRecentValuesByNames(
test_key, set([reserved_infos.OWNERS.name,
reserved_infos.BUG_COMPONENTS.name]))
self.assertEqual(lookup_result.get(reserved_infos.OWNERS.name),
['alice@chromium.org'])
self.assertEqual(lookup_result.get(reserved_infos.BUG_COMPONENTS.name),
['abc'])
def testGetMostRecentValuesByNames_ReturnsNoneIfNoneFound(self):
data_sample = {
'type': 'GenericSet',
'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb826',
'values': ['alice@chromium.org']
}
test_key = utils.TestKey('Chromium/win7/foo')
entity = histogram.SparseDiagnostic(
data=json.dumps(data_sample), test=test_key, start_revision=1,
end_revision=sys.maxint, id=data_sample['guid'],
name=reserved_infos.OWNERS.name)
entity.put()
lookup_result = histogram.SparseDiagnostic.GetMostRecentValuesByNames(
test_key, set([reserved_infos.OWNERS.name,
reserved_infos.BUG_COMPONENTS.name]))
self.assertEqual(lookup_result.get(reserved_infos.OWNERS.name),
['alice@chromium.org'])
self.assertIsNone(lookup_result.get(reserved_infos.BUG_COMPONENTS.name))
def testGetMostRecentValuesByNames_ReturnsNoneIfNoName(self):
data_sample = {
'guid': 'abc',
'osName': 'linux',
'type': 'DeviceInfo'
}
test_key = utils.TestKey('Chromium/win7/foo')
entity = histogram.SparseDiagnostic(
data=json.dumps(data_sample), test=test_key, start_revision=1,
end_revision=sys.maxint, id=data_sample['guid'])
entity.put()
lookup_result = histogram.SparseDiagnostic.GetMostRecentValuesByNames(
test_key, set([reserved_infos.OWNERS.name,
reserved_infos.BUG_COMPONENTS.name]))
self.assertIsNone(lookup_result.get(reserved_infos.OWNERS.name))
self.assertIsNone(lookup_result.get(reserved_infos.BUG_COMPONENTS.name))
def testGetMostRecentValuesByNames_RaisesErrorIfDuplicateName(self):
data_samples = [
{
'type': 'GenericSet',
'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb826',
'values': ['alice@chromium.org']
},
{
'type': 'GenericSet',
'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb827',
'values': ['bob@chromium.org']
}]
test_key = utils.TestKey('Chromium/win7/foo')
entity = histogram.SparseDiagnostic(
data=json.dumps(data_samples[0]), test=test_key, start_revision=1,
end_revision=sys.maxint, id=data_samples[0]['guid'],
name=reserved_infos.OWNERS.name)
entity.put()
entity = histogram.SparseDiagnostic(
data=json.dumps(data_samples[1]), test=test_key, start_revision=1,
end_revision=sys.maxint, id=data_samples[1]['guid'],
name=reserved_infos.OWNERS.name)
entity.put()
self.assertRaises(
AssertionError,
histogram.SparseDiagnostic.GetMostRecentValuesByNames,
test_key,
set([reserved_infos.OWNERS.name, reserved_infos.BUG_COMPONENTS.name]))
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'a6j(qtzl$#pd2g^fm+=g27^^r&%gz6sh!o45ekij=--bj)^qx$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
|
"""Provide functions for filtering."""
from .stats_filter import stats_filter
from .topology_filter import topology_filter
from .run_filters import run_filters
|
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
from libcxx.test.dsl import *
import platform
import re
import shutil
import subprocess
import sys
_isClang = lambda cfg: '__clang__' in compilerMacros(cfg) and '__apple_build_version__' not in compilerMacros(cfg)
_isAppleClang = lambda cfg: '__apple_build_version__' in compilerMacros(cfg)
_isGCC = lambda cfg: '__GNUC__' in compilerMacros(cfg) and '__clang__' not in compilerMacros(cfg)
_isMSVC = lambda cfg: '_MSC_VER' in compilerMacros(cfg)
_msvcVersion = lambda cfg: (int(compilerMacros(cfg)['_MSC_VER']) // 100, int(compilerMacros(cfg)['_MSC_VER']) % 100)
DEFAULT_FEATURES = [
Feature(name='fcoroutines-ts',
when=lambda cfg: hasCompileFlag(cfg, '-fcoroutines-ts') and
featureTestMacros(cfg, flags='-fcoroutines-ts').get('__cpp_coroutines', 0) >= 201703,
actions=[AddCompileFlag('-fcoroutines-ts')]),
Feature(name='thread-safety',
when=lambda cfg: hasCompileFlag(cfg, '-Werror=thread-safety'),
actions=[AddCompileFlag('-Werror=thread-safety')]),
Feature(name='diagnose-if-support',
when=lambda cfg: hasCompileFlag(cfg, '-Wuser-defined-warnings'),
actions=[AddCompileFlag('-Wuser-defined-warnings')]),
Feature(name='has-fblocks', when=lambda cfg: hasCompileFlag(cfg, '-fblocks')),
Feature(name='-fsized-deallocation', when=lambda cfg: hasCompileFlag(cfg, '-fsized-deallocation')),
Feature(name='-faligned-allocation', when=lambda cfg: hasCompileFlag(cfg, '-faligned-allocation')),
Feature(name='fdelayed-template-parsing', when=lambda cfg: hasCompileFlag(cfg, '-fdelayed-template-parsing')),
Feature(name='libcpp-no-concepts', when=lambda cfg: featureTestMacros(cfg).get('__cpp_concepts', 0) < 201907),
Feature(name='libcpp-no-coroutines', when=lambda cfg: featureTestMacros(cfg).get('__cpp_impl_coroutine', 0) < 201902),
Feature(name='has-fobjc-arc', when=lambda cfg: hasCompileFlag(cfg, '-xobjective-c++ -fobjc-arc') and
sys.platform.lower().strip() == 'darwin'), # TODO: this doesn't handle cross-compiling to Apple platforms.
Feature(name='objective-c++', when=lambda cfg: hasCompileFlag(cfg, '-xobjective-c++ -fobjc-arc')),
Feature(name='non-lockfree-atomics',
when=lambda cfg: sourceBuilds(cfg, """
#include <atomic>
struct Large { int storage[100]; };
std::atomic<Large> x;
int main(int, char**) { (void)x.load(); return 0; }
""")),
# TODO: Remove this feature once compiler-rt includes __atomic_is_lockfree()
# on all supported platforms.
Feature(name='is-lockfree-runtime-function',
when=lambda cfg: sourceBuilds(cfg, """
#include <atomic>
struct Large { int storage[100]; };
std::atomic<Large> x;
int main(int, char**) { return x.is_lock_free(); }
""")),
# Some tests rely on creating shared libraries which link in the C++ Standard Library. In some
# cases, this doesn't work (e.g. if the library was built as a static archive and wasn't compiled
# as position independent). This feature informs the test suite of whether it's possible to create
# a shared library in a shell test by using the '-shared' compiler flag.
#
# Note: To implement this check properly, we need to make sure that we use something inside the
# compiled library, not only in the headers. It should be safe to assume that all implementations
# define `operator new` in the compiled library.
Feature(name='cant-build-shared-library',
when=lambda cfg: not sourceBuilds(cfg, """
void f() { new int(3); }
""", ['-shared'])),
Feature(name='apple-clang', when=_isAppleClang),
Feature(name=lambda cfg: 'apple-clang-{__clang_major__}'.format(**compilerMacros(cfg)), when=_isAppleClang),
Feature(name=lambda cfg: 'apple-clang-{__clang_major__}.{__clang_minor__}'.format(**compilerMacros(cfg)), when=_isAppleClang),
Feature(name=lambda cfg: 'apple-clang-{__clang_major__}.{__clang_minor__}.{__clang_patchlevel__}'.format(**compilerMacros(cfg)), when=_isAppleClang),
Feature(name='clang', when=_isClang,
actions=[AddCompileFlag('-D_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER')]),
Feature(name=lambda cfg: 'clang-{__clang_major__}'.format(**compilerMacros(cfg)), when=_isClang),
Feature(name=lambda cfg: 'clang-{__clang_major__}.{__clang_minor__}'.format(**compilerMacros(cfg)), when=_isClang),
Feature(name=lambda cfg: 'clang-{__clang_major__}.{__clang_minor__}.{__clang_patchlevel__}'.format(**compilerMacros(cfg)), when=_isClang),
Feature(name='gcc', when=_isGCC),
Feature(name=lambda cfg: 'gcc-{__GNUC__}'.format(**compilerMacros(cfg)), when=_isGCC),
Feature(name=lambda cfg: 'gcc-{__GNUC__}.{__GNUC_MINOR__}'.format(**compilerMacros(cfg)), when=_isGCC),
Feature(name=lambda cfg: 'gcc-{__GNUC__}.{__GNUC_MINOR__}.{__GNUC_PATCHLEVEL__}'.format(**compilerMacros(cfg)), when=_isGCC),
Feature(name='msvc', when=_isMSVC),
Feature(name=lambda cfg: 'msvc-{}'.format(*_msvcVersion(cfg)), when=_isMSVC),
Feature(name=lambda cfg: 'msvc-{}.{}'.format(*_msvcVersion(cfg)), when=_isMSVC),
]
# Deduce and add the test features that are implied by the #defines in
# the <__config_site> header.
#
# For each macro of the form `_LIBCPP_XXX_YYY_ZZZ` defined below that
# is defined after including <__config_site>, add a Lit feature called
# `libcpp-xxx-yyy-zzz`. When a macro is defined to a specific value
# (e.g. `_LIBCPP_ABI_VERSION=2`), the feature is `libcpp-xxx-yyy-zzz=<value>`.
macros = {
'_LIBCPP_HAS_NO_MONOTONIC_CLOCK': 'libcpp-has-no-monotonic-clock',
'_LIBCPP_HAS_NO_THREADS': 'libcpp-has-no-threads',
'_LIBCPP_HAS_THREAD_API_EXTERNAL': 'libcpp-has-thread-api-external',
'_LIBCPP_HAS_THREAD_API_PTHREAD': 'libcpp-has-thread-api-pthread',
'_LIBCPP_NO_VCRUNTIME': 'libcpp-no-vcruntime',
'_LIBCPP_ABI_VERSION': 'libcpp-abi-version',
'_LIBCPP_ABI_UNSTABLE': 'libcpp-abi-unstable',
'_LIBCPP_HAS_NO_FILESYSTEM_LIBRARY': 'libcpp-has-no-filesystem-library',
'_LIBCPP_HAS_NO_RANDOM_DEVICE': 'libcpp-has-no-random-device',
'_LIBCPP_HAS_NO_LOCALIZATION': 'libcpp-has-no-localization',
'_LIBCPP_HAS_NO_WIDE_CHARACTERS': 'libcpp-has-no-wide-characters',
'_LIBCPP_HAS_NO_INCOMPLETE_FORMAT': 'libcpp-has-no-incomplete-format',
'_LIBCPP_HAS_NO_INCOMPLETE_RANGES': 'libcpp-has-no-incomplete-ranges',
'_LIBCPP_HAS_NO_UNICODE': 'libcpp-has-no-unicode',
}
for macro, feature in macros.items():
DEFAULT_FEATURES += [
Feature(name=lambda cfg, m=macro, f=feature: f + (
'={}'.format(compilerMacros(cfg)[m]) if compilerMacros(cfg)[m] else ''
),
when=lambda cfg, m=macro: m in compilerMacros(cfg),
# FIXME: This is a hack that should be fixed using module maps.
# If modules are enabled then we have to lift all of the definitions
# in <__config_site> onto the command line.
actions=lambda cfg, m=macro: [
AddCompileFlag('-Wno-macro-redefined -D{}'.format(m) + (
'={}'.format(compilerMacros(cfg)[m]) if compilerMacros(cfg)[m] else ''
))
]
)
]
# Mapping from canonical locale names (used in the tests) to possible locale
# names on various systems. Each locale is considered supported if any of the
# alternative names is supported.
locales = {
'en_US.UTF-8': ['en_US.UTF-8', 'en_US.utf8', 'English_United States.1252'],
'fr_FR.UTF-8': ['fr_FR.UTF-8', 'fr_FR.utf8', 'French_France.1252'],
'ru_RU.UTF-8': ['ru_RU.UTF-8', 'ru_RU.utf8', 'Russian_Russia.1251'],
'zh_CN.UTF-8': ['zh_CN.UTF-8', 'zh_CN.utf8', 'Chinese_China.936'],
'fr_CA.ISO8859-1': ['fr_CA.ISO8859-1', 'French_Canada.1252'],
'cs_CZ.ISO8859-2': ['cs_CZ.ISO8859-2', 'Czech_Czech Republic.1250']
}
for locale, alts in locales.items():
# Note: Using alts directly in the lambda body here will bind it to the value at the
# end of the loop. Assigning it to a default argument works around this issue.
DEFAULT_FEATURES.append(Feature(name='locale.{}'.format(locale),
when=lambda cfg, alts=alts: hasAnyLocale(cfg, alts)))
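# A minimal illustration of the late-binding note above (an aside, not used by
# this config): without the default argument every lambda would observe the
# last loop value, whereas the default freezes the value per iteration:
#   fns = [lambda: alts for alts in (['a'], ['b'])]            # both return ['b']
#   fns = [lambda alts=alts: alts for alts in (['a'], ['b'])]  # ['a'], then ['b']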
# Add features representing the platform name: darwin, linux, windows, etc...
DEFAULT_FEATURES += [
Feature(name='darwin', when=lambda cfg: '__APPLE__' in compilerMacros(cfg)),
Feature(name='windows', when=lambda cfg: '_WIN32' in compilerMacros(cfg)),
Feature(name='windows-dll', when=lambda cfg: '_WIN32' in compilerMacros(cfg) and not '_LIBCPP_DISABLE_VISIBILITY_ANNOTATIONS' in compilerMacros(cfg)),
Feature(name='linux', when=lambda cfg: '__linux__' in compilerMacros(cfg)),
Feature(name='netbsd', when=lambda cfg: '__NetBSD__' in compilerMacros(cfg)),
Feature(name='freebsd', when=lambda cfg: '__FreeBSD__' in compilerMacros(cfg))
]
# Add features representing the build host platform name.
# The build host could differ from the target platform for cross-compilation.
DEFAULT_FEATURES += [
Feature(name='buildhost={}'.format(sys.platform.lower().strip())),
    # sys.platform can be represented by a "sub-system" on a Windows host, such as 'win32', 'cygwin' or 'mingw'.
    # Here is a consolidated feature for the build host platform name on Windows.
Feature(name='buildhost=windows', when=lambda cfg: platform.system().lower().startswith('windows'))
]
# Detect whether GDB is on the system, has Python scripting and supports
# adding breakpoint commands. If so add a substitution to access it.
def check_gdb(cfg):
gdb_path = shutil.which('gdb')
if gdb_path is None:
return False
# Check that we can set breakpoint commands, which was added in 8.3.
# Using the quit command here means that gdb itself exits, not just
# the "python <...>" command.
test_src = """\
try:
gdb.Breakpoint(\"main\").commands=\"foo\"
except AttributeError:
gdb.execute(\"quit 1\")
gdb.execute(\"quit\")"""
try:
stdout = subprocess.check_output(
[gdb_path, "-ex", "python " + test_src, "--batch"],
stderr=subprocess.DEVNULL, universal_newlines=True)
except subprocess.CalledProcessError:
# We can't set breakpoint commands
return False
# Check we actually ran the Python
return not "Python scripting is not supported" in stdout
DEFAULT_FEATURES += [
Feature(name='host-has-gdb-with-python',
when=check_gdb,
actions=[AddSubstitution('%{gdb}', lambda cfg: shutil.which('gdb'))]
)
]
|
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from DataUtility import get_column_names
class LiPolymerDataScaler:
"""
a special class to scale the lithium polymer database
"""
def __init__(self):
self.scaling_dict = {}
self.main_val_params = ["SMILES_wt", "wt_ratio", "inorg_contain_ratio"]
self.main_txt_params = ["structureList", "inorg_name"]
self.main_params = self.main_val_params+self.main_txt_params
self.target_param = "Conductivity"
def mutual_process(self, df):
"""
convert values (to log, etc)
"""
df["Conductivity"] = np.log10(df["Conductivity"].astype('float'))
df["Temperature"] = np.log10(df["Temperature"].astype('float')+273)
# fill Nan by zero
for c in self.main_params:
target_columns = get_column_names(df, c)
df[target_columns] = df[target_columns].fillna(0)
# convert molecular weight
self.mw_column_list = get_column_names(df, "MWList")
for c in self.mw_column_list:
df[c] = np.log10(df[c].astype('float'))
return df
def fit_transform(self, original_df):
"""
scaling data, etc
Parameters
----------------
original_df: dataframe
dataframe to be scaled
Returns
----------------
df: dataframe
scaled dataframe
"""
df = original_df.copy()
df = self.mutual_process(df)
# fill lacking Molecular weight with average value
self.average_mw = sum(df[self.mw_column_list].sum()) / \
sum(df[self.mw_column_list].count())
for c in self.mw_column_list:
df[c] = df[c].fillna(self.average_mw)
# scaling
for v in self.main_val_params + ["Conductivity", "Temperature"]+self.mw_column_list:
for c in get_column_names(df, v):
sc = StandardScaler()
df[c] = sc.fit_transform(
df[c].astype('float').values.reshape(-1, 1))
self.scaling_dict[c] = sc
# onehot encoding
for v in self.main_txt_params:
df = pd.get_dummies(df, columns=get_column_names(df, v))
self.use_columns = []
for c in ["Conductivity", "Temperature"]+self.main_params + self.mw_column_list+["fp_list"]:
self.use_columns.extend(get_column_names(df, c))
"""
**********************************************************
        delete some columns to simplify machine learning
        the following parameters can be useful for machine learning (10.1021/jacs.9b11442), but they are ignored in this project.
"""
for remove_targets in ["MWList", "wt_ratio", "inorg", "structure", "Temperature"]:
del_columns = get_column_names(df, remove_targets)
for i in del_columns:
self.use_columns.remove(i)
self.tr_df = df
return df
def transform(self, original_df):
"""
scaling data, etc
Parameters
----------------
original_df: dataframe
dataframe to be scaled
Returns
----------------
df: dataframe
scaled dataframe
"""
df = original_df.copy()
df = self.mutual_process(df)
for c in self.mw_column_list:
df[c] = df[c].fillna(self.average_mw)
# scaling
for v in self.main_val_params + ["Conductivity", "Temperature"]+self.mw_column_list:
for c in get_column_names(df, v):
df[c] = self.scaling_dict[c].transform(
df[c].astype('float').values.reshape(-1, 1))
# onehot encoding
for v in self.main_txt_params:
df = pd.get_dummies(df, columns=get_column_names(df, v))
# for lacking columns, add the most frequent vals
lacking_columns = set(self.use_columns)-set(df.columns)
for i in lacking_columns:
df[i] = self.tr_df[i].mode()
return df
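# Hedged usage sketch (an assumption, not part of the original module): the
# scaler follows the familiar sklearn fit_transform/transform convention, so a
# typical workflow on train/test dataframes with the expected columns would be
#
#   scaler = LiPolymerDataScaler()
#   train_scaled = scaler.fit_transform(train_df)  # fits scalers and dummy columns
#   test_scaled = scaler.transform(test_df)        # reuses the fitted state
#   data = train_scaled[scaler.use_columns]        # retained columns (target included)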
|
def test_func(i):
print(i)
    if i > 10:
        return
    else:
        test_func(i + 1)
if __name__ == "__main__":
test_func(2)
|
"""
Format String 2D array
2d array for compositing term-formatted strings
-autoexpanding vertically
-interesting get_item behavior (renders fmtstrs)
-caching behavior eventually
>>> a = FSArray(10, 14)
>>> a.shape
(10, 14)
>>> a[1] = 'i'
>>> a[3:4, :] = ['i' * 14]
>>> a[16:17, :] = ['j' * 14]
>>> a.shape, a[16, 0]
((17, 14), ['j'])
>>> a[200, 1] = ['i']
>>> a[200, 1]
['i']
"""
import sys
import logging
from .formatstring import fmtstr
from .formatstring import normalize_slice
from .formatstring import FmtStr
from typing import (
Any,
Union,
Text,
List,
Sequence,
overload,
Tuple,
cast,
no_type_check,
)
actualize = str
logger = logging.getLogger(__name__)
# TODO check that strings used in arrays don't have tabs or spaces in them!
def slicesize(s):
# type: (slice) -> int
return int((s.stop - s.start) / (s.step if s.step else 1))
def fsarray(strings, *args, **kwargs):
# type: (List[Union[FmtStr, Text]], *Any, **Any) -> FSArray
"""fsarray(list_of_FmtStrs_or_strings, width=None) -> FSArray
Returns a new FSArray of width of the maximum size of the provided
strings, or width provided, and height of the number of strings provided.
If a width is provided, raises a ValueError if any of the strings
are of length greater than this width"""
strings = list(strings)
if "width" in kwargs:
width = kwargs["width"]
del kwargs["width"]
if strings and any(len(s) > width for s in strings):
raise ValueError(f"Those strings won't fit for width {width}")
else:
width = max(len(s) for s in strings) if strings else 0
fstrings = [
s if isinstance(s, FmtStr) else fmtstr(s, *args, **kwargs) for s in strings
]
arr = FSArray(len(fstrings), width, *args, **kwargs)
rows = [
fs.setslice_with_length(0, len(s), s, width)
for fs, s in zip(arr.rows, fstrings)
]
arr.rows = rows
return arr
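# Hedged usage sketch mirroring the docstring above (an aside, not part of the
# original module):
#   fsarray(['ab', 'c'])           # 2-row FSArray, width 2 (longest string)
#   fsarray(['ab', 'c'], width=5)  # 2-row FSArray, width 5
#   fsarray(['abcdef'], width=3)   # raises ValueError: string wider than 3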
class FSArray(Sequence):
"""A 2D array of colored text.
Internally represented by a list of FmtStrs of identical size."""
# TODO add constructor that takes fmtstrs instead of dims
def __init__(self, num_rows, num_columns, *args, **kwargs):
# type: (int, int, *Any, **Any) -> None
self.saved_args, self.saved_kwargs = args, kwargs
self.rows = [
fmtstr("", *args, **kwargs) for _ in range(num_rows)
] # type: List[FmtStr]
self.num_columns = num_columns
@overload
def __getitem__(self, slicetuple):
# type: (int) -> FmtStr
pass
@overload
def __getitem__(self, slicetuple):
# type: (slice) -> List[FmtStr]
pass
@overload
def __getitem__(self, slicetuple):
# type: (Tuple[Union[slice, int], Union[slice, int]]) -> List[FmtStr]
pass
def __getitem__(self, slicetuple):
# type: (Union[int, slice, Tuple[Union[int, slice], Union[int, slice]]]) -> Union[FmtStr, List[FmtStr]]
if isinstance(slicetuple, int):
            if slicetuple < 0:
                # convert a negative index to its positive equivalent
                slicetuple = len(self.rows) + slicetuple
if slicetuple < 0 or slicetuple >= len(self.rows):
raise IndexError("out of bounds")
return self.rows[slicetuple]
if isinstance(slicetuple, slice):
rowslice = normalize_slice(len(self.rows), slicetuple)
return self.rows[rowslice]
(
row_slice_or_int,
col_slice_or_int,
) = slicetuple # type: Tuple[Union[int, slice], Union[int, slice]]
rowslice = normalize_slice(len(self.rows), row_slice_or_int)
colslice = normalize_slice(self.num_columns, col_slice_or_int)
# TODO clean up slices
return [fs[colslice] for fs in self.rows[rowslice]]
def __len__(self):
# type: () -> int
return len(self.rows)
@property
def shape(self):
# type: () -> Tuple[int, int]
"""Tuple of (len(rows, len(num_columns)) numpy-style shape"""
return len(self.rows), self.num_columns
@property
def height(self):
# type: () -> int
"""The number of rows"""
return len(self.rows)
@property
def width(self):
# type: () -> int
"""The number of columns"""
return self.num_columns
# TODO rework this next major version bump
@no_type_check
def __setitem__(self, slicetuple, value):
"""Place a FSArray in a FSArray"""
logger.debug("slice: %r", slicetuple)
if isinstance(slicetuple, slice):
rowslice, colslice = slicetuple, slice(None)
if isinstance(value, (bytes, str)):
raise ValueError(
"if slice is 2D, value must be 2D as in of list type []"
)
elif isinstance(slicetuple, int):
normalize_slice(self.height, slicetuple)
self.rows[slicetuple] = value
return
else:
rowslice, colslice = slicetuple
# temp shim to allow numpy arrays as values
if value.__class__.__name__ == "ndarray":
value = [fmtstr("".join(line)) for line in value]
rowslice = normalize_slice(sys.maxsize, rowslice)
additional_rows = max(0, rowslice.stop - len(self.rows))
self.rows.extend(
[
fmtstr("", *self.saved_args, **self.saved_kwargs)
for _ in range(additional_rows)
]
)
logger.debug("num columns: %r", self.num_columns)
logger.debug("colslice: %r", colslice)
colslice = normalize_slice(self.num_columns, colslice)
if slicesize(colslice) == 0 or slicesize(rowslice) == 0:
return
if slicesize(colslice) > 1 and isinstance(value, str):
raise ValueError(
"""You cannot replace a multi column slice with a
string please use a list [] with strings for the
contents of each row"""
)
if slicesize(rowslice) != len(value):
area = slicesize(rowslice) * slicesize(colslice)
val_len = sum(len(i) for i in value)
grid_value = [fmtstr(" ", bg="cyan") * slicesize(colslice)] * slicesize(
rowslice
)
grid_fsarray = (
self.rows[: rowslice.start]
+ [
fs.setslice_with_length(
colslice.start, colslice.stop, v, self.num_columns
)
for fs, v in zip(self.rows[rowslice], grid_value)
]
+ self.rows[rowslice.stop :]
)
msg = "You are trying to fit this value {} into the region {}: {}".format(
fmtstr("".join(value), bg="cyan"),
fmtstr("").join(grid_value),
"\n ".join(grid_fsarray[x] for x in range(len(self.rows))),
)
raise ValueError(
"""Error you are trying to replace a region of {} rows by {}
columns for and area of {} with a value of len {}. The value
used to replace the region must equal the area of the region
replace.
{}""".format(
rowslice.stop - rowslice.start,
colslice.stop - colslice.start,
area,
val_len,
msg,
)
)
self.rows = (
self.rows[: rowslice.start]
+ [
fs.setslice_with_length(
colslice.start, colslice.stop, v, self.num_columns
)
for fs, v in zip(self.rows[rowslice], value)
]
+ self.rows[rowslice.stop :]
)
def dumb_display(self):
# type: () -> None
"""Prints each row followed by a newline without regard for the terminal window size"""
for line in self.rows:
print(line)
@classmethod
def diff(cls, a, b, ignore_formatting=False):
# type: (FSArray, FSArray, bool) -> Text
"""Returns two FSArrays with differences underlined"""
def underline(x):
# type: (Text) -> Text
return f"\x1b[4m{x}\x1b[0m"
def blink(x):
# type: (Text) -> Text
return f"\x1b[5m{x}\x1b[0m"
a_rows = []
b_rows = []
max_width = max([len(row) for row in a] + [len(row) for row in b])
a_lengths = []
b_lengths = []
for a_row, b_row in zip(a, b):
a_lengths.append(len(a_row))
b_lengths.append(len(b_row))
extra_a = "`" * (max_width - len(a_row))
extra_b = "`" * (max_width - len(b_row))
a_line = ""
b_line = ""
for a_char, b_char in zip(a_row + extra_a, b_row + extra_b):
if ignore_formatting:
a_char_for_eval = a_char.s if isinstance(a_char, FmtStr) else a_char
b_char_for_eval = b_char.s if isinstance(b_char, FmtStr) else b_char
else:
a_char_for_eval = a_char
b_char_for_eval = b_char
if a_char_for_eval == b_char_for_eval:
a_line += actualize(a_char)
b_line += actualize(b_char)
else:
a_line += underline(blink(actualize(a_char)))
b_line += underline(blink(actualize(b_char)))
a_rows.append(a_line)
b_rows.append(b_line)
hdiff = "\n".join(
a_line + " %3d | %3d " % (a_len, b_len) + b_line
for a_line, b_line, a_len, b_len in zip(
a_rows, b_rows, a_lengths, b_lengths
)
)
return hdiff
def simple_format(x):
# type: (Union[FSArray, List[FmtStr]]) -> Text
return "\n".join(actualize(l) for l in x)
if __name__ == "__main__":
a = FSArray(3, 14, bg="blue")
a[0:2, 5:11] = cast(
Tuple[FmtStr, ...],
(fmtstr("hey", "on_blue") + " " + fmtstr("yo", "on_red"), fmtstr("qwe qw")),
)
a.dumb_display()
a = fsarray(["hey", "there"], bg="cyan")
a.dumb_display()
print(FSArray.diff(a, fsarray(["hey", "there "]), ignore_formatting=True))
|
import heapq
from typing import List
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def mergeKLists(self, lists: List[ListNode]) -> ListNode:
heap = []
root = res = ListNode(None)
        for i in range(len(lists)):
            # skip empty lists; the index i breaks ties so nodes are never compared
            if lists[i]:
                heapq.heappush(heap, (lists[i].val, i, lists[i]))
while heap:
m = heapq.heappop(heap)
idx = m[1]
res.next = m[2]
res = res.next
if res.next:
heapq.heappush(heap, (res.next.val, idx, res.next))
return root.next
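# Hedged usage sketch (an assumption, not part of the original solution):
# building the k sorted lists and merging them with the heap above.
#
#   def build(values):
#       head = cur = ListNode(0)
#       for v in values:
#           cur.next = ListNode(v)
#           cur = cur.next
#       return head.next
#
#   merged = Solution().mergeKLists([build([1, 4, 5]), build([1, 3, 4]), build([2, 6])])
#   # traversing merged yields 1 -> 1 -> 2 -> 3 -> 4 -> 4 -> 5 -> 6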
|
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import Union
import numpy as np
import torch
from monai.metrics.utils import *
from monai.utils import MetricReduction
class SurfaceDistanceMetric:
"""
    Compute Surface Distance between two tensors. It supports both multi-class and multi-label tasks.
It supports both symmetric and asymmetric surface distance calculation.
Input `y_pred` (BNHW[D] where N is number of classes) is compared with ground truth `y` (BNHW[D]).
`y_preds` is expected to have binarized predictions and `y` should be in one-hot format.
You can use suitable transforms in ``monai.transforms.post`` first to achieve binarized values.
Args:
include_background: whether to skip distance computation on the first channel of
the predicted output. Defaults to ``False``.
symmetric: whether to calculate the symmetric average surface distance between
`seg_pred` and `seg_gt`. Defaults to ``False``.
        distance_metric: [``"euclidean"``, ``"chessboard"``, ``"taxicab"``]
the metric used to compute surface distance. Defaults to ``"euclidean"``.
reduction: {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
``"mean_channel"``, ``"sum_channel"``}
Define the mode to reduce computation result of 1 batch data. Defaults to ``"mean"``.
"""
def __init__(
self,
include_background: bool = False,
symmetric: bool = False,
distance_metric: str = "euclidean",
reduction: Union[MetricReduction, str] = MetricReduction.MEAN,
) -> None:
super().__init__()
self.include_background = include_background
self.distance_metric = distance_metric
self.symmetric = symmetric
self.reduction = reduction
def __call__(self, y_pred: torch.Tensor, y: torch.Tensor):
"""
Args:
y_pred: input data to compute, typical segmentation model output.
It must be one-hot format and first dim is batch, example shape: [16, 3, 32, 32]. The values
should be binarized.
y: ground truth to compute the distance. It must be one-hot format and first dim is batch.
The values should be binarized.
Raises:
ValueError: when `y` is not a binarized tensor.
ValueError: when `y_pred` has less than three dimensions.
"""
if not torch.all(y_pred.byte() == y_pred):
warnings.warn("y_pred is not a binarized tensor here!")
if not torch.all(y.byte() == y):
raise ValueError("y should be a binarized tensor.")
dims = y_pred.ndimension()
if dims < 3:
raise ValueError("y_pred should have at least three dimensions.")
# compute (BxC) for each channel for each batch
f = compute_average_surface_distance(
y_pred=y_pred,
y=y,
include_background=self.include_background,
symmetric=self.symmetric,
distance_metric=self.distance_metric,
)
# do metric reduction
f, not_nans = do_metric_reduction(f, self.reduction)
return f, not_nans
def compute_average_surface_distance(
y_pred: Union[np.ndarray, torch.Tensor],
y: Union[np.ndarray, torch.Tensor],
include_background: bool = False,
symmetric: bool = False,
distance_metric: str = "euclidean",
):
"""
This function is used to compute the Average Surface Distance from `y_pred` to `y`
under the default setting.
    In addition, if ``symmetric = True`` is set, the average symmetric surface distance between
these two inputs will be returned.
Args:
y_pred: input data to compute, typical segmentation model output.
It must be one-hot format and first dim is batch, example shape: [16, 3, 32, 32]. The values
should be binarized.
        y: ground truth to compute the mean distance. It must be one-hot format and first dim is batch.
The values should be binarized.
include_background: whether to skip distance computation on the first channel of
the predicted output. Defaults to ``False``.
symmetric: whether to calculate the symmetric average surface distance between
`seg_pred` and `seg_gt`. Defaults to ``False``.
        distance_metric: [``"euclidean"``, ``"chessboard"``, ``"taxicab"``]
the metric used to compute surface distance. Defaults to ``"euclidean"``.
"""
if not include_background:
y_pred, y = ignore_background(
y_pred=y_pred,
y=y,
)
y = y.float()
y_pred = y_pred.float()
if y.shape != y_pred.shape:
raise ValueError("y_pred and y should have same shapes.")
batch_size, n_class = y_pred.shape[:2]
asd = np.empty((batch_size, n_class))
for b, c in np.ndindex(batch_size, n_class):
(edges_pred, edges_gt) = get_mask_edges(y_pred[b, c], y[b, c])
surface_distance = get_surface_distance(edges_pred, edges_gt, distance_metric=distance_metric)
if surface_distance.shape == (0,):
avg_surface_distance = np.nan
else:
avg_surface_distance = surface_distance.mean()
if not symmetric:
asd[b, c] = avg_surface_distance
else:
surface_distance_2 = get_surface_distance(edges_gt, edges_pred, distance_metric=distance_metric)
if surface_distance_2.shape == (0,):
avg_surface_distance_2 = np.nan
else:
avg_surface_distance_2 = surface_distance_2.mean()
asd[b, c] = np.mean((avg_surface_distance, avg_surface_distance_2))
return torch.from_numpy(asd)
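# Hedged usage sketch (illustrative only; assumes `torch` and the MONAI-style
# helpers used above are importable in this module):
#
#   y_pred = torch.zeros(1, 2, 16, 16)   # one-hot prediction, batch=1, 2 channels
#   y_pred[0, 1, 4:9, 4:9] = 1
#   y = torch.zeros(1, 2, 16, 16)        # one-hot ground truth
#   y[0, 1, 5:10, 5:10] = 1
#   # (channel 0 left empty for brevity; it is dropped when include_background=False)
#   asd = compute_average_surface_distance(y_pred, y, symmetric=True)
#   # asd has shape (batch, n_class - 1) == (1, 1) here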
|
# -*- coding: utf-8 -*-
"""
wsgi.py
:Created: 12 Jun 2014
:Author: tim
"""
from spyne.server.wsgi import WsgiApplication as _SpyneWsgiApplication
from spyne_smev.server import _AllYourInterfaceDocuments
class WsgiApplication(_SpyneWsgiApplication):
def __init__(self, app, chunked=True, max_content_length=2 * 1024 * 1024,
block_length=8 * 1024):
super(WsgiApplication, self).__init__(app, chunked, max_content_length,
block_length)
self.doc = _AllYourInterfaceDocuments(app.interface)
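# Hedged usage sketch (assumes a spyne ``Application`` instance ``app`` built
# elsewhere, e.g. spyne.Application([SomeService], tns=..., in_protocol=...,
# out_protocol=...); served here with the stdlib wsgiref server):
#
#   from wsgiref.simple_server import make_server
#   wsgi_app = WsgiApplication(app)
#   make_server("0.0.0.0", 8000, wsgi_app).serve_forever()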
|
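# This appears to be Advent of Code 2020, day 23 ("Crab Cups").
# The circle of cups is stored as an implicit singly linked list: cup labels are
# 0-based, and nodes[label].next holds the label of the cup immediately clockwise
# of `label`. This gives O(1) removal/insertion per move, which is what makes the
# 10,000,000-move part 2 feasible.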
class Node:
def __init__(self, next: int):
self.next = next
self.up = False
def MakeNodes(data: str):
values = [int(ch) - 1 for ch in data]
nodes = []
for value in range(len(values)):
index = values.index(value)
next = values[(index + 1) % len(values)]
nodes.append(Node(next))
return nodes, values[0]
def MakeNodes2(data: str):
nodes, current = MakeNodes(data)
next = nodes[current].next
for _ in range(len(nodes) - 2):
next = nodes[next].next
nodes[next].next = len(nodes)
for value in range(len(nodes), 1_000_000):
nodes.append(Node(value + 1))
nodes[999_999].next = current
return nodes, current
def Turn(current: int, nodes):
up = nodes[current].next
firstUp = up
for _ in range(3):
nodes[up].up = True
lastUp = up
up = nodes[up].next
destination = (current - 1) % len(nodes)
while nodes[destination].up:
destination = (destination - 1) % len(nodes)
nodes[current].next = nodes[lastUp].next
nodes[lastUp].next = nodes[destination].next
nodes[destination].next = firstUp
up = firstUp
for _ in range(3):
nodes[up].up = False
up = nodes[up].next
return nodes[current].next
def PrintNodes(current: int, nodes):
print(f"({current + 1})", end='')
index = nodes[current].next
for _ in range(min(len(nodes) - 1, 20)):
print(f" {index + 1}", end='')
index = nodes[index].next
print()
def Answer(nodes):
answer = ''
node = nodes[0].next
for _ in range(len(nodes) - 1):
answer += str(node + 1)
node = nodes[node].next
return answer
def Answer2(nodes):
cup1 = nodes[0].next
cup2 = nodes[cup1].next
return (cup1 + 1) * (cup2 + 1)
TEST = "389125467"
DATA = "487912365"
testNodes, current = MakeNodes(TEST)
for _ in range(100):
current = Turn(current, testNodes)
assert Answer(testNodes) == '67384529'
nodes, current = MakeNodes(DATA)
for _ in range(100):
current = Turn(current, nodes)
print(Answer(nodes))
assert Answer(nodes) == '89573246'
testNodes, current = MakeNodes2(TEST)
for _ in range(10_000_000):
current = Turn(current, testNodes)
assert Answer2(testNodes) == 149245887792
nodes, current = MakeNodes2(DATA)
for _ in range(10_000_000):
current = Turn(current, nodes)
print(Answer2(nodes))
assert Answer2(nodes) == 2029056128
|
# coding: utf-8
from __future__ import unicode_literals
import re
from ..compat import compat_str
from ..utils import int_or_none, str_or_none, try_get
from .common import InfoExtractor
class PalcoMP3BaseIE(InfoExtractor):
_GQL_QUERY_TMPL = """{
artist(slug: "%s") {
%s
}
}"""
_ARTIST_FIELDS_TMPL = """music(slug: "%%s") {
%s
}"""
_MUSIC_FIELDS = """duration
hls
mp3File
musicID
plays
title"""
def _call_api(self, artist_slug, artist_fields):
return self._download_json(
"https://www.palcomp3.com.br/graphql/",
artist_slug,
query={
"query": self._GQL_QUERY_TMPL % (artist_slug, artist_fields),
},
)["data"]
def _parse_music(self, music):
music_id = compat_str(music["musicID"])
title = music["title"]
formats = []
hls_url = music.get("hls")
if hls_url:
formats.append(
{
"url": hls_url,
"protocol": "m3u8_native",
"ext": "mp4",
}
)
mp3_file = music.get("mp3File")
if mp3_file:
formats.append(
{
"url": mp3_file,
}
)
return {
"id": music_id,
"title": title,
"formats": formats,
"duration": int_or_none(music.get("duration")),
"view_count": int_or_none(music.get("plays")),
}
def _real_initialize(self):
self._ARTIST_FIELDS_TMPL = self._ARTIST_FIELDS_TMPL % self._MUSIC_FIELDS
def _real_extract(self, url):
artist_slug, music_slug = re.match(self._VALID_URL, url).groups()
artist_fields = self._ARTIST_FIELDS_TMPL % music_slug
music = self._call_api(artist_slug, artist_fields)["artist"]["music"]
return self._parse_music(music)
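# For illustration (added): after ``_real_initialize`` fills the music fields into
# ``_ARTIST_FIELDS_TMPL``, the query sent by ``_call_api`` for an artist slug
# "some-artist" and music slug "some-song" expands (modulo whitespace) to roughly:
#
#   {
#       artist(slug: "some-artist") {
#           music(slug: "some-song") {
#               duration
#               hls
#               mp3File
#               musicID
#               plays
#               title
#           }
#       }
#   }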
class PalcoMP3IE(PalcoMP3BaseIE):
IE_NAME = "PalcoMP3:song"
_VALID_URL = (
r"https?://(?:www\.)?palcomp3\.com(?:\.br)?/(?P<artist>[^/]+)/(?P<id>[^/?&#]+)"
)
_TESTS = [
{
"url": "https://www.palcomp3.com/maiaraemaraisaoficial/nossas-composicoes-cuida-bem-dela/",
"md5": "99fd6405b2d8fd589670f6db1ba3b358",
"info_dict": {
"id": "3162927",
"ext": "mp3",
"title": "Nossas Composições - CUIDA BEM DELA",
"duration": 210,
"view_count": int,
},
}
]
@classmethod
def suitable(cls, url):
return (
False
if PalcoMP3VideoIE.suitable(url)
else super(PalcoMP3IE, cls).suitable(url)
)
class PalcoMP3ArtistIE(PalcoMP3BaseIE):
IE_NAME = "PalcoMP3:artist"
_VALID_URL = r"https?://(?:www\.)?palcomp3\.com(?:\.br)?/(?P<id>[^/?&#]+)"
_TESTS = [
{
"url": "https://www.palcomp3.com.br/condedoforro/",
"info_dict": {
"id": "358396",
"title": "Conde do Forró",
},
"playlist_mincount": 188,
}
]
_ARTIST_FIELDS_TMPL = """artistID
musics {
nodes {
%s
}
}
name"""
@classmethod
def suitable(cls, url):
return (
False
if re.match(PalcoMP3IE._VALID_URL, url)
else super(PalcoMP3ArtistIE, cls).suitable(url)
)
def _real_extract(self, url):
artist_slug = self._match_id(url)
artist = self._call_api(artist_slug, self._ARTIST_FIELDS_TMPL)["artist"]
def entries():
for music in try_get(artist, lambda x: x["musics"]["nodes"], list) or []:
yield self._parse_music(music)
return self.playlist_result(
entries(), str_or_none(artist.get("artistID")), artist.get("name")
)
class PalcoMP3VideoIE(PalcoMP3BaseIE):
IE_NAME = "PalcoMP3:video"
_VALID_URL = r"https?://(?:www\.)?palcomp3\.com(?:\.br)?/(?P<artist>[^/]+)/(?P<id>[^/?&#]+)/?#clipe"
_TESTS = [
{
"url": "https://www.palcomp3.com/maiaraemaraisaoficial/maiara-e-maraisa-voce-faz-falta-aqui-ao-vivo-em-vicosa-mg/#clipe",
"add_ie": ["Youtube"],
"info_dict": {
"id": "_pD1nR2qqPg",
"ext": "mp4",
"title": "Maiara e Maraisa - Você Faz Falta Aqui - DVD Ao Vivo Em Campo Grande",
"description": "md5:7043342c09a224598e93546e98e49282",
"upload_date": "20161107",
"uploader_id": "maiaramaraisaoficial",
"uploader": "Maiara e Maraisa",
},
}
]
_MUSIC_FIELDS = "youtubeID"
def _parse_music(self, music):
youtube_id = music["youtubeID"]
return self.url_result(youtube_id, "Youtube", youtube_id)
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Data and labels file for various datasets.
"""
import json
import logging
import os
from typing import List
import numpy as np
from fvcore.common.file_io import PathManager
from vissl.data.datasets import get_coco_imgs_labels_info, get_voc_images_labels_info
from vissl.utils.misc import get_json_data_catalog_file
from vissl.utils.slurm import get_slurm_dir
class VisslDatasetCatalog(object):
"""
A catalog that stores information about the datasets and how to obtain them.
It contains a mapping from strings (which are names that identify a dataset,
e.g. "imagenet1k") to a `dict` which contains:
1) mapping of various data splits (train, test, val) to the data source
(path on the disk whether a folder path or a filelist)
2) source of the data (disk_filelist | disk_folder)
The purpose of having this catalog is to make it easy to choose different datasets,
by just using the strings in the config.
"""
__REGISTERED_DATASETS = {}
@staticmethod
def register_json(json_catalog_path):
"""
Args:
            json_catalog_path: a .json filepath that contains the data to be registered
"""
with PathManager.open(json_catalog_path) as fopen:
data_catalog = json.load(fopen)
for key, value in data_catalog.items():
VisslDatasetCatalog.register_data(key, value)
@staticmethod
def register_dict(dict_catalog):
"""
Args:
            dict_catalog: a dict of datasets to be registered
"""
for key, value in dict_catalog.items():
VisslDatasetCatalog.register_data(key, value)
@staticmethod
def register_data(name, data_dict):
"""
Args:
            name (str): the name that identifies a dataset, e.g. "imagenet1k_folder".
            data_dict (dict): the dataset information (splits, paths, source) to
                store under ``name``.
"""
assert isinstance(
data_dict, dict
), "You must register a dictionary with VisslDatasetCatalog.register_dict"
assert (
name not in VisslDatasetCatalog.__REGISTERED_DATASETS
), "Dataset '{}' is already registered!".format(name)
VisslDatasetCatalog.__REGISTERED_DATASETS[name] = data_dict
@staticmethod
def get(name):
"""
Get the registered dict and return it.
Args:
name (str): the name that identifies a dataset, e.g. "imagenet1k".
Returns:
dict: dataset information (paths, source)
"""
try:
info = VisslDatasetCatalog.__REGISTERED_DATASETS[name]
except KeyError:
raise KeyError(
"Dataset '{}' is not registered! Available datasets are: {}".format(
name, ", ".join(VisslDatasetCatalog.__REGISTERED_DATASETS.keys())
)
)
return info
@staticmethod
def list() -> List[str]:
"""
List all registered datasets.
Returns:
list[str]
"""
return list(VisslDatasetCatalog.__REGISTERED_DATASETS.keys())
@staticmethod
def clear():
"""
        Remove all registered datasets.
"""
VisslDatasetCatalog.__REGISTERED_DATASETS.clear()
@staticmethod
def remove(name):
"""
Remove the dataset registered by ``name``.
"""
VisslDatasetCatalog.__REGISTERED_DATASETS.pop(name)
@staticmethod
def has_data(name):
"""
Check whether the data with ``name`` exists.
"""
data_found = name in VisslDatasetCatalog.__REGISTERED_DATASETS
return data_found
def get_local_path(input_file, dest_dir):
"""
If user specified copying data to a local directory,
get the local path where the data files were copied.
- If input_file is just a file, we return the dest_dir/filename
    - If input_file is a directory, then we check whether the
      environment is SLURM and use slurm_dir (otherwise dest_dir)
      to look up whether the copy_complete file is available.
If available, we return the directory.
- If both above fail, we return the input_file as is.
"""
out = ""
if PathManager.isfile(input_file):
out = os.path.join(dest_dir, os.path.basename(input_file))
elif PathManager.isdir(input_file):
data_name = input_file.strip("/").split("/")[-1]
if "SLURM_JOBID" in os.environ:
dest_dir = get_slurm_dir(dest_dir)
dest_dir = os.path.join(dest_dir, data_name)
complete_flag = os.path.join(dest_dir, "copy_complete")
if PathManager.isfile(complete_flag):
out = dest_dir
if PathManager.exists(out):
return out
else:
return input_file
def get_local_output_filepaths(input_files, dest_dir):
"""
If we have copied the files to local disk as specified in the config, we
return those local paths. Otherwise return the original paths.
"""
output_files = []
for item in input_files:
if isinstance(item, list):
out = get_local_output_filepaths(item, dest_dir)
else:
out = get_local_path(item, dest_dir)
output_files.append(out)
return output_files
def check_data_exists(data_files):
"""
Check that the input data files exist. If the data_files is a list,
we iteratively check for each file in the list.
"""
if isinstance(data_files, list):
return np.all([PathManager.exists(item) for item in data_files])
else:
return PathManager.exists(data_files)
def register_pascal_voc():
"""
Register PASCAL VOC 2007 and 2012 datasets to the data catalog.
    We first look up the dataset paths in the dataset catalog;
    if the paths exist, we register the dataset, otherwise we remove the
    voc_data entry from the catalog registry.
"""
voc_datasets = ["voc2007_folder", "voc2012_folder"]
for voc_data in voc_datasets:
data_info = VisslDatasetCatalog.get(voc_data)
data_folder = data_info["train"][0]
if PathManager.exists(data_folder):
train_data_info = get_voc_images_labels_info("train", data_folder)
test_data_info = get_voc_images_labels_info("val", data_folder)
data_info["train"] = train_data_info
data_info["val"] = test_data_info
VisslDatasetCatalog.remove(voc_data)
VisslDatasetCatalog.register_data(voc_data, data_info)
else:
VisslDatasetCatalog.remove(voc_data)
def register_coco():
"""
    Register the COCO 2014 dataset to the data catalog.
    We first look up the dataset path in the dataset catalog;
    if the path exists, we register it, otherwise we remove
    coco2014_folder from the catalog registry.
"""
data_info = VisslDatasetCatalog.get("coco2014_folder")
data_folder = data_info["train"][0]
if PathManager.exists(data_folder):
train_data_info = get_coco_imgs_labels_info("train", data_folder)
test_data_info = get_coco_imgs_labels_info("val", data_folder)
data_info["train"] = train_data_info
data_info["val"] = test_data_info
VisslDatasetCatalog.remove("coco2014_folder")
VisslDatasetCatalog.register_data("coco2014_folder", data_info)
else:
VisslDatasetCatalog.remove("coco2014_folder")
def register_datasets(json_catalog_path):
"""
If the json dataset_catalog file is found, we register
the datasets specified in the catalog with VISSL.
    If the catalog also specifies VOC or COCO datasets, we register them.
Args:
json_catalog_path (str): the path to the json dataset catalog
"""
if PathManager.exists(json_catalog_path):
logging.info(f"Registering datasets: {json_catalog_path}")
VisslDatasetCatalog.clear()
VisslDatasetCatalog.register_json(json_catalog_path)
if VisslDatasetCatalog.has_data("voc2007_folder") or VisslDatasetCatalog.has_data(
"voc2012_folder"
):
register_pascal_voc()
if VisslDatasetCatalog.has_data("coco2014_folder"):
register_coco()
def get_data_files(split, dataset_config):
"""
Get the path to the dataset (images and labels).
1. If the user has explicitly specified the data_sources, we simply
use those and don't do lookup in the datasets registered with VISSL
from the dataset catalog.
2. If the user hasn't specified the path, look for the dataset in
the datasets catalog registered with VISSL. For a given list of datasets
and a given partition (train/test), we first verify that we have the
dataset and the correct source as specified by the user.
Then for each dataset in the list, we get the data path (make sure it
    exists, sources match). The label file is optional.
Once we have the dataset original paths, we replace the path with the local paths
if the data was copied to local disk.
"""
assert len(dataset_config[split].DATASET_NAMES) == len(
dataset_config[split].DATA_SOURCES
), "len(data_sources) != len(dataset_names)"
if len(dataset_config[split].DATA_PATHS) > 0:
assert len(dataset_config[split].DATA_SOURCES) == len(
dataset_config[split].DATA_PATHS
), "len(data_sources) != len(data_paths)"
data_files, label_files = [], []
data_names = dataset_config[split].DATASET_NAMES
data_sources = dataset_config[split].DATA_SOURCES
data_split = "train" if split == "TRAIN" else "val"
for idx in range(len(data_sources)):
        # if there are synthetic data sources, we set the filepath to an empty string
if data_sources[idx] == "synthetic":
data_files.append("")
continue
# if user has specified the data path explicitly, we use it
elif len(dataset_config[split].DATA_PATHS) > 0:
data_files.append(dataset_config[split].DATA_PATHS[idx])
        # otherwise retrieve from the catalog based on the dataset name
else:
data_info = VisslDatasetCatalog.get(data_names[idx])
assert (
len(data_info[data_split]) > 0
), f"data paths list for split: { data_split } is empty"
            assert check_data_exists(
                data_info[data_split][0]
            ), f"Some data files don't exist: {data_info[data_split][0]}"
data_files.append(data_info[data_split][0])
# labels are optional and hence we append if we find them
if len(dataset_config[split].LABEL_PATHS) > 0:
if check_data_exists(dataset_config[split].LABEL_PATHS[idx]):
label_files.append(dataset_config[split].LABEL_PATHS[idx])
else:
label_data_info = VisslDatasetCatalog.get(data_names[idx])
if check_data_exists(label_data_info[data_split][1]):
label_files.append(label_data_info[data_split][1])
output = [data_files, label_files]
if dataset_config[split].COPY_TO_LOCAL_DISK:
dest_dir = dataset_config[split]["COPY_DESTINATION_DIR"]
local_data_files = get_local_output_filepaths(data_files, dest_dir)
local_label_files = get_local_output_filepaths(label_files, dest_dir)
output = [local_data_files, local_label_files]
return output
# get the path to dataset_catalog.json file
json_catalog_file = get_json_data_catalog_file()
# register the datasets specified in the catalog with VISSL
register_datasets(json_catalog_file)
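# Hedged usage sketch (illustrative; the paths below are placeholders): a custom
# dataset can be registered by hand using the same split -> [data_path, label_path]
# layout that get_data_files() reads above.
#
#   VisslDatasetCatalog.register_data(
#       "my_dataset_filelist",
#       {"train": ["/path/to/train_images.npy", "/path/to/train_labels.npy"],
#        "val": ["/path/to/val_images.npy", "/path/to/val_labels.npy"]},
#   )
#   assert VisslDatasetCatalog.has_data("my_dataset_filelist")
#   info = VisslDatasetCatalog.get("my_dataset_filelist")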
|
'''
Created on Nov 20, 2019
@author: Melody Griesen
'''
if __name__ == '__main__':
pass
|
import sys
import os
from datetime import datetime
from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession
from pyspark.sql.types import (StructType, StructField as Fld, DoubleType as Dbl,
IntegerType as Int, DateType as Date,
BooleanType as Boolean, FloatType as Float,
LongType as Long, StringType as String,
ArrayType as Array)
from pyspark.sql.functions import (col, year, month, dayofmonth, weekofyear, quarter,
explode, from_json)
def create_spark_session(aws_key, aws_secret_key):
"""
    Description: Creates a Spark session configured for S3A access.
    Args:
        aws_key: AWS access key id.
        aws_secret_key: AWS secret access key.
    Returns:
        spark session object
"""
spark = SparkSession \
.builder \
.config("spark.executor.heartbeatInterval", "40s") \
.getOrCreate()
spark.sparkContext._jsc.hadoopConfiguration().set("fs.s3a.impl",
"org.apache.hadoop.fs.s3a.S3AFileSystem")
spark.sparkContext._jsc.hadoopConfiguration().set("fs.s3a.access.key", aws_key)
spark.sparkContext._jsc.hadoopConfiguration().set("fs.s3a.secret.key", aws_secret_key)
spark.sparkContext._jsc.hadoopConfiguration().set("fs.s3a.endpoint", "s3.amazonaws.com")
spark.sparkContext._jsc.hadoopConfiguration().set("fs.s3a.connection.timeout", "100")
spark.sparkContext._jsc.hadoopConfiguration().set("fs.s3a.connection.maximum", "5000")
spark.conf.set("spark.sql.shuffle.partitions", 4)
return spark
def format_datetime(ts):
return datetime.fromtimestamp(ts/1000.0)
if __name__ == "__main__":
s3_bucket = sys.argv[1]
s3_key = sys.argv[2]
aws_key = sys.argv[3]
aws_secret_key = sys.argv[4]
redshift_conn_string = sys.argv[5]
db_user = sys.argv[6]
db_pass = sys.argv[7]
spark = create_spark_session(aws_key, aws_secret_key)
movies_schema = StructType([
Fld("adult", String()),
Fld("belongs_to_collection", Long()),
Fld("budget", Long()),
Fld("genres", String()),
Fld("homepage", String()),
Fld("id", Int()),
Fld("imdb_id", String()),
Fld("original_language", String()),
Fld("original_title", String()),
Fld("overview", String()),
Fld("popularity", Dbl()),
Fld("poster_path", String()),
Fld("production_company", String()),
Fld("production_country", String()),
Fld("release_date", Date()),
Fld("revenue", Long()),
Fld("runtime", Float()),
Fld("spoken_languages", String()),
Fld("status", String()),
Fld("tagline", String()),
Fld("title", String()),
Fld("video", Boolean()),
Fld("vote_average", Float()),
Fld("vote_count", Int())
])
movies_df = spark.read.option("header", "true") \
.csv("s3a://{}/{}/movies_metadata.csv".format(s3_bucket, s3_key),
schema=movies_schema)
genre_schema = Array(StructType([Fld("id", Int()), Fld("name", String())]))
    movies_df = movies_df.withColumn("genres", explode(from_json("genres", genre_schema))) \
        .withColumn("genre_id", col("genres.id")) \
        .withColumn("genre_name", col("genres.name"))
movie_genre = movies_df.select("id", "genre_id").distinct()
movie_genre = movie_genre.select(col("id").alias("movie_id"), col("genre_id"))
genre = movies_df.select("genre_id", "genre_name").distinct()
genre = genre.na.drop()
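    # Illustration (added): a raw ``genres`` cell such as
    #   [{"id": 16, "name": "Animation"}, {"id": 35, "name": "Comedy"}]
    # is parsed by from_json with genre_schema and then exploded above, producing
    # one row per (movie id, genre) pair with genre_id/genre_name extracted.
    # Note the source CSV may store this column with single quotes, which could
    # need cleaning before from_json can parse it.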
# Load data into staging:
genre.write \
.format("jdbc") \
.option("url", redshift_conn_string) \
.option("dbtable", "movies.stage_genre") \
.option("user", sys.argv[6]) \
.option("password", sys.argv[7]) \
.option("driver", "com.amazon.redshift.jdbc42.Driver") \
.mode("append") \
.save()
movie_genre.write \
.format("jdbc") \
.option("url", redshift_conn_string) \
.option("dbtable", "movies.stage_movie_genre") \
.option("user", sys.argv[6]) \
.option("password", sys.argv[7]) \
.option("driver", "com.amazon.redshift.jdbc42.Driver") \
.mode("append") \
.save()
|
#!/usr/bin/env python3
"""
Generates Pulr "pull" config section from JSON, created with fetch-tags.py
"""
import sys
import argparse
from textwrap import dedent
try:
    import rapidjson as json
except ImportError:
    import json
import yaml
DEFAULT_FREQ = 1
DEFAULT_PATH = '1,0'
DEFAULT_CPU = 'LGX'
DEFAULT_TIMEOUT = 2
def generate(tag_list,
tag_file=None,
tag_data=None,
exclude=None,
config=None,
id_prefix='',
id_suffix='',
print_stats=False,
print_config=False):
def find_tag_in_struct(tag, data):
if '.' in tag:
tag_to_find, rest = tag.split('.', 1)
else:
tag_to_find = tag
rest = None
t = data[tag_to_find]
if rest is None:
return t
else:
if t['tag_type'] != 'struct':
raise ValueError(f'{tag_to_find} is not a struct!')
return find_tag_in_struct(
rest,
t['data_type']['internal_tags'],
)
def find_tag(tag, data):
if '.' in tag:
tag_to_find, rest = tag.split('.', 1)
else:
tag_to_find = tag
rest = None
for t in data:
if t['tag_name'] == tag_to_find:
if rest is None:
return t
else:
if t['tag_type'] != 'struct':
raise ValueError(f'{tag_to_find} is not a struct!')
else:
return find_tag_in_struct(
rest, t['data_type']['internal_tags'])
if tag_data is None:
if tag_file:
with open(tag_file) as fh:
tags = json.loads(fh.read())
else:
tags = json.loads(sys.stdin.read())
else:
tags = tag_data
DATA_TYPES = {
'BOOL': 'uint8',
'BYTE': 'byte',
'WORD': 'word',
'DWORD': 'dword',
'LWORD': 'qword',
'SINT': 'sint8',
'USINT': 'uint8',
'INT': 'sint16',
'UINT': 'uint16',
'DINT': 'sint32',
'UDINT': 'uint32',
'LINT': 'sint64',
'ULINT': 'uint64',
'REAL': 'real32',
'LREAL': 'real64'
}
DATA_TYPE_SIZE = {
'BOOL': 1,
'BYTE': 1,
'WORD': 2,
'DWORD': 4,
'LWORD': 8,
'SINT': 1,
'USINT': 1,
'INT': 2,
'UINT': 2,
'DINT': 4,
'UDINT': 4,
'LINT': 8,
'ULINT': 8,
'REAL': 4,
'LREAL': 8
}
def gen_offset(o1, o2, int_if_possible=False):
if o1:
o = f'{o1}+{o2}'
else:
o = o2 if int_if_possible else f'{o2}'
return o
def add_tag_info(tag_name, tag_data, coll, offset=0, base_offset=0):
nonlocal tags_count
if exclude:
for x in exclude:
if x.startswith('*'):
if tag_name.endswith(x[1:]):
return
elif x.endswith('*'):
if tag_name.startswith(x[:-1]):
return
else:
if tag_name == x:
return
arr = tag_data.get('array', 0)
if arr:
for aofs in range(0, arr):
tags_count += 1
coll.append({
'offset':
gen_offset(base_offset,
offset +
aofs * DATA_TYPE_SIZE[tag_data['data_type']],
int_if_possible=True),
'set-id':
f'{id_prefix}{tag_name}{id_suffix}[{aofs}]',
'type':
DATA_TYPES[tag_data['data_type']]
})
else:
tags_count += 1
coll.append({
'offset': gen_offset(base_offset, offset, int_if_possible=True),
'set-id': f'{id_prefix}{tag_name}{id_suffix}',
'type': DATA_TYPES[tag_data['data_type']]
})
tags_count = 0
pulls = []
def parse_offset(offset):
if isinstance(offset, int):
return offset
elif '+' in offset:
o = offset.split('+')
result = 0
for i in o:
                result += int(i)
return result
else:
return int(offset)
def gen_process(data, offset, tag_name, result=[]):
for tag, d in data.items():
if d['tag_type'] == 'struct':
if d['array'] == 0:
gen_process(d['data_type']['internal_tags'],
gen_offset(offset, d['offset']),
tag_name + '.' + tag, result)
else:
for aofs in range(0, d['array']):
gen_process(
d['data_type']['internal_tags'],
gen_offset(
parse_offset(offset) + aofs *
d['data_type']['template']['structure_size'],
d['offset']), f'{tag_name}.{tag}[{aofs}]',
result)
else:
add_tag_info(f'{tag_name}.{tag}',
d,
result,
offset=d['offset'],
base_offset=offset)
return result
for TAG in tag_list:
data = find_tag(TAG, tags)
if data is None:
raise ValueError(f'Tag not found: {TAG}')
if data['tag_type'] == 'struct':
pulls.append({
'1tag':
TAG,
'process':
gen_process(data['data_type']['internal_tags'], 0, TAG, [])
})
else:
result = []
add_tag_info(TAG, data, result)
pulls.append({'1tag': TAG, 'process': result})
CFG = ''
if config:
CFG += dedent(f"""
version: 2
timeout: {config.get("timeout", DEFAULT_TIMEOUT)}
freq: {config.get("freq", DEFAULT_FREQ)}
proto:
name: enip/ab_eip
source: {config["source"]}
path: {config.get("path", DEFAULT_PATH)}
cpu: {config.get("cpu", DEFAULT_CPU)}
""").lstrip()
CFG += yaml.dump(dict(pull=pulls),
default_flow_style=False).replace('\n- 1tag', '\n- tag')
if print_config:
print(CFG)
if print_stats:
print(f'{tags_count} tag(s) generated', file=sys.stderr)
return CFG
if __name__ == '__main__':
ap = argparse.ArgumentParser()
ap.add_argument('tag',
metavar='TAG',
help='Tags to parse (comma separated)')
ap.add_argument('-F',
'--tag_file',
metavar='FILE',
help='JSON tags file (default: stdin)')
ap.add_argument('-s',
'--source',
metavar='ADDR',
                    help='PLC IP[:port] (a full config is generated if defined)')
ap.add_argument(
'-x',
'--exclude',
metavar='TAGS',
help='Tags to exclude (comma separated, star masks possible)')
ap.add_argument('-f',
'--freq',
                    metavar='HERTZ',
help='Pull frequency',
default=DEFAULT_FREQ,
type=int)
ap.add_argument('--path',
metavar='PATH',
help='PLC path',
default=DEFAULT_PATH)
ap.add_argument('--cpu', metavar='CPU', help='CPU', default=DEFAULT_CPU)
ap.add_argument('--timeout',
metavar='SEC',
help='PLC TIMEOUT',
type=float,
default=DEFAULT_TIMEOUT)
ap.add_argument('--id-prefix',
metavar='VALUE',
help='ID prefix',
default='')
ap.add_argument('--id-suffix',
metavar='VALUE',
help='ID suffix',
default='')
a = ap.parse_args()
if a.source:
config = dict(source=a.source,
freq=a.freq,
path=a.path,
cpu=a.cpu,
timeout=a.timeout)
else:
config = None
generate(tag_file=a.tag_file,
tag_list=a.tag.split(','),
config=config,
exclude=a.exclude.split(',') if a.exclude else None,
id_prefix=a.id_prefix,
id_suffix=a.id_suffix,
print_stats=True,
print_config=True)
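# Hedged usage sketch (illustrative; the tag descriptions below are hand-written
# stand-ins for the JSON normally produced by fetch-tags.py):
#
#   demo_tags = [
#       {"tag_name": "Counter", "tag_type": "atomic", "data_type": "DINT", "array": 0},
#       {"tag_name": "Temps", "tag_type": "atomic", "data_type": "REAL", "array": 4},
#   ]
#   cfg = generate(["Counter", "Temps"], tag_data=demo_tags,
#                  config={"source": "10.0.0.1"}, print_config=True)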
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import json
import os
import sys
import os.path
import re
from pprint import pprint
from subprocess import Popen, PIPE
readme = open('README.md', 'w')
readme.write("# Free Hack Quest 2016\n")
def getListOfDirsWithTasks():
result = []
dirs = os.listdir('./');
for d in dirs:
print(d);
if os.path.isdir(d):
subdirs = os.listdir('./' + d)
subdirs.sort()
for sd in subdirs:
path = './' + d + '/' + sd
if os.path.isdir(path):
if os.path.isfile(path + '/main.json'):
result.append(path)
print("Found: " + path);
return result
dirs = getListOfDirsWithTasks();
dirs.sort()
game_name = 'Free Hack Quest 2016'
stat_tasks = []
table_tasks = []
errors = {}
def append_errors(path, text):
if path not in errors:
errors[path] = []
errors[path].append(text)
possible_categories = ["admin", "web", "pwn", "crypto", "forensic", "misc", "ppc", "recon", "reverse", "stego"]
def detectEncoding(path):
p = Popen(['file', '-i', path], stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = p.communicate(b"input data that is passed to subprocess' stdin")
pattern = re.compile('.*charset=(.*).*')
m = pattern.match(output)
if m:
return m.group(1)
return 'unknown'
def parseAuthor(path):
author = ''
with open(path) as f:
content = ''.join(f.readlines())
content = content.replace('\r', '')
content = content.replace('\n', '')
content = content.replace('\t', '')
pattern = re.compile('.*"nick"[ ]*\:[ ]*"([A-Z-a-z@!._]*)".*')
m = pattern.match(content)
if m:
author = m.group(1)
contacts = []
pattern = re.compile('.*"contacts"[ ]*\:[ ]*\[[ ]*"([A-Z-a-z@/!._]*)"[ ]*,[ ]*"([A-Z-a-z@/!._]*)".*')
m = pattern.match(content)
if m:
contacts.append(m.group(1));
contacts.append(m.group(2));
return author + '(' + ', '.join(contacts) + ')'
def appendStatCat(category, value):
for cat in stat_tasks:
if cat['category'] == category:
cat['count'] = cat['count'] + 1
cat['value'] = cat['value'] + value
return
stat_tasks.append({'category': category, 'count': 1, 'value': value})
def checkWriteUpFile(folder):
path = folder + '/WRITEUP.md'
if not os.path.isfile(path):
append_errors(folder, 'Missing file WRITEUP.md')
def getCategoryFromTask(data, folder):
category = 'unknown'
if 'category' not in data:
append_errors(folder, 'main.json: Missing field "category"')
else:
category = data['category']
if category not in possible_categories:
append_errors(folder, 'main.json: Field "category" has wrong value')
return category;
def getStatusFromTask(data, folder):
status = 'need verify'
if 'status' not in data:
append_errors(folder, 'main.json: Missing field "status"')
else:
status = data['status']
return status;
def getValueFromTask(data, folder):
value = 0
if 'value' not in data:
append_errors(folder, 'main.json: Missing field "value"')
else:
value = data['value']
if value == 0:
append_errors(folder, 'main.json: Task has 0 value')
return value
def getDescriptionFromTask(data, folder):
description = {"RU" : "", "EN": ""}
    if 'description' not in data:
        append_errors(folder, 'main.json: Missing field "description"')
else:
description = data['description']
if 'RU' not in description:
append_errors(folder, 'main.json: Missing subfield description "RU"')
else:
if description["RU"] == "":
append_errors(folder, 'main.json: Empty field in description "RU"')
if 'EN' not in description:
append_errors(folder, 'main.json: Missing subfield description "EN"')
else:
if description["EN"] == "":
append_errors(folder, 'main.json: Empty field in description "EN"')
return description
def getAuthorsFromTask(data, path):
authors = []
if 'authors' not in data:
append_errors(path, 'main.json: Missing field "authors"')
else:
if not isinstance(data['authors'], list):
append_errors(path, 'main.json: Field "authors" must be list')
else:
authors_ = data['authors']
for author in authors_:
name = ""
team = ""
contacts = []
if "name" not in author:
append_errors(path, 'main.json: Missing subfield author "name"')
else:
name = author["name"]
if name == "":
append_errors(path, 'main.json: Subfield author "name" is empty')
if "team" not in author:
append_errors(path, 'main.json: Missing subfield author "team"')
else:
team = author["team"]
if team == "":
append_errors(path, 'main.json: Subfield author "team" is empty')
if "contacts" not in author:
append_errors(path, 'main.json: Missing subfield author "contacts"')
else:
if not isinstance(author['contacts'], list):
append_errors(path, 'main.json: Subfield author "contacts" must be list')
else:
for c in author['contacts']:
if c == "":
append_errors(path, 'main.json: Empty field in author "contacts"')
else:
contacts.append(c);
contacts = ', '.join(contacts)
if contacts == "":
append_errors(path, 'main.json: Missing data in subfield authors "contacts"')
authors.append('[' + team + '] ' + name + ' (' + contacts + ')')
return authors
def getNameFromTask(data, folder):
    name = folder
if 'name' not in data:
append_errors(folder, 'main.json: Missing field "name"')
else:
name = data['name']
if name == "":
append_errors(folder, 'main.json: Field "name" is empty')
dirname = folder.split("/")[-1];
if name != dirname:
        append_errors(folder, 'main.json: Field "name" has wrong value; it must match the dirname "' + dirname + '"')
return name
def getFlagKeyFromTask(data, folder):
flag_key = ''
if 'flag_key' not in data:
append_errors(path, 'main.json: Missing field "flag_key"')
else:
flag_key = data['flag_key']
pattern = re.compile('FHQ\(.*\)')
pattern2 = re.compile('FHQ\{.*\}')
m = pattern.match(flag_key)
m2 = pattern2.match(flag_key)
if flag_key == "":
append_errors(folder, 'main.json: Field "flag_key" is empty')
elif not m and not m2:
append_errors(folder, 'main.json: Wrong value of field "flag_key" must be format "FHQ(`md5`) or FHQ(`sometext`)"')
return flag_key
def getGameFromTask(data, folder):
game = ''
if 'game' not in data:
append_errors(folder, 'main.json: Missing field "game"')
else:
game = data['game']
if game != game_name:
append_errors(folder, 'main.json: Wrong game name "' + game + '" Please change to "' + game_name + '"')
return game
def getHintsFromTask(data, folder):
hints = []
if 'hints' not in data:
        append_errors(folder, 'main.json: Missing field "hints"')
    else:
        if not isinstance(data['hints'], list):
            append_errors(folder, 'main.json: Field "hints" must be list')
else:
hints = data['hints']
for hint in hints:
if 'RU' not in hint:
append_errors(folder, 'main.json: Missing subfield hint "RU"')
else:
if hint["RU"] == "":
append_errors(folder, 'main.json: Empty field in hint "RU"')
if 'EN' not in hint:
append_errors(folder, 'main.json: Missing subfield hint "EN"')
else:
if hint["EN"] == "":
append_errors(folder, 'main.json: Empty field in hint "EN"')
return hints;
for d in dirs:
path = d + '/main.json'
#encoding = detectEncoding(path);
if os.path.isfile(path):
try:
checkWriteUpFile(d);
with open(path) as main_json:
data = json.load(main_json)
category = getCategoryFromTask(data, d)
value = getValueFromTask(data, d)
status = getStatusFromTask(data, d);
authors = getAuthorsFromTask(data, d)
name = getNameFromTask(data, d)
getDescriptionFromTask(data, d)
getFlagKeyFromTask(data, d)
appendStatCat(category, value);
table_tasks.append({
'category': category,
'value': value,
'name': name,
'path': d,
'status': status,
'authors': ', '.join(authors) } )
getGameFromTask(data, d)
getHintsFromTask(data, d)
except Exception:
status = ''
encoding = detectEncoding(path);
if encoding != 'utf-8':
status = encoding
append_errors(path, 'Wrong encoding in "' + path + '", expected "utf-8", got "' + encoding + '"')
author = parseAuthor(path);
# print sys.exc_info()
table_tasks.append({'category': 'unknown', 'value': 0, 'name': d, 'status': 'invalid json', 'authors': author } )
appendStatCat('unknown', 0);
readme.write("\n## Short list of tasks\n\n")
for row in table_tasks:
readme.write(' * ' + row['category'] + ' ' + str(row['value']) + ' "' + row['name'] + '" by ' + row['authors'] + "\n")
if len(errors) > 0:
readme.write("\n\n## Errors\n\n")
for path in errors:
print(' * ' + path)
readme.write(' * ' + path + "\n")
for e in errors[path]:
print("\t * " + e)
readme.write('\t * ' + e + "\n")
readme.write("\n## Statistics by categories\n\n")
readme.write("|Category|Count|Summary value\n")
readme.write("|---|---|---\n")
stat_tasks.sort(key=lambda x: x['category'])
tasks_count_all=0
tasks_value_all=0
for cat in stat_tasks:
readme.write("|" + cat['category'] + "|" + str(cat['count']) + "|" + str(cat['value']) + "\n")
tasks_count_all = tasks_count_all + cat['count'];
tasks_value_all = tasks_value_all + cat['value'];
readme.write("|All|" + str(tasks_count_all) + "|" + str(tasks_value_all) + "\n")
# sort table
table_tasks.sort(key=lambda x: x['category'] + ' ' + str(x['value']).zfill(4))
readme.write("\n\n## Status table\n\n")
readme.write("|Category&Value|Name|Status|Author(s)\n")
readme.write("|---|---|---|---\n")
for row in table_tasks:
readme.write('|' + row['category'] + ' ' + str(row['value']) + '|' + row['name'] + '|' + row['status'] + '|' + row['authors'] + "\n")
|
import sys
import click
import json
from urllib.request import urlopen
from urllib.parse import quote
RESPONSES_CODE = {
200 : "SMS sent",
400 : "One parameter is missing (identifier, password or message).",
402 : "Too many SMS sent.",
403 : "Service not activated or false login/key.",
500 : "Server Error. Please try again later."
}
#---------------------------------------
# CREATION & CONFIGURATION DU MESSAGE
#---------------------------------------
@click.command()
@click.option("-m", "--message",
prompt="SMS content: ",
help="the message to be sent")
@click.option("-c", "--config",
type=click.Path(exists=True),
prompt="Path of the config file",
help="parse JSON file to get id and password keys")
@click.option("-v", "--verbose",
is_flag=True,
help="Print the HTTP response code of the request")
def sms(message, config, verbose):
(user, password) = getKeys(config)
url = f"https://smsapi.free-mobile.fr/sendmsg?&user={user}&pass={password}&msg={quote(message)}"
response = urlopen(url)
if verbose:
status = response.getcode()
print(f"{status} : {RESPONSES_CODE[status]}")
def getKeys(config):
with open(config) as f:
credential = json.loads(f.read())
return (credential["user"], credential["password"])
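# Expected config file layout (as read by getKeys) and a sample invocation
# (script name is illustrative):
#
#   config.json:
#       {"user": "12345678", "password": "your-api-key"}
#
#   $ python sms_cli.py -m "hello" -c config.json -v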
if __name__ == "__main__":
sms()
|
import os
from tqdm import tqdm
# dataset = [['Bike','NYC','all','365','sum','0.1'],['DiDi','Xian','all','all','sum','0.1'],
# ['Metro','Chongqing','all','all','sum','0.1'],['ChargeStation','Beijing','all','all','max','0.1'],
# ['METR','LA','all','all','average','0.2'],['PEMS','BAY','all','all','average','0.2']]
dataset = [['METR','LA','all','all','average','0.2'],['PEMS','BAY','all','all','average','0.2']]
with open("ARIMAresult3.txt","w") as fp:
for index in tqdm(range(len(dataset))):
fp.write("*********************************************************\n")
fp.write("Processing city----------------{}---using ARIMA-------MergeIndex 12 --".format(dataset[index]))
f_tmp = os.popen("python -W ignore ARIMA.py --dataset {} --city {} --MergeIndex 12 --DataRange {} --TrainDays {} --MergeWay {} --test_ratio {}".format(dataset[index][0],dataset[index][1],dataset[index][2],dataset[index][3],dataset[index][4],dataset[index][5]), "r")
        # to record output
fp.write(f_tmp.read())
fp.flush()
f_tmp.close()
fp.write("\n")
|
import cv2
import sys
import pyglet
cascade_path = sys.argv[1]
classifier = cv2.CascadeClassifier(cascade_path)
# https://realpython.com/blog/python/face-detection-in-python-using-a-webcam/
video_capture = cv2.VideoCapture(0)
# Scale the video down to size so as not to break performance
video_capture.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 480)
video_capture.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 360)
window = pyglet.window.Window(width=480, height=360)
image = pyglet.resource.image('player.png')
@window.event
def on_draw():
window.clear()
ret, frame = video_capture.read()
gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = classifier.detectMultiScale(
gray_frame,
scaleFactor=1.1,
minNeighbors=2,
minSize=(30, 30),
flags=cv2.cv.CV_HAAR_SCALE_IMAGE
)
for (x, y, w, h) in faces:
print x, y, w, h
image.blit(480 - x, 360 - y)
image.blit(480 - x - w , 360 - y)
image.blit(480 - x - w, 360 - y - h)
image.blit(480 - x, 360 - y - h)
def update(dt):
on_draw()
if __name__ == "__main__":
    pyglet.clock.schedule_interval(update, 1/30.0)
pyglet.app.run()
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import url
from openstack_dashboard.dashboards.project.overview import views
urlpatterns = [
url(r'^$', views.ProjectOverview.as_view(), name='index'),
url(r'^warning$', views.WarningView.as_view(), name='warning'),
]
|
class handler():
def __init__(self):
self.greeting = "Hello World"
def __repr__(self):
return self.greeting
if __name__ == "__main__":
pass
|
#!/usr/bin/env python
from gimpfu import *
from gimpenums import *
import sys
import os
def color2id(color):
a = (color[0]<<16) | (color[1]<<8) | color[2]
b = (a & 0xF00000) >> 12 | (a & 0xF000) >> 8 | (a & 0xF00) << 4 | \
(a & 0xF0) >> 4
c = (b & 0xF000) | (b & 0x800) >> 11 | (b & 0x400) >> 7 | \
(b & 0x200) >> 3 | (b & 0x100) << 1 | (b & 0x80) >> 6 | \
(b & 0x40) >> 2 | (b & 0x20) << 2 | (b & 0x10) << 6 | \
(b & 0x8) >> 1 | (b & 0x4) << 3 | (b & 0x2) << 7 | (b & 0x1) << 11
return (c)
def gimp_histemul(img, layer):
idd = color2id(gimp.get_foreground())
gimp.pdb.gimp_message_set_handler (MESSAGE_BOX)
gimp.pdb.gimp_message (idd)
register(
"python_fu_histemul_id",
"",
"",
"matteli",
"matteli",
"",
"<Image>/Filters/Histemul/_id",
"RGB*",
[],
[],
gimp_histemul)
main()
|
# GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Input:
FORCE = "force"
ID = "id"
LINK = "link"
V = "v"
class Output:
SUCCESS = "success"
class ContainerRemoveInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"force": {
"type": "boolean",
"title": "Force Removal",
"description": "Force the removal of a running container (uses SIGKILL)",
"default": true,
"order": 4
},
"id": {
"type": "string",
"title": "ID",
"description": "Container ID",
"order": 1
},
"link": {
"type": "boolean",
"title": "Link Removal",
"description": "Remove the specified link and not the underlying container",
"default": false,
"order": 3
},
"v": {
"type": "boolean",
"title": "Volume Removal",
"description": "Remove the volumes associated with the container",
"default": false,
"order": 2
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class ContainerRemoveOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"success": {
"type": "boolean",
"title": "Success",
"description": "True if successful",
"order": 1
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
|
import functools
import numpy as np
import math
import types
import warnings
# trapz is a public function for scipy.integrate,
# even though it's actually a NumPy function.
from numpy import trapz
from scipy.special import roots_legendre
from scipy.special import gammaln
__all__ = ['fixed_quad', 'quadrature', 'romberg', 'trapz', 'simps', 'romb',
'cumtrapz', 'newton_cotes']
# Make See Also linking for our local copy work properly
def _copy_func(f):
"""Based on http://stackoverflow.com/a/6528148/190597 (Glenn Maynard)"""
g = types.FunctionType(f.__code__, f.__globals__, name=f.__name__,
argdefs=f.__defaults__, closure=f.__closure__)
g = functools.update_wrapper(g, f)
g.__kwdefaults__ = f.__kwdefaults__
return g
trapz = _copy_func(trapz)
if trapz.__doc__:
trapz.__doc__ = trapz.__doc__.replace('sum, cumsum', 'numpy.cumsum')
class AccuracyWarning(Warning):
pass
def _cached_roots_legendre(n):
"""
Cache roots_legendre results to speed up calls of the fixed_quad
function.
"""
if n in _cached_roots_legendre.cache:
return _cached_roots_legendre.cache[n]
_cached_roots_legendre.cache[n] = roots_legendre(n)
return _cached_roots_legendre.cache[n]
_cached_roots_legendre.cache = dict()
def fixed_quad(func, a, b, args=(), n=5):
"""
Compute a definite integral using fixed-order Gaussian quadrature.
Integrate `func` from `a` to `b` using Gaussian quadrature of
order `n`.
Parameters
----------
func : callable
A Python function or method to integrate (must accept vector inputs).
If integrating a vector-valued function, the returned array must have
shape ``(..., len(x))``.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
args : tuple, optional
Extra arguments to pass to function, if any.
n : int, optional
Order of quadrature integration. Default is 5.
Returns
-------
val : float
Gaussian quadrature approximation to the integral
none : None
Statically returned value of None
See Also
--------
quad : adaptive quadrature using QUADPACK
dblquad : double integrals
tplquad : triple integrals
romberg : adaptive Romberg quadrature
quadrature : adaptive Gaussian quadrature
romb : integrators for sampled data
simps : integrators for sampled data
cumtrapz : cumulative integration for sampled data
ode : ODE integrator
odeint : ODE integrator
Examples
--------
>>> from scipy import integrate
>>> f = lambda x: x**8
>>> integrate.fixed_quad(f, 0.0, 1.0, n=4)
(0.1110884353741496, None)
>>> integrate.fixed_quad(f, 0.0, 1.0, n=5)
(0.11111111111111102, None)
>>> print(1/9.0) # analytical result
0.1111111111111111
>>> integrate.fixed_quad(np.cos, 0.0, np.pi/2, n=4)
(0.9999999771971152, None)
>>> integrate.fixed_quad(np.cos, 0.0, np.pi/2, n=5)
(1.000000000039565, None)
>>> np.sin(np.pi/2)-np.sin(0) # analytical result
1.0
"""
x, w = _cached_roots_legendre(n)
x = np.real(x)
if np.isinf(a) or np.isinf(b):
raise ValueError("Gaussian quadrature is only available for "
"finite limits.")
y = (b-a)*(x+1)/2.0 + a
return (b-a)/2.0 * np.sum(w*func(y, *args), axis=-1), None
def vectorize1(func, args=(), vec_func=False):
"""Vectorize the call to a function.
This is an internal utility function used by `romberg` and
`quadrature` to create a vectorized version of a function.
If `vec_func` is True, the function `func` is assumed to take vector
arguments.
Parameters
----------
func : callable
User defined function.
args : tuple, optional
Extra arguments for the function.
vec_func : bool, optional
True if the function func takes vector arguments.
Returns
-------
vfunc : callable
A function that will take a vector argument and return the
result.
"""
if vec_func:
def vfunc(x):
return func(x, *args)
else:
def vfunc(x):
if np.isscalar(x):
return func(x, *args)
x = np.asarray(x)
# call with first point to get output type
y0 = func(x[0], *args)
n = len(x)
dtype = getattr(y0, 'dtype', type(y0))
output = np.empty((n,), dtype=dtype)
output[0] = y0
for i in range(1, n):
output[i] = func(x[i], *args)
return output
return vfunc
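# Example (added, not part of the original docstring): wrapping a scalar-only
# function so it can be evaluated on an array of abscissae, as `quadrature`
# does internally:
#
#   >>> vf = vectorize1(math.sin, vec_func=False)
#   >>> vf(np.array([0.0, np.pi / 2]))
#   array([0., 1.])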
def quadrature(func, a, b, args=(), tol=1.49e-8, rtol=1.49e-8, maxiter=50,
vec_func=True, miniter=1):
"""
Compute a definite integral using fixed-tolerance Gaussian quadrature.
Integrate `func` from `a` to `b` using Gaussian quadrature
with absolute tolerance `tol`.
Parameters
----------
func : function
A Python function or method to integrate.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
args : tuple, optional
Extra arguments to pass to function.
tol, rtol : float, optional
Iteration stops when error between last two iterates is less than
`tol` OR the relative change is less than `rtol`.
maxiter : int, optional
Maximum order of Gaussian quadrature.
vec_func : bool, optional
True or False if func handles arrays as arguments (is
a "vector" function). Default is True.
miniter : int, optional
Minimum order of Gaussian quadrature.
Returns
-------
val : float
Gaussian quadrature approximation (within tolerance) to integral.
err : float
Difference between last two estimates of the integral.
See also
--------
romberg: adaptive Romberg quadrature
fixed_quad: fixed-order Gaussian quadrature
quad: adaptive quadrature using QUADPACK
dblquad: double integrals
tplquad: triple integrals
romb: integrator for sampled data
simps: integrator for sampled data
cumtrapz: cumulative integration for sampled data
ode: ODE integrator
odeint: ODE integrator
Examples
--------
>>> from scipy import integrate
>>> f = lambda x: x**8
>>> integrate.quadrature(f, 0.0, 1.0)
(0.11111111111111106, 4.163336342344337e-17)
>>> print(1/9.0) # analytical result
0.1111111111111111
>>> integrate.quadrature(np.cos, 0.0, np.pi/2)
(0.9999999999999536, 3.9611425250996035e-11)
>>> np.sin(np.pi/2)-np.sin(0) # analytical result
1.0
"""
if not isinstance(args, tuple):
args = (args,)
vfunc = vectorize1(func, args, vec_func=vec_func)
val = np.inf
err = np.inf
maxiter = max(miniter+1, maxiter)
for n in range(miniter, maxiter+1):
newval = fixed_quad(vfunc, a, b, (), n)[0]
err = abs(newval-val)
val = newval
if err < tol or err < rtol*abs(val):
break
else:
warnings.warn(
"maxiter (%d) exceeded. Latest difference = %e" % (maxiter, err),
AccuracyWarning)
return val, err
def tupleset(t, i, value):
l = list(t)
l[i] = value
return tuple(l)
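# For example, tupleset((slice(None),) * 3, 1, slice(0, 5)) returns
# (slice(None, None, None), slice(0, 5, None), slice(None, None, None)),
# i.e. an index tuple equivalent to arr[:, 0:5, :].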
def cumtrapz(y, x=None, dx=1.0, axis=-1, initial=None):
"""
Cumulatively integrate y(x) using the composite trapezoidal rule.
Parameters
----------
y : array_like
Values to integrate.
x : array_like, optional
The coordinate to integrate along. If None (default), use spacing `dx`
between consecutive elements in `y`.
dx : float, optional
Spacing between elements of `y`. Only used if `x` is None.
axis : int, optional
Specifies the axis to cumulate. Default is -1 (last axis).
initial : scalar, optional
If given, insert this value at the beginning of the returned result.
Typically this value should be 0. Default is None, which means no
value at ``x[0]`` is returned and `res` has one element less than `y`
along the axis of integration.
Returns
-------
res : ndarray
The result of cumulative integration of `y` along `axis`.
If `initial` is None, the shape is such that the axis of integration
has one less value than `y`. If `initial` is given, the shape is equal
to that of `y`.
See Also
--------
numpy.cumsum, numpy.cumprod
quad: adaptive quadrature using QUADPACK
romberg: adaptive Romberg quadrature
quadrature: adaptive Gaussian quadrature
fixed_quad: fixed-order Gaussian quadrature
dblquad: double integrals
tplquad: triple integrals
romb: integrators for sampled data
ode: ODE integrators
odeint: ODE integrators
Examples
--------
>>> from scipy import integrate
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-2, 2, num=20)
>>> y = x
>>> y_int = integrate.cumtrapz(y, x, initial=0)
>>> plt.plot(x, y_int, 'ro', x, y[0] + 0.5 * x**2, 'b-')
>>> plt.show()
"""
y = np.asarray(y)
if x is None:
d = dx
else:
x = np.asarray(x)
if x.ndim == 1:
d = np.diff(x)
# reshape to correct shape
shape = [1] * y.ndim
shape[axis] = -1
d = d.reshape(shape)
elif len(x.shape) != len(y.shape):
raise ValueError("If given, shape of x must be 1-D or the "
"same as y.")
else:
d = np.diff(x, axis=axis)
if d.shape[axis] != y.shape[axis] - 1:
raise ValueError("If given, length of x along axis must be the "
"same as y.")
nd = len(y.shape)
slice1 = tupleset((slice(None),)*nd, axis, slice(1, None))
slice2 = tupleset((slice(None),)*nd, axis, slice(None, -1))
res = np.cumsum(d * (y[slice1] + y[slice2]) / 2.0, axis=axis)
if initial is not None:
if not np.isscalar(initial):
raise ValueError("`initial` parameter should be a scalar.")
shape = list(res.shape)
shape[axis] = 1
res = np.concatenate([np.full(shape, initial, dtype=res.dtype), res],
axis=axis)
return res
def _basic_simps(y, start, stop, x, dx, axis):
nd = len(y.shape)
if start is None:
start = 0
step = 2
slice_all = (slice(None),)*nd
slice0 = tupleset(slice_all, axis, slice(start, stop, step))
slice1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
slice2 = tupleset(slice_all, axis, slice(start+2, stop+2, step))
if x is None: # Even-spaced Simpson's rule.
result = np.sum(dx/3.0 * (y[slice0]+4*y[slice1]+y[slice2]),
axis=axis)
else:
# Account for possibly different spacings.
# Simpson's rule changes a bit.
h = np.diff(x, axis=axis)
sl0 = tupleset(slice_all, axis, slice(start, stop, step))
sl1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
h0 = h[sl0]
h1 = h[sl1]
hsum = h0 + h1
hprod = h0 * h1
h0divh1 = h0 / h1
tmp = hsum/6.0 * (y[slice0]*(2-1.0/h0divh1) +
y[slice1]*hsum*hsum/hprod +
y[slice2]*(2-h0divh1))
result = np.sum(tmp, axis=axis)
return result
def simps(y, x=None, dx=1, axis=-1, even='avg'):
"""
Integrate y(x) using samples along the given axis and the composite
Simpson's rule. If x is None, spacing of dx is assumed.
If there are an even number of samples, N, then there are an odd
number of intervals (N-1), but Simpson's rule requires an even number
of intervals. The parameter 'even' controls how this is handled.
Parameters
----------
y : array_like
Array to be integrated.
x : array_like, optional
If given, the points at which `y` is sampled.
dx : int, optional
Spacing of integration points along axis of `x`. Only used when
`x` is None. Default is 1.
axis : int, optional
Axis along which to integrate. Default is the last axis.
even : str {'avg', 'first', 'last'}, optional
        'avg' : Average two results: 1) use the first N-2 intervals with
a trapezoidal rule on the last interval and 2) use the last
N-2 intervals with a trapezoidal rule on the first interval.
'first' : Use Simpson's rule for the first N-2 intervals with
a trapezoidal rule on the last interval.
'last' : Use Simpson's rule for the last N-2 intervals with a
trapezoidal rule on the first interval.
See Also
--------
quad: adaptive quadrature using QUADPACK
romberg: adaptive Romberg quadrature
quadrature: adaptive Gaussian quadrature
fixed_quad: fixed-order Gaussian quadrature
dblquad: double integrals
tplquad: triple integrals
romb: integrators for sampled data
cumtrapz: cumulative integration for sampled data
ode: ODE integrators
odeint: ODE integrators
Notes
-----
For an odd number of samples that are equally spaced the result is
exact if the function is a polynomial of order 3 or less. If
the samples are not equally spaced, then the result is exact only
if the function is a polynomial of order 2 or less.
Examples
--------
>>> from scipy import integrate
>>> x = np.arange(0, 10)
>>> y = np.arange(0, 10)
>>> integrate.simps(y, x)
40.5
>>> y = np.power(x, 3)
>>> integrate.simps(y, x)
1642.5
>>> integrate.quad(lambda x: x**3, 0, 9)[0]
1640.25
>>> integrate.simps(y, x, even='first')
1644.5
"""
y = np.asarray(y)
nd = len(y.shape)
N = y.shape[axis]
last_dx = dx
first_dx = dx
returnshape = 0
if x is not None:
x = np.asarray(x)
if len(x.shape) == 1:
shapex = [1] * nd
shapex[axis] = x.shape[0]
saveshape = x.shape
returnshape = 1
x = x.reshape(tuple(shapex))
elif len(x.shape) != len(y.shape):
raise ValueError("If given, shape of x must be 1-D or the "
"same as y.")
if x.shape[axis] != N:
raise ValueError("If given, length of x along axis must be the "
"same as y.")
if N % 2 == 0:
val = 0.0
result = 0.0
slice1 = (slice(None),)*nd
slice2 = (slice(None),)*nd
if even not in ['avg', 'last', 'first']:
raise ValueError("Parameter 'even' must be "
"'avg', 'last', or 'first'.")
# Compute using Simpson's rule on first intervals
if even in ['avg', 'first']:
slice1 = tupleset(slice1, axis, -1)
slice2 = tupleset(slice2, axis, -2)
if x is not None:
last_dx = x[slice1] - x[slice2]
val += 0.5*last_dx*(y[slice1]+y[slice2])
result = _basic_simps(y, 0, N-3, x, dx, axis)
# Compute using Simpson's rule on last set of intervals
if even in ['avg', 'last']:
slice1 = tupleset(slice1, axis, 0)
slice2 = tupleset(slice2, axis, 1)
if x is not None:
first_dx = x[tuple(slice2)] - x[tuple(slice1)]
val += 0.5*first_dx*(y[slice2]+y[slice1])
result += _basic_simps(y, 1, N-2, x, dx, axis)
if even == 'avg':
val /= 2.0
result /= 2.0
result = result + val
else:
result = _basic_simps(y, 0, N-2, x, dx, axis)
if returnshape:
x = x.reshape(saveshape)
return result
def romb(y, dx=1.0, axis=-1, show=False):
"""
Romberg integration using samples of a function.
Parameters
----------
y : array_like
A vector of ``2**k + 1`` equally-spaced samples of a function.
dx : float, optional
The sample spacing. Default is 1.
axis : int, optional
The axis along which to integrate. Default is -1 (last axis).
show : bool, optional
When `y` is a single 1-D array, then if this argument is True
print the table showing Richardson extrapolation from the
samples. Default is False.
Returns
-------
romb : ndarray
The integrated result for `axis`.
See also
--------
quad : adaptive quadrature using QUADPACK
romberg : adaptive Romberg quadrature
quadrature : adaptive Gaussian quadrature
fixed_quad : fixed-order Gaussian quadrature
dblquad : double integrals
tplquad : triple integrals
simps : integrators for sampled data
cumtrapz : cumulative integration for sampled data
ode : ODE integrators
odeint : ODE integrators
Examples
--------
>>> from scipy import integrate
>>> x = np.arange(10, 14.25, 0.25)
>>> y = np.arange(3, 12)
>>> integrate.romb(y)
56.0
>>> y = np.sin(np.power(x, 2.5))
>>> integrate.romb(y)
-0.742561336672229
>>> integrate.romb(y, show=True)
Richardson Extrapolation Table for Romberg Integration
====================================================================
-0.81576
4.63862 6.45674
-1.10581 -3.02062 -3.65245
-2.57379 -3.06311 -3.06595 -3.05664
-1.34093 -0.92997 -0.78776 -0.75160 -0.74256
====================================================================
-0.742561336672229
"""
y = np.asarray(y)
nd = len(y.shape)
Nsamps = y.shape[axis]
Ninterv = Nsamps-1
n = 1
k = 0
while n < Ninterv:
n <<= 1
k += 1
if n != Ninterv:
raise ValueError("Number of samples must be one plus a "
"non-negative power of 2.")
R = {}
slice_all = (slice(None),) * nd
slice0 = tupleset(slice_all, axis, 0)
slicem1 = tupleset(slice_all, axis, -1)
h = Ninterv * np.asarray(dx, dtype=float)
R[(0, 0)] = (y[slice0] + y[slicem1])/2.0*h
slice_R = slice_all
start = stop = step = Ninterv
for i in range(1, k+1):
start >>= 1
slice_R = tupleset(slice_R, axis, slice(start, stop, step))
step >>= 1
R[(i, 0)] = 0.5*(R[(i-1, 0)] + h*y[slice_R].sum(axis=axis))
for j in range(1, i+1):
prev = R[(i, j-1)]
R[(i, j)] = prev + (prev-R[(i-1, j-1)]) / ((1 << (2*j))-1)
h /= 2.0
if show:
if not np.isscalar(R[(0, 0)]):
print("*** Printing table only supported for integrals" +
" of a single data set.")
else:
try:
precis = show[0]
except (TypeError, IndexError):
precis = 5
try:
width = show[1]
except (TypeError, IndexError):
width = 8
formstr = "%%%d.%df" % (width, precis)
title = "Richardson Extrapolation Table for Romberg Integration"
print("", title.center(68), "=" * 68, sep="\n", end="\n")
for i in range(k+1):
for j in range(i+1):
print(formstr % R[(i, j)], end=" ")
print()
print("=" * 68)
print()
return R[(k, k)]
# Romberg quadratures for numeric integration.
#
# Written by Scott M. Ransom <ransom@cfa.harvard.edu>
# last revision: 14 Nov 98
#
# Cosmetic changes by Konrad Hinsen <hinsen@cnrs-orleans.fr>
# last revision: 1999-7-21
#
# Adapted to SciPy by Travis Oliphant <oliphant.travis@ieee.org>
# last revision: Dec 2001
def _difftrap(function, interval, numtraps):
"""
Perform part of the trapezoidal rule to integrate a function.
Assume that we had called difftrap with all lower powers-of-2
starting with 1. Calling difftrap only returns the summation
of the new ordinates. It does _not_ multiply by the width
of the trapezoids. This must be performed by the caller.
'function' is the function to evaluate (must accept vector arguments).
'interval' is a sequence with lower and upper limits
of integration.
'numtraps' is the number of trapezoids to use (must be a
power-of-2).
"""
if numtraps <= 0:
raise ValueError("numtraps must be > 0 in difftrap().")
elif numtraps == 1:
return 0.5*(function(interval[0])+function(interval[1]))
else:
        numtosum = numtraps // 2  # numtraps is a power of two, so this stays exact
h = float(interval[1]-interval[0])/numtosum
lox = interval[0] + 0.5 * h
points = lox + h * np.arange(numtosum)
s = np.sum(function(points), axis=0)
return s
def _romberg_diff(b, c, k):
"""
Compute the differences for the Romberg quadrature corrections.
See Forman Acton's "Real Computing Made Real," p 143.
"""
tmp = 4.0**k
return (tmp * c - b)/(tmp - 1.0)
def _printresmat(function, interval, resmat):
# Print the Romberg result matrix.
i = j = 0
print('Romberg integration of', repr(function), end=' ')
print('from', interval)
print('')
print('%6s %9s %9s' % ('Steps', 'StepSize', 'Results'))
for i in range(len(resmat)):
print('%6d %9f' % (2**i, (interval[1]-interval[0])/(2.**i)), end=' ')
for j in range(i+1):
print('%9f' % (resmat[i][j]), end=' ')
print('')
print('')
print('The final result is', resmat[i][j], end=' ')
print('after', 2**(len(resmat)-1)+1, 'function evaluations.')
def romberg(function, a, b, args=(), tol=1.48e-8, rtol=1.48e-8, show=False,
divmax=10, vec_func=False):
"""
Romberg integration of a callable function or method.
Returns the integral of `function` (a function of one variable)
over the interval (`a`, `b`).
If `show` is 1, the triangular array of the intermediate results
will be printed. If `vec_func` is True (default is False), then
`function` is assumed to support vector arguments.
Parameters
----------
function : callable
Function to be integrated.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
Returns
-------
results : float
Result of the integration.
Other Parameters
----------------
args : tuple, optional
Extra arguments to pass to function. Each element of `args` will
be passed as a single argument to `func`. Default is to pass no
extra arguments.
tol, rtol : float, optional
The desired absolute and relative tolerances. Defaults are 1.48e-8.
show : bool, optional
Whether to print the results. Default is False.
divmax : int, optional
Maximum order of extrapolation. Default is 10.
vec_func : bool, optional
Whether `func` handles arrays as arguments (i.e., whether it is a
"vector" function). Default is False.
See Also
--------
fixed_quad : Fixed-order Gaussian quadrature.
quad : Adaptive quadrature using QUADPACK.
dblquad : Double integrals.
tplquad : Triple integrals.
romb : Integrators for sampled data.
simps : Integrators for sampled data.
cumtrapz : Cumulative integration for sampled data.
ode : ODE integrator.
odeint : ODE integrator.
References
----------
.. [1] 'Romberg's method' https://en.wikipedia.org/wiki/Romberg%27s_method
Examples
--------
Integrate a gaussian from 0 to 1 and compare to the error function.
>>> from scipy import integrate
>>> from scipy.special import erf
>>> gaussian = lambda x: 1/np.sqrt(np.pi) * np.exp(-x**2)
>>> result = integrate.romberg(gaussian, 0, 1, show=True)
Romberg integration of <function vfunc at ...> from [0, 1]
::
Steps StepSize Results
1 1.000000 0.385872
2 0.500000 0.412631 0.421551
4 0.250000 0.419184 0.421368 0.421356
8 0.125000 0.420810 0.421352 0.421350 0.421350
16 0.062500 0.421215 0.421350 0.421350 0.421350 0.421350
32 0.031250 0.421317 0.421350 0.421350 0.421350 0.421350 0.421350
The final result is 0.421350396475 after 33 function evaluations.
>>> print("%g %g" % (2*result, erf(1)))
0.842701 0.842701
"""
if np.isinf(a) or np.isinf(b):
raise ValueError("Romberg integration only available "
"for finite limits.")
vfunc = vectorize1(function, args, vec_func=vec_func)
n = 1
interval = [a, b]
intrange = b - a
ordsum = _difftrap(vfunc, interval, n)
result = intrange * ordsum
resmat = [[result]]
err = np.inf
last_row = resmat[0]
for i in range(1, divmax+1):
n *= 2
ordsum += _difftrap(vfunc, interval, n)
row = [intrange * ordsum / n]
for k in range(i):
row.append(_romberg_diff(last_row[k], row[k], k+1))
result = row[i]
lastresult = last_row[i-1]
if show:
resmat.append(row)
err = abs(result - lastresult)
if err < tol or err < rtol * abs(result):
break
last_row = row
else:
warnings.warn(
"divmax (%d) exceeded. Latest difference = %e" % (divmax, err),
AccuracyWarning)
if show:
_printresmat(vfunc, interval, resmat)
return result
# Coefficients for Newton-Cotes quadrature
#
# These are the points being used
# to construct the local interpolating polynomial
# a are the weights for Newton-Cotes integration
# B is the error coefficient.
# error in these coefficients grows as N gets larger.
# or as samples are closer and closer together
# You can use maxima to find these rational coefficients
# for equally spaced data using the commands
# a(i,N) := integrate(product(r-j,j,0,i-1) * product(r-j,j,i+1,N),r,0,N) / ((N-i)! * i!) * (-1)^(N-i);
# Be(N) := N^(N+2)/(N+2)! * (N/(N+3) - sum((i/N)^(N+2)*a(i,N),i,0,N));
# Bo(N) := N^(N+1)/(N+1)! * (N/(N+2) - sum((i/N)^(N+1)*a(i,N),i,0,N));
# B(N) := (if (mod(N,2)=0) then Be(N) else Bo(N));
#
# pre-computed for equally-spaced weights
#
# num_a, den_a, int_a, num_B, den_B = _builtincoeffs[N]
#
# a = num_a*array(int_a)/den_a
# B = num_B*1.0 / den_B
#
# integrate(f(x),x,x_0,x_N) = dx*sum(a*f(x_i)) + B*(dx)^(2k+3) f^(2k+2)(x*)
# where k = N // 2
#
_builtincoeffs = {
1: (1,2,[1,1],-1,12),
2: (1,3,[1,4,1],-1,90),
3: (3,8,[1,3,3,1],-3,80),
4: (2,45,[7,32,12,32,7],-8,945),
5: (5,288,[19,75,50,50,75,19],-275,12096),
6: (1,140,[41,216,27,272,27,216,41],-9,1400),
7: (7,17280,[751,3577,1323,2989,2989,1323,3577,751],-8183,518400),
8: (4,14175,[989,5888,-928,10496,-4540,10496,-928,5888,989],
-2368,467775),
9: (9,89600,[2857,15741,1080,19344,5778,5778,19344,1080,
15741,2857], -4671, 394240),
10: (5,299376,[16067,106300,-48525,272400,-260550,427368,
-260550,272400,-48525,106300,16067],
-673175, 163459296),
11: (11,87091200,[2171465,13486539,-3237113, 25226685,-9595542,
15493566,15493566,-9595542,25226685,-3237113,
13486539,2171465], -2224234463, 237758976000),
12: (1, 5255250, [1364651,9903168,-7587864,35725120,-51491295,
87516288,-87797136,87516288,-51491295,35725120,
-7587864,9903168,1364651], -3012, 875875),
13: (13, 402361344000,[8181904909, 56280729661, -31268252574,
156074417954,-151659573325,206683437987,
-43111992612,-43111992612,206683437987,
-151659573325,156074417954,-31268252574,
56280729661,8181904909], -2639651053,
344881152000),
14: (7, 2501928000, [90241897,710986864,-770720657,3501442784,
-6625093363,12630121616,-16802270373,19534438464,
-16802270373,12630121616,-6625093363,3501442784,
-770720657,710986864,90241897], -3740727473,
1275983280000)
}
def newton_cotes(rn, equal=0):
r"""
Return weights and error coefficient for Newton-Cotes integration.
Suppose we have (N+1) samples of f at the positions
x_0, x_1, ..., x_N. Then an N-point Newton-Cotes formula for the
integral between x_0 and x_N is:
:math:`\int_{x_0}^{x_N} f(x)dx = \Delta x \sum_{i=0}^{N} a_i f(x_i)
+ B_N (\Delta x)^{N+2} f^{N+1} (\xi)`
where :math:`\xi \in [x_0,x_N]`
    and :math:`\Delta x = \frac{x_N-x_0}{N}` is the average sample spacing.
If the samples are equally-spaced and N is even, then the error
term is :math:`B_N (\Delta x)^{N+3} f^{N+2}(\xi)`.
Parameters
----------
rn : int
The integer order for equally-spaced data or the relative positions of
the samples with the first sample at 0 and the last at N, where N+1 is
the length of `rn`. N is the order of the Newton-Cotes integration.
equal : int, optional
Set to 1 to enforce equally spaced data.
Returns
-------
an : ndarray
1-D array of weights to apply to the function at the provided sample
positions.
B : float
Error coefficient.
Examples
--------
Compute the integral of sin(x) in [0, :math:`\pi`]:
>>> from scipy.integrate import newton_cotes
>>> def f(x):
... return np.sin(x)
>>> a = 0
>>> b = np.pi
>>> exact = 2
>>> for N in [2, 4, 6, 8, 10]:
... x = np.linspace(a, b, N + 1)
... an, B = newton_cotes(N, 1)
... dx = (b - a) / N
... quad = dx * np.sum(an * f(x))
... error = abs(quad - exact)
... print('{:2d} {:10.9f} {:.5e}'.format(N, quad, error))
...
2 2.094395102 9.43951e-02
4 1.998570732 1.42927e-03
6 2.000017814 1.78136e-05
8 1.999999835 1.64725e-07
10 2.000000001 1.14677e-09
Notes
-----
Normally, the Newton-Cotes rules are used on smaller integration
regions and a composite rule is used to return the total integral.
"""
try:
N = len(rn)-1
if equal:
rn = np.arange(N+1)
elif np.all(np.diff(rn) == 1):
equal = 1
except Exception:
N = rn
rn = np.arange(N+1)
equal = 1
if equal and N in _builtincoeffs:
na, da, vi, nb, db = _builtincoeffs[N]
an = na * np.array(vi, dtype=float) / da
return an, float(nb)/db
if (rn[0] != 0) or (rn[-1] != N):
raise ValueError("The sample positions must start at 0"
" and end at N")
yi = rn / float(N)
ti = 2 * yi - 1
nvec = np.arange(N+1)
C = ti ** nvec[:, np.newaxis]
Cinv = np.linalg.inv(C)
# improve precision of result
for i in range(2):
Cinv = 2*Cinv - Cinv.dot(C).dot(Cinv)
vec = 2.0 / (nvec[::2]+1)
ai = Cinv[:, ::2].dot(vec) * (N / 2.)
if (N % 2 == 0) and equal:
BN = N/(N+3.)
power = N+2
else:
BN = N/(N+2.)
power = N+1
BN = BN - np.dot(yi**power, ai)
p1 = power+1
fac = power*math.log(N) - gammaln(p1)
fac = math.exp(fac)
return ai, BN*fac
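# --- Hedged demo (not part of the original module) ---------------------------
# A minimal sketch of how the sampled-data helpers above fit together. It only
# calls names defined or imported in this file (np, romb, newton_cotes,
# _builtincoeffs); the sample values are illustrative assumptions.
if __name__ == "__main__":
    # romb() needs 2**k + 1 equally spaced samples.
    xs = np.linspace(0, np.pi, 2**4 + 1)
    print("romb(sin) on [0, pi]:", romb(np.sin(xs), dx=xs[1] - xs[0]))  # ~2.0
    # newton_cotes(2) reproduces Simpson's rule: weights [1, 4, 1]/3.
    an, B = newton_cotes(2, 1)
    print("Simpson weights:", an, "error coefficient:", B)
    # The same weights come straight from the pre-computed coefficient table.
    num_a, den_a, int_a, num_B, den_B = _builtincoeffs[2]
    print("from table     :", num_a * np.array(int_a, dtype=float) / den_a)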
|
from abc import abstractmethod, ABCMeta
from indy_common.authorize.auth_actions import split_action_id
from indy_common.authorize.auth_constraints import AbstractAuthConstraint, AbstractConstraintSerializer
from indy_common.state import config
from plenum.common.metrics_collector import MetricsName, MetricsCollector
from state.pruning_state import PruningState
from stp_core.common.log import getlogger
logger = getlogger()
class AbstractAuthStrategy(metaclass=ABCMeta):
def __init__(self, auth_map):
self.auth_map = auth_map
@abstractmethod
def get_auth_constraint(self, action_id) -> AbstractAuthConstraint:
raise NotImplementedError()
@abstractmethod
def _find_auth_constraint_key(self, action_id, auth_map):
raise NotImplementedError()
@staticmethod
def is_accepted_action_id(from_auth_map, from_req):
am = split_action_id(from_auth_map)
r = split_action_id(from_req)
if r.prefix != am.prefix:
return False
if r.txn_type != am.txn_type:
return False
if r.field != am.field and \
am.field != '*':
return False
if r.old_value != am.old_value and \
am.old_value != '*':
return False
if r.new_value != am.new_value and \
am.new_value != '*':
return False
return True
class LocalAuthStrategy(AbstractAuthStrategy):
def get_auth_constraint(self, action_id) -> AbstractAuthConstraint:
am_id = self._find_auth_constraint_key(action_id, self.auth_map)
return self.auth_map.get(am_id)
def _find_auth_constraint_key(self, action_id, auth_map):
for am_id in auth_map.keys():
if self.is_accepted_action_id(am_id, action_id):
return am_id
class ConfigLedgerAuthStrategy(AbstractAuthStrategy):
def __init__(self,
auth_map,
state: PruningState,
serializer: AbstractConstraintSerializer,
metrics: MetricsCollector = None):
super().__init__(auth_map=auth_map)
self.state = state
self.serializer = serializer
self.metrics = metrics
self.from_state_count = 0
def get_auth_constraint(self, action_id: str) -> AbstractAuthConstraint:
"""
Find rule_id for incoming action_id and return AuthConstraint instance
"""
return self._find_auth_constraint(action_id, self.auth_map)
def _find_auth_constraint(self, action_id, auth_map):
am_id = self._find_auth_constraint_key(action_id, auth_map)
if am_id:
constraint = self.get_from_state(key=config.make_state_path_for_auth_rule(am_id))
if not constraint:
return auth_map.get(am_id)
logger.debug("Using auth constraint from state")
if self.metrics:
self.from_state_count += 1
self.metrics.add_event(MetricsName.AUTH_RULES_FROM_STATE_COUNT, self.from_state_count)
return constraint
def _find_auth_constraint_key(self, action_id, auth_map):
for am_id in auth_map.keys():
if self.is_accepted_action_id(am_id, action_id):
return am_id
def get_from_state(self, key, isCommitted=False):
from_state = self.state.get(key=key,
isCommitted=isCommitted)
if not from_state:
return None
return self.serializer.deserialize(from_state)
|
"""Validate some things around restore."""
from __future__ import annotations
from typing import Any
import voluptuous as vol
from ..backups.const import BackupType
from ..const import (
ATTR_ADDONS,
ATTR_COMPRESSED,
ATTR_CRYPTO,
ATTR_DATE,
ATTR_DOCKER,
ATTR_FOLDERS,
ATTR_HOMEASSISTANT,
ATTR_NAME,
ATTR_PROTECTED,
ATTR_REPOSITORIES,
ATTR_SIZE,
ATTR_SLUG,
ATTR_TYPE,
ATTR_VERSION,
CRYPTO_AES128,
FOLDER_ADDONS,
FOLDER_HOMEASSISTANT,
FOLDER_MEDIA,
FOLDER_SHARE,
FOLDER_SSL,
)
from ..validate import SCHEMA_DOCKER_CONFIG, repositories, version_tag
ALL_FOLDERS = [
FOLDER_SHARE,
FOLDER_ADDONS,
FOLDER_SSL,
FOLDER_MEDIA,
]
def unique_addons(addons_list):
"""Validate that an add-on is unique."""
single = {addon[ATTR_SLUG] for addon in addons_list}
if len(single) != len(addons_list):
raise vol.Invalid("Invalid addon list in backup!") from None
return addons_list
def v1_homeassistant(
homeassistant_data: dict[str, Any] | None
) -> dict[str, Any] | None:
"""Cleanup homeassistant artefacts from v1."""
if not homeassistant_data:
return None
if homeassistant_data.get(ATTR_VERSION) is None:
return None
return homeassistant_data
def v1_folderlist(folder_data: list[str]) -> list[str]:
"""Cleanup folder artefacts from v1."""
if FOLDER_HOMEASSISTANT in folder_data:
folder_data.remove(FOLDER_HOMEASSISTANT)
return folder_data
def v1_protected(protected: bool | str) -> bool:
"""Cleanup old protected handling."""
if isinstance(protected, bool):
return protected
return True
# pylint: disable=no-value-for-parameter
SCHEMA_BACKUP = vol.Schema(
{
vol.Optional(ATTR_VERSION, default=1): vol.All(vol.Coerce(int), vol.In((1, 2))),
vol.Required(ATTR_SLUG): str,
vol.Required(ATTR_TYPE): vol.Coerce(BackupType),
vol.Required(ATTR_NAME): str,
vol.Required(ATTR_DATE): str,
vol.Optional(ATTR_COMPRESSED, default=True): vol.Boolean(),
vol.Optional(ATTR_PROTECTED, default=False): vol.All(
v1_protected, vol.Boolean()
),
vol.Optional(ATTR_CRYPTO, default=None): vol.Maybe(CRYPTO_AES128),
vol.Optional(ATTR_HOMEASSISTANT, default=None): vol.All(
v1_homeassistant,
vol.Maybe(
vol.Schema(
{
vol.Required(ATTR_VERSION): version_tag,
vol.Optional(ATTR_SIZE, default=0): vol.Coerce(float),
},
extra=vol.REMOVE_EXTRA,
)
),
),
vol.Optional(ATTR_DOCKER, default=dict): SCHEMA_DOCKER_CONFIG,
vol.Optional(ATTR_FOLDERS, default=list): vol.All(
v1_folderlist, [vol.In(ALL_FOLDERS)], vol.Unique()
),
vol.Optional(ATTR_ADDONS, default=list): vol.All(
[
vol.Schema(
{
vol.Required(ATTR_SLUG): str,
vol.Required(ATTR_NAME): str,
vol.Required(ATTR_VERSION): version_tag,
vol.Optional(ATTR_SIZE, default=0): vol.Coerce(float),
},
extra=vol.REMOVE_EXTRA,
)
],
unique_addons,
),
vol.Optional(ATTR_REPOSITORIES, default=list): repositories,
},
extra=vol.ALLOW_EXTRA,
)
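# --- Hedged demo (not part of the original module) ---------------------------
# Exercises the small v1 cleanup helpers defined above on literal data; the
# duplicate-slug dict literals are illustrative assumptions, everything else
# uses constants imported at the top of this file.
if __name__ == "__main__":
    print(v1_protected("legacy-password-hash"))  # old string form -> True
    print(v1_protected(False))                   # already a bool -> unchanged
    print(v1_folderlist([FOLDER_HOMEASSISTANT, FOLDER_SSL]))  # homeassistant dropped
    try:
        unique_addons([{ATTR_SLUG: "core_ssh"}, {ATTR_SLUG: "core_ssh"}])
    except vol.Invalid as err:
        print("duplicate add-ons rejected:", err)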
|
import heapq
from typing import Iterable
class HeapQueue:
def __init__(self, init_h: Iterable):
self.h = [(-val, index) for index, val in init_h]
heapq.heapify(self.h)
def replace_largest(self, new_val):
heapq.heapreplace(self.h, (-new_val, self.max_index))
    def pop(self):
        # return the popped (negated value, index) pair instead of discarding it
        return heapq.heappop(self.h)
@property
def max_index(self):
return self.h[0][1]
@property
def max_val(self):
return -self.h[0][0]
def __repr__(self):
return "HeapQueue instance containing data {}.".format(self.h)
|
##
##
# File auto-generated against equivalent DynamicSerialize Java class
class LockChangeRequest(object):
def __init__(self):
self.requests = None
self.workstationID = None
self.siteID = None
def getRequests(self):
return self.requests
def setRequests(self, requests):
self.requests = requests
def getWorkstationID(self):
return self.workstationID
def setWorkstationID(self, workstationID):
self.workstationID = workstationID
def getSiteID(self):
return self.siteID
def setSiteID(self, siteID):
self.siteID = siteID
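# --- Hedged usage sketch (not part of the auto-generated class) --------------
# The object is a plain data container; the setter/getter pairs simply mirror
# the attributes. The site ID value below is an illustrative placeholder.
if __name__ == "__main__":
    req = LockChangeRequest()
    req.setSiteID("XYZ")
    req.setRequests([])
    print(req.getSiteID(), req.getRequests())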
|
import os
from xua import helpers
from xua.constants import CLI, BUILD
from xua.exceptions import UserError
from xua.builders.doc import htmlOld
def getBuildEngine(project, config):
if project == CLI.PROJECT_SERVER_PHP:
# @TODO
return None
elif project == CLI.PROJECT_MARSHAL_DART:
# @TODO
return None
elif project == CLI.PROJECT_DOC_HTML:
return htmlOld.BuildEngine(config)
# return html.engine(config)
elif project == CLI.PROJECT_DOC_LATEX:
# @TODO
return None
else:
raise UserError(f"Unknown project {project}.")
def buildRecursive(path, buildEngine):
if os.path.isfile(path):
if buildEngine.config.isToBuild(path, buildEngine.project):
destination = buildEngine.config.getCorrespondingPath(
buildEngine.project, path, BUILD.MAP_PROJECT_EXTENSION[buildEngine.project])
try:
helpers.write(buildEngine.build(path), destination)
except UserError as e:
helpers.Logger.log(helpers.Logger.ERROR,
buildEngine.project, path + ": " + str(e))
else:
helpers.Logger.log(helpers.Logger.SUCCESS,
buildEngine.project, destination + ' built.')
elif buildEngine.config.isToCopy(path, buildEngine.project):
helpers.copy(path, buildEngine.config.getCorrespondingPath(
buildEngine.project, path))
elif os.path.isdir(path):
for child in os.listdir(path):
buildRecursive(os.path.join(path, child), buildEngine)
|
from random import shuffle
import time
import logger
from couchbase_helper.cluster import Cluster
from membase.api.exception import StatsUnavailableException, \
ServerAlreadyJoinedException, RebalanceFailedException, \
FailoverFailedException, InvalidArgumentException, ServerSelfJoinException, \
AddNodeException
from membase.api.rest_client import RestConnection, RestHelper, Bucket
from membase.helper.bucket_helper import BucketOperationHelper
from memcached.helper.data_helper import MemcachedClientHelper, VBucketAwareMemcached
from mc_bin_client import MemcachedClient, MemcachedError
log = logger.Logger.get_logger()
class RebalanceHelper():
@staticmethod
#bucket is a json object that contains name,port,password
def wait_for_mc_stats_all_nodes(master, bucket, stat_key, stat_value, timeout_in_seconds=120, verbose=True):
log.info("waiting for bucket {0} stat : {1} to match {2} on {3}".format(bucket, stat_key, \
stat_value, master.ip))
time_to_timeout = 0
previous_stat_value = -1
curr_stat_value = -1
verified = False
all_stats = {}
while not verified:
rest = RestConnection(master)
nodes = rest.node_statuses()
for node in nodes:
_server = {"ip": node.ip, "port": node.port, "username": master.rest_username,
"password": master.rest_password}
#failed over node is part of node_statuses but since its failed over memcached connections
#to this node will fail
node_self = RestConnection(_server).get_nodes_self()
if node_self.clusterMembership == 'active':
mc = MemcachedClientHelper.direct_client(_server, bucket)
n_stats = mc.stats("")
mc.close()
all_stats[node.id] = n_stats
actual_stat_value = -1
for k in all_stats:
if all_stats[k] and stat_key in all_stats[k]:
if actual_stat_value == -1:
log.info(all_stats[k][stat_key])
actual_stat_value = int(all_stats[k][stat_key])
else:
actual_stat_value += int(all_stats[k][stat_key])
if actual_stat_value == stat_value:
log.info("{0} : {1}".format(stat_key, actual_stat_value))
verified = True
break
else:
if verbose:
log.info("{0} : {1}".format(stat_key, actual_stat_value))
curr_stat_value = actual_stat_value
# values are changing so clear any timeout
if curr_stat_value != previous_stat_value:
time_to_timeout = 0
else:
if time_to_timeout == 0:
time_to_timeout = time.time() + timeout_in_seconds
if time_to_timeout < time.time():
log.info("no change in {0} stat after {1} seconds (value = {2})".format(stat_key, timeout_in_seconds, curr_stat_value))
break
previous_stat_value = curr_stat_value
if not verbose:
time.sleep(0.1)
else:
time.sleep(2)
return verified
@staticmethod
def wait_for_replication(servers, cluster_helper=None, timeout=600):
if cluster_helper is None:
cluster = Cluster()
else:
cluster = cluster_helper
tasks = []
rest = RestConnection(servers[0])
buckets = rest.get_buckets()
for server in servers:
for bucket in buckets:
for server_repl in list(set(servers) - set([server])):
tasks.append(cluster.async_wait_for_stats([server], bucket, 'tap',
'eq_tapq:replication_ns_1@' + server_repl.ip + ':idle', '==', 'true'))
tasks.append(cluster.async_wait_for_stats([server], bucket, 'tap',
'eq_tapq:replication_ns_1@' + server_repl.ip + ':backfill_completed', '==', 'true'))
try:
for task in tasks:
task.result(timeout)
finally:
if cluster_helper is None:
# stop all newly created task manager threads
cluster.shutdown()
return True
@staticmethod
#bucket is a json object that contains name,port,password
def wait_for_stats(master, bucket, stat_key, stat_value, timeout_in_seconds=120, verbose=True):
log.info("waiting for bucket {0} stat : {1} to match {2} on {3}".format(bucket, stat_key, \
stat_value, master.ip))
time_to_timeout = 0
previous_stat_value = -1
curr_stat_value = -1
verified = False
while not verified:
rest = RestConnection(master)
try:
stats = rest.get_bucket_stats(bucket)
if stats and stat_key in stats and stats[stat_key] == stat_value:
log.info("{0} : {1}".format(stat_key, stats[stat_key]))
verified = True
break
else:
if stats and stat_key in stats:
if verbose:
log.info("{0} : {1}".format(stat_key, stats[stat_key]))
curr_stat_value = stats[stat_key]
# values are changing so clear any timeout
if curr_stat_value != previous_stat_value:
time_to_timeout = 0
else:
if time_to_timeout == 0:
time_to_timeout = time.time() + timeout_in_seconds
if time_to_timeout < time.time():
log.info("no change in {0} stat after {1} seconds (value = {2})".format(stat_key, timeout_in_seconds, curr_stat_value))
break
previous_stat_value = curr_stat_value
if not verbose:
time.sleep(0.1)
else:
time.sleep(2)
except:
log.info("unable to collect stats from server {0}".format(master))
verified = True #TODO: throw ex and assume caller catches
break
# wait for 5 seconds for the next check
time.sleep(5)
return verified
@staticmethod
def wait_for_stats_no_timeout(master, bucket, stat_key, stat_value, timeout_in_seconds=-1, verbose=True):
log.info("waiting for bucket {0} stat : {1} to match {2} on {3}".format(bucket, stat_key, \
stat_value, master.ip))
rest = RestConnection(master)
stats = rest.get_bucket_stats(bucket)
while stats.get(stat_key, -1) != stat_value:
stats = rest.get_bucket_stats(bucket)
if verbose:
log.info("{0} : {1}".format(stat_key, stats.get(stat_key, -1)))
time.sleep(5)
return True
@staticmethod
#bucket is a json object that contains name,port,password
def wait_for_mc_stats(master, bucket, stat_key, stat_value, timeout_in_seconds=120, verbose=True):
log.info("waiting for bucket {0} stat : {1} to match {2} on {3}".format(bucket, stat_key, \
stat_value, master.ip))
start = time.time()
verified = False
while (time.time() - start) <= timeout_in_seconds:
c = MemcachedClient(master.ip, 11210)
stats = c.stats()
c.close()
if stats and stat_key in stats and str(stats[stat_key]) == str(stat_value):
log.info("{0} : {1}".format(stat_key, stats[stat_key]))
verified = True
break
else:
if stats and stat_key in stats:
if verbose:
log.info("{0} : {1}".format(stat_key, stats[stat_key]))
if not verbose:
time.sleep(0.1)
else:
time.sleep(2)
return verified
@staticmethod
def wait_for_mc_stats_no_timeout(master, bucket, stat_key, stat_value, timeout_in_seconds=-1, verbose=True):
log.info("waiting for bucket {0} stat : {1} to match {2} on {3}".format(bucket, stat_key, \
stat_value, master.ip))
# keep retrying until reaches the server
stats = {}
        while not stats:
            c = None
            try:
                c = MemcachedClient(master.ip, 11210)
                c.sasl_auth_plain(bucket, '')
                stats = c.stats()
            except Exception as e:
                log.info("Exception: {0}, retry in 2 seconds ...".format(str(e)))
                stats = {}
                time.sleep(2)
            finally:
                # the client may not have been created if the connection failed
                if c is not None:
                    c.close()
while str(stats[stat_key]) != str(stat_value):
c = MemcachedClient(master.ip, 11210)
c.sasl_auth_plain(bucket, '')
stats = c.stats()
c.close()
if verbose:
log.info("{0} : {1}".format(stat_key, stats[stat_key]))
time.sleep(5)
return True
@staticmethod
#bucket is a json object that contains name,port,password
def wait_for_stats_int_value(master, bucket, stat_key, stat_value, option="==", timeout_in_seconds=120, verbose=True):
log.info("waiting for bucket {0} stat : {1} to {2} {3} on {4}".format(bucket, stat_key, option, \
stat_value, master.ip))
start = time.time()
verified = False
while (time.time() - start) <= timeout_in_seconds:
rest = RestConnection(master)
stats = rest.get_bucket_stats(bucket)
#some stats are in memcached
if stats and stat_key in stats:
actual = int(stats[stat_key])
if option == "==":
verified = stat_value == actual
elif option == ">":
verified = stat_value > actual
elif option == "<":
verified = stat_value < actual
elif option == ">=":
verified = stat_value >= actual
elif option == "<=":
verified = stat_value <= actual
if verified:
log.info("verified {0} : {1}".format(stat_key, actual))
break
if verbose:
log.info("{0} : {1} isn't {2} {3}".format(stat_key, stat_value, option, actual))
time.sleep(2)
return verified
@staticmethod
#bucket is a json object that contains name,port,password
def wait_for_stats_on_all(master, bucket, stat_key, stat_value, timeout_in_seconds=120,
fn=None):
fn = fn or RebalanceHelper.wait_for_stats
rest = RestConnection(master)
servers = rest.get_nodes()
verified = False
start_time = time.time()
for server in servers:
verified = fn(server, bucket, stat_key, stat_value, \
timeout_in_seconds=timeout_in_seconds)
if not verified:
log.info("bucket {0}: stat_key {1} for server {2} timed out in {3}".format(bucket, stat_key, \
server.ip, time.time() - start_time))
break
return verified
@staticmethod
def wait_till_total_numbers_match(master,
bucket,
timeout_in_seconds=120):
log.info('waiting for sum_of_curr_items == total_items....')
start = time.time()
verified = False
while (time.time() - start) <= timeout_in_seconds:
try:
if RebalanceHelper.verify_items_count(master, bucket):
verified = True
break
else:
time.sleep(2)
except StatsUnavailableException:
log.error("unable to retrieve stats for any node! Print taps for all nodes:")
break
if not verified:
rest = RestConnection(master)
RebalanceHelper.print_taps_from_all_nodes(rest, bucket)
return verified
@staticmethod
def wait_for_persistence(master, bucket, bucket_type='memcache', timeout=120):
if bucket_type == 'ephemeral':
return True
verified = True
verified &= RebalanceHelper.wait_for_mc_stats_all_nodes(
master, bucket, "ep_queue_size", 0,
timeout_in_seconds=timeout)
verified &= RebalanceHelper.wait_for_mc_stats_all_nodes(
master, bucket, "ep_flusher_todo", 0,
timeout_in_seconds=timeout)
verified &= RebalanceHelper.wait_for_mc_stats_all_nodes(
master, bucket, "ep_uncommitted_items", 0,
timeout_in_seconds=timeout)
return verified
@staticmethod
#TODO: add password and port
def print_taps_from_all_nodes(rest, bucket='default'):
#get the port number from rest ?
log = logger.Logger.get_logger()
nodes_for_stats = rest.get_nodes()
for node_for_stat in nodes_for_stats:
try:
client = MemcachedClientHelper.direct_client(node_for_stat, bucket)
log.info("getting tap stats... for {0}".format(node_for_stat.ip))
tap_stats = client.stats('tap')
if tap_stats:
RebalanceHelper.log_interesting_taps(node_for_stat, tap_stats, log)
client.close()
except Exception as ex:
log.error("error {0} while getting stats...".format(ex))
@staticmethod
def log_interesting_taps(node, tap_stats, logger):
interesting_stats = ['ack_log_size', 'ack_seqno', 'ack_window_full', 'has_item', 'has_queued_item',
'idle', 'paused', 'backfill_completed', 'pending_backfill', 'pending_disk_backfill', 'recv_ack_seqno',
'ep_num_new_']
for name in tap_stats:
for interesting_stat in interesting_stats:
if name.find(interesting_stat) != -1:
logger.info("TAP {0} :{1} {2}".format(node.id, name, tap_stats[name]))
break
@staticmethod
def verify_items_count(master, bucket, num_attempt=3, timeout=2):
#get the #of buckets from rest
rest = RestConnection(master)
if isinstance(bucket, Bucket):
bucket = bucket.name
bucket_info = rest.get_bucket(bucket, num_attempt, timeout)
replica_factor = bucket_info.numReplicas
vbucket_active_sum = 0
vbucket_replica_sum = 0
vbucket_pending_sum = 0
kv_nodes = 0
all_server_stats = []
stats_received = True
nodes = rest.get_nodes()
nodes_services = rest.get_nodes_services()
for node in nodes_services:
if 'kv' in nodes_services[node]:
kv_nodes += 1
for server in nodes:
#get the stats
server_stats = rest.get_bucket_stats_for_node(bucket, server)
if not server_stats:
log.info("unable to get stats from {0}:{1}".format(server.ip, server.port))
stats_received = False
all_server_stats.append((server, server_stats))
if not stats_received:
raise StatsUnavailableException()
sum = 0
for server, single_stats in all_server_stats:
if not single_stats or "curr_items" not in single_stats:
continue
sum += single_stats["curr_items"]
log.info("curr_items from {0}:{1} : {2}".format(server.ip, server.port, \
single_stats["curr_items"]))
if 'vb_pending_num' in single_stats:
vbucket_pending_sum += single_stats['vb_pending_num']
log.info(
"vb_pending_num from {0}:{1} : {2}".format(server.ip, server.port, \
single_stats["vb_pending_num"]))
if 'vb_active_num' in single_stats:
vbucket_active_sum += single_stats['vb_active_num']
log.info(
"vb_active_num from {0}:{1} : {2}".format(server.ip, server.port, \
single_stats["vb_active_num"]))
if 'vb_replica_num' in single_stats:
vbucket_replica_sum += single_stats['vb_replica_num']
log.info(
"vb_replica_num from {0}:{1} : {2}".format(server.ip, server.port, \
single_stats["vb_replica_num"]))
msg = "summation of vb_active_num : {0} vb_pending_num : {1} vb_replica_num : {2}"
log.info(msg.format(vbucket_active_sum, vbucket_pending_sum, vbucket_replica_sum))
msg = 'sum : {0} and sum * (replica_factor + 1) ({1}) : {2}'
log.info(msg.format(sum, replica_factor + 1, (sum * (replica_factor + 1))))
master_stats = rest.get_bucket_stats(bucket)
if "curr_items_tot" in master_stats:
log.info('curr_items_tot from master: {0}'.format(master_stats["curr_items_tot"]))
else:
raise Exception("bucket {0} stats doesnt contain 'curr_items_tot':".format(bucket))
if replica_factor >= kv_nodes:
log.warn("the number of nodes is less than replica requires")
delta = sum * (kv_nodes) - master_stats["curr_items_tot"]
else:
delta = sum * (replica_factor + 1) - master_stats["curr_items_tot"]
delta = abs(delta)
if delta > 0:
if sum == 0:
missing_percentage = 0
else:
missing_percentage = delta * 1.0 / (sum * (replica_factor + 1))
log.info("Nodes stats are: {0}".format([node.ip for node in nodes]))
else:
missing_percentage = 1
log.info("delta : {0} missing_percentage : {1} replica_factor : {2}".format(delta, \
missing_percentage, replica_factor))
# If no items missing then, return True
kv_nodes = 0
if not delta:
return True
return False
@staticmethod
def verify_maps(vbucket_map_before, vbucket_map_after):
#for each bucket check the replicas
for i in range(0, len(vbucket_map_before)):
if not vbucket_map_before[i].master == vbucket_map_after[i].master:
log.error(
'vbucket[{0}].master mismatch {1} vs {2}'.format(i, vbucket_map_before[i].master,
vbucket_map_after[i].master))
return False
for j in range(0, len(vbucket_map_before[i].replica)):
if not (vbucket_map_before[i].replica[j]) == (vbucket_map_after[i].replica[j]):
log.error('vbucket[{0}].replica[{1} mismatch {2} vs {3}'.format(i, j,
vbucket_map_before[i].replica[j],
vbucket_map_after[i].replica[j]))
return False
return True
    # Read the current nodes. If the node IP is already added, silently return;
    # if it is not added, try to add it and then rebalance. We should also try
    # to get the bucket information from the REST API instead of passing it to
    # the functions.
@staticmethod
def rebalance_in(servers, how_many, do_shuffle=True, monitor=True, do_check=True):
servers_rebalanced = []
log = logger.Logger.get_logger()
rest = RestConnection(servers[0])
nodes = rest.node_statuses()
#are all ips the same
nodes_on_same_ip = True
firstIp = nodes[0].ip
if len(nodes) == 1:
nodes_on_same_ip = False
else:
for node in nodes:
if node.ip != firstIp:
nodes_on_same_ip = False
break
nodeIps = ["{0}:{1}".format(node.ip, node.port) for node in nodes]
log.info("current nodes : {0}".format(nodeIps))
toBeAdded = []
master = servers[0]
selection = servers[1:]
if do_shuffle:
shuffle(selection)
for server in selection:
if nodes_on_same_ip:
if not "{0}:{1}".format(firstIp, server.port) in nodeIps:
toBeAdded.append(server)
servers_rebalanced.append(server)
log.info("choosing {0}:{1}".format(server.ip, server.port))
elif not "{0}:{1}".format(server.ip, server.port) in nodeIps:
toBeAdded.append(server)
servers_rebalanced.append(server)
log.info("choosing {0}:{1}".format(server.ip, server.port))
if len(toBeAdded) == int(how_many):
break
if do_check and len(toBeAdded) < how_many:
raise Exception("unable to find {0} nodes to rebalance_in".format(how_many))
for server in toBeAdded:
otpNode = rest.add_node(master.rest_username, master.rest_password,
server.ip, server.port)
otpNodes = [node.id for node in rest.node_statuses()]
started = rest.rebalance(otpNodes, [])
msg = "rebalance operation started ? {0}"
log.info(msg.format(started))
if monitor is not True:
return True, servers_rebalanced
if started:
try:
result = rest.monitorRebalance()
except RebalanceFailedException as e:
log.error("rebalance failed: {0}".format(e))
return False, servers_rebalanced
msg = "successfully rebalanced in selected nodes from the cluster ? {0}"
log.info(msg.format(result))
return result, servers_rebalanced
return False, servers_rebalanced
@staticmethod
def rebalance_out(servers, how_many, monitor=True):
rest = RestConnection(servers[0])
        cur_ips = [node.ip for node in rest.node_statuses()]
        # list comprehensions (not lazy map/filter) so len() and slicing work below
        servers = [server for server in servers if server.ip in cur_ips] or servers
if len(cur_ips) <= how_many or how_many < 1:
log.error("failed to rebalance %s servers out: not enough servers"
% how_many)
return False, []
ejections = servers[1:how_many + 1]
log.info("rebalancing out %s" % ejections)
RebalanceHelper.begin_rebalance_out(servers[0], ejections)
if not monitor:
return True, ejections
try:
return rest.monitorRebalance(), ejections
        except RebalanceFailedException as e:
log.error("failed to rebalance %s servers out: %s" % (how_many, e))
return False, ejections
@staticmethod
def rebalance_swap(servers, how_many, monitor=True):
if how_many < 1:
log.error("failed to swap rebalance %s servers - invalid count"
% how_many)
return False, []
rest = RestConnection(servers[0])
cur_nodes = rest.node_statuses()
        cur_ips = [node.ip for node in cur_nodes]
        cur_ids = [node.id for node in cur_nodes]
        free_servers = [server for server in servers if server.ip not in cur_ips]
if len(cur_ids) <= how_many or len(free_servers) < how_many:
log.error("failed to swap rebalance %s servers - not enough servers"
% how_many)
return False, []
ejections = cur_ids[-how_many:]
additions = free_servers[:how_many]
log.info("swap rebalance: cur: %s, eject: %s, add: %s"
% (cur_ids, ejections, additions))
        try:
            # map() is lazy in Python 3; add the nodes with an explicit loop
            for server in additions:
                rest.add_node(servers[0].rest_username,
                              servers[0].rest_password,
                              server.ip, server.port)
        except (ServerAlreadyJoinedException,
                ServerSelfJoinException, AddNodeException) as e:
log.error("failed to swap rebalance - addition failed %s: %s"
% (additions, e))
return False, []
        cur_ids = [node.id for node in rest.node_statuses()]
try:
rest.rebalance(otpNodes=cur_ids, ejectedNodes=ejections)
        except InvalidArgumentException as e:
log.error("failed to swap rebalance - rebalance failed :%s" % e)
return False, []
if not monitor:
return True, ejections + additions
try:
return rest.monitorRebalance(), ejections + additions
        except RebalanceFailedException as e:
log.error("failed to swap rebalance %s servers: %s" % (how_many, e))
return False, ejections + additions
@staticmethod
def begin_rebalance_in(master, servers, timeout=5):
log = logger.Logger.get_logger()
rest = RestConnection(master)
otpNode = None
for server in servers:
if server == master:
continue
log.info("adding node {0}:{1} to cluster".format(server.ip, server.port))
try:
otpNode = rest.add_node(master.rest_username, master.rest_password, server.ip, server.port)
msg = "unable to add node {0}:{1} to the cluster"
assert otpNode, msg.format(server.ip, server.port)
except ServerAlreadyJoinedException:
log.info("server {0} already joined".format(server))
log.info("beginning rebalance in")
try:
rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()], ejectedNodes=[])
except:
log.error("rebalance failed, trying again after {0} seconds".format(timeout))
@staticmethod
def begin_rebalance_out(master, servers, timeout=5):
log = logger.Logger.get_logger()
rest = RestConnection(master)
master_node = rest.get_nodes_self()
allNodes = []
ejectedNodes = []
nodes = rest.node_statuses()
for server in servers:
server_node = RestConnection(server).get_nodes_self()
if server_node == master_node:
continue
log.info("removing node {0}:{1} from cluster".format(server_node.ip, server_node.port))
for node in nodes:
if "{0}:{1}".format(node.ip, node.port) == "{0}:{1}".format(server_node.ip, server_node.port):
ejectedNodes.append(node.id)
log.info("beginning rebalance out")
try:
rest.rebalance(otpNodes=[node.id for node in nodes], ejectedNodes=ejectedNodes)
except:
log.error("rebalance failed, trying again after {0} seconds".format(timeout))
@staticmethod
def end_rebalance(master):
log = logger.Logger.get_logger()
rest = RestConnection(master)
result = False
try:
result = rest.monitorRebalance()
except RebalanceFailedException as e:
log.error("rebalance failed: {0}".format(e))
assert result, "rebalance operation failed after adding nodes"
log.info("rebalance finished")
@staticmethod
def getOtpNodeIds(master):
rest = RestConnection(master)
nodes = rest.node_statuses()
otpNodeIds = [node.id for node in nodes]
return otpNodeIds
@staticmethod
def verify_vBuckets_info(master, bucket="default"):
'''
verify vBuckets' state and items count(for active/replica) in them related to vBucketMap for all nodes in cluster
'''
awareness = VBucketAwareMemcached(RestConnection(master), bucket)
vb_map = awareness.vBucketMap
vb_mapReplica = awareness.vBucketMapReplica
replica_num = len(vb_mapReplica[0])
#get state and count items for all vbuckets for each node
node_stats = RebalanceHelper.get_vBuckets_info(master)
state = True
        # iterate through all vbuckets by their numbers
        for num in vb_map:
            # verify that a vbucket that is active in memcached is also active in stats("hash")
            if(node_stats[vb_map[num]]["vb_" + str(num)][0] != "active"):
                log.info("vBucket {0} in {1} node has wrong state {2}".format("vb_" + str(num), vb_map[num], node_stats[vb_map[num]]["vb_" + str(num)]))
state = False
#number of active items for num vBucket
vb = node_stats[vb_map[num]]["vb_" + str(num)][1]
active_vb = vb_map[num]
            # list of nodes for which this vBucket is a replica
            replica_vbs = vb_mapReplica[num]
sum_items_replica = 0
#sum of replica items for all nodes for num vBucket
for i in range(replica_num):
                if(node_stats[vb_mapReplica[num][i]]["vb_" + str(num)][0] != "replica"):
                    log.info("vBucket {0} in {1} node has wrong state {2}".format("vb_" + str(num), vb_mapReplica[num], node_stats[vb_mapReplica[num][i]]["vb_" + str(num)]))
state = False
sum_items_replica += int(node_stats[replica_vbs[i]]["vb_" + str(num)][1])
#print information about the discrepancy of the number of replica and active items for num vBucket
if (int(vb) * len(vb_mapReplica[num]) != sum_items_replica):
log.info("sum of active items doesn't correspond to replica's vBucets in {0} vBucket:".format("vb_" + str(num)))
log.info("items in active vBucket {0}:{1}".format(vb_map[num], node_stats[vb_map[num]]["vb_" + str(num)]))
                for j in range(replica_num):
log.info("items in replica vBucket {0}: {1}".format(vb_mapReplica[num][j], node_stats[vb_mapReplica[num][j]]["vb_" + str(num)]))
log.info(node_stats[vb_mapReplica[num][0]])
state = False
if not state:
log.error("Something is wrong, see log above. See details:")
log.error("vBucetMap: {0}".format(vb_map))
log.error("vBucetReplicaMap: {0}".format(vb_mapReplica))
log.error("node_stats: {0}".format(node_stats))
return state
@staticmethod
def get_vBuckets_info(master):
"""
return state and count items for all vbuckets for each node
format: dict: {u'1node_ip1': {'vb_79': ['replica', '0'], 'vb_78': ['active', '0']..}, u'1node_ip1':....}
"""
rest = RestConnection(master)
port = rest.get_nodes_self().memcached
nodes = rest.node_statuses()
_nodes_stats = {}
for node in nodes:
stat = {}
buckets = []
_server = {"ip": node.ip, "port": node.port, "username": master.rest_username,
"password": master.rest_password}
try:
buckets = rest.get_buckets()
mc = MemcachedClient(node.ip, port)
stat_hash = mc.stats("hash")
except Exception:
if not buckets:
log.error("There are not any buckets in {0}:{1} node".format(node.ip, node.port))
else:
log.error("Impossible to get vBucket's information for {0}:{1} node".format(node.ip, node.port))
                _nodes_stats[node.ip + ":" + str(node.port)] = {}
continue
mc.close()
vb_names = [key[:key.index(":")] for key in stat_hash.keys()]
for name in vb_names:
stat[name] = [stat_hash[name + ":state"], stat_hash[name + ":counted"]]
_nodes_stats[node.ip + ":" + str(port)] = stat
log.info(_nodes_stats)
return _nodes_stats
@staticmethod
def pick_node(master):
rest = RestConnection(master)
nodes = rest.node_statuses()
node_picked = None
nodes_on_same_ip = True
firstIp = nodes[0].ip
for node in nodes:
if node.ip != firstIp:
nodes_on_same_ip = False
break
for node in nodes:
node_picked = node
if not nodes_on_same_ip:
if node_picked.ip != master.ip:
log.info(
"Picked node ... {0}:{1}".format(node_picked.ip, node_picked.port))
break
else:
# temp fix - port numbers of master(machine ip and localhost: 9000 match
if int(node_picked.port) == int(
master.port):
log.info("Not picking the master node {0}:{1}.. try again...".format(node_picked.ip,
node_picked.port))
else:
log.info(
"Picked node {0}:{1}".format(node_picked.ip, node_picked.port))
break
return node_picked
@staticmethod
def pick_nodes(master, howmany=1, target_node = None):
rest = RestConnection(master)
nodes = rest.node_statuses()
picked = []
for node_for_stat in nodes:
if (node_for_stat.ip != master.ip or str(node_for_stat.port) != master.port) and node_for_stat.replication > 0:
if target_node == None:
picked.append(node_for_stat)
elif target_node.ip == node_for_stat.ip:
picked.append(node_for_stat)
return picked
if len(picked) == howmany:
break
return picked
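# --- Hedged sketch (not part of the original helper) -------------------------
# verify_maps() only needs objects exposing .master and .replica, so a
# namedtuple stand-in (an assumption, not the real vbucket-map class) is enough
# to exercise the comparison logic without a live cluster.
if __name__ == "__main__":
    from collections import namedtuple
    VBucket = namedtuple("VBucket", ["master", "replica"])
    before = [VBucket("ns_1@10.0.0.1", ["ns_1@10.0.0.2"])]
    same = [VBucket("ns_1@10.0.0.1", ["ns_1@10.0.0.2"])]
    moved = [VBucket("ns_1@10.0.0.2", ["ns_1@10.0.0.1"])]
    print(RebalanceHelper.verify_maps(before, same))   # True  - nothing moved
    print(RebalanceHelper.verify_maps(before, moved))  # False - master changed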
|
"""
HTML5 Push Messaging notification service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.html5/
"""
import datetime
import json
import logging
import time
import uuid
from aiohttp.hdrs import AUTHORIZATION
import voluptuous as vol
from voluptuous.humanize import humanize_error
from homeassistant.util.json import load_json, save_json
from homeassistant.exceptions import HomeAssistantError
from homeassistant.components.frontend import add_manifest_json_key
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.notify import (
ATTR_DATA, ATTR_TITLE, ATTR_TARGET, PLATFORM_SCHEMA, ATTR_TITLE_DEFAULT,
BaseNotificationService)
from homeassistant.const import (
URL_ROOT, HTTP_BAD_REQUEST, HTTP_UNAUTHORIZED, HTTP_INTERNAL_SERVER_ERROR)
from homeassistant.helpers import config_validation as cv
from homeassistant.util import ensure_unique_string
REQUIREMENTS = ['pywebpush==1.6.0']
DEPENDENCIES = ['frontend']
_LOGGER = logging.getLogger(__name__)
REGISTRATIONS_FILE = 'html5_push_registrations.conf'
ATTR_GCM_SENDER_ID = 'gcm_sender_id'
ATTR_GCM_API_KEY = 'gcm_api_key'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(ATTR_GCM_SENDER_ID): cv.string,
vol.Optional(ATTR_GCM_API_KEY): cv.string,
})
ATTR_SUBSCRIPTION = 'subscription'
ATTR_BROWSER = 'browser'
ATTR_NAME = 'name'
ATTR_ENDPOINT = 'endpoint'
ATTR_KEYS = 'keys'
ATTR_AUTH = 'auth'
ATTR_P256DH = 'p256dh'
ATTR_EXPIRATIONTIME = 'expirationTime'
ATTR_TAG = 'tag'
ATTR_ACTION = 'action'
ATTR_ACTIONS = 'actions'
ATTR_TYPE = 'type'
ATTR_URL = 'url'
ATTR_JWT = 'jwt'
# The number of days after the moment a notification is sent that a JWT
# is valid.
JWT_VALID_DAYS = 7
KEYS_SCHEMA = vol.All(
dict, vol.Schema({
vol.Required(ATTR_AUTH): cv.string,
vol.Required(ATTR_P256DH): cv.string,
})
)
SUBSCRIPTION_SCHEMA = vol.All(
dict, vol.Schema({
# pylint: disable=no-value-for-parameter
vol.Required(ATTR_ENDPOINT): vol.Url(),
vol.Required(ATTR_KEYS): KEYS_SCHEMA,
vol.Optional(ATTR_EXPIRATIONTIME): vol.Any(None, cv.positive_int),
})
)
REGISTER_SCHEMA = vol.Schema({
vol.Required(ATTR_SUBSCRIPTION): SUBSCRIPTION_SCHEMA,
vol.Required(ATTR_BROWSER): vol.In(['chrome', 'firefox']),
vol.Optional(ATTR_NAME): cv.string
})
CALLBACK_EVENT_PAYLOAD_SCHEMA = vol.Schema({
vol.Required(ATTR_TAG): cv.string,
vol.Required(ATTR_TYPE): vol.In(['received', 'clicked', 'closed']),
vol.Required(ATTR_TARGET): cv.string,
vol.Optional(ATTR_ACTION): cv.string,
vol.Optional(ATTR_DATA): dict,
})
NOTIFY_CALLBACK_EVENT = 'html5_notification'
# Badge and timestamp are Chrome specific (not in official spec)
HTML5_SHOWNOTIFICATION_PARAMETERS = (
'actions', 'badge', 'body', 'dir', 'icon', 'image', 'lang',
'renotify', 'requireInteraction', 'tag', 'timestamp', 'vibrate')
def get_service(hass, config, discovery_info=None):
"""Get the HTML5 push notification service."""
json_path = hass.config.path(REGISTRATIONS_FILE)
registrations = _load_config(json_path)
if registrations is None:
return None
hass.http.register_view(
HTML5PushRegistrationView(registrations, json_path))
hass.http.register_view(HTML5PushCallbackView(registrations))
gcm_api_key = config.get(ATTR_GCM_API_KEY)
gcm_sender_id = config.get(ATTR_GCM_SENDER_ID)
if gcm_sender_id is not None:
add_manifest_json_key(
ATTR_GCM_SENDER_ID, config.get(ATTR_GCM_SENDER_ID))
return HTML5NotificationService(gcm_api_key, registrations, json_path)
def _load_config(filename):
"""Load configuration."""
try:
return load_json(filename)
except HomeAssistantError:
pass
return {}
class HTML5PushRegistrationView(HomeAssistantView):
"""Accepts push registrations from a browser."""
url = '/api/notify.html5'
name = 'api:notify.html5'
def __init__(self, registrations, json_path):
"""Init HTML5PushRegistrationView."""
self.registrations = registrations
self.json_path = json_path
async def post(self, request):
"""Accept the POST request for push registrations from a browser."""
try:
data = await request.json()
except ValueError:
return self.json_message('Invalid JSON', HTTP_BAD_REQUEST)
try:
data = REGISTER_SCHEMA(data)
except vol.Invalid as ex:
return self.json_message(
humanize_error(data, ex), HTTP_BAD_REQUEST)
devname = data.get(ATTR_NAME)
data.pop(ATTR_NAME, None)
name = self.find_registration_name(data, devname)
previous_registration = self.registrations.get(name)
self.registrations[name] = data
try:
hass = request.app['hass']
await hass.async_add_job(save_json, self.json_path,
self.registrations)
return self.json_message(
'Push notification subscriber registered.')
except HomeAssistantError:
if previous_registration is not None:
self.registrations[name] = previous_registration
else:
self.registrations.pop(name)
return self.json_message(
'Error saving registration.', HTTP_INTERNAL_SERVER_ERROR)
def find_registration_name(self, data, suggested=None):
"""Find a registration name matching data or generate a unique one."""
endpoint = data.get(ATTR_SUBSCRIPTION).get(ATTR_ENDPOINT)
for key, registration in self.registrations.items():
subscription = registration.get(ATTR_SUBSCRIPTION)
if subscription.get(ATTR_ENDPOINT) == endpoint:
return key
return ensure_unique_string(suggested or 'unnamed device',
self.registrations)
async def delete(self, request):
"""Delete a registration."""
try:
data = await request.json()
except ValueError:
return self.json_message('Invalid JSON', HTTP_BAD_REQUEST)
subscription = data.get(ATTR_SUBSCRIPTION)
found = None
for key, registration in self.registrations.items():
if registration.get(ATTR_SUBSCRIPTION) == subscription:
found = key
break
if not found:
# If not found, unregistering was already done. Return 200
return self.json_message('Registration not found.')
reg = self.registrations.pop(found)
try:
hass = request.app['hass']
await hass.async_add_job(save_json, self.json_path,
self.registrations)
except HomeAssistantError:
self.registrations[found] = reg
return self.json_message(
'Error saving registration.', HTTP_INTERNAL_SERVER_ERROR)
return self.json_message('Push notification subscriber unregistered.')
class HTML5PushCallbackView(HomeAssistantView):
"""Accepts push registrations from a browser."""
requires_auth = False
url = '/api/notify.html5/callback'
name = 'api:notify.html5/callback'
def __init__(self, registrations):
"""Init HTML5PushCallbackView."""
self.registrations = registrations
def decode_jwt(self, token):
"""Find the registration that signed this JWT and return it."""
import jwt
# 1. Check claims w/o verifying to see if a target is in there.
# 2. If target in claims, attempt to verify against the given name.
# 2a. If decode is successful, return the payload.
# 2b. If decode is unsuccessful, return a 401.
target_check = jwt.decode(token, verify=False)
if target_check.get(ATTR_TARGET) in self.registrations:
possible_target = self.registrations[target_check[ATTR_TARGET]]
key = possible_target[ATTR_SUBSCRIPTION][ATTR_KEYS][ATTR_AUTH]
try:
return jwt.decode(token, key, algorithms=["ES256", "HS256"])
except jwt.exceptions.DecodeError:
pass
return self.json_message('No target found in JWT',
status_code=HTTP_UNAUTHORIZED)
# The following is based on code from Auth0
# https://auth0.com/docs/quickstart/backend/python
def check_authorization_header(self, request):
"""Check the authorization header."""
import jwt
auth = request.headers.get(AUTHORIZATION, None)
if not auth:
return self.json_message('Authorization header is expected',
status_code=HTTP_UNAUTHORIZED)
parts = auth.split()
if parts[0].lower() != 'bearer':
return self.json_message('Authorization header must '
'start with Bearer',
status_code=HTTP_UNAUTHORIZED)
if len(parts) != 2:
return self.json_message('Authorization header must '
'be Bearer token',
status_code=HTTP_UNAUTHORIZED)
token = parts[1]
try:
payload = self.decode_jwt(token)
except jwt.exceptions.InvalidTokenError:
return self.json_message('token is invalid',
status_code=HTTP_UNAUTHORIZED)
return payload
async def post(self, request):
"""Accept the POST request for push registrations event callback."""
auth_check = self.check_authorization_header(request)
if not isinstance(auth_check, dict):
return auth_check
try:
data = await request.json()
except ValueError:
return self.json_message('Invalid JSON', HTTP_BAD_REQUEST)
event_payload = {
ATTR_TAG: data.get(ATTR_TAG),
ATTR_TYPE: data[ATTR_TYPE],
ATTR_TARGET: auth_check[ATTR_TARGET],
}
if data.get(ATTR_ACTION) is not None:
event_payload[ATTR_ACTION] = data.get(ATTR_ACTION)
if data.get(ATTR_DATA) is not None:
event_payload[ATTR_DATA] = data.get(ATTR_DATA)
try:
event_payload = CALLBACK_EVENT_PAYLOAD_SCHEMA(event_payload)
except vol.Invalid as ex:
_LOGGER.warning("Callback event payload is not valid: %s",
humanize_error(event_payload, ex))
event_name = '{}.{}'.format(NOTIFY_CALLBACK_EVENT,
event_payload[ATTR_TYPE])
request.app['hass'].bus.fire(event_name, event_payload)
return self.json({'status': 'ok', 'event': event_payload[ATTR_TYPE]})
class HTML5NotificationService(BaseNotificationService):
"""Implement the notification service for HTML5."""
def __init__(self, gcm_key, registrations, json_path):
"""Initialize the service."""
self._gcm_key = gcm_key
self.registrations = registrations
self.registrations_json_path = json_path
@property
def targets(self):
"""Return a dictionary of registered targets."""
targets = {}
for registration in self.registrations:
targets[registration] = registration
return targets
def send_message(self, message="", **kwargs):
"""Send a message to a user."""
import jwt
from pywebpush import WebPusher
timestamp = int(time.time())
tag = str(uuid.uuid4())
payload = {
'badge': '/static/images/notification-badge.png',
'body': message,
ATTR_DATA: {},
'icon': '/static/icons/favicon-192x192.png',
ATTR_TAG: tag,
'timestamp': (timestamp*1000), # Javascript ms since epoch
ATTR_TITLE: kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT)
}
data = kwargs.get(ATTR_DATA)
if data:
# Pick out fields that should go into the notification directly vs
# into the notification data dictionary.
data_tmp = {}
for key, val in data.items():
if key in HTML5_SHOWNOTIFICATION_PARAMETERS:
payload[key] = val
else:
data_tmp[key] = val
payload[ATTR_DATA] = data_tmp
if (payload[ATTR_DATA].get(ATTR_URL) is None and
payload.get(ATTR_ACTIONS) is None):
payload[ATTR_DATA][ATTR_URL] = URL_ROOT
targets = kwargs.get(ATTR_TARGET)
if not targets:
targets = self.registrations.keys()
for target in list(targets):
info = self.registrations.get(target)
if info is None:
_LOGGER.error("%s is not a valid HTML5 push notification"
" target", target)
continue
jwt_exp = (datetime.datetime.fromtimestamp(timestamp) +
datetime.timedelta(days=JWT_VALID_DAYS))
jwt_secret = info[ATTR_SUBSCRIPTION][ATTR_KEYS][ATTR_AUTH]
jwt_claims = {'exp': jwt_exp, 'nbf': timestamp,
'iat': timestamp, ATTR_TARGET: target,
ATTR_TAG: payload[ATTR_TAG]}
jwt_token = jwt.encode(jwt_claims, jwt_secret).decode('utf-8')
payload[ATTR_DATA][ATTR_JWT] = jwt_token
# Only pass the gcm key if we're actually using GCM
# If we don't, notifications break on FireFox
gcm_key = self._gcm_key \
if 'googleapis.com' in info[ATTR_SUBSCRIPTION][ATTR_ENDPOINT] \
else None
response = WebPusher(info[ATTR_SUBSCRIPTION]).send(
json.dumps(payload), gcm_key=gcm_key, ttl='86400'
)
if response.status_code == 410:
_LOGGER.info("Notification channel has expired")
reg = self.registrations.pop(target)
if not save_json(self.registrations_json_path,
self.registrations):
self.registrations[target] = reg
_LOGGER.error("Error saving registration")
else:
_LOGGER.info("Configuration saved")
|
from django.urls import path
from mysit.views import *
app_name = 'mysit'
urlpatterns = [
path('',index_views, name='index'),
path('about',about_views, name='about'),
path('contact',contact_views, name='contact'),
path('gallery',gallery_views, name='gallery'),
path('menu',menu_views, name='menu'),
path('reservation',reservation_views, name='reservation'),
]
|
#!/usr/bin/env python3
import rclpy
from rclpy.node import Node
from std_msgs.msg import Int64
class NumberPublisher(Node):
def __init__(self):
super().__init__('number_publisher')
self.publisher_ = self.create_publisher(Int64, 'numbers', 10)
timer_period = 0.5 # seconds
self.timer = self.create_timer(timer_period, self.timer_callback)
self.i = 0
def timer_callback(self):
msg = Int64()
msg.data = self.i
self.publisher_.publish(msg)
self.get_logger().info('Publishing: "%s"' % msg.data)
self.i += 1
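# --- Hedged companion sketch (not part of the original node) -----------------
# A matching subscriber for the 'numbers' topic, shown for illustration only;
# main() below still spins just the publisher.
class NumberSubscriber(Node):
    def __init__(self):
        super().__init__('number_subscriber')
        # queue depth 10 mirrors the publisher above
        self.subscription = self.create_subscription(
            Int64, 'numbers', self.listener_callback, 10)
    def listener_callback(self, msg):
        self.get_logger().info('Received: "%s"' % msg.data)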
def main(args=None):
rclpy.init(args=args)
number_publisher = NumberPublisher()
rclpy.spin(number_publisher)
# Destroy the node explicitly
# (optional - otherwise it will be done automatically
# when the garbage collector destroys the node object)
number_publisher.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
|
from functools import partial
import click
from odc import dscache
from odc.dscache.tools.tiling import (
bin_by_native_tile,
web_gs,
extract_native_albers_tile,
parse_gridspec)
from odc.dscache._dscache import mk_group_name
from odc.index import bin_dataset_stream
@click.command('dstiler')
@click.option('--native', is_flag=True, help='Use Landsat Path/Row as grouping')
@click.option('--native-albers', is_flag=True, help='When datasets are in Albers (AU) grid already')
@click.option('--web', type=int, help='Use web map tiling regime at supplied zoom level')
@click.option('--grid', type=str,
help="Grid spec or name 'crs;pixel_resolution;shape_in_pixels'|albers_au_25",
default='albers_au_25')
@click.argument('dbfile', type=str, nargs=1)
def cli(native, native_albers, web, grid, dbfile):
"""Add spatial grouping to file db.
Default grid is Australian Albers (EPSG:3577) with 100k by 100k tiles. But
you can also group by Landsat path/row (--native), or Google's map tiling
regime (--web zoom_level)
\b
Example for custom --grid:
- rectangular: 'epsg:6933;-10x10;2000x3000'
^crs ^y ^x ^ny ^nx
- square : 'epsg:3857;10;10000'
- named : albers_au_25
albers_africa_10 (20,30,60 are also available)
"""
cache = dscache.open_rw(dbfile)
label = 'Processing {} ({:,d} datasets)'.format(dbfile, cache.count)
group_prefix = 'grid'
gs = None
cells = {}
if native:
group_prefix = 'native'
binner = partial(bin_by_native_tile, cells=cells)
elif native_albers:
group_prefix = 'albers'
binner = lambda dss: bin_by_native_tile(dss, cells, native_tile_id=extract_native_albers_tile)
elif web is not None:
gs = web_gs(web)
group_prefix = 'web_' + str(web)
binner = lambda dss: bin_dataset_stream(gs, dss, cells)
else:
gs = parse_gridspec(grid)
group_prefix = f"epsg{gs.crs.epsg:d}"
binner = lambda dss: bin_dataset_stream(gs, dss, cells)
if gs is not None:
click.echo(f'Using gridspec: {gs}')
cache.add_grid(gs, group_prefix)
with click.progressbar(cache.get_all(), length=cache.count, label=label) as dss:
for ds in binner(dss):
pass
click.echo('Total bins: {:d}'.format(len(cells)))
with click.progressbar(cells.values(), length=len(cells), label='Saving') as groups:
for group in groups:
cache.add_grid_tile(group_prefix, group.idx, group.dss)
if __name__ == '__main__':
cli()
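# Hedged usage examples assembled only from the options declared above; the console
# script name and the db file are placeholders for however this module is installed:
#   dstiler some-datasets.db                                      # default albers_au_25 grid
#   dstiler --native some-datasets.db                             # group by Landsat path/row
#   dstiler --native-albers some-datasets.db                      # datasets already on the AU Albers grid
#   dstiler --web 10 some-datasets.db                             # web map tiling at zoom 10
#   dstiler --grid 'epsg:6933;-10x10;2000x3000' some-datasets.db  # custom grid spec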
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pygame
from pygame.locals import *
import codecs
import os
import random
import struct
import sys
SCR_RECT = Rect(0, 0, 640, 480)
GS = 32
DOWN,LEFT,RIGHT,UP = 0,1,2,3
STOP, MOVE = 0, 1  # movement types
PROB_MOVE = 0.005  # probability that an NPC starts a random move each frame
TRANS_COLOR = (190,179,145)  # transparent (colorkey) color for map chips
sounds = {}  # sound effects, keyed by name
def main():
pygame.init()
screen = pygame.display.set_mode(SCR_RECT.size)
    pygame.display.set_caption(u"PyRPG 25 Party")
    # Load sound effects
    load_sounds("data", "sound.dat")
    # Load character chips
    load_charachips("data", "charachip.dat")
    # Load map chips
    load_mapchips("data", "mapchip.dat")
    party = Party()  # the player's party
    # Create the individual players
    player1 = Player("swordman_female", (3,5), DOWN, True, party)
    player2 = Player("elf_female2", (3,4), DOWN, False, party)
    player3 = Player("priestess", (3,3), DOWN, False, party)
    player4 = Player("magician_female", (3,2), DOWN, False, party)
    # Add the characters to the party
    party.add(player1)
    party.add(player2)
    party.add(player3)
    party.add(player4)
    # Create the map
    map = Map("field", party)
    # Message engine
    msg_engine = MessageEngine()
    # Message window
    msgwnd = MessageWindow(Rect(140,334,360,140), msg_engine)
    # Command window
    cmdwnd = CommandWindow(Rect(16,16,216,160), msg_engine)
clock = pygame.time.Clock()
while True:
clock.tick(60)
        # Update
if not msgwnd.is_visible and not cmdwnd.is_visible:
map.update()
party.update(map)
msgwnd.update()
        # Draw
offset = calc_offset(party.member[0])
map.draw(screen, offset)
party.draw(screen, offset)
msgwnd.draw(screen)
cmdwnd.draw(screen)
        show_info(screen, msg_engine, party.member[0], map)  # show debug info on screen
        # Present the frame
pygame.display.update()
        # Event handling
for event in pygame.event.get():
if event.type == QUIT:
sys.exit()
if event.type == KEYDOWN and event.key == K_ESCAPE:
sys.exit()
            # Dispatch events depending on which window is currently visible
if cmdwnd.is_visible:
                # Commands act on the lead character, so pass party.member[0]
cmdwnd_handler(event, cmdwnd, msgwnd, party.member[0], map)
elif msgwnd.is_visible:
if event.type == KEYDOWN and event.key == K_SPACE:
                    msgwnd.next()  # advance to the next page
else:
if event.type == KEYDOWN and event.key == K_SPACE:
sounds["pi"].play()
cmdwnd.show()
def cmdwnd_handler(event, cmdwnd, msgwnd, player, map):
"""コマンドウィンドウが開いているときのイベント処理"""
# 矢印キーでコマンド選択
if event.type == KEYDOWN and event.key == K_LEFT:
if cmdwnd.command <= 3: return
cmdwnd.command -= 4
elif event.type == KEYDOWN and event.key == K_RIGHT:
if cmdwnd.command >= 4: return
cmdwnd.command += 4
    elif event.type == KEYDOWN and event.key == K_UP:
if cmdwnd.command == 0 or cmdwnd.command == 4: return
cmdwnd.command -= 1
elif event.type == KEYDOWN and event.key == K_DOWN:
if cmdwnd.command == 3 or cmdwnd.command == 7: return
cmdwnd.command += 1
    # Execute the selected command with the space key
if event.type == KEYDOWN and event.key == K_SPACE:
if cmdwnd.command == CommandWindow.TALK: # はなす
sounds["pi"].play()
cmdwnd.hide()
chara = player.talk(map)
if chara != None:
msgwnd.set(chara.message)
else:
msgwnd.set(u"そのほうこうには だれもいない。")
elif cmdwnd.command == CommandWindow.STATUS: # つよさ
# TODO: ステータスウィンドウ表示
sounds["pi"].play()
cmdwnd.hide()
msgwnd.set(u"つよさウィンドウが ひらくよてい。")
elif cmdwnd.command == CommandWindow.EQUIPMENT: # そうび
# TODO: そうびウィンドウ表示
sounds["pi"].play()
cmdwnd.hide()
msgwnd.set(u"そうびウィンドウが ひらくよてい。")
elif cmdwnd.command == CommandWindow.DOOR: # とびら
sounds["pi"].play()
cmdwnd.hide()
door = player.open(map)
if door != None:
door.open()
map.remove_event(door)
else:
msgwnd.set(u"そのほうこうに とびらはない。")
elif cmdwnd.command == CommandWindow.SPELL: # じゅもん
# TODO: じゅもんウィンドウ表示
sounds["pi"].play()
cmdwnd.hide()
msgwnd.set(u"じゅもんウィンドウが ひらくよてい。")
elif cmdwnd.command == CommandWindow.ITEM: # どうぐ
# TODO: どうぐウィンドウ表示
sounds["pi"].play()
cmdwnd.hide()
msgwnd.set(u"どうぐウィンドウが ひらくよてい。")
elif cmdwnd.command == CommandWindow.TACTICS: # さくせん
# TODO: さくせんウィンドウ表示
sounds["pi"].play()
cmdwnd.hide()
msgwnd.set(u"さくせんウィンドウが ひらくよてい。")
elif cmdwnd.command == CommandWindow.SEARCH: # しらべる
sounds["pi"].play()
cmdwnd.hide()
treasure = player.search(map)
if treasure != None:
treasure.open()
msgwnd.set(u"%s をてにいれた。" % treasure.item)
map.remove_event(treasure)
else:
msgwnd.set(u"しかし なにもみつからなかった。")
def show_info(screen, msg_engine, player, map):
"""デバッグ情報を表示"""
msg_engine.draw_string(screen, (300,10), map.name.upper()) # マップ名
msg_engine.draw_string(screen, (300,40), player.name.upper()) # プレイヤー名
msg_engine.draw_string(screen, (300,70), "%d_%d" % (player.x, player.y)) # プレイヤー座標
def load_sounds(dir, file):
"""サウンドをロードしてsoundsに格納"""
file = os.path.join(dir, file)
fp = open(file, "r")
for line in fp:
line = line.rstrip()
data = line.split(",")
se_name = data[0]
se_file = os.path.join("se", data[1])
sounds[se_name] = pygame.mixer.Sound(se_file)
fp.close()
def load_charachips(dir, file):
"""キャラクターチップをロードしてCharacter.imagesに格納"""
file = os.path.join(dir, file)
fp = open(file, "r")
for line in fp:
line = line.rstrip()
data = line.split(",")
chara_id = int(data[0])
chara_name = data[1]
Character.images[chara_name] = split_image(load_image("charachip", "%s.png" % chara_name))
fp.close()
def load_mapchips(dir, file):
"""マップチップをロードしてMap.imagesに格納"""
file = os.path.join(dir, file)
fp = open(file, "r")
for line in fp:
line = line.rstrip()
data = line.split(",")
mapchip_id = int(data[0])
mapchip_name = data[1]
        movable = int(data[2])  # is this tile walkable?
        transparent = int(data[3])  # draw it with a transparent background?
if transparent == 0:
Map.images.append(load_image("mapchip", "%s.png" % mapchip_name))
else:
Map.images.append(load_image("mapchip", "%s.png" % mapchip_name, TRANS_COLOR))
Map.movable_type.append(movable)
fp.close()
def calc_offset(player):
"""オフセットを計算する"""
offsetx = player.rect.topleft[0] - SCR_RECT.width/2
offsety = player.rect.topleft[1] - SCR_RECT.height/2
return offsetx, offsety
def load_image(dir, file, colorkey=None):
file = os.path.join(dir, file)
try:
image = pygame.image.load(file)
except pygame.error, message:
print "Cannot load image:", file
raise SystemExit, message
image = image.convert()
if colorkey is not None:
if colorkey is -1:
colorkey = image.get_at((0,0))
image.set_colorkey(colorkey, RLEACCEL)
return image
def split_image(image):
"""128x128のキャラクターイメージを32x32の16枚のイメージに分割
分割したイメージを格納したリストを返す"""
imageList = []
for i in range(0, 128, GS):
for j in range(0, 128, GS):
surface = pygame.Surface((GS,GS))
surface.blit(image, (0,0), (j,i,GS,GS))
surface.set_colorkey(surface.get_at((0,0)), RLEACCEL)
surface.convert()
imageList.append(surface)
return imageList
class Map:
    # Set by load_mapchips() in main()
    images = []  # map chip images (ID -> image)
    movable_type = []  # is each map chip walkable? (0: blocked, 1: walkable)
    def __init__(self, name, party):
        self.name = name
        self.row = -1  # number of rows
        self.col = -1  # number of columns
        self.map = []  # map data (2-D list)
        self.charas = []  # characters on this map
        self.events = []  # events on this map
        self.party = party  # the party, kept for collision checks
        self.load()  # load the map
        self.load_event()  # load its events
def create(self, dest_map):
"""dest_mapでマップを初期化"""
self.name = dest_map
self.charas = []
self.events = []
self.load()
self.load_event()
def add_chara(self, chara):
"""キャラクターをマップに追加する"""
self.charas.append(chara)
def update(self):
"""マップの更新"""
# マップにいるキャラクターの更新
for chara in self.charas:
chara.update(self) # mapを渡す
def draw(self, screen, offset):
"""マップを描画する"""
offsetx, offsety = offset
# マップの描画範囲を計算
startx = offsetx / GS
endx = startx + SCR_RECT.width/GS + 1
starty = offsety / GS
endy = starty + SCR_RECT.height/GS + 1
# マップの描画
for y in range(starty, endy):
for x in range(startx, endx):
# マップの範囲外はデフォルトイメージで描画
# この条件がないとマップの端に行くとエラー発生
if x < 0 or y < 0 or x > self.col-1 or y > self.row-1:
screen.blit(self.images[self.default], (x*GS-offsetx,y*GS-offsety))
else:
screen.blit(self.images[self.map[y][x]], (x*GS-offsetx,y*GS-offsety))
# このマップにあるイベントを描画
for event in self.events:
event.draw(screen, offset)
# このマップにいるキャラクターを描画
for chara in self.charas:
chara.draw(screen, offset)
def is_movable(self, x, y):
"""(x,y)は移動可能か?"""
# マップ範囲内か?
if x < 0 or x > self.col-1 or y < 0 or y > self.row-1:
return False
# マップチップは移動可能か?
if self.movable_type[self.map[y][x]] == 0:
return False
# キャラクターと衝突しないか?
for chara in self.charas:
if chara.x == x and chara.y == y:
return False
# イベントと衝突しないか?
for event in self.events:
if self.movable_type[event.mapchip] == 0:
if event.x == x and event.y == y:
return False
# 先頭プレイヤーと衝突しないか?
# 先頭プレイヤー以外は無視
player = self.party.member[0]
if player.x == x and player.y == y:
return False
return True
def get_chara(self, x, y):
"""(x,y)にいるキャラクターを返す。いなければNone"""
for chara in self.charas:
if chara.x == x and chara.y == y:
return chara
return None
def get_event(self, x, y):
"""(x,y)にあるイベントを返す。なければNone"""
for event in self.events:
if event.x == x and event.y == y:
return event
return None
def remove_event(self, event):
"""eventを削除する"""
self.events.remove(event)
def load(self):
"""バイナリファイルからマップをロード"""
file = os.path.join("data", self.name + ".map")
fp = open(file, "rb")
# unpack()はタプルが返されるので[0]だけ抽出
self.row = struct.unpack("i", fp.read(struct.calcsize("i")))[0] # 行数
self.col = struct.unpack("i", fp.read(struct.calcsize("i")))[0] # 列数
self.default = struct.unpack("B", fp.read(struct.calcsize("B")))[0] # デフォルトマップチップ
# マップ
self.map = [[0 for c in range(self.col)] for r in range(self.row)]
for r in range(self.row):
for c in range(self.col):
self.map[r][c] = struct.unpack("B", fp.read(struct.calcsize("B")))[0]
fp.close()
def load_event(self):
"""ファイルからイベントをロード"""
file = os.path.join("data", self.name + ".evt")
# テキスト形式のイベントを読み込む
fp = codecs.open(file, "r", "utf-8")
for line in fp:
line = line.rstrip() # 改行除去
if line.startswith("#"): continue # コメント行は無視
if line == "": continue # 空行は無視
data = line.split(",")
event_type = data[0]
if event_type == "BGM": # BGMイベント
self.play_bgm(data)
elif event_type == "CHARA": # キャラクターイベント
self.create_chara(data)
elif event_type == "MOVE": # 移動イベント
self.create_move(data)
elif event_type == "TREASURE": # 宝箱
self.create_treasure(data)
elif event_type == "DOOR": # とびら
self.create_door(data)
elif event_type == "OBJECT": # 一般オブジェクト(玉座など)
self.create_obj(data)
fp.close()
def play_bgm(self, data):
"""BGMを鳴らす"""
bgm_file = "%s.mp3" % data[1]
bgm_file = os.path.join("bgm", bgm_file)
pygame.mixer.music.load(bgm_file)
pygame.mixer.music.play(-1)
def create_chara(self, data):
"""キャラクターを作成してcharasに追加する"""
name = data[1]
x, y = int(data[2]), int(data[3])
direction = int(data[4])
movetype = int(data[5])
message = data[6]
chara = Character(name, (x,y), direction, movetype, message)
self.charas.append(chara)
def create_move(self, data):
"""移動イベントを作成してeventsに追加する"""
x, y = int(data[1]), int(data[2])
mapchip = int(data[3])
dest_map = data[4]
dest_x, dest_y = int(data[5]), int(data[6])
move = MoveEvent((x,y), mapchip, dest_map, (dest_x,dest_y))
self.events.append(move)
def create_treasure(self, data):
"""宝箱を作成してeventsに追加する"""
x, y = int(data[1]), int(data[2])
item = data[3]
treasure = Treasure((x,y), item)
self.events.append(treasure)
def create_door(self, data):
"""とびらを作成してeventsに追加する"""
x, y = int(data[1]), int(data[2])
door = Door((x,y))
self.events.append(door)
def create_obj(self, data):
"""一般オブジェクトを作成してeventsに追加する"""
x, y = int(data[1]), int(data[2])
mapchip = int(data[3])
obj = Object((x,y), mapchip)
self.events.append(obj)
class Character:
    """Generic character class (NPCs)"""
    speed = 4  # pixels moved per frame
    animcycle = 24  # animation speed
    frame = 0
    # Character images (initialised in main())
    # dict: character name -> list of split images
    images = {}
def __init__(self, name, pos, dir, movetype, message):
self.name = name # キャラクター名(ファイル名と同じ)
self.image = self.images[name][0] # 描画中のイメージ
self.x, self.y = pos[0], pos[1] # 座標(単位:マス)
self.rect = self.image.get_rect(topleft=(self.x*GS, self.y*GS))
self.vx, self.vy = 0, 0 # 移動速度
self.moving = False # 移動中か?
self.direction = dir # 向き
self.movetype = movetype # 移動タイプ
self.message = message # メッセージ
def update(self, map):
"""キャラクター状態を更新する。
mapは移動可能かの判定に必要。"""
# プレイヤーの移動処理
if self.moving == True:
# ピクセル移動中ならマスにきっちり収まるまで移動を続ける
self.rect.move_ip(self.vx, self.vy)
if self.rect.left % GS == 0 and self.rect.top % GS == 0: # マスにおさまったら移動完了
self.moving = False
self.x = self.rect.left / GS
self.y = self.rect.top / GS
elif self.movetype == MOVE and random.random() < PROB_MOVE:
# 移動中でないならPROB_MOVEの確率でランダム移動開始
self.direction = random.randint(0, 3) # 0-3のいずれか
if self.direction == DOWN:
if map.is_movable(self.x, self.y+1):
self.vx, self.vy = 0, self.speed
self.moving = True
elif self.direction == LEFT:
if map.is_movable(self.x-1, self.y):
self.vx, self.vy = -self.speed, 0
self.moving = True
elif self.direction == RIGHT:
if map.is_movable(self.x+1, self.y):
self.vx, self.vy = self.speed, 0
self.moving = True
elif self.direction == UP:
if map.is_movable(self.x, self.y-1):
self.vx, self.vy = 0, -self.speed
self.moving = True
# キャラクターアニメーション(frameに応じて描画イメージを切り替える)
self.frame += 1
self.image = self.images[self.name][self.direction*4+self.frame/self.animcycle%4]
def draw(self, screen, offset):
"""オフセットを考慮してプレイヤーを描画"""
offsetx, offsety = offset
px = self.rect.topleft[0]
py = self.rect.topleft[1]
screen.blit(self.image, (px-offsetx, py-offsety))
def set_pos(self, x, y, dir):
"""キャラクターの位置と向きをセット"""
self.x, self.y = x, y
self.rect = self.image.get_rect(topleft=(self.x*GS, self.y*GS))
self.direction = dir
def __str__(self):
return "CHARA,%s,%d,%d,%d,%d,%s" % (self.name,self.x,self.y,self.direction,self.movetype,self.message)
class Player(Character):
"""プレイヤークラス"""
def __init__(self, name, pos, dir, leader, party):
Character.__init__(self, name, pos, dir, False, None)
self.leader = leader
self.party = party
def update(self, map):
"""プレイヤー状態を更新する。
mapは移動可能かの判定に必要。"""
# プレイヤーの移動処理
if self.moving == True:
# ピクセル移動中ならマスにきっちり収まるまで移動を続ける
self.rect.move_ip(self.vx, self.vy)
if self.rect.left % GS == 0 and self.rect.top % GS == 0: # マスにおさまったら移動完了
self.moving = False
self.x = self.rect.left / GS
self.y = self.rect.top / GS
# TODO: ここに接触イベントのチェックを入れる
if not self.leader: return # リーダーでなければイベントは無視
event = map.get_event(self.x, self.y)
if isinstance(event, MoveEvent): # MoveEventなら
sounds["step"].play()
dest_map = event.dest_map
dest_x = event.dest_x
dest_y = event.dest_y
map.create(dest_map)
for player in self.party.member:
player.set_pos(dest_x, dest_y, DOWN) # プレイヤーを移動先座標へ
player.moving = False
# キャラクターアニメーション(frameに応じて描画イメージを切り替える)
self.frame += 1
self.image = self.images[self.name][self.direction*4+self.frame/self.animcycle%4]
def move_to(self, destx, desty):
"""現在位置から(destx,desty)への移動を開始"""
dx = destx - self.x
dy = desty - self.y
# 向きを変える
if dx == 1: self.direction = RIGHT
elif dx == -1: self.direction = LEFT
elif dy == -1: self.direction = UP
elif dy == 1: self.direction = DOWN
# 速度をセット
self.vx, self.vy = dx*self.speed, dy*self.speed
# 移動開始
self.moving = True
def talk(self, map):
"""キャラクターが向いている方向のとなりにキャラクターがいるか調べる"""
# 向いている方向のとなりの座標を求める
nextx, nexty = self.x, self.y
if self.direction == DOWN:
nexty = self.y + 1
event = map.get_event(nextx, nexty)
if isinstance(event, Object) and event.mapchip == 41:
nexty += 1 # テーブルがあったらさらに隣
elif self.direction == LEFT:
nextx = self.x - 1
event = map.get_event(nextx, nexty)
if isinstance(event, Object) and event.mapchip == 41:
nextx -= 1
elif self.direction == RIGHT:
nextx = self.x + 1
event = map.get_event(nextx, nexty)
if isinstance(event, Object) and event.mapchip == 41:
nextx += 1
elif self.direction == UP:
nexty = self.y - 1
event = map.get_event(nextx, nexty)
if isinstance(event, Object) and event.mapchip == 41:
nexty -= 1
# その方向にキャラクターがいるか?
chara = map.get_chara(nextx, nexty)
# キャラクターがいればプレイヤーの方向へ向ける
if chara != None:
if self.direction == DOWN:
chara.direction = UP
elif self.direction == LEFT:
chara.direction = RIGHT
elif self.direction == RIGHT:
chara.direction = LEFT
elif self.direction == UP:
chara.direction = DOWN
chara.update(map) # 向きを変えたので更新
return chara
def search(self, map):
"""足もとに宝箱があるか調べる"""
event = map.get_event(self.x, self.y)
if isinstance(event, Treasure):
return event
return None
def open(self, map):
"""目の前にとびらがあるか調べる"""
# 向いている方向のとなりの座標を求める
nextx, nexty = self.x, self.y
if self.direction == DOWN:
nexty = self.y + 1
elif self.direction == LEFT:
nextx = self.x - 1
elif self.direction == RIGHT:
nextx = self.x + 1
elif self.direction == UP:
nexty = self.y - 1
# その場所にとびらがあるか?
event = map.get_event(nextx, nexty)
if isinstance(event, Door):
return event
return None
class Party:
def __init__(self):
# Partyのメンバーリスト
self.member = []
def add(self, player):
"""Partyにplayerを追加"""
self.member.append(player)
def update(self, map):
# Party全員を更新
for player in self.member:
player.update(map)
# 移動中でないときにキー入力があったらParty全員を移動開始
if not self.member[0].moving:
pressed_keys = pygame.key.get_pressed()
if pressed_keys[K_DOWN]:
# 先頭キャラは移動できなくても向きは変える
self.member[0].direction = DOWN
# 先頭キャラが移動できれば
if map.is_movable(self.member[0].x, self.member[0].y+1):
# 後ろにいる仲間から1つ前の仲間の位置へ移動開始
for i in range(len(self.member)-1,0,-1):
self.member[i].move_to(self.member[i-1].x,self.member[i-1].y)
# 先頭キャラを最後に移動開始
self.member[0].move_to(self.member[0].x,self.member[0].y+1)
elif pressed_keys[K_LEFT]:
self.member[0].direction = LEFT
if map.is_movable(self.member[0].x-1, self.member[0].y):
for i in range(len(self.member)-1,0,-1):
self.member[i].move_to(self.member[i-1].x,self.member[i-1].y)
self.member[0].move_to(self.member[0].x-1,self.member[0].y)
elif pressed_keys[K_RIGHT]:
self.member[0].direction = RIGHT
if map.is_movable(self.member[0].x+1, self.member[0].y):
for i in range(len(self.member)-1,0,-1):
self.member[i].move_to(self.member[i-1].x,self.member[i-1].y)
self.member[0].move_to(self.member[0].x+1,self.member[0].y)
elif pressed_keys[K_UP]:
self.member[0].direction = UP
if map.is_movable(self.member[0].x, self.member[0].y-1):
for i in range(len(self.member)-1,0,-1):
self.member[i].move_to(self.member[i-1].x,self.member[i-1].y)
self.member[0].move_to(self.member[0].x,self.member[0].y-1)
def draw(self, screen, offset):
# Partyの全員を描画
# 重なったとき先頭キャラが表示されるように後ろの人から描画
for player in self.member[::-1]:
player.draw(screen, offset)
class MessageEngine:
FONT_WIDTH = 16
FONT_HEIGHT = 22
WHITE, RED, GREEN, BLUE = 0, 160, 320, 480
def __init__(self):
self.image = load_image("data", "font.png", -1)
self.color = self.WHITE
self.kana2rect = {}
self.create_hash()
def set_color(self, color):
"""文字色をセット"""
self.color = color
# 変な値だったらWHITEにする
if not self.color in [self.WHITE,self.RED,self.GREEN,self.BLUE]:
self.color = self.WHITE
def draw_character(self, screen, pos, ch):
"""1文字だけ描画する"""
x, y = pos
try:
rect = self.kana2rect[ch]
screen.blit(self.image, (x,y), (rect.x+self.color,rect.y,rect.width,rect.height))
except KeyError:
print "描画できない文字があります:%s" % ch
return
def draw_string(self, screen, pos, str):
"""文字列を描画"""
x, y = pos
for i, ch in enumerate(str):
dx = x + self.FONT_WIDTH * i
self.draw_character(screen, (dx,y), ch)
def create_hash(self):
"""文字から座標への辞書を作成"""
filepath = os.path.join("data", "kana2rect.dat")
fp = codecs.open(filepath, "r", "utf-8")
for line in fp.readlines():
line = line.rstrip()
d = line.split(" ")
kana, x, y, w, h = d[0], int(d[1]), int(d[2]), int(d[3]), int(d[4])
self.kana2rect[kana] = Rect(x, y, w, h)
fp.close()
class Window:
"""ウィンドウの基本クラス"""
EDGE_WIDTH = 4 # 白枠の幅
def __init__(self, rect):
self.rect = rect # 一番外側の白い矩形
self.inner_rect = self.rect.inflate(-self.EDGE_WIDTH*2, -self.EDGE_WIDTH*2) # 内側の黒い矩形
self.is_visible = False # ウィンドウを表示中か?
def draw(self, screen):
"""ウィンドウを描画"""
if self.is_visible == False: return
pygame.draw.rect(screen, (255,255,255), self.rect, 0)
pygame.draw.rect(screen, (0,0,0), self.inner_rect, 0)
def show(self):
"""ウィンドウを表示"""
self.is_visible = True
def hide(self):
"""ウィンドウを隠す"""
self.is_visible = False
class MessageWindow(Window):
"""メッセージウィンドウ"""
MAX_CHARS_PER_LINE = 20 # 1行の最大文字数
MAX_LINES_PER_PAGE = 3 # 1行の最大行数(4行目は▼用)
MAX_CHARS_PER_PAGE = 20*3 # 1ページの最大文字数
MAX_LINES = 30 # メッセージを格納できる最大行数
LINE_HEIGHT = 8 # 行間の大きさ
animcycle = 24
def __init__(self, rect, msg_engine):
Window.__init__(self, rect)
self.text_rect = self.inner_rect.inflate(-32, -32) # テキストを表示する矩形
self.text = [] # メッセージ
self.cur_page = 0 # 現在表示しているページ
self.cur_pos = 0 # 現在ページで表示した最大文字数
self.next_flag = False # 次ページがあるか?
self.hide_flag = False # 次のキー入力でウィンドウを消すか?
self.msg_engine = msg_engine # メッセージエンジン
self.cursor = load_image("data", "cursor.png", -1) # カーソル画像
self.frame = 0
def set(self, message):
"""メッセージをセットしてウィンドウを画面に表示する"""
self.cur_pos = 0
self.cur_page = 0
self.next_flag = False
self.hide_flag = False
# 全角スペースで初期化
self.text = [u' '] * (self.MAX_LINES*self.MAX_CHARS_PER_LINE)
# メッセージをセット
p = 0
for i in range(len(message)):
ch = message[i]
if ch == "/": # /は改行文字
self.text[p] = "/"
p += self.MAX_CHARS_PER_LINE
p = (p/self.MAX_CHARS_PER_LINE)*self.MAX_CHARS_PER_LINE
elif ch == "%": # \fは改ページ文字
self.text[p] = "%"
p += self.MAX_CHARS_PER_PAGE
p = (p/self.MAX_CHARS_PER_PAGE)*self.MAX_CHARS_PER_PAGE
else:
self.text[p] = ch
p += 1
self.text[p] = "$" # 終端文字
self.show()
def update(self):
"""メッセージウィンドウを更新する
メッセージが流れるように表示する"""
if self.is_visible:
if self.next_flag == False:
self.cur_pos += 1 # 1文字流す
# テキスト全体から見た現在位置
p = self.cur_page * self.MAX_CHARS_PER_PAGE + self.cur_pos
if self.text[p] == "/": # 改行文字
self.cur_pos += self.MAX_CHARS_PER_LINE
self.cur_pos = (self.cur_pos/self.MAX_CHARS_PER_LINE) * self.MAX_CHARS_PER_LINE
elif self.text[p] == "%": # 改ページ文字
self.cur_pos += self.MAX_CHARS_PER_PAGE
self.cur_pos = (self.cur_pos/self.MAX_CHARS_PER_PAGE) * self.MAX_CHARS_PER_PAGE
elif self.text[p] == "$": # 終端文字
self.hide_flag = True
# 1ページの文字数に達したら▼を表示
if self.cur_pos % self.MAX_CHARS_PER_PAGE == 0:
self.next_flag = True
self.frame += 1
def draw(self, screen):
"""メッセージを描画する
メッセージウィンドウが表示されていないときは何もしない"""
Window.draw(self, screen)
if self.is_visible == False: return
# 現在表示しているページのcur_posまでの文字を描画
for i in range(self.cur_pos):
ch = self.text[self.cur_page*self.MAX_CHARS_PER_PAGE+i]
if ch == "/" or ch == "%" or ch == "$": continue # 制御文字は表示しない
dx = self.text_rect[0] + MessageEngine.FONT_WIDTH * (i % self.MAX_CHARS_PER_LINE)
dy = self.text_rect[1] + (self.LINE_HEIGHT+MessageEngine.FONT_HEIGHT) * (i / self.MAX_CHARS_PER_LINE)
self.msg_engine.draw_character(screen, (dx,dy), ch)
# 最後のページでない場合は▼を表示
if (not self.hide_flag) and self.next_flag:
if self.frame / self.animcycle % 2 == 0:
dx = self.text_rect[0] + (self.MAX_CHARS_PER_LINE/2) * MessageEngine.FONT_WIDTH - MessageEngine.FONT_WIDTH/2
dy = self.text_rect[1] + (self.LINE_HEIGHT + MessageEngine.FONT_HEIGHT) * 3
screen.blit(self.cursor, (dx,dy))
def next(self):
"""メッセージを先に進める"""
# 現在のページが最後のページだったらウィンドウを閉じる
if self.hide_flag:
self.hide()
# ▼が表示されてれば次のページへ
if self.next_flag:
self.cur_page += 1
self.cur_pos = 0
self.next_flag = False
class CommandWindow(Window):
LINE_HEIGHT = 8 # 行間の大きさ
TALK, STATUS, EQUIPMENT, DOOR, SPELL, ITEM, TACTICS, SEARCH = range(0, 8)
COMMAND = [u"はなす", u"つよさ", u"そうび", u"とびら",
u"じゅもん", u"どうぐ", u"さくせん", u"しらべる"]
def __init__(self, rect, msg_engine):
Window.__init__(self, rect)
self.text_rect = self.inner_rect.inflate(-32, -32)
self.command = self.TALK # 選択中のコマンド
self.msg_engine = msg_engine
self.cursor = load_image("data", "cursor2.png", -1)
self.frame = 0
def draw(self, screen):
Window.draw(self, screen)
if self.is_visible == False: return
# はなす、つよさ、そうび、とびらを描画
for i in range(0, 4):
dx = self.text_rect[0] + MessageEngine.FONT_WIDTH
dy = self.text_rect[1] + (self.LINE_HEIGHT+MessageEngine.FONT_HEIGHT) * (i % 4)
self.msg_engine.draw_string(screen, (dx,dy), self.COMMAND[i])
# じゅもん、どうぐ、さくせん、しらべるを描画
for i in range(4, 8):
dx = self.text_rect[0] + MessageEngine.FONT_WIDTH * 6
dy = self.text_rect[1] + (self.LINE_HEIGHT+MessageEngine.FONT_HEIGHT) * (i % 4)
self.msg_engine.draw_string(screen, (dx,dy), self.COMMAND[i])
# 選択中のコマンドの左側に▶を描画
dx = self.text_rect[0] + MessageEngine.FONT_WIDTH * 5 * (self.command / 4)
dy = self.text_rect[1] + (self.LINE_HEIGHT+MessageEngine.FONT_HEIGHT) * (self.command % 4)
screen.blit(self.cursor, (dx,dy))
def show(self):
"""オーバーライド"""
self.command = self.TALK # 追加
self.is_visible = True
class MoveEvent():
"""移動イベント"""
def __init__(self, pos, mapchip, dest_map, dest_pos):
self.x, self.y = pos[0], pos[1] # イベント座標
self.mapchip = mapchip # マップチップ
self.dest_map = dest_map # 移動先マップ名
self.dest_x, self.dest_y = dest_pos[0], dest_pos[1] # 移動先座標
self.image = Map.images[self.mapchip]
self.rect = self.image.get_rect(topleft=(self.x*GS, self.y*GS))
def draw(self, screen, offset):
"""オフセットを考慮してイベントを描画"""
offsetx, offsety = offset
px = self.rect.topleft[0]
py = self.rect.topleft[1]
screen.blit(self.image, (px-offsetx, py-offsety))
def __str__(self):
return "MOVE,%d,%d,%d,%s,%d,%d" % (self.x, self.y, self.mapchip, self.dest_map, self.dest_x, self.dest_y)
class Treasure():
"""宝箱"""
def __init__(self, pos, item):
self.x, self.y = pos[0], pos[1] # 宝箱座標
self.mapchip = 46 # 宝箱は46
self.image = Map.images[self.mapchip]
self.rect = self.image.get_rect(topleft=(self.x*GS, self.y*GS))
self.item = item # アイテム名
def open(self):
"""宝箱をあける"""
sounds["treasure"].play()
# TODO: アイテムを追加する処理
def draw(self, screen, offset):
"""オフセットを考慮してイベントを描画"""
offsetx, offsety = offset
px = self.rect.topleft[0]
py = self.rect.topleft[1]
screen.blit(self.image, (px-offsetx, py-offsety))
def __str__(self):
return "TREASURE,%d,%d,%s" % (self.x, self.y, self.item)
class Door:
"""とびら"""
def __init__(self, pos):
self.x, self.y = pos[0], pos[1]
self.mapchip = 45
self.image = Map.images[self.mapchip]
self.rect = self.image.get_rect(topleft=(self.x*GS, self.y*GS))
def open(self):
"""とびらをあける"""
sounds["door"].play()
def draw(self, screen, offset):
"""オフセットを考慮してイベントを描画"""
offsetx, offsety = offset
px = self.rect.topleft[0]
py = self.rect.topleft[1]
screen.blit(self.image, (px-offsetx, py-offsety))
def __str__(self):
return "DOOR,%d,%d" % (self.x, self.y)
class Object:
"""一般オブジェクト"""
def __init__(self, pos, mapchip):
self.x, self.y = pos[0], pos[1]
self.mapchip = mapchip
self.image = Map.images[self.mapchip]
self.rect = self.image.get_rect(topleft=(self.x*GS, self.y*GS))
def draw(self, screen, offset):
"""オフセットを考慮してイベントを描画"""
offsetx, offsety = offset
px = self.rect.topleft[0]
py = self.rect.topleft[1]
screen.blit(self.image, (px-offsetx, py-offsety))
def __str__(self):
return "OBJECT,%d,%d,%d" % (self.x, self.y, mapchip)
if __name__ == "__main__":
main()
|
# Date: 09/28/2017
# Author: Ethical-H4CK3R
# Description: A Simple C&C Server
from core.prompt import Prompt
from core.server import Server
from template.design import Designer
from core.console import MainController
from core.communicate import Communicate
__version__ = 0.1
class Flex(Prompt, Server, Designer, MainController, Communicate):
''' A Simple C&C Server '''
def __init__(self):
self.ip = '127.0.0.1'
self.port = 4444
self.botnet = []
Prompt.__init__(self)
Server.__init__(self)
Designer.__init__(self)
Communicate.__init__(self)
MainController.__init__(self)
self.wait = False
self.ping = False
self.alive = True
self.debug = True
self.activeIP = None
self.activePort = None
self.default_to_shell = True
self.prompt = self.getprompt()
def start(self):
try:self.cmdloop()
finally:self.disconnect(True)
if __name__ == '__main__':
Flex().start()
|
import os
from flask import Flask, flash, render_template, request
from helpers import *
app = Flask(__name__)
app.secret_key = 'dkjkffksks'
@app.route('/', methods=["GET", "POST"])
def index():
"""Index page"""
if request.method == "POST":
msg = request.form.get("textarea")
img = request.form.get("output_image")
if msg:
fbpost(msg, img)
flash('Successfully posted!')
return render_template('index.html')
@app.errorhandler(404)
def page_not_found(e):
"""Return a custom 404 error."""
return 'Sorry, unexpected error: {}'.format(e), 404
@app.errorhandler(500)
def application_error(e):
"""Return a custom 500 error."""
return 'Sorry, unexpected error: {}'.format(e), 500
if __name__ == '__main__':
app.run()
|
# -*- coding: utf-8 -*-
#
# ownCloud Documentation documentation build configuration file, created by
# sphinx-quickstart on Mon Oct 22 23:16:40 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinxcontrib.phpdomain', 'sphinx.ext.todo', 'rst2pdf.pdfbuilder']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['../_shared_assets/templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'contents'
# General information about the project.
project = u'ownCloud Server Administration Manual'
copyright = u'2012-2015, The ownCloud developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '8.2'
# The full version, including alpha/beta/rc tags.
release = '8.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build','_shared_assets','scripts/*']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['../_shared_assets/themes']
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'owncloud_org'
html_theme_options = {
# "rightsidebar": "true",
}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = "Server Admin Manual"
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['../_shared_assets/static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ownCloudServerAdminManual'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    'preamble': r'\usepackage{morefloats}',
    'figure_align': 'H',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [('contents', 'ownCloudServerAdminManual.tex', u'ownCloud Server Administration Manual', u'The ownCloud developers', 'manual'),]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = '../_shared_assets/static/logo-blue.pdf'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for pdf page output -----------------------------------------------
pdf_documents = [('contents', u'owncloudServerAdminManual', u'ownCloud Server Administration Manual', u'The ownCloud developers'),]
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [ ('contents', 'owncloudserveradminmanual', u'ownCloud Server Administration Manual', [u'The ownCloud developers'], 1) ]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [ ('contents', 'ownCloudServerAdminManual', u'ownCloud Server Administration Manual', u'The ownCloud developers', 'ownCloud', 'The ownCloud Server Administration Manual.', 'Miscellaneous'), ]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'ownCloud Server Administration Manual'
epub_author = u'The ownCloud developers'
epub_publisher = u'The ownCloud developers'
epub_copyright = u'2012-2015, The ownCloud developers'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Include todos?
todo_include_todos = True
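# Hedged build notes (assuming Sphinx plus the extensions listed above are installed):
#   sphinx-build -b html . _build/html      # HTML output using the owncloud_org theme
#   sphinx-build -b latex . _build/latex    # LaTeX sources for the PDF manual
#   sphinx-build -b pdf . _build/pdf        # direct PDF via the rst2pdf.pdfbuilder extension
# Since master_doc = 'contents', builds start from contents.rst in this directory.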
|
from azure.cosmosdb.table.tableservice import TableService
from azure.cosmosdb.table.models import Entity
import uuid
class PhotoCollectionAzureTable:
_connectionstring = ''
def __init__(self, connectionstring):
self._connectionstring = connectionstring
def fetchall(self):
table_service = TableService(connection_string=self._connectionstring)
photos = table_service.query_entities('phototable').items
        for photo in photos:
            photo.pop('etag', None)
            photo.pop('Timestamp', None)
return photos
def fetchone(self, objectID):
table_service = TableService(connection_string=self._connectionstring)
photos = table_service.query_entities('phototable',
"RowKey eq '" + objectID + "'"
).items
        for photo in photos:
            photo.pop('etag', None)
            photo.pop('Timestamp', None)
if photos:
return photos[0]
return None
def addone(self, photo):
table_service = TableService(connection_string=self._connectionstring)
photoAzure = photo
photoAzure['PartitionKey'] = photo['taken']
photoAzure['RowKey'] = str(uuid.uuid4())
photoAzure['objectID'] = photoAzure['RowKey']
table_service.insert_entity('phototable', photoAzure)
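# Hedged usage sketch; the connection string and any photo fields other than 'taken'
# (which addone() uses as the PartitionKey) are placeholders:
if __name__ == '__main__':
    collection = PhotoCollectionAzureTable('<azure-storage-connection-string>')
    collection.addone({'taken': '2019-01-01', 'title': 'example photo'})
    print(collection.fetchall())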
|
from blocktorch.data_checks import DataCheckAction, DataCheckActionCode
def test_data_check_action_attributes():
data_check_action = DataCheckAction(DataCheckActionCode.DROP_COL)
assert data_check_action.action_code == DataCheckActionCode.DROP_COL
assert data_check_action.metadata == {}
data_check_action = DataCheckAction(DataCheckActionCode.DROP_COL, {})
assert data_check_action.action_code == DataCheckActionCode.DROP_COL
assert data_check_action.metadata == {}
data_check_action = DataCheckAction(
DataCheckActionCode.DROP_COL, metadata={"columns": [1, 2]}
)
assert data_check_action.action_code == DataCheckActionCode.DROP_COL
assert data_check_action.metadata == {"columns": [1, 2]}
def test_data_check_action_equality():
data_check_action = DataCheckAction(DataCheckActionCode.DROP_COL)
data_check_action_eq = DataCheckAction(DataCheckActionCode.DROP_COL)
assert data_check_action == data_check_action
assert data_check_action == data_check_action_eq
assert data_check_action_eq == data_check_action
data_check_action = DataCheckAction(
DataCheckActionCode.DROP_COL, metadata={"same detail": "same same same"}
)
data_check_action_eq = DataCheckAction(
DataCheckActionCode.DROP_COL, metadata={"same detail": "same same same"}
)
assert data_check_action == data_check_action
assert data_check_action == data_check_action_eq
assert data_check_action_eq == data_check_action
def test_data_check_action_inequality():
data_check_action = DataCheckAction(DataCheckActionCode.DROP_COL)
data_check_action_diff = DataCheckAction(
DataCheckActionCode.DROP_COL, metadata={"metadata": ["this is different"]}
)
assert data_check_action != data_check_action_diff
assert data_check_action_diff != data_check_action
def test_data_check_action_to_dict():
data_check_action = DataCheckAction(DataCheckActionCode.DROP_COL)
data_check_action_empty_metadata = DataCheckAction(
DataCheckActionCode.DROP_COL, metadata={}
)
data_check_action_with_metadata = DataCheckAction(
DataCheckActionCode.DROP_COL, metadata={"some detail": ["this is different"]}
)
assert data_check_action.to_dict() == {
"code": DataCheckActionCode.DROP_COL.name,
"metadata": {},
}
assert data_check_action_empty_metadata.to_dict() == {
"code": DataCheckActionCode.DROP_COL.name,
"metadata": {},
}
assert data_check_action_with_metadata.to_dict() == {
"code": DataCheckActionCode.DROP_COL.name,
"metadata": {"some detail": ["this is different"]},
}
|
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""The data layer used during training to train a DA Fast R-CNN network.
RoIDataLayer implements a Caffe Python layer.
"""
import caffe
from fast_rcnn.config import cfg
from roi_data_da_layer.minibatch import get_minibatch
import numpy as np
import yaml
from multiprocessing import Process, Queue
from live_dataset import target_file_streamer
class RoIDataDALayer(caffe.Layer):
"""Fast R-CNN data layer used for training."""
def _shuffle_roidb_inds(self):
"""Randomly permute the training roidb."""
if cfg.TRAIN.ASPECT_GROUPING:
widths = np.array([r['width'] for r in self._roidb])
heights = np.array([r['height'] for r in self._roidb])
horz = (widths >= heights)
vert = np.logical_not(horz)
horz_inds = np.where(horz)[0]
vert_inds = np.where(vert)[0]
inds = np.hstack((
np.random.permutation(horz_inds),
np.random.permutation(vert_inds)))
inds = np.reshape(inds, (-1, 2))
row_perm = np.random.permutation(np.arange(inds.shape[0]))
inds = np.reshape(inds[row_perm, :], (-1,))
self._perm = inds
else:
self._perm = np.random.permutation(np.arange(len(self._roidb)))
self._cur = 0
def _get_next_minibatch_inds(self):
"""Return the roidb indices for the next minibatch."""
if self._cur + cfg.TRAIN.IMS_PER_BATCH >= len(self._roidb):
self._shuffle_roidb_inds()
db_inds = self._perm[self._cur:self._cur + cfg.TRAIN.IMS_PER_BATCH]
self._cur += cfg.TRAIN.IMS_PER_BATCH
return db_inds
def _get_next_minibatch(self):
"""Return the blobs to be used for the next minibatch.
If cfg.TRAIN.USE_PREFETCH is True, then blobs will be computed in a
separate process and made available through self._blob_queue.
"""
if cfg.TRAIN.USE_PREFETCH or cfg.FETCH_TARGETS:
# print self._blob_queue.qsize()
return self._blob_queue.get()
else:
db_inds = self._get_next_minibatch_inds()
minibatch_db = [self._roidb[i] for i in db_inds]
return get_minibatch(minibatch_db, self._num_classes)
def set_roidb(self, roidb):
"""Set the roidb to be used by this layer during training."""
self._roidb = roidb
self._shuffle_roidb_inds()
if cfg.TRAIN.USE_PREFETCH or cfg.FETCH_TARGETS:
self._blob_queue = Queue(10)
self._prefetch_process = BlobFetcher(self._blob_queue,
self._roidb,
self._num_classes)
self._prefetch_process.start()
            # Terminate the child process when the parent exits
def cleanup():
print 'Terminating BlobFetcher'
self._prefetch_process.terminate()
self._prefetch_process.join()
import atexit
atexit.register(cleanup)
def setup(self, bottom, top):
"""Setup the RoIDataLayer."""
# parse the layer parameter string, which must be valid YAML
layer_params = yaml.load(self.param_str_)
self._num_classes = layer_params['num_classes']
self._name_to_top_map = {}
# data blob: holds a batch of N images, each with 3 channels
idx = 0
top[idx].reshape(cfg.TRAIN.IMS_PER_BATCH, 3,
max(cfg.TRAIN.SCALES), cfg.TRAIN.MAX_SIZE)
self._name_to_top_map['data'] = idx
idx += 1
if cfg.TRAIN.HAS_RPN:
top[idx].reshape(1, 3)
self._name_to_top_map['im_info'] = idx
idx += 1
top[idx].reshape(1, 4)
self._name_to_top_map['gt_boxes'] = idx
idx += 1
top[idx].reshape(1, 1)
self._name_to_top_map['need_backprop'] = idx
idx += 1
top[idx].reshape(1, 1)
self._name_to_top_map['dc_label'] = idx
idx += 1
else: # not using RPN
# rois blob: holds R regions of interest, each is a 5-tuple
# (n, x1, y1, x2, y2) specifying an image batch index n and a
# rectangle (x1, y1, x2, y2)
top[idx].reshape(1, 5)
self._name_to_top_map['rois'] = idx
idx += 1
# labels blob: R categorical labels in [0, ..., K] for K foreground
# classes plus background
top[idx].reshape(1)
self._name_to_top_map['labels'] = idx
idx += 1
if cfg.TRAIN.BBOX_REG:
# bbox_targets blob: R bounding-box regression targets with 4
# targets per class
top[idx].reshape(1, self._num_classes * 4)
self._name_to_top_map['bbox_targets'] = idx
idx += 1
                # bbox_inside_weights blob: At most 4 targets per roi are active;
                # this binary vector specifies the subset of active targets
top[idx].reshape(1, self._num_classes * 4)
self._name_to_top_map['bbox_inside_weights'] = idx
idx += 1
top[idx].reshape(1, self._num_classes * 4)
self._name_to_top_map['bbox_outside_weights'] = idx
idx += 1
print 'RoiDataLayer: name_to_top:', self._name_to_top_map
assert len(top) == len(self._name_to_top_map)
def forward(self, bottom, top):
"""Get blobs and copy them into this layer's top blob vector."""
blobs = self._get_next_minibatch()
for blob_name, blob in blobs.iteritems():
top_ind = self._name_to_top_map[blob_name]
# Reshape net's input blobs
top[top_ind].reshape(*(blob.shape))
# Copy data into net's input blobs
top[top_ind].data[...] = blob.astype(np.float32, copy=False)
def backward(self, top, propagate_down, bottom):
"""This layer does not propagate gradients."""
pass
def reshape(self, bottom, top):
"""Reshaping happens during the call to forward."""
pass
class BlobFetcher(Process):
"""Experimental class for prefetching blobs in a separate process."""
def __init__(self, queue, roidb, num_classes):
super(BlobFetcher, self).__init__()
self._queue = queue
self._roidb = roidb
self._num_classes = num_classes
self._perm = None
self._cur = 0
self._shuffle_roidb_inds()
# fix the random seed for reproducibility
np.random.seed(cfg.RNG_SEED)
self.daemon = True
def _shuffle_roidb_inds(self):
"""Randomly permute the training roidb."""
# TODO(rbg): remove duplicated code
self._perm = np.random.permutation(np.arange(len(self._roidb)))
self._cur = 0
def _get_next_minibatch_inds(self):
"""Return the roidb indices for the next minibatch."""
# TODO(rbg): remove duplicated code
if self._cur + cfg.TRAIN.IMS_PER_BATCH >= len(self._roidb):
self._shuffle_roidb_inds()
db_inds = self._perm[self._cur:self._cur + cfg.TRAIN.IMS_PER_BATCH]
self._cur += cfg.TRAIN.IMS_PER_BATCH
return db_inds
def run(self):
print 'BlobFetcher started'
target_pic = False
targets = target_roi_gen()
while True:
# Target pictures are intercalated with the source pictures:
if target_pic:
target_roi = next(targets)
minibatch_db = [target_roi]
else:
db_inds = self._get_next_minibatch_inds()
minibatch_db = [self._roidb[i] for i in db_inds]
blobs = get_minibatch(minibatch_db, self._num_classes)
self._queue.put(blobs)
target_pic = not target_pic
def target_roi_gen():
for filepath in target_file_streamer():
flipped = np.random.randint(2) == 1
roi = {
'gt_classes': np.array([1], dtype=np.int32),
'max_classes': np.array([1]),
'bbox_targets': np.array([[3.,0.,0.,0.,0.]], dtype=np.float32),
'boxes': np.array([[0,0,0,0]], dtype=np.uint16),
'max_overlaps': np.array([1.], dtype=np.float32),
'gt_overlaps': np.array([1.],dtype=np.float32),
'image' : filepath,
'width': 0,
'height': 0,
'flipped': flipped
}
yield roi
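# Hedged sketch of how a Python data layer like RoIDataDALayer is typically wired into a
# Caffe train prototxt; the module path and the num_classes value are illustrative only:
#
#   layer {
#     name: 'input-data'
#     type: 'Python'
#     top: 'data'
#     top: 'im_info'
#     top: 'gt_boxes'
#     top: 'need_backprop'
#     top: 'dc_label'
#     python_param {
#       module: 'roi_data_da_layer.layer'
#       layer: 'RoIDataDALayer'
#       param_str: "'num_classes': 21"
#     }
#   }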
|
# -*- coding: utf-8 -*-
import codecs
import io
import os
import sys
import unittest
import pytest
import pdfgen
from pdfgen.errors import InvalidSourceError
TEST_PATH = os.path.dirname(os.path.realpath(__file__))
EXAMPLE_HTML_FILE = f'{TEST_PATH}/fixtures/example.html'
class TestPdfGenerationSyncApi(unittest.TestCase):
"""Test to_pdf() method in Synchronous world"""
def setUp(self):
pass
def tearDown(self):
if os.path.exists('out.pdf'):
os.remove('out.pdf')
def test_pdf_generation_from_html(self):
pdf = pdfgen.sync.from_string('html', 'out.pdf', options={'format': 'Letter'})
self.assertEqual(pdf, 'out.pdf')
def test_pdf_generation_from_url(self):
pdf = pdfgen.sync.from_url('http://networkcheck.kde.org', 'out.pdf', options={'format': 'Letter'})
self.assertEqual(pdf, 'out.pdf')
def test_raise_error_with_invalid_url(self):
with self.assertRaises(InvalidSourceError):
pdf = pdfgen.sync.from_url('wrongurl.com', 'out.pdf')
def test_raise_error_with_invalid_file_path(self):
paths = ['frongpath.html', 'wrongpath2.html']
with self.assertRaises(InvalidSourceError):
pdfgen.sync.from_file(paths, 'file')
def test_pdf_generation_from_file(self):
pdf = pdfgen.sync.from_file(EXAMPLE_HTML_FILE, 'out.pdf')
self.assertEqual(pdf, 'out.pdf')
def test_pdf_generation_from_file_like(self):
with open(EXAMPLE_HTML_FILE, 'r') as f:
pdf = pdfgen.sync.from_file(f)
self.assertEqual(pdf[:4].decode('utf-8'), '%PDF')
if __name__ == "__main__":
unittest.main()
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("PROD")
process.load('SimG4CMS.HcalTestBeam.TB2006Geometry33XML_cfi')
process.load('SimGeneral.HepPDTESSource.pdt_cfi')
process.load('Configuration.StandardSequences.Services_cff')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load("Geometry.EcalCommonData.ecalSimulationParameters_cff")
process.load('Geometry.HcalTestBeamData.hcalDDDSimConstants_cff')
process.load('Configuration.EventContent.EventContent_cff')
process.load('IOMC.EventVertexGenerators.VtxSmearedFlat_cfi')
process.load('GeneratorInterface.Core.generatorSmeared_cfi')
process.load('SimG4Core.Application.g4SimHits_cfi')
process.load('IOMC.RandomEngine.IOMC_cff')
if hasattr(process,'MessageLogger'):
process.MessageLogger.categories.append('HCalGeom')
process.MessageLogger.categories.append('HcalSim')
process.TFileService = cms.Service("TFileService",
fileName = cms.string('hcaltb06_33.root')
)
process.RandomNumberGeneratorService.generator.initialSeed = 456789
process.RandomNumberGeneratorService.g4SimHits.initialSeed = 9876
process.RandomNumberGeneratorService.VtxSmeared.initialSeed = 123456789
process.common_beam_direction_parameters = cms.PSet(
MinE = cms.double(50.0),
MaxE = cms.double(50.0),
PartID = cms.vint32(-211),
MinEta = cms.double(0.2175),
MaxEta = cms.double(0.2175),
MinPhi = cms.double(-0.1309),
MaxPhi = cms.double(-0.1309),
BeamPosition = cms.double(-800.0)
)
process.source = cms.Source("EmptySource",
firstRun = cms.untracked.uint32(1),
firstEvent = cms.untracked.uint32(1)
)
process.generator = cms.EDProducer("FlatRandomEGunProducer",
PGunParameters = cms.PSet(
process.common_beam_direction_parameters,
),
Verbosity = cms.untracked.int32(0),
AddAntiParticle = cms.bool(False)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(25000)
)
process.o1 = cms.OutputModule("PoolOutputModule",
process.FEVTSIMEventContent,
fileName = cms.untracked.string('sim2006_33.root')
)
process.Timing = cms.Service("Timing")
from IOMC.EventVertexGenerators.VtxSmearedParameters_cfi import *
process.VtxSmeared = cms.EDProducer("BeamProfileVtxGenerator",
process.common_beam_direction_parameters,
VtxSmearedCommon,
BeamMeanX = cms.double(0.0),
BeamMeanY = cms.double(0.0),
BeamSigmaX = cms.double(0.0001),
BeamSigmaY = cms.double(0.0001),
Psi = cms.double(999.9),
GaussianProfile = cms.bool(False),
BinX = cms.int32(50),
BinY = cms.int32(50),
File = cms.string('beam.profile'),
UseFile = cms.bool(False),
TimeOffset = cms.double(0.)
)
process.testbeam = cms.EDAnalyzer("HcalTB06Analysis",
process.common_beam_direction_parameters,
ECAL = cms.bool(True),
TestBeamAnalysis = cms.PSet(
EHCalMax = cms.untracked.double(400.0),
ETtotMax = cms.untracked.double(400.0),
beamEnergy = cms.untracked.double(50.),
TimeLimit = cms.double(180.0),
EcalWidth = cms.double(0.362),
HcalWidth = cms.double(0.640),
EcalFactor = cms.double(1.0),
HcalFactor = cms.double(100.0),
MIP = cms.double(0.8),
Verbose = cms.untracked.bool(True),
MakeTree = cms.untracked.bool(True)
)
)
process.p1 = cms.Path(process.generator*process.VtxSmeared*process.generatorSmeared*process.g4SimHits*process.testbeam)
#process.outpath = cms.EndPath(process.o1)
process.g4SimHits.NonBeamEvent = True
process.g4SimHits.UseMagneticField = False
process.g4SimHits.Physics.type = 'SimG4Core/Physics/QGSP_FTFP_BERT_EML'
process.g4SimHits.Physics.Region = 'HcalRegion'
process.g4SimHits.Physics.DefaultCutValue = 1.
process.g4SimHits.ECalSD.UseBirkLaw = True
process.g4SimHits.ECalSD.BirkL3Parametrization = True
process.g4SimHits.ECalSD.BirkC1 = 0.033
process.g4SimHits.ECalSD.BirkC2 = 0.0
process.g4SimHits.ECalSD.SlopeLightYield = 0.02
process.g4SimHits.HCalSD.UseBirkLaw = True
process.g4SimHits.HCalSD.BirkC1 = 0.0052
process.g4SimHits.HCalSD.BirkC2 = 0.142
process.g4SimHits.HCalSD.BirkC3 = 1.75
process.g4SimHits.HCalSD.UseLayerWt = False
process.g4SimHits.HCalSD.WtFile = ' '
process.g4SimHits.HCalSD.UseShowerLibrary = False
process.g4SimHits.HCalSD.TestNumberingScheme = False
process.g4SimHits.HCalSD.UseHF = False
process.g4SimHits.HCalSD.ForTBHCAL = True
process.g4SimHits.HCalSD.ForTBH2 = True
process.g4SimHits.CaloSD = cms.PSet(
process.common_beam_direction_parameters,
process.common_heavy_suppression,
EminTrack = cms.double(1.0),
TmaxHit = cms.double(1000.0),
EminHits = cms.vdouble(0.0,0.0,0.0,0.0),
EminHitsDepth = cms.vdouble(0.0,0.0,0.0,0.0),
TmaxHits = cms.vdouble(1000.0,1000.0,1000.0,1000.0),
HCNames = cms.vstring('EcalHitsEB','EcalHitsEE','EcalHitsES','HcalHits'),
UseResponseTables = cms.vint32(0,0,0,0),
SuppressHeavy = cms.bool(False),
CheckHits = cms.untracked.int32(25),
UseMap = cms.untracked.bool(True),
Verbosity = cms.untracked.int32(0),
DetailedTiming = cms.untracked.bool(False),
CorrectTOFBeam = cms.bool(False)
)
process.g4SimHits.CaloTrkProcessing.TestBeam = True
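# Hedged usage note (assuming a standard CMSSW environment where this configuration and
# its loaded cfi/cff fragments are available):
#   cmsRun <this_config_file>.py
# The path p1 runs the particle gun, vertex smearing, GEANT4 simulation and the
# HcalTB06Analysis analyzer; re-enable the commented EndPath above to also write
# sim2006_33.root via the PoolOutputModule.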
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Comment.reply_text'
db.delete_column(u'canvas_comment', 'reply_text')
def backwards(self, orm):
# Adding field 'Comment.reply_text'
db.add_column(u'canvas_comment', 'reply_text',
self.gf('django.db.models.fields.CharField')(default='', max_length=2000, blank=True),
keep_default=False)
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '254', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'canvas.apiapp': {
'Meta': {'object_name': 'APIApp'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'canvas.apiauthtoken': {
'Meta': {'unique_together': "(('user', 'app'),)", 'object_name': 'APIAuthToken'},
'app': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['canvas.APIApp']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'canvas.category': {
'Meta': {'object_name': 'Category'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'founded': ('django.db.models.fields.FloatField', [], {'default': '1298956320'}),
'founder': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'founded_groups'", 'null': 'True', 'blank': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderators': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'moderated_categories'", 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'canvas.comment': {
'Meta': {'object_name': 'Comment'},
'anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'attribution_copy': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'attribution_user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': u"orm['auth.User']"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': u"orm['canvas.Category']"}),
'created_on_iphone': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
'judged': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ot_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'replies'", 'null': 'True', 'blank': 'True', 'to': u"orm['canvas.Comment']"}),
'posted_on_quest_of_the_day': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'replied_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['canvas.Comment']", 'null': 'True', 'blank': 'True'}),
'reply_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'used_in_comments'", 'null': 'True', 'to': u"orm['canvas.Content']"}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0', 'db_index': 'True'}),
'skip_moderation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'star_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True', 'blank': 'True'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'ugq': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'uuid': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'canvas.commentflag': {
'Meta': {'object_name': 'CommentFlag'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flags'", 'to': u"orm['canvas.Comment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'type_id': ('django.db.models.fields.IntegerField', [], {}),
'undone': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flags'", 'to': u"orm['auth.User']"})
},
u'canvas.commentmoderationlog': {
'Meta': {'object_name': 'CommentModerationLog'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['canvas.Comment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
'note': ('django.db.models.fields.TextField', [], {}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'moderated_comments_log'", 'to': u"orm['auth.User']"}),
'visibility': ('django.db.models.fields.IntegerField', [], {})
},
u'canvas.commentpin': {
'Meta': {'object_name': 'CommentPin'},
'auto': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['canvas.Comment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'canvas.commentsticker': {
'Meta': {'object_name': 'CommentSticker'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stickers'", 'to': u"orm['canvas.Comment']"}),
'epic_message': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '140', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'type_id': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
u'canvas.commentstickerlog': {
'Meta': {'object_name': 'CommentStickerLog'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['canvas.Comment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'canvas.content': {
'Meta': {'object_name': 'Content'},
'alpha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'animated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
'remix_of': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'remixes'", 'null': 'True', 'to': u"orm['canvas.Content']"}),
'remix_text': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
'source_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '4000', 'blank': 'True'}),
'stamps_used': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'used_as_stamp'", 'blank': 'True', 'to': u"orm['canvas.Content']"}),
'stroke_count': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'url_mapping': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['canvas.ContentUrlMapping']", 'null': 'True', 'blank': 'True'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'canvas.contenturlmapping': {
'Meta': {'object_name': 'ContentUrlMapping'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'canvas.emailunsubscribe': {
'Meta': {'object_name': 'EmailUnsubscribe'},
'email': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'canvas.facebookinvite': {
'Meta': {'object_name': 'FacebookInvite'},
'fb_message_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invited_fbid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'invitee': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'facebook_invited_from'", 'null': 'True', 'blank': 'True', 'to': u"orm['auth.User']"}),
'inviter': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'facebook_sent_invites'", 'null': 'True', 'blank': 'True', 'to': u"orm['auth.User']"})
},
u'canvas.facebookuser': {
'Meta': {'object_name': 'FacebookUser'},
'email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'fb_uid': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'gender': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invited_by': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['canvas.FacebookUser']", 'symmetrical': 'False', 'blank': 'True'}),
'last_invited': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
u'canvas.followcategory': {
'Meta': {'unique_together': "(('user', 'category'),)", 'object_name': 'FollowCategory'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'followers'", 'to': u"orm['canvas.Category']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'following'", 'to': u"orm['auth.User']"})
},
u'canvas.friendjoinednotificationreceipt': {
'Meta': {'unique_together': "(('actor', 'recipient'),)", 'object_name': 'FriendJoinedNotificationReceipt'},
'actor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'recipient': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['auth.User']"})
},
u'canvas.stashcontent': {
'Meta': {'object_name': 'StashContent'},
'content': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['canvas.Content']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'canvas.userinfo': {
'Meta': {'object_name': 'UserInfo'},
'avatar': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['canvas.Content']", 'null': 'True'}),
'bio_text': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'email_hash': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'enable_timeline': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'enable_timeline_posts': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'facebook_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'follower_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'free_invites': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invite_bypass': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'is_qa': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'post_anonymously': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'profile_image': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['canvas.Comment']", 'null': 'True'}),
'trust_changed': ('canvas.util.UnixTimestampField', [], {'null': 'True', 'blank': 'True'}),
'trusted': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'canvas.usermoderationlog': {
'Meta': {'object_name': 'UserModerationLog'},
'action': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
'note': ('django.db.models.fields.TextField', [], {}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'moderation_log'", 'to': u"orm['auth.User']"})
},
u'canvas.userwarning': {
'Meta': {'object_name': 'UserWarning'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['canvas.Comment']", 'null': 'True', 'blank': 'True'}),
'confirmed': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
'custom_message': ('django.db.models.fields.TextField', [], {}),
'disable_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issued': ('canvas.util.UnixTimestampField', [], {}),
'stock_message': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_warnings'", 'to': u"orm['auth.User']"}),
'viewed': ('canvas.util.UnixTimestampField', [], {'default': '0'})
},
u'canvas.welcomeemailrecipient': {
'Meta': {'object_name': 'WelcomeEmailRecipient'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'recipient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['canvas']
|
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, url
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'povary.views.home', name='home'),
# url(r'^povary/', include('povary.foo.urls')),
url(r'^recipe_gallery/(?P<recipe_slug>.*)/$',
'gallery.views.recipe_gallery_upload',
name='recipe_gallery_upload'
),
# url(r'^$', 'recipes.views.recipe_list', name='recipe_list'),
# url(r'^(?P<recipe_slug>.*)/$', 'recipes.views.recipe_details', name='recipe_details'),
)
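# A minimal sketch of resolving the named pattern above from Python code.
# Assumptions: this urlconf is included at the project root and the slug value
# is purely illustrative; `reverse` lives in django.core.urlresolvers on the
# Django versions that still provide `patterns`.
from django.core.urlresolvers import reverse

def example_recipe_gallery_url():
    # e.g. '/recipe_gallery/borscht/'
    return reverse('recipe_gallery_upload', kwargs={'recipe_slug': 'borscht'})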
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 10
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from isi_sdk_9_0_0.models.hdfs_fsimage_job_job import HdfsFsimageJobJob # noqa: F401,E501
class HdfsFsimageJob(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'job': 'HdfsFsimageJobJob'
}
attribute_map = {
'job': 'job'
}
def __init__(self, job=None): # noqa: E501
"""HdfsFsimageJob - a model defined in Swagger""" # noqa: E501
self._job = None
self.discriminator = None
if job is not None:
self.job = job
@property
def job(self):
"""Gets the job of this HdfsFsimageJob. # noqa: E501
Information about job that generates FSImage. # noqa: E501
:return: The job of this HdfsFsimageJob. # noqa: E501
:rtype: HdfsFsimageJobJob
"""
return self._job
@job.setter
def job(self, job):
"""Sets the job of this HdfsFsimageJob.
Information about job that generates FSImage. # noqa: E501
:param job: The job of this HdfsFsimageJob. # noqa: E501
:type: HdfsFsimageJobJob
"""
self._job = job
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, HdfsFsimageJob):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
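# A minimal usage sketch of the generated model above. The value of `job` is
# left as None here; a real call would pass an HdfsFsimageJobJob instance.
if __name__ == '__main__':
    fsimage_job = HdfsFsimageJob(job=None)
    print(fsimage_job.to_dict())                     # {'job': None}
    print(fsimage_job == HdfsFsimageJob(job=None))   # True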
|
#!/usr/bin/env python
__all__ = ['universal_download']
from ..common import *
from .embed import *
def universal_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
try:
content_type = get_head(url, headers=fake_headers)['Content-Type']
except:
content_type = get_head(url, headers=fake_headers, get_method='GET')['Content-Type']
if content_type.startswith('text/html'):
try:
embed_download(url, output_dir=output_dir, merge=merge, info_only=info_only, **kwargs)
except Exception:
pass
else:
return
domains = url.split('/')[2].split('.')
if len(domains) > 2: domains = domains[1:]
site_info = '.'.join(domains)
if content_type.startswith('text/html'):
# extract an HTML page
response = get_response(url, faker=True)
page = str(response.data)
page_title = r1(r'<title>([^<]*)', page)
if page_title:
page_title = unescape_html(page_title)
hls_urls = re.findall(r'(https?://[^;"\'\\]+' + '\.m3u8?' +
r'[^;"\'\\]*)', page)
if hls_urls:
for hls_url in hls_urls:
type_, ext, size = url_info(hls_url)
print_info(site_info, page_title, type_, size)
if not info_only:
download_url_ffmpeg(url=hls_url, title=page_title,
ext='mp4', output_dir=output_dir)
return
# most common media file extensions on the Internet
media_exts = ['\.flv', '\.mp3', '\.mp4', '\.webm',
'[-_]1\d\d\d\.jpe?g', '[-_][6-9]\d\d\.jpe?g', # tumblr
'[-_]1\d\d\dx[6-9]\d\d\.jpe?g',
'[-_][6-9]\d\dx1\d\d\d\.jpe?g',
'[-_][6-9]\d\dx[6-9]\d\d\.jpe?g',
's1600/[\w%]+\.jpe?g', # blogger
'img[6-9]\d\d/[\w%]+\.jpe?g' # oricon?
]
urls = []
for i in media_exts:
urls += re.findall(r'(https?://[^;"\'\\]+' + i + r'[^;"\'\\]*)', page)
p_urls = re.findall(r'(https?%3A%2F%2F[^;&]+' + i + r'[^;&]*)', page)
urls += [parse.unquote(url) for url in p_urls]
q_urls = re.findall(r'(https?:\\\\/\\\\/[^;"\']+' + i + r'[^;"\']*)', page)
urls += [url.replace('\\\\/', '/') for url in q_urls]
# a link href to an image is often an interesting one
urls += re.findall(r'href="(https?://[^"]+\.jpe?g)"', page)
urls += re.findall(r'href="(https?://[^"]+\.png)"', page)
urls += re.findall(r'href="(https?://[^"]+\.gif)"', page)
# MPEG-DASH MPD
mpd_urls = re.findall(r'src="(https?://[^"]+\.mpd)"', page)
for mpd_url in mpd_urls:
cont = get_content(mpd_url)
base_url = r1(r'<BaseURL>(.*)</BaseURL>', cont)
urls += [ r1(r'(.*/)[^/]*', mpd_url) + base_url ]
# have some candy!
candies = []
i = 1
for url in set(urls):
filename = parse.unquote(url.split('/')[-1])
if 5 <= len(filename) <= 80:
title = '.'.join(filename.split('.')[:-1])
else:
title = '%s' % i
i += 1
candies.append({'url': url,
'title': title})
for candy in candies:
try:
mime, ext, size = url_info(candy['url'], faker=True)
if not size: size = float('Inf')
except:
continue
else:
print_info(site_info, candy['title'], ext, size)
if not info_only:
download_urls([candy['url']], candy['title'], ext, size,
output_dir=output_dir, merge=merge,
faker=True)
return
else:
# direct download
filename = parse.unquote(url.split('/')[-1])
title = '.'.join(filename.split('.')[:-1])
ext = filename.split('.')[-1]
_, _, size = url_info(url, faker=True)
print_info(site_info, title, ext, size)
if not info_only:
download_urls([url], title, ext, size,
output_dir=output_dir, merge=merge,
faker=True)
return
site_info = None
download = universal_download
download_playlist = playlist_not_supported('universal')
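# A minimal usage sketch (hypothetical URL). Running it requires the you-get
# package context that provides the helpers imported at the top of this module.
if __name__ == '__main__':
    universal_download('https://example.com/some-page.html',
                       output_dir='.', info_only=True)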
|
# coding=utf-8
import pandas as pd
from pathlib import Path
# extract corpus to separate files
OUT_PUT_DIR = r'D:\data\edgar\example\documents'
df = pd.read_csv(r'D:\data\edgar\example\corpus.csv')
def write_to_file(cik,filingId,fileName,content):
base_dir = Path(OUT_PUT_DIR)
file_name = str(cik) + '+' + str(filingId) + '+' + str(fileName)
file_name = file_name.replace('.htm', '.txt')
(base_dir/file_name).write_text(content,encoding='utf-8')
df.apply(lambda row: write_to_file(row['CIK'],row['FilingId'],row['FileName'],row['Content']), axis=1)
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RAdsplit(RPackage):
"""This package implements clustering of microarray gene expression
profiles according to functional annotations. For each term genes
are annotated to, splits into two subclasses are computed and a
significance of the supporting gene set is determined."""
homepage = "https://www.bioconductor.org/packages/adSplit/"
git = "https://git.bioconductor.org/packages/adSplit.git"
version('1.46.0', commit='7e81a83f34d371447f491b3a146bf6851e260c7c')
depends_on('r@3.4.0:3.4.9', when='@1.46.0')
depends_on('r-annotationdbi', type=('build', 'run'))
depends_on('r-biobase', type=('build', 'run'))
depends_on('r-cluster', type=('build', 'run'))
depends_on('r-go-db', type=('build', 'run'))
depends_on('r-kegg-db', type=('build', 'run'))
depends_on('r-multtest', type=('build', 'run'))
|
class Group:
"""
name: Name of group (String)
deposit: $ Amount required to book the group (Float)
type: Speedball, Recball, Rental (String)
players: list of Player objects in the group ([Object])
paint_bags: list of paint bag counts the group has purchased ([Int])
transactions: list of Transaction objects for the group ([Object])
"""
def __init__(self, name, deposit, type):
self.name = name
self.deposit = deposit
self.type = type
self.players = []
self.paint_bags = []
self.transactions = []
def get_name(self):
return self.name
def get_type(self):
return self.type
def number_of_players(self):
return len(self.players)
def total_spent(self):
total_spent_by_group = 0.0
for transaction in self.transactions:
total_spent_by_group += transaction.amount
return total_spent_by_group
def get_deposit(self):
return self.deposit
def grand_total(self):
return self.total_spent() + self.deposit
def check_if_players_paid(self):
if len(self.players) == 0:
return False
for player in self.players:
if not player.paid:
return False
return True
def number_players_paid(self):
players_who_paid = 0
for player in self.players:
if player.paid:
players_who_paid += 1
return players_who_paid
def total_bags_and_cases(self):
cases = sum(self.paint_bags) // 4
bags = sum(self.paint_bags) % 4
return bags, cases
def get_players(self):
return self.players
def add_player(self, player):
self.players.append(player)
def get_transactions(self):
return self.transactions
def paint_length(self):
return len(self.paint_bags)
def delete_last_paint(self):
del self.paint_bags[-1]
class Player:
def __init__(self, name):
self.name = name
self.paid = False # 2
self.selected = False # 6
def change_select_status(self):
if not self.selected:
self.selected = True
else:
self.selected = False
def get_name(self):
return self.name
def mark_paid(self):
self.paid = True
def mark_unpaid(self):
self.paid = False
def did_pay(self):
return self.paid
def change_pay_status(self):
if self.paid:
self.paid = False
else:
self.paid = True
def is_selected(self):
return self.selected
def deselect(self):
self.selected = False
class Transaction:
def __init__(self, amount, type):
self.amount = amount
self.type = type
self.selected = False
def change_select_status(self):
if not self.selected:
self.selected = True
else:
self.selected = False
def get_type(self):
return self.type
def get_amount(self):
return self.amount
def is_selected(self):
return self.selected
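# A minimal usage sketch of the classes above; names and amounts are
# illustrative only.
if __name__ == '__main__':
    group = Group('Saturday Rental', deposit=50.0, type='Rental')
    alice = Player('Alice')
    group.add_player(alice)
    alice.mark_paid()
    group.transactions.append(Transaction(25.0, 'paint'))
    group.paint_bags.extend([4, 4, 1])       # nine bags purchased in total
    print(group.grand_total())               # 75.0 (deposit + transactions)
    print(group.total_bags_and_cases())      # (1, 2): one loose bag, two cases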
|
from __future__ import division, print_function
import numpy as np
import multiprocessing
from tools import _pickle_method, _unpickle_method
try:
import copy_reg
except:
import copyreg as copy_reg
import types
copy_reg.pickle(types.MethodType, _pickle_method, _unpickle_method)
class GeneticAlgorithm(object):
"""Optimization algorithm for the EZ-Climate model.
Parameters
----------
pop_amount : int
number of individuals in the population
num_feature : int
number of elements in each individual, i.e. number of nodes in tree-model
num_generations : int
number of generations of the populations to be evaluated
bound : float
upper bound of mitigation in each node
cx_prob : float
probability of mating
mut_prob : float
probability of mutation.
utility : `Utility` object
object of utility class
fixed_values : ndarray, optional
nodes to keep fixed
fixed_indicies : ndarray, optional
indices of nodes to keep fixed
print_progress : bool, optional
if the progress of the evolution should be printed
Attributes
----------
pop_amount : int
number of individuals in the population
num_feature : int
number of elements in each individual, i.e. number of nodes in tree-model
num_generations : int
number of generations of the populations to be evaluated
bound : float
upper bound of mitigation in each node
cx_prob : float
probability of mating
mut_prob : float
probability of mutation.
u : `Utility` object
object of utility class
fixed_values : ndarray, optional
nodes to keep fixed
fixed_indicies : ndarray, optional
indices of nodes to keep fixed
print_progress : bool, optional
if the progress of the evolution should be printed
"""
def __init__(self, pop_amount, num_generations, cx_prob, mut_prob, bound, num_feature, utility,
fixed_values=None, fixed_indicies=None, print_progress=False):
self.num_feature = num_feature
self.pop_amount = pop_amount
self.num_gen = num_generations
self.cx_prob = cx_prob
self.mut_prob = mut_prob
self.u = utility
self.bound = bound
self.fixed_values = fixed_values
self.fixed_indicies = fixed_indicies
self.print_progress = print_progress
def _generate_population(self, size):
"""Return 1D-array of random values in the given bound as the initial population."""
pop = np.random.random([size, self.num_feature])*self.bound
if self.fixed_values is not None:
for ind in pop:
ind[self.fixed_indicies] = self.fixed_values # override with the fixed values
return pop
def _evaluate(self, indvidual):
"""Returns the utility of given individual."""
return self.u.utility(indvidual)
def _select(self, pop, rate):
"""Returns a 1D-array of selected individuals.
Parameters
----------
pop : ndarray
population given by 2D-array with shape ('pop_amount', 'num_feature')
rate : float
the probability of an individual being selected
Returns
-------
ndarray
selected individuals
"""
index = np.random.choice(self.pop_amount, int(rate*self.pop_amount), replace=False)
return pop[index,:] # return a random subset of rows of pop
def _random_index(self, individuals, size):
"""Generate a random index of individuals of size 'size'.
Parameters
----------
individuals : ndarray or list
2D-array of individuals
size : int
number of indices to generate
Returns
-------
ndarray
1D-array of indices
"""
inds_size = len(individuals)
return np.random.choice(inds_size, size)
def _selection_tournament(self, pop, k, tournsize, fitness):
"""Select `k` individuals from the input `individuals` using `k`
tournaments of `tournsize` individuals.
Parameters
----------
pop : ndarray or list
2D-array of individuals to select from
k : int
number of individuals to select
tournsize : int
number of individuals participating in each tournament
fitness : ndarray
fitness (utility) values of the individuals in `pop`
Returns
-------
ndarray
selected individuals
"""
chosen = []
# repeat k times: randomly choose `tournsize` indices and keep the individual with the highest fitness
for i in range(k):
index = self._random_index(pop, tournsize)
aspirants = pop[index]
aspirants_fitness = fitness[index]
chosen_index = np.where(aspirants_fitness == np.max(aspirants_fitness))[0]
if len(chosen_index) != 0:
chosen_index = chosen_index[0]
chosen.append(aspirants[chosen_index])
return np.array(chosen)
def _two_point_cross_over(self, pop):
"""Performs a two-point cross-over of the population.
Parameters
----------
pop : ndarray
population given by 2D-array with shape ('pop_amount', 'num_feature')
"""
child_group1 = pop[::2] # instance with even index
child_group2 = pop[1::2]# instance with odd index
for child1, child2 in zip(child_group1, child_group2):
if np.random.random() <= self.cx_prob:
# generate two random crossover points for the swap
cxpoint1 = np.random.randint(1, self.num_feature)
cxpoint2 = np.random.randint(1, self.num_feature - 1)
if cxpoint2 >= cxpoint1:
cxpoint2 += 1
else: # Swap the two cx points
cxpoint1, cxpoint2 = cxpoint2, cxpoint1
child1[cxpoint1:cxpoint2], child2[cxpoint1:cxpoint2] \
= child2[cxpoint1:cxpoint2].copy(), child1[cxpoint1:cxpoint2].copy()
if self.fixed_values is not None:
child1[self.fixed_indicies] = self.fixed_values
child2[self.fixed_indicies] = self.fixed_values
def _uniform_cross_over(self, pop, ind_prob):
"""Performs a uniform cross-over of the population.
Parameters
----------
pop : ndarray
population given by 2D-array with shape ('pop_amount', 'num_feature')
ind_prob : float
probability of feature cross-over
"""
child_group1 = pop[::2]
child_group2 = pop[1::2]
for child1, child2 in zip(child_group1, child_group2):
size = min(len(child1), len(child2))
for i in range(size):
if np.random.random() < ind_prob:
child1[i], child2[i] = child2[i], child1[i]
def _mutate(self, pop, ind_prob, scale=2.0):
"""Mutates individual's elements. The individual has a probability of `mut_prob` of
beeing selected and every element in this individual has a probability `ind_prob` of beeing
mutated. The mutated value is a random number.
Parameters
----------
pop : ndarray
population given by 2D-array with shape ('pop_amount', 'num_feature')
ind_prob : float
probability of feature mutation
scale : float
scaling constant of the random generated number for mutation
"""
# mutates an expected fraction of individuals and features; could be made exact
pop_tmp = np.copy(pop)
mutate_index = np.random.choice(self.pop_amount, int(self.mut_prob * self.pop_amount), replace=False)
for i in mutate_index:
feature_index = np.random.choice(self.num_feature, int(ind_prob * self.num_feature), replace=False)
for j in feature_index:
if self.fixed_indicies is not None and j in self.fixed_indicies:
continue
else:
pop[i][j] = max(0.0, pop[i][j]+(np.random.random()-0.5)*scale)
def _uniform_mutation(self, pop, ind_prob, scale=2.0):
"""Mutates individual's elements. The individual has a probability of `mut_prob` of
beeing selected and every element in this individual has a probability `ind_prob` of beeing
mutated. The mutated value is the current value plus a scaled uniform [-0.5,0.5] random value.
Parameters
----------
pop : ndarray
population given by 2D-array with shape ('pop_amount', 'num_feature')
ind_prob : float
probability of feature mutation
scale : float
scaling constant of the random generated number for mutation
"""
pop_len = len(pop)
mutate_index = np.random.choice(pop_len, int(self.mut_prob * pop_len), replace=False)
for i in mutate_index:
prob = np.random.random(self.num_feature)
inc = (np.random.random(self.num_feature) - 0.5)*scale
pop[i] += (prob > (1.0-ind_prob)).astype(int)*inc
pop[i] = np.maximum(1e-5, pop[i])
if self.fixed_values is not None:
pop[i][self.fixed_indicies] = self.fixed_values
def _show_evolution(self, fits, pop):
"""Print statistics of the evolution of the population."""
length = len(pop)
mean = fits.mean()
std = fits.std()
min_val = fits.min()
max_val = fits.max()
print (" Min {} \n Max {} \n Avg {}".format(min_val, max_val, mean))
print (" Std {} \n Population Size {}".format(std, length))
print (" Best Individual: ", pop[np.argmax(fits)])
def _survive(self, pop_tmp, fitness_tmp):
"""The 80 percent of the individuals with best fitness survives to
the next generation.
Parameters
----------
pop_tmp : ndarray
population
fitness_tmp : ndarray
fitness values of `pop_temp`
Returns
-------
ndarray
individuals that survived
"""
index_fits = np.argsort(fitness_tmp)[::-1]
fitness = fitness_tmp[index_fits]
pop = pop_tmp[index_fits]
num_survive = int(0.8*self.pop_amount)
survive_pop = np.copy(pop[:num_survive])
survive_fitness = np.copy(fitness[:num_survive])
return np.copy(survive_pop), np.copy(survive_fitness)
def run(self):
"""Start the evolution process.
The evolution steps are:
1. Select the individuals to perform cross-over and mutation.
2. Cross over among the selected candidates.
3. Mutate the result as offspring.
4. Combine the offspring and the parents, and keep the top 80 percent of the
original population size.
5. Randomly generate new individuals amounting to 20 percent of the original
population size and add them to the surviving population.
Returns
-------
tuple
final population and the fitness for the final population
Note
----
Uses the :mod:`~multiprocessing` package.
"""
print("----------------Genetic Evolution Starting----------------")
pop = self._generate_population(self.pop_amount)
pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
fitness = pool.map(self._evaluate, pop) # pool.map preserves input order, so fitness[i] corresponds to pop[i]
fitness = np.array([val[0] for val in fitness])
u_hist = np.zeros(self.num_gen) # best fitness per generation; recorded but not returned
for g in range(0, self.num_gen):
print ("-- Generation {} --".format(g+1))
pop_select = self._select(np.copy(pop), rate=1)
self._uniform_cross_over(pop_select, 0.50)
self._uniform_mutation(pop_select, 0.25, np.exp(-float(g)/self.num_gen)**2)
#self._mutate(pop_select, 0.05)
fitness_select = pool.map(self._evaluate, pop_select)
fitness_select = np.array([val[0] for val in fitness_select])
pop_tmp = np.append(pop, pop_select, axis=0)
fitness_tmp = np.append(fitness, fitness_select, axis=0)
pop_survive, fitness_survive = self._survive(pop_tmp, fitness_tmp)
pop_new = self._generate_population(self.pop_amount - len(pop_survive))
fitness_new = pool.map(self._evaluate, pop_new)
fitness_new = np.array([val[0] for val in fitness_new])
pop = np.append(pop_survive, pop_new, axis=0)
fitness = np.append(fitness_survive, fitness_new, axis=0)
if self.print_progress:
self._show_evolution(fitness, pop)
u_hist[g] = fitness[0]
fitness = pool.map(self._evaluate, pop)
fitness = np.array([val[0] for val in fitness])
return pop, fitness
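# A minimal sketch that exercises the genetic operators above on a toy
# problem, bypassing run() and its multiprocessing pool. The _ToyUtility
# stand-in is hypothetical: GeneticAlgorithm only assumes a utility object
# whose utility(m) returns an indexable value with the fitness at index 0.
class _ToyUtility(object):
    def utility(self, m):
        # maximised when every node equals 1.0
        return [-float(np.sum((np.asarray(m) - 1.0) ** 2))]

def _demo_genetic_operators():
    ga = GeneticAlgorithm(pop_amount=20, num_generations=5, cx_prob=0.8,
                          mut_prob=0.5, bound=2.0, num_feature=6,
                          utility=_ToyUtility())
    pop = ga._generate_population(ga.pop_amount)
    fitness = np.array([ga._evaluate(ind)[0] for ind in pop])
    parents = ga._selection_tournament(pop, k=ga.pop_amount, tournsize=3,
                                       fitness=fitness)
    ga._uniform_cross_over(parents, 0.50)
    ga._uniform_mutation(parents, 0.25)
    return parents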
class GradientSearch(object):
"""Gradient search optimization algorithm for the EZ-Climate model.
Parameters
----------
utility : `Utility` object
object of utility class
iterations : int
maximum number of iterations of the gradient descent
var_nums : int
number of elements in array to optimize
accuracy : float
stop value for the gradient descent
fixed_values : ndarray, optional
nodes to keep fixed
fixed_indicies : ndarray, optional
indices of nodes to keep fixed
print_progress : bool, optional
if the progress of the evolution should be printed
scale_alpha : ndarray, optional
array to scale the learning rate
Attributes
----------
utility : `Utility` object
object of utility class
iterations : int
maximum number of iterations of the gradient descent
var_nums : int
number of elements in array to optimize
accuracy : float
stop value for the gradient descent
fixed_values : ndarray, optional
nodes to keep fixed
fixed_indicies : ndarray, optional
indices of nodes to keep fixed
print_progress : bool, optional
if the progress of the evolution should be printed
scale_alpha : ndarray, optional
array to scale the learning rate
"""
def __init__(self, utility, var_nums, accuracy=1e-06, iterations=100, fixed_values=None,
fixed_indicies=None, print_progress=False, scale_alpha=None):
self.u = utility
self.var_nums = var_nums
self.accuracy = accuracy
self.iterations = iterations
self.fixed_values = fixed_values
self.fixed_indicies = fixed_indicies
self.print_progress = print_progress
self.scale_alpha = scale_alpha
if scale_alpha is None:
self.scale_alpha = np.exp(np.linspace(0.0, 3.0, var_nums))
def _partial_grad(self, i):
"""Calculate the ith element of the gradient vector."""
m_copy = self.m.copy()
m_copy[i] = m_copy[i] - self.delta if (m_copy[i] - self.delta)>=0 else 0.0
minus_utility = self.u.utility(m_copy)
m_copy[i] += 2*self.delta
plus_utility = self.u.utility(m_copy)
grad = (plus_utility-minus_utility) / (2*self.delta) # central-difference approximation of the ith partial derivative
return grad, i
def numerical_gradient(self, m, delta=1e-08, fixed_indicies=None):
"""Calculate utility gradient numerically.
Parameters
----------
m : ndarray or list
array of mitigation
delta : float, optional
change in mitigation
fixed_indicies : ndarray or list, optional
indicies of gradient that should not be calculated
Returns
-------
ndarray
gradient
"""
self.delta = delta
self.m = m
if fixed_indicies is None:
fixed_indicies = []
grad = np.zeros(len(m))
if not isinstance(m, np.ndarray):
self.m = np.array(m)
pool = multiprocessing.Pool()
indicies = np.delete(range(len(m)), fixed_indicies)
res = pool.map(self._partial_grad, indicies)
for g, i in res:
grad[i] = g
pool.close()
pool.join()
del self.m
del self.delta
return grad
def _partial_grad_cons(self, i):
"""Calculate the ith element of the gradient vector."""
m_copy = self.m.copy()
m_copy[i] = m_copy[i] - self.delta if (m_copy[i] - self.delta)>=0 else 0.0
minus_utility = self.u.adjusted_utility(m_copy,first_period_consadj=self.cons)
m_copy[i] += 2*self.delta
plus_utility = self.u.adjusted_utility(m_copy,first_period_consadj=self.cons)
grad = (plus_utility-minus_utility) / (2*self.delta) # central-difference approximation of the ith partial derivative
return grad, i
def numerical_gradient_cons(self, m, cons,delta=1e-08):
"""Calculate utility gradient numerically.
Parameters
----------
m : ndarray or list
array of mitigation
cons :
first-period consumption adjustment passed on to `adjusted_utility`
delta : float, optional
change in mitigation
Returns
-------
ndarray
gradient
"""
self.delta = delta
self.m = m
self.cons = cons
grad = np.zeros(len(m))
if not isinstance(m, np.ndarray):
self.m = np.array(m)
pool = multiprocessing.Pool()
indicies = np.array(range(len(m)))
res = pool.map(self._partial_grad_cons, indicies)
for g, i in res:
grad[i] = g
pool.close()
pool.join()
del self.m
del self.delta
del self.cons
return grad
def _accelerate_scale(self, accelerator, prev_grad, grad):
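"""Per-element step-size acceleration: reset the accelerator to 1 where the
gradient changed sign (or is zero), then grow every element by 10 percent."""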
sign_vector = np.sign(prev_grad * grad)
scale_vector = np.ones(self.var_nums) * ( 1 + 0.10)
accelerator[sign_vector <= 0] = 1
accelerator *= scale_vector
return accelerator
def gradient_descent(self, initial_point, return_last=False):
"""Gradient descent algorithm. The `initial_point` is updated using the
Adam algorithm. Adam uses the history of the gradient to compute individual
step sizes for each element in the mitigation vector. The vector of step
sizes are calculated using estimates of the first and second moments of
the gradient.
Parameters
----------
initial_point : ndarray
initial guess of the mitigation
return_last : bool, optional
if True the function returns the last point, else the point
with highest utility
Returns
-------
tuple
(best point, best utility)
"""
num_decision_nodes = initial_point.shape[0]
x_hist = np.zeros((self.iterations+1, num_decision_nodes))
u_hist = np.zeros(self.iterations+1)
u_hist[0] = self.u.utility(initial_point)
x_hist[0] = initial_point
beta1, beta2 = 0.90, 0.90
eta = 0.0015 # learning rate
eps = 1e-3
m_t, v_t = 0, 0
prev_grad = 0.0
accelerator = np.ones(self.var_nums)
# formula at http://sebastianruder.com/optimizing-gradient-descent/index.html#fnref:15
for i in range(self.iterations):
grad = self.numerical_gradient(x_hist[i], fixed_indicies=self.fixed_indicies)
m_t = beta1*m_t + (1-beta1)*grad
v_t = beta2*v_t + (1-beta2)*np.power(grad, 2)
m_hat = m_t / (1-beta1**(i+1))
v_hat = v_t / (1-beta2**(i+1))
if i != 0:
accelerator = self._accelerate_scale(accelerator, prev_grad, grad)
new_x = x_hist[i] + ((eta*m_hat)/(np.square(v_hat)+eps)) * accelerator # empirical acceleration; the 10 percent growth factor in _accelerate_scale is a heuristic that still needs validation
new_x[new_x < 0] = 0.0
if self.fixed_values is not None:
new_x[self.fixed_indicies] = self.fixed_values
x_hist[i+1] = new_x
u_hist[i+1] = self.u.utility(new_x)[0]
prev_grad = grad.copy()
if self.print_progress:
print("-- Iteration {} -- \n Current Utility: {}".format(i+1, u_hist[i+1]))
print(new_x)
if return_last:
return x_hist[i+1], u_hist[i+1]
best_index = np.argmax(u_hist)
return x_hist[best_index], u_hist[best_index]
def run(self, initial_point_list, topk=4):
"""Initiate the gradient search algorithm.
Parameters
----------
initial_point_list : list
list of initial points to select from
topk : int, optional
select and run gradient descent on the `topk` first points of
`initial_point_list`
Returns
-------
tuple
best mitigation point and the utility of the best mitigation point
Raises
------
ValueError
If `topk` is larger than the length of `initial_point_list`.
Note
----
Uses the :mod:`~multiprocessing` package.
"""
print("----------------Gradient Search Starting----------------")
if topk > len(initial_point_list):
raise ValueError("topk {} > number of initial points {}".format(topk, len(initial_point_list)))
candidate_points = initial_point_list[:topk]
mitigations = []
utilities = np.zeros(topk)
for cp, count in zip(candidate_points, range(topk)):
if not isinstance(cp, np.ndarray):
cp = np.array(cp)
print("Starting process {} of Gradient Descent".format(count+1))
m, u = self.gradient_descent(cp)
mitigations.append(m)
utilities[count] = u
best_index = np.argmax(utilities)
return mitigations[best_index], utilities[best_index]
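# For reference, a standalone sketch of the textbook Adam update that the
# gradient_descent method above is based on. One difference worth noting: the
# method above divides by np.square(v_hat) + eps, whereas the usual Adam rule
# divides by the square root of the bias-corrected second moment. The step is
# added rather than subtracted because the utility is being maximised.
def _adam_ascent_step_sketch(x, grad, m_t, v_t, t,
                             eta=0.0015, beta1=0.90, beta2=0.90, eps=1e-3):
    """One Adam-style ascent step; returns the new point and moment estimates."""
    m_t = beta1 * m_t + (1 - beta1) * grad               # first-moment estimate
    v_t = beta2 * v_t + (1 - beta2) * np.power(grad, 2)  # second-moment estimate
    m_hat = m_t / (1 - beta1 ** t)                       # bias correction
    v_hat = v_t / (1 - beta2 ** t)
    x_new = x + eta * m_hat / (np.sqrt(v_hat) + eps)
    return x_new, m_t, v_t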
class CoordinateDescent(object):
"""Coordinate descent optimization algorithm for the EZ-Climate model.
Parameters
----------
utility : `Utility` object
object of utility class
var_nums : int
number of elements in array to optimize
accuracy : float
stop value for the utility increase
iterations : int
maximum number of iterations
Attributes
----------
utility : `Utility` object
object of utility class
var_nums : int
number of elements in array to optimize
accuracy : float
stop value for the utility increase
iterations : int
maximum number of iterations
"""
def __init__(self, utility, var_nums, accuracy=1e-4, iterations=100):
self.u = utility
self.var_nums = var_nums
self.accuracy = accuracy
self.iterations = iterations
def _min_func(self, x, m, i):
m_copy = m.copy()
m_copy[i] = x
return -self.u.utility(m_copy)[0]
def _minimize_node(self, node, m):
from scipy.optimize import fmin
return fmin(self._min_func, x0=m[node], args=(m, node), disp=False)
def run(self, m):
"""Run the coordinate descent iterations.
Parameters
----------
m : initial point
Returns
-------
tuple
best mitigation point and the utility of the best mitigation point
Note
----
Uses the :mod:`~scipy` package.
"""
num_decision_nodes = m.shape[0]
x_hist = []
u_hist = []
nodes = range(self.var_nums)
x_hist.append(m.copy())
u_hist.append(self.u.utility(m)[0])
print("----------------Coordinate Descent Starting----------------")
print("Starting Utility: {}".format(u_hist[0]))
for i in range(self.iterations):
print("-- Iteration {} --".format(i+1))
node_iteration = np.random.choice(nodes, replace=False, size=len(nodes))
for node in node_iteration:
m[node] = max(0.0, self._minimize_node(node, m))
x_hist.append(m.copy())
u_hist.append(self.u.utility(m)[0])
print("Current Utility: {}".format(u_hist[i+1]))
if np.abs(u_hist[i+1] - u_hist[i]) < self.accuracy:
break
return x_hist[-1], u_hist[-1]
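# A sketch of the two-stage pipeline these classes suggest: a genetic search
# produces candidate starting points, which gradient search then refines. The
# `utility` object and the parameter values are placeholders, not claimed
# EZ-Climate defaults.
def _two_stage_optimization_sketch(utility, num_feature=63):
    ga = GeneticAlgorithm(pop_amount=150, num_generations=100, cx_prob=0.8,
                          mut_prob=0.5, bound=2.0, num_feature=num_feature,
                          utility=utility)
    pop, fitness = ga.run()
    order = np.argsort(fitness)[::-1]        # best candidates first
    gs = GradientSearch(utility, var_nums=num_feature, iterations=200)
    return gs.run(pop[order], topk=4)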
|
#!/usr/bin/env python
# coding: utf8
#
# Copyright (c) 2020 Centre National d'Etudes Spatiales (CNES).
#
# This file is part of PANDORA
#
# https://github.com/CNES/Pandora
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains functions to test the disparity module.
"""
import unittest
import numpy as np
import xarray as xr
import common
import pandora
import pandora.constants as cst
import pandora.disparity as disparity
import pandora.matching_cost as matching_cost
from pandora.img_tools import read_img
from pandora.state_machine import PandoraMachine
class TestDisparity(unittest.TestCase):
"""
TestDisparity class allows to test the disparity module
"""
def setUp(self):
"""
Method called to prepare the test fixture
"""
# Create stereo images
data = np.array(([[1, 2, 4, 6],
[2, 4, 1, 6],
[6, 7, 8, 10]]), dtype=np.float64)
self.left = xr.Dataset({'im': (['row', 'col'], data)},
coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
self.left.attrs = {'valid_pixels': 0, 'no_data_mask': 1}
data = np.array(([[6, 1, 2, 4],
[6, 2, 4, 1],
[10, 6, 7, 8]]), dtype=np.float64)
self.right = xr.Dataset({'im': (['row', 'col'], data)},
coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
self.right.attrs = {'valid_pixels': 0, 'no_data_mask': 1}
def test_to_disp(self):
"""
Test the to disp method
"""
# Create the left cost volume, with SAD measure window size 1, subpixel 1, disp_min -3 disp_max 1
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 1,
'subpix': 1})
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -3, 1)
# Disparity map ground truth, for the images described in the setUp method
gt_disp = np.array([[1, 1, 1, -3],
[1, 1, 1, -3],
[1, 1, 1, -3]])
# Compute the disparity
disparity_ = disparity.AbstractDisparity(**{'disparity_method': 'wta', 'invalid_disparity': 0})
disp = disparity_.to_disp(cv)
# Check if the calculated disparity map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(disp['disparity_map'].data, gt_disp)
#
# Test the to_disp method with negative disparity range
#
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -3, -1)
# Disparity map ground truth
gt_disp = np.array([[0, -1, -2, -3],
[0, -1, -1, -3],
[0, -1, -2, -3]])
# Compute the disparity
disp = disparity_.to_disp(cv)
# Check if the calculated disparity map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(disp['disparity_map'].data, gt_disp)
#
# Test the to_disp method with positive disparity range
#
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, 1, 3)
# Disparity map ground truth
gt_disp = np.array([[1, 1, 1, 0],
[1, 1, 1, 0],
[1, 1, 1, 0]])
# Compute the disparity
disp = disparity_.to_disp(cv)
# Check if the calculated disparity map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(disp['disparity_map'].data, gt_disp)
# Test disp_indices copy
# Modify the disparity map
disp['disparity_map'].data[0, 0] = -95
# Check if the xarray disp_indices is equal to the ground truth disparity map
np.testing.assert_array_equal(cv['disp_indices'].data, gt_disp)
def test_to_disp_with_offset(self):
"""
Test the to disp method with window_size > 1
"""
# Create the left cost volume, with SAD measure window size 3, subpixel 1, disp_min -3 disp_max 1
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 3,
'subpix': 1})
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -3, 1)
# Disparity map ground truth, for the images described in the setUp method
# The ground truth is full size; everything outside [offset:-offset] is set to invalid_disparity
gt_disp = np.array([[-99, -99, -99, -99],
[-99, 1, 0, -99],
[-99, -99, -99, -99]])
# Compute the disparity
disparity_ = disparity.AbstractDisparity(**{'disparity_method': 'wta', 'invalid_disparity': -99})
disp = disparity_.to_disp(cv)
# Check if the calculated disparity map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(disp['disparity_map'].data, gt_disp)
#
# Test the to_disp method with negative disparity range
#
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -3, -1)
# Disparity map ground truth
gt_disp = np.array([[-99, -99, -99, -99],
[-99, -99, -1, -99],
[-99, -99, -99, -99]])
# Compute the disparity
disp = disparity_.to_disp(cv)
# Check if the calculated disparity map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(disp['disparity_map'].data, gt_disp)
#
# Test the to_disp method with positive disparity range
#
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, 1, 3)
# Disparity map ground truth
gt_disp = np.array([[-99, -99, -99, -99],
[-99, 1, -99, -99],
[-99, -99, -99, -99]])
# Compute the disparity
disp = disparity_.to_disp(cv)
# Check if the calculated disparity map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(disp['disparity_map'].data, gt_disp)
# Test disp_indices copy
# Modify the disparity map
disp['disparity_map'].data[0, 0] = -95
# Check if the xarray disp_indices is equal to the ground truth disparity map
np.testing.assert_array_equal(cv['disp_indices'].data, gt_disp)
def test_argmin_split(self):
"""
Test the argmin_split method
"""
# Create the left cost volume, with SAD measure, window size 1, subpixel 2, disp_min -3 disp_max 1
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 1,
'subpix': 2})
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -3, 1)
indices_nan = np.isnan(cv['cost_volume'].data)
cv['cost_volume'].data[indices_nan] = np.inf
# ground truth
gt_disp = np.array([[1., 1., 1., -3.],
[1., -0.5, 1., -3.],
[1., 1., -1.5, -3]], dtype=np.float32)
# Compute the disparity
disparity_ = disparity.AbstractDisparity(**{'disparity_method': 'wta', 'invalid_disparity': 0})
disp = disparity_.argmin_split(cv)
# Check if the calculated coefficient map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(gt_disp, disp)
def test_argmax_split(self):
"""
Test the argmax_split method
"""
# Create the left cost volume, with ZNCC measure, window size 1, subpixel 2, disp_min -3 disp_max 1
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'zncc', 'window_size': 1,
'subpix': 2})
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -3, 1)
indices_nan = np.isnan(cv['cost_volume'].data)
cv['cost_volume'].data[indices_nan] = -np.inf
# ground truth
gt_disp = np.array([[0., -1., -2., -3.],
[0., -1., -2., -3.],
[0., -1., -2., -3.]], dtype=np.float32)
# Compute the disparity
disparity_ = disparity.AbstractDisparity(**{'disparity_method': 'wta', 'invalid_disparity': 0})
disp = disparity_.argmax_split(cv)
# Check if the calculated coefficient map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(gt_disp, disp)
def test_coefficient_map(self):
"""
Test the method coefficient map
"""
# Create the left cost volume, with SAD measure window size 1, subpixel 1, disp_min -3 disp_max 1
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 1,
'subpix': 1})
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -3, 1)
# Compute the disparity
disparity_ = disparity.AbstractDisparity(**{'disparity_method': 'wta', 'invalid_disparity': 0})
disparity_.to_disp(cv)
# Coefficient map ground truth, for the images described in the setUp method
gt_coeff = np.array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]])
# Compute the disparity, and the coefficient map
coeff = disparity_.coefficient_map(cv)
# Check if the calculated coefficient map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(coeff.data, gt_coeff)
def test_approximate_right_disparity(self):
"""
Test the approximate_right_disparity method
"""
# Create the left cost volume, with SAD measure window size 3 and subpixel 1
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 3,
'subpix': 1})
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -2, 1)
# Right disparity map ground truth, for the images described in the setUp method
gt_disp = np.array([[0, 0, 0, 0],
[0, 0, -1, 0],
[0, 0, 0, 0]])
# Compute the right disparity map
disparity_ = disparity.AbstractDisparity(**{'disparity_method': 'wta', 'invalid_disparity': 0})
disp_r = disparity_.approximate_right_disparity(cv, self.right)
# Check if the calculated right disparity map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(disp_r['disparity_map'].data, gt_disp)
def test_right_disparity_subpixel(self):
"""
Test the right disparity method, with subpixel disparity
"""
# Create the left cost volume, with SAD measure window size 3 and subpixel 4
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 3,
'subpix': 4})
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -2, 1)
# Right disparity map ground truth
gt_disp = np.array([[0, 0, 0, 0],
[0, 0, -1, 0],
[0, 0, 0, 0]])
# Compute the right disparity map
disparity_ = disparity.AbstractDisparity(**{'disparity_method': 'wta', 'invalid_disparity': 0})
disp_r = disparity_.approximate_right_disparity(cv, self.right)
# Check if the calculated right disparity map is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(disp_r['disparity_map'].data, gt_disp)
@staticmethod
def test_right_disparity_comparaison():
"""
Test the right disparity method by comparing the right disparity map calculated from scratch with the one
calculated with the fast method
"""
# Build the default configuration
default_cfg = pandora.check_json.default_short_configuration
pandora_left = read_img('tests/pandora/left.png', no_data=np.nan, mask=None)
pandora_right = read_img('tests/pandora/right.png', no_data=np.nan, mask=None)
fast_cfg = {
'pipeline': {
'right_disp_map': {
'method': 'accurate'
},
'matching_cost': {
'matching_cost_method': 'census'
},
'disparity': {
'disparity_method': 'wta'
},
'refinement': {
'refinement_method': 'vfit'
},
'validation': {
'validation_method': 'cross_checking',
'right_left_mode': 'approximate'
}
}
}
pandora_machine_fast = PandoraMachine()
cfg = pandora.check_json.update_conf(default_cfg, fast_cfg)
left, right_fast = \
pandora.run(pandora_machine_fast, pandora_left, pandora_right, -60, 0, cfg['pipeline']) # pylint: disable=unused-variable
acc_cfg = {
'pipeline':
{
'right_disp_map': {
'method': 'accurate'
},
'matching_cost': {
'matching_cost_method': 'census'
},
'disparity': {
'disparity_method': 'wta'
},
'refinement': {
'refinement_method': 'vfit'
},
'validation': {
'validation_method': 'cross_checking',
'right_left_mode': 'accurate',
}
}
}
pandora_machine_acc = PandoraMachine()
cfg = pandora.check_json.update_conf(default_cfg, acc_cfg)
left, right_acc = pandora.run(pandora_machine_acc, pandora_left, pandora_right, -60, 0, cfg['pipeline'])
# Check if the calculated disparity map in fast mode is equal to the disparity map in accurate mode
np.testing.assert_array_equal(right_fast['disparity_map'].data, right_acc['disparity_map'].data)
# Check if the calculated coefficient map in fast mode is equal to the coefficient map in accurate mode
np.testing.assert_array_equal(right_fast['interpolated_coeff'].data, right_acc['interpolated_coeff'].data)
def test_to_disp_validity_mask(self):
"""
Test the generated validity mask in the to_disp method
# If bit 1 == 1 : Invalid pixel : the disparity interval is missing in the right image
# If bit 2 == 1 : Information: the disparity interval is incomplete (edge reached in the right image)
"""
# ------ Negative disparities ------
# Create the left cost volume, with SAD measure window size 1, subpixel 1, disp_min -3 disp_max -1
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 1,
'subpix': 1})
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -3, -1)
# Compute the disparity map and validity mask
disparity_ = disparity.AbstractDisparity(**{'disparity_method': 'wta', 'invalid_disparity': 0})
dataset = disparity_.to_disp(cv)
disparity_.validity_mask(dataset, self.left, self.right, cv)
# Validity mask ground truth
gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0],
[cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0],
[cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0]], dtype=np.uint16)
        # Check if the calculated validity mask is equal to the ground truth (same shape and all elements equal)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
# ------ Positive disparities ------
# Create the left cost volume, with SAD measure window size 1, subpixel 1, disp_min 1 disp_max 2
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, 1, 2)
# Compute the disparity map and validity mask
dataset = disparity_.to_disp(cv)
disparity_.validity_mask(dataset, self.left, self.right, cv)
# Validity mask ground truth
gt_mask = np.array([[0, 0, 1 << 2, cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING],
[0, 0, 1 << 2, cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING],
[0, 0, 1 << 2, cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING]],
dtype=np.uint16)
        # Check if the calculated validity mask is equal to the ground truth (same shape and all elements equal)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
# ------ Negative and positive disparities ------
# Create the left cost volume, with SAD measure window size 1, subpixel 1, disp_min -1 disp_max 1
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -1, 1)
# Compute the disparity map and validity mask
dataset = disparity_.to_disp(cv)
disparity_.validity_mask(dataset, self.left, self.right, cv)
# Validity mask ground truth
gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0, 0,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE],
[cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0, 0,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE],
[cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0, 0,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE]],
dtype=np.uint16)
        # Check if the calculated validity mask is equal to the ground truth (same shape and all elements equal)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
# ------ Variable grids of disparities ------
# Disp_min and disp_max
disp_min_grid = np.array([[-3, -2, -3, -1],
[-2, -2, -1, -3],
[-1, -2, -2, -3]])
disp_max_grid = np.array([[-1, -1, -2, 0],
[0, -1, 0, 0],
[0, 0, -1, -1]])
        # Create the left cost volume, with SAD measure window size 1, subpixel 1, and the variable disparity grids above
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 1,
'subpix': 1})
dmin, dmax = matching_cost_plugin.dmin_dmax(disp_min_grid, disp_max_grid)
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, dmin, dmax)
matching_cost_plugin.cv_masked(self.left, self.right, cv, disp_min_grid, disp_max_grid)
# Compute the disparity map and validity mask
dataset = disparity_.to_disp(cv)
disparity_.validity_mask(dataset, self.left, self.right, cv)
# Validity mask ground truth
gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0],
[cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0],
[cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0]], dtype=np.uint16)
        # Check if the calculated validity mask is equal to the ground truth (same shape and all elements equal)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
def test_to_disp_validity_mask_with_offset(self):
"""
Test the generated validity mask in the to_disp method
# If bit 1 == 1 : Invalid pixel : the disparity interval is missing in the right image
# If bit 2 == 1 : Information: the disparity interval is incomplete (edge reached in the right image)
"""
# ------ Negative disparities ------
        # Create the left cost volume, with SAD measure window size 3, subpixel 1, disp_min -3 disp_max -1
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 3,
'subpix': 1})
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -3, -1)
# Compute the disparity map and validity mask
disparity_ = disparity.AbstractDisparity(**{'disparity_method': 'wta', 'invalid_disparity': 0})
dataset = disparity_.to_disp(cv)
disparity_.validity_mask(dataset, self.left, self.right, cv)
# Validity mask ground truth
gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER]], dtype=np.uint16)
        # Check if the calculated validity mask is equal to the ground truth (same shape and all elements equal)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
# ------ Positive disparities ------
        # Create the left cost volume, with SAD measure window size 3, subpixel 1, disp_min 1 disp_max 2
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, 1, 2)
# Compute the disparity map and validity mask
dataset = disparity_.to_disp(cv)
disparity_.validity_mask(dataset, self.left, self.right, cv)
# Validity mask ground truth
gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER]], dtype=np.uint16)
        # Check if the calculated validity mask is equal to the ground truth (same shape and all elements equal)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
# ------ Negative and positive disparities ------
        # Create the left cost volume, with SAD measure window size 3, subpixel 1, disp_min -1 disp_max 1
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -1, 1)
# Compute the disparity map and validity mask
dataset = disparity_.to_disp(cv)
disparity_.validity_mask(dataset, self.left, self.right, cv)
# Validity mask ground truth
gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER]], dtype=np.uint16)
        # Check if the calculated validity mask is equal to the ground truth (same shape and all elements equal)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
# ------ Variable grids of disparities ------
# Disp_min and disp_max
disp_min_grid = np.array([[-3, -2, -3, -1],
[-2, -2, -1, -3],
[-1, -2, -2, -3]])
disp_max_grid = np.array([[-1, -1, -2, 0],
[0, -1, 0, 0],
[0, 0, -1, -1]])
        # Create the left cost volume, with SAD measure window size 3, subpixel 1, and the variable disparity grids above
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 3,
'subpix': 1})
dmin, dmax = matching_cost_plugin.dmin_dmax(disp_min_grid, disp_max_grid)
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, dmin, dmax)
matching_cost_plugin.cv_masked(self.left, self.right, cv, disp_min_grid, disp_max_grid)
# Compute the disparity map and validity mask
dataset = disparity_.to_disp(cv)
disparity_.validity_mask(dataset, self.left, self.right, cv)
# Validity mask ground truth
gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER]], dtype=np.uint16)
        # Check if the calculated validity mask is equal to the ground truth (same shape and all elements equal)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
def test_approximate_right_disparity_validity_mask(self):
"""
        Test the generated validity mask in the approximate_right_disparity method
# If bit 1 == 1 : Invalid pixel : the disparity interval is missing in the right image
# If bit 2 == 1 : Information: the disparity interval is incomplete (edge reached in the right image)
"""
# Create the left cost volume, with SAD measure window size 1 and subpixel 1
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 1,
'subpix': 1})
# ------ Negative and positive disparities ------
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -2, 1)
# Validity mask ground truth ( for disparities -1 0 1 2 )
gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE],
[cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE],
[cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE]], dtype=np.uint16)
# Compute the right disparity map and the validity mask
disparity_ = disparity.AbstractDisparity(**{'disparity_method': 'wta', 'invalid_disparity': 0})
dataset = disparity_.approximate_right_disparity(cv, self.right)
        # Check if the calculated validity mask is equal to the ground truth (same shape and all elements equal)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
# ------ Negative disparities ------
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, 1, 2)
# Validity mask ground truth ( for disparities -2 -1 )
gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
0, 0],
[cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
0, 0],
[cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
0, 0]], dtype=np.uint16)
# Compute the right disparity map and the validity mask
dataset = disparity_.approximate_right_disparity(cv, self.right)
        # Check if the calculated validity mask is equal to the ground truth (same shape and all elements equal)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
# ------ Positive disparities ------
cv = matching_cost_plugin.compute_cost_volume(self.left, self.right, -2, -1)
# Validity mask ground truth ( for disparities 1 2 )
gt_mask = np.array([[0, 0, cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING],
[0, 0, cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING],
[0, 0, cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING]], dtype=np.uint16)
# Compute the right disparity map and the validity mask
dataset = disparity_.approximate_right_disparity(cv, self.right)
        # Check if the calculated validity mask is equal to the ground truth (same shape and all elements equal)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
@staticmethod
def test_validity_mask():
"""
# If bit 0 == 1 : Invalid pixel : the disparity interval is missing in the right image
# If bit 1 == 1 : Invalid pixel : the disparity interval is missing in the right image
# If bit 2 == 1 : Information: the disparity interval is incomplete (edge reached in the right image)
# If bit 6 == 1 : Invalid pixel : invalidated by the validity mask of the left image given as input
# If bit 7 == 1 : Invalid pixel : right positions invalidated by the mask of the right image given as
# input
"""
# Masks convention
# 1 = valid
# 2 = no_data
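        # Reading of the input masks, as inferred from the ground truths below: a pixel equal to
        # 'valid_pixels' is usable, a pixel equal to 'no_data_mask' is treated as no data, and any
        # other value is reported through the PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_LEFT/RIGHT flags.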
# ---------------------- Test with positive and negative disparity range ----------------------
data = np.array(([[1, 2, 4, 6],
[2, 4, 1, 6],
[6, 7, 8, 10]]), dtype=np.float64)
left_mask = np.array([[2, 1, 1, 1],
[1, 2, 4, 1],
[5, 1, 1, 2]], dtype=np.uint8)
left = xr.Dataset({'im': (['row', 'col'], data),
'msk': (['row', 'col'], left_mask)},
coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
left.attrs = {'valid_pixels': 1, 'no_data_mask': 2}
data = np.array(([[6, 1, 2, 4],
[6, 2, 4, 1],
[10, 6, 7, 8]]), dtype=np.float64)
right_mask = np.array([[1, 1, 3, 5],
[4, 1, 1, 1],
[2, 2, 4, 6]], dtype=np.uint8)
right = xr.Dataset({'im': (['row', 'col'], data),
'msk': (['row', 'col'], right_mask)},
coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
right.attrs = {'valid_pixels': 1, 'no_data_mask': 2}
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 1,
'subpix': 1})
cv = matching_cost_plugin.compute_cost_volume(left, right, -1, 1)
# Compute the disparity map and validity mask
disparity_ = disparity.AbstractDisparity(**{'disparity_method': 'wta', 'invalid_disparity': 0})
dataset = disparity_.to_disp(cv)
disparity_.validity_mask(dataset, left, right, cv)
# Validity mask ground truth
gt_mask = np.array(
[[cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE + cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
0, 0,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE + cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_RIGHT],
[cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_LEFT, cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE],
[cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE + cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_LEFT +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING, 0, 0,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE + cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER +
cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_RIGHT]], dtype=np.uint16)
        # Check if the calculated validity mask is equal to the ground truth (same shape and all elements equal)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
# ---------------------- Test with negative disparity range ----------------------
cv = matching_cost_plugin.compute_cost_volume(left, right, -2, -1)
# Compute the disparity map and validity mask
dataset = disparity_.to_disp(cv)
disparity_.validity_mask(dataset, left, right, cv)
# Validity mask ground truth
gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING +
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0, 0],
[cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER +
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_RIGHT,
cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_LEFT, 0],
[cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING +
cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_LEFT,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER]],
dtype=np.uint16)
        # Check if the calculated validity mask is equal to the ground truth (same shape and all elements equal)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
# ---------------------- Test with positive disparity range ----------------------
cv = matching_cost_plugin.compute_cost_volume(left, right, 1, 2)
# Compute the disparity map and validity mask
dataset = disparity_.to_disp(cv)
disparity_.validity_mask(dataset, left, right, cv)
# Validity mask ground truth
gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER, cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_RIGHT,
cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_RIGHT +
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING],
[0, cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_LEFT,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING],
[cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_LEFT, cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_RIGHT,
cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_RIGHT +
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING]],
dtype=np.uint16)
        # Check if the calculated validity mask is equal to the ground truth (same shape and all elements equal)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
# ---------------------- Test with positive and negative disparity range and window size = 3----------------
data = np.array(([[1, 2, 4, 6, 1],
[2, 4, 1, 6, 1],
[6, 7, 8, 10, 1],
[0, 5, 6, 7, 8]]), dtype=np.float64)
left_mask = np.array([[2, 1, 1, 1, 1],
[1, 2, 4, 1, 1],
[5, 2, 1, 1, 1],
[1, 1, 1, 1, 1]], dtype=np.uint8)
left = xr.Dataset({'im': (['row', 'col'], data),
'msk': (['row', 'col'], left_mask)},
coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
left.attrs = {'valid_pixels': 1, 'no_data_mask': 2}
data = np.array(([[6, 1, 2, 4, 1],
[6, 2, 4, 1, 6],
[10, 6, 7, 8, 1],
[5, 6, 7, 8, 0]]), dtype=np.float64)
right_mask = np.array([[1, 1, 1, 2, 1],
[5, 1, 1, 1, 1],
[2, 1, 1, 6, 1],
[0, 1, 1, 1, 1]], dtype=np.uint8)
right = xr.Dataset({'im': (['row', 'col'], data),
'msk': (['row', 'col'], right_mask)},
coords={'row': np.arange(data.shape[0]), 'col': np.arange(data.shape[1])})
right.attrs = {'valid_pixels': 1, 'no_data_mask': 2}
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 3,
'subpix': 1})
cv = matching_cost_plugin.compute_cost_volume(left, right, -1, 1)
# Compute the disparity map and validity mask
dataset = disparity_.to_disp(cv)
disparity_.validity_mask(dataset, left, right, cv)
# Validity mask ground truth
gt_mask = np.array(
[[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE + cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING +
cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_LEFT,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE + cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER, cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
],
dtype=np.uint16)
        # Check if the calculated validity mask is equal to the ground truth (same shape and all elements equal)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
# ---------------------- Test with positive and negative disparity range on flag 1 ----------------------
# Masks convention
# 1 = valid
# 0 = no_data
data = np.ones((10, 10), dtype=np.float64)
left_mask = np.ones((10, 10), dtype=np.uint8)
left = xr.Dataset({'im': (['row', 'col'], data),
'msk': (['row', 'col'], left_mask)},
coords={'row': np.arange(5, data.shape[0] + 5), 'col': np.arange(4, data.shape[1] + 4)})
left.attrs = {'valid_pixels': 1, 'no_data_mask': 0}
data = np.ones((10, 10), dtype=np.float64)
right_mask = np.ones((10, 10), dtype=np.uint8)
right_mask = np.tril(right_mask, -1.5)
right = xr.Dataset({'im': (['row', 'col'], data),
'msk': (['row', 'col'], right_mask)},
coords={'row': np.arange(5, data.shape[0] + 5), 'col': np.arange(4, data.shape[1] + 4)})
right.attrs = {'valid_pixels': 1, 'no_data_mask': 0}
matching_cost_plugin = matching_cost.AbstractMatchingCost(**{'matching_cost_method': 'sad', 'window_size': 3,
'subpix': 1})
cv = matching_cost_plugin.compute_cost_volume(left, right, -3, 2)
# Compute the disparity map and validity mask
dataset = disparity_.to_disp(cv)
disparity_.validity_mask(dataset, left, right, cv)
# Validity mask ground truth
gt_mask = np.array([[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0, 0,
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0, 0, 0,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0, 0, 0,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE +
cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE, 0, 0, 0,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER],
[cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER,
cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER]
],
dtype=np.uint8)
        # Check if the calculated validity mask is equal to the ground truth (same shape and all elements equal)
np.testing.assert_array_equal(dataset['validity_mask'].data, gt_mask)
if __name__ == '__main__':
common.setup_logging()
unittest.main()
|
import datetime
import json
import os
import boto3
import pandas as pd
import io
import requests
import numpy as np
from io import StringIO
import uuid
s3 = boto3.resource(
service_name='s3',
region_name='us-east-2')
bucket_name = 'secom-daas-bucket' # already created on S3
link1 = 'https://archive.ics.uci.edu/ml/machine-learning-databases/secom/secom.data'
link2 = "https://archive.ics.uci.edu/ml/machine-learning-databases/secom/secom_labels.data"
links = [link1,link2]
path = "/tmp/"
timestamp = str(int(datetime.datetime.timestamp(datetime.datetime.now())))
def timestampify(link,timestamp):
return link.split("/")[-1].split(".")[0]+"_"+timestamp+".data"
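# e.g. timestampify(link1, "1600000000") yields "secom_1600000000.data" (timestamp value is illustrative)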
data_filename = timestampify(link1,timestamp)
label_filename = timestampify(link2,timestamp)
def download_data():
    # fetch the SECOM measurements and write them to the /tmp directory
    r = requests.get(link1)
    with open(path + data_filename, 'wb') as f:
        f.write(r.content)
    print("Downloaded Secom data.")
    # fetch the SECOM labels
    r = requests.get(link2)
    with open(path + label_filename, 'wb') as f:
        f.write(r.content)
    print("Downloaded Secom labels.")
#time_stamp = str(int(datetime.datetime.timestamp(datetime.datetime.now())))
def process_time(secom_labels):
return [" ".join(i.decode("utf-8").split()[1:]).split('"')[1] for i in secom_labels]
def process_data(secom):
return np.array([pd.to_numeric(bytearray(i).decode("UTF-8").split(),errors='coerce') for i in secom]).astype(str)
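# process_time extracts the quoted acquisition timestamp from each raw line of secom_labels.data,
# while process_data parses every whitespace-separated measurement to a number (unparsable entries
# become NaN via errors='coerce') before the values are cast back to strings.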
def process_dataset(secom_path,secom_labels_path):
print("processing dataset from {} and {}".format(secom_path,secom_labels_path))
#read the downloaded .data files
with open(secom_path,'rb') as myfile:
secom= myfile.readlines()
myfile.close()
with open(secom_labels_path,'rb') as myfile:
secom_labels= myfile.readlines()
myfile.close()
columns1= ["Time"]
df1 = pd.DataFrame(data=process_time(secom_labels),
columns=columns1)
features_size = len(secom[0].split())
columns2 = ["feature "+ str(i) for i in range(features_size)]
df2 = pd.DataFrame(data=process_data(secom),
columns=columns2)
df2.fillna(df2.mean(),inplace=True)
df3 = pd.concat([df1,df2],axis=1).reset_index()
df3 = df3.rename(columns = {'index':'secomId'})
#set the secomId as unique ids
df3['secomId'] = pd.Series([int(uuid.uuid4().int/(10**30)) for i in range(df3.shape[0])])
return df3
#bucket = 'my_bucket_name' # already created on S3
def upload_to_s3(df,bucket_name,dest_path='df.csv'):
csv_buffer = StringIO()
df.to_csv(csv_buffer)
#s3_resource = boto3.resource('s3')
s3.Object(bucket_name, dest_path).put(Body=csv_buffer.getvalue())
print("Succesfully stored csv file into S3...")
def handler(event, context):
# Your code goes here!
startTime = datetime.datetime.now()
download_data()
df = process_dataset(path + data_filename,path + label_filename)
upload_to_s3(df, bucket_name, 'processed/processed_'+timestamp+".csv" )
print(datetime.datetime.now() - startTime)
handler(1,1)
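# Note: when this module runs on AWS Lambda, the runtime is expected to invoke handler(event, context)
# itself; the handler(1, 1) call above only acts as a local smoke test of the full pipeline.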
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Check alignments
# Check alignments of stimuli with the EEG data. The EEG recording contains a record of the acoustic stimulus, which can be compared with the stimulus itself. This loads the events through the pipeline in `alice.py`, i.e. the trigger correction is already applied and all subjects should have the correct alignment.
# +
# %matplotlib inline
from eelbrain import *
from alice import alice
# load the acoustic envelope predictor for each stimulus
gt = {f'{i}': alice.load_predictor(f'{i}~gammatone-1', 0.002, 1000, name='WAV') for i in range(1, 13)}
for y in gt.values():
y /= y.std()
# -
for subject in alice:
events = alice.load_events(raw='raw', data_raw=True)
raw = events.info['raw']
raw.load_data()
# S16, S22 have broken AUX channels
if subject in ['S05', 'S38']:
continue # no AUD channel
for name in ['AUD', 'Aux5']:
if name in raw.ch_names:
ch = raw.ch_names.index(name)
break
else:
print(subject, raw.ch_names)
        raise RuntimeError(f'No audio channel found for {subject}')
xs = []
# extract audio from EEG
for segment, i0 in events.zip('event', 'i_start'):
x = NDVar(raw._data[ch, i0:i0+1000], UTS(0, 0.002, 1000), name='EEG')
x -= x.min()
x /= x.std()
xs.append([x, gt[segment]])
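        # x holds 1000 samples at 0.002 s steps, i.e. the first 2 s of the trial at 500 Hz,
        # matching the sampling of the gammatone envelope predictors loaded above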
p = plot.UTS(xs, axh=2, w=10, ncol=1, title=subject, axtitle=events['trigger'])
# display and close to avoid having too many open figures
display(p)
p.close()
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Learning Rate Monitor
=====================
Monitors and logs the learning rate of LR schedulers during training.
"""
from collections import defaultdict
from typing import Any, DefaultDict, Dict, List, Optional, Set, Type
from torch.optim.optimizer import Optimizer
from pytorch_lightning.callbacks.base import Callback
from pytorch_lightning.utilities import rank_zero_warn
from pytorch_lightning.utilities.exceptions import MisconfigurationException
class LearningRateMonitor(Callback):
r"""
    Automatically monitors and logs the learning rate of learning rate schedulers during training.
Args:
logging_interval: set to ``'epoch'`` or ``'step'`` to log ``lr`` of all optimizers
at the same interval, set to ``None`` to log at individual interval
according to the ``interval`` key of each scheduler. Defaults to ``None``.
log_momentum: option to also log the momentum values of the optimizer, if the optimizer
has the ``momentum`` or ``betas`` attribute. Defaults to ``False``.
Raises:
MisconfigurationException:
If ``logging_interval`` is none of ``"step"``, ``"epoch"``, or ``None``.
Example::
>>> from pytorch_lightning import Trainer
>>> from pytorch_lightning.callbacks import LearningRateMonitor
>>> lr_monitor = LearningRateMonitor(logging_interval='step')
>>> trainer = Trainer(callbacks=[lr_monitor])
    Logging names are automatically determined based on the optimizer class name.
    In case of multiple optimizers of the same type, they will be named ``Adam``,
    ``Adam-1`` etc. If an optimizer has multiple parameter groups, they will
    be named ``Adam/pg1``, ``Adam/pg2`` etc. To control naming, pass in a
``name`` keyword in the construction of the learning rate schedulers.
A ``name`` keyword can also be used for parameter groups in the
construction of the optimizer.
Example::
        def configure_optimizers(self):
            optimizer = torch.optim.Adam(...)
            lr_scheduler = {
                'scheduler': torch.optim.lr_scheduler.LambdaLR(optimizer, ...),
                'name': 'my_logging_name'
            }
            return [optimizer], [lr_scheduler]
Example::
        def configure_optimizers(self):
optimizer = torch.optim.SGD(
[{
'params': [p for p in self.parameters()],
'name': 'my_parameter_group_name'
}],
lr=0.1
)
lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, ...)
return [optimizer], [lr_scheduler]
"""
def __init__(self, logging_interval: Optional[str] = None, log_momentum: bool = False):
if logging_interval not in (None, 'step', 'epoch'):
raise MisconfigurationException('logging_interval should be `step` or `epoch` or `None`.')
self.logging_interval = logging_interval
self.log_momentum = log_momentum
self.lrs = None
self.lr_sch_names = []
def on_train_start(self, trainer, *args, **kwargs):
"""
        Called before training; determines unique names for all lr
        schedulers, covering the cases of multiple schedulers of the
        same type and of multiple parameter groups
Raises:
MisconfigurationException:
If ``Trainer`` has no ``logger``.
"""
if not trainer.logger:
raise MisconfigurationException(
'Cannot use `LearningRateMonitor` callback with `Trainer` that has no logger.'
)
if not trainer.lr_schedulers:
rank_zero_warn(
'You are using `LearningRateMonitor` callback with models that'
' have no learning rate schedulers. Please see documentation'
' for `configure_optimizers` method.', RuntimeWarning
)
if self.log_momentum:
def _check_no_key(key):
return any(key not in sch['scheduler'].optimizer.defaults for sch in trainer.lr_schedulers)
if _check_no_key('momentum') and _check_no_key('betas'):
rank_zero_warn(
"You have set log_momentum=True, but some optimizers do not"
" have momentum. This will log a value 0 for the momentum.", RuntimeWarning
)
# Find names for schedulers
names = self._find_names(trainer.lr_schedulers)
# Initialize for storing values
self.lrs = {name: [] for name in names}
self.last_momentum_values = {name + "-momentum": None for name in names}
def on_train_batch_start(self, trainer, *args, **kwargs):
if not self._should_log(trainer):
return
if self.logging_interval != 'epoch':
interval = 'step' if self.logging_interval is None else 'any'
latest_stat = self._extract_stats(trainer, interval)
if latest_stat:
trainer.logger.log_metrics(latest_stat, step=trainer.global_step)
def on_train_epoch_start(self, trainer, *args, **kwargs):
if self.logging_interval != 'step':
interval = 'epoch' if self.logging_interval is None else 'any'
latest_stat = self._extract_stats(trainer, interval)
if latest_stat:
trainer.logger.log_metrics(latest_stat, step=trainer.global_step)
def _extract_stats(self, trainer, interval: str) -> Dict[str, float]:
latest_stat = {}
names = self._find_names(trainer.lr_schedulers, add_lr_sch_names=False)
self._remap_keys(names)
for name, scheduler in zip(self.lr_sch_names, trainer.lr_schedulers):
if scheduler['interval'] == interval or interval == 'any':
opt = scheduler['scheduler'].optimizer
param_groups = opt.param_groups
use_betas = 'betas' in opt.defaults
for i, pg in enumerate(param_groups):
name_and_suffix = self._add_suffix(name, param_groups, i)
lr = self._extract_lr(pg, name_and_suffix)
latest_stat.update(lr)
momentum = self._extract_momentum(
param_group=pg, name=name_and_suffix.replace(name, f'{name}-momentum'), use_betas=use_betas
)
latest_stat.update(momentum)
return latest_stat
def _extract_lr(self, param_group: Dict[str, Any], name: str) -> Dict[str, Any]:
lr = param_group.get('lr')
self.lrs[name].append(lr)
return {name: lr}
def _remap_keys(self, names: List[str], token: str = '/pg1') -> None:
"""
        This function is used to remap the keys when the number of param groups for a given optimizer has increased.
"""
for new_name in names:
old_name = new_name.replace(token, '')
if token in new_name and old_name in self.lrs:
self.lrs[new_name] = self.lrs.pop(old_name)
elif new_name not in self.lrs:
self.lrs[new_name] = []
def _extract_momentum(self, param_group: Dict[str, Any], name: str, use_betas: bool) -> Dict[str, float]:
if not self.log_momentum:
return {}
momentum = param_group.get('betas')[0] if use_betas else param_group.get('momentum', 0)
self.last_momentum_values[name] = momentum
return {name: momentum}
def _add_prefix(
self, name: str, optimizer_cls: Type[Optimizer], seen_optimizer_types: DefaultDict[Type[Optimizer], int]
) -> str:
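        # when several optimizers of the same class have been seen, a numeric suffix is appended,
        # e.g. the second one becomes 'lr-Adam-1' (example name; the first keeps its plain name)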
if optimizer_cls not in seen_optimizer_types:
return name
count = seen_optimizer_types[optimizer_cls]
return name + f'-{count - 1}' if count > 1 else name
def _add_suffix(self, name: str, param_groups: List[Dict], param_group_index: int, use_names: bool = True) -> str:
if len(param_groups) > 1:
if not use_names:
return f'{name}/pg{param_group_index+1}'
pg_name = param_groups[param_group_index].get('name', f'pg{param_group_index+1}')
return f'{name}/{pg_name}'
elif use_names:
pg_name = param_groups[param_group_index].get('name')
return f'{name}/{pg_name}' if pg_name else name
return name
def _duplicate_param_group_names(self, param_groups: List[Dict]) -> Set[str]:
names = [pg.get('name', f'pg{i}') for i, pg in enumerate(param_groups, start=1)]
unique = set(names)
if len(names) == len(unique):
return set()
return {n for n in names if names.count(n) > 1}
def _find_names(self, lr_schedulers: List, add_lr_sch_names: bool = True) -> List[str]:
# Create unique names in the case we have multiple of the same learning
# rate scheduler + multiple parameter groups
names = []
seen_optimizers = []
seen_optimizer_types = defaultdict(int)
for scheduler in lr_schedulers:
sch = scheduler['scheduler']
if scheduler['name'] is not None:
name = scheduler['name']
else:
name = 'lr-' + sch.optimizer.__class__.__name__
seen_optimizers.append(sch.optimizer)
optimizer_cls = type(sch.optimizer)
if scheduler['name'] is None:
seen_optimizer_types[optimizer_cls] += 1
# Multiple param groups for the same scheduler
param_groups = sch.optimizer.param_groups
duplicates = self._duplicate_param_group_names(param_groups)
if duplicates:
raise MisconfigurationException(
'A single `Optimizer` cannot have multiple parameter groups with identical '
f'`name` values. {name} has duplicated parameter group names {duplicates}'
)
name = self._add_prefix(name, optimizer_cls, seen_optimizer_types)
names.extend(self._add_suffix(name, param_groups, i) for i in range(len(param_groups)))
if add_lr_sch_names:
self.lr_sch_names.append(name)
return names
@staticmethod
def _should_log(trainer) -> bool:
return (trainer.global_step + 1) % trainer.log_every_n_steps == 0 or trainer.should_stop
|
# Copyright (C) 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import fire
from fire import test_components as tc
from fire import trace
import unittest
class FireTest(unittest.TestCase):
def testFire(self):
fire.Fire(tc.Empty)
fire.Fire(tc.OldStyleEmpty)
fire.Fire(tc.WithInit)
self.assertEqual(fire.Fire(tc.NoDefaults, 'double 2'), 4)
self.assertEqual(fire.Fire(tc.NoDefaults, 'triple 4'), 12)
self.assertEqual(fire.Fire(tc.WithDefaults, 'double 2'), 4)
self.assertEqual(fire.Fire(tc.WithDefaults, 'triple 4'), 12)
self.assertEqual(fire.Fire(tc.OldStyleWithDefaults, 'double 2'), 4)
self.assertEqual(fire.Fire(tc.OldStyleWithDefaults, 'triple 4'), 12)
def testFireNoArgs(self):
self.assertEqual(fire.Fire(tc.MixedDefaults, 'ten'), 10)
def testFireExceptions(self):
# Exceptions of Fire are printed to stderr and None is returned.
self.assertIsNone(fire.Fire(tc.Empty, 'nomethod')) # Member doesn't exist.
self.assertIsNone(fire.Fire(tc.NoDefaults, 'double')) # Missing argument.
self.assertIsNone(fire.Fire(tc.TypedProperties, 'delta x')) # Missing key.
# Exceptions of the target components are still raised.
with self.assertRaises(ZeroDivisionError):
fire.Fire(tc.NumberDefaults, 'reciprocal 0.0')
def testFireNamedArgs(self):
self.assertEqual(fire.Fire(tc.WithDefaults, 'double --count 5'), 10)
self.assertEqual(fire.Fire(tc.WithDefaults, 'triple --count 5'), 15)
self.assertEqual(fire.Fire(tc.OldStyleWithDefaults, 'double --count 5'), 10)
self.assertEqual(fire.Fire(tc.OldStyleWithDefaults, 'triple --count 5'), 15)
def testFireNamedArgsWithEquals(self):
self.assertEqual(fire.Fire(tc.WithDefaults, 'double --count=5'), 10)
self.assertEqual(fire.Fire(tc.WithDefaults, 'triple --count=5'), 15)
def testFireAllNamedArgs(self):
self.assertEqual(fire.Fire(tc.MixedDefaults, 'sum 1 2'), 5)
self.assertEqual(fire.Fire(tc.MixedDefaults, 'sum --alpha 1 2'), 5)
self.assertEqual(fire.Fire(tc.MixedDefaults, 'sum --beta 1 2'), 4)
self.assertEqual(fire.Fire(tc.MixedDefaults, 'sum 1 --alpha 2'), 4)
self.assertEqual(fire.Fire(tc.MixedDefaults, 'sum 1 --beta 2'), 5)
self.assertEqual(fire.Fire(tc.MixedDefaults, 'sum --alpha 1 --beta 2'), 5)
self.assertEqual(fire.Fire(tc.MixedDefaults, 'sum --beta 1 --alpha 2'), 4)
def testFireAllNamedArgsOneMissing(self):
self.assertEqual(fire.Fire(tc.MixedDefaults, 'sum'), 0)
self.assertEqual(fire.Fire(tc.MixedDefaults, 'sum 1'), 1)
self.assertEqual(fire.Fire(tc.MixedDefaults, 'sum --alpha 1'), 1)
self.assertEqual(fire.Fire(tc.MixedDefaults, 'sum --beta 2'), 4)
def testFirePartialNamedArgs(self):
self.assertEqual(fire.Fire(tc.MixedDefaults, 'identity 1 2'), (1, 2))
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity --alpha 1 2'), (1, 2))
self.assertEqual(fire.Fire(tc.MixedDefaults, 'identity --beta 1 2'), (2, 1))
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity 1 --alpha 2'), (2, 1))
self.assertEqual(fire.Fire(tc.MixedDefaults, 'identity 1 --beta 2'), (1, 2))
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity --alpha 1 --beta 2'), (1, 2))
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity --beta 1 --alpha 2'), (2, 1))
def testFirePartialNamedArgsOneMissing(self):
# By default, errors are written to standard out and None is returned.
self.assertIsNone( # Identity needs an arg.
fire.Fire(tc.MixedDefaults, 'identity'))
self.assertIsNone( # Identity needs a value for alpha.
fire.Fire(tc.MixedDefaults, 'identity --beta 2'))
self.assertEqual(fire.Fire(tc.MixedDefaults, 'identity 1'), (1, '0'))
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity --alpha 1'), (1, '0'))
def testFireProperties(self):
self.assertEqual(fire.Fire(tc.TypedProperties, 'alpha'), True)
self.assertEqual(fire.Fire(tc.TypedProperties, 'beta'), (1, 2, 3))
def testFireRecursion(self):
self.assertEqual(
fire.Fire(tc.TypedProperties, 'charlie double hello'), 'hellohello')
self.assertEqual(fire.Fire(tc.TypedProperties, 'charlie triple w'), 'www')
def testFireVarArgs(self):
self.assertEqual(
fire.Fire(tc.VarArgs, 'cumsums a b c d'), ['a', 'ab', 'abc', 'abcd'])
self.assertEqual(fire.Fire(tc.VarArgs, 'cumsums 1 2 3 4'), [1, 3, 6, 10])
def testFireVarArgsWithNamedArgs(self):
self.assertEqual(fire.Fire(tc.VarArgs, 'varchars 1 2 c d'), (1, 2, 'cd'))
self.assertEqual(fire.Fire(tc.VarArgs, 'varchars 3 4 c d e'), (3, 4, 'cde'))
def testFireKeywordArgs(self):
self.assertEqual(fire.Fire(tc.Kwargs, 'props --name David --age 24'),
{'name': 'David', 'age': 24})
self.assertEqual(
fire.Fire(tc.Kwargs,
'props --message "This is a message it has -- in it"'),
{'message': 'This is a message it has -- in it'})
self.assertEqual(fire.Fire(tc.Kwargs, 'upper --alpha A --beta B'),
'ALPHA BETA')
self.assertEqual(fire.Fire(tc.Kwargs, 'upper --alpha A --beta B - lower'),
'alpha beta')
def testFireKeywordArgsWithMissingPositionalArgs(self):
self.assertEqual(fire.Fire(tc.Kwargs, 'run Hello World --cell is'),
('Hello', 'World', {'cell': 'is'}))
self.assertEqual(fire.Fire(tc.Kwargs, 'run Hello --cell ok'),
('Hello', None, {'cell': 'ok'}))
def testFireObject(self):
self.assertEqual(fire.Fire(tc.WithDefaults(), 'double --count 5'), 10)
self.assertEqual(fire.Fire(tc.WithDefaults(), 'triple --count 5'), 15)
def testFireDict(self):
component = {
'double': lambda x=0: 2 * x,
'cheese': 'swiss',
}
self.assertEqual(fire.Fire(component, 'double 5'), 10)
self.assertEqual(fire.Fire(component, 'cheese'), 'swiss')
def testFireObjectWithDict(self):
self.assertEqual(fire.Fire(tc.TypedProperties, 'delta echo'), 'E')
self.assertEqual(fire.Fire(tc.TypedProperties, 'delta echo lower'), 'e')
self.assertIsInstance(fire.Fire(tc.TypedProperties, 'delta nest'), dict)
self.assertEqual(fire.Fire(tc.TypedProperties, 'delta nest 0'), 'a')
def testFireList(self):
component = ['zero', 'one', 'two', 'three']
self.assertEqual(fire.Fire(component, '2'), 'two')
self.assertEqual(fire.Fire(component, '3'), 'three')
self.assertEqual(fire.Fire(component, '-1'), 'three')
def testFireObjectWithList(self):
self.assertEqual(fire.Fire(tc.TypedProperties, 'echo 0'), 'alex')
self.assertEqual(fire.Fire(tc.TypedProperties, 'echo 1'), 'bethany')
def testFireObjectWithTuple(self):
self.assertEqual(fire.Fire(tc.TypedProperties, 'fox 0'), 'carry')
self.assertEqual(fire.Fire(tc.TypedProperties, 'fox 1'), 'divide')
def testFireNoComponent(self):
self.assertEqual(fire.Fire(command='tc WithDefaults double 10'), 20)
last_char = lambda text: text[-1] # pylint: disable=unused-variable
self.assertEqual(fire.Fire(command='last_char "Hello"'), 'o')
self.assertEqual(fire.Fire(command='last-char "World"'), 'd')
rset = lambda count=0: set(range(count)) # pylint: disable=unused-variable
self.assertEqual(fire.Fire(command='rset 5'), {0, 1, 2, 3, 4})
def testFireUnderscores(self):
self.assertEqual(
fire.Fire(tc.Underscores, 'underscore-example'), 'fish fingers')
self.assertEqual(
fire.Fire(tc.Underscores, 'underscore_example'), 'fish fingers')
def testFireUnderscoresInArg(self):
self.assertEqual(
fire.Fire(tc.Underscores, 'underscore-function example'), 'example')
self.assertEqual(
fire.Fire(tc.Underscores, 'underscore_function --underscore-arg=score'),
'score')
self.assertEqual(
fire.Fire(tc.Underscores, 'underscore_function --underscore_arg=score'),
'score')
def testBoolParsing(self):
self.assertEqual(fire.Fire(tc.BoolConverter, 'as-bool True'), True)
self.assertEqual(fire.Fire(tc.BoolConverter, 'as-bool False'), False)
self.assertEqual(fire.Fire(tc.BoolConverter, 'as-bool --arg=True'), True)
self.assertEqual(fire.Fire(tc.BoolConverter, 'as-bool --arg=False'), False)
self.assertEqual(fire.Fire(tc.BoolConverter, 'as-bool --arg'), True)
self.assertEqual(fire.Fire(tc.BoolConverter, 'as-bool --noarg'), False)
def testBoolParsingContinued(self):
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity True False'), (True, False))
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity --alpha=False 10'), (False, 10))
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity --alpha --beta 10'), (True, 10))
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity --alpha --beta=10'), (True, 10))
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity --noalpha --beta'), (False, True))
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity 10 --beta'), (10, True))
def testBoolParsingLessExpectedCases(self):
# Note: Does not return (True, 10).
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity --alpha 10'), (10, '0'))
# To get (True, 10), use one of the following:
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity --alpha --beta=10'), (True, 10))
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity True 10'), (True, 10))
# Note: Does not return ('--test', '0').
self.assertEqual(fire.Fire(tc.MixedDefaults, 'identity --alpha --test'),
(True, '--test'))
# To get ('--test', '0'), use one of the following:
self.assertEqual(fire.Fire(tc.MixedDefaults, 'identity --alpha=--test'),
('--test', '0'))
self.assertEqual(
fire.Fire(tc.MixedDefaults, r'identity --alpha \"--test\"'),
('--test', '0'))
def testBoolParsingWithNo(self):
# In these examples --nothing always refers to the nothing argument:
def fn1(thing, nothing):
return thing, nothing
self.assertEqual(fire.Fire(fn1, '--thing --nothing'), (True, True))
self.assertEqual(fire.Fire(fn1, '--thing --nonothing'), (True, False))
# In the next example nothing=False (since rightmost setting of a flag gets
# precedence), but it errors because thing has no value.
self.assertEqual(fire.Fire(fn1, '--nothing --nonothing'), None)
# In these examples, --nothing sets thing=False:
def fn2(thing, **kwargs):
return thing, kwargs
self.assertEqual(fire.Fire(fn2, '--thing'), (True, {}))
self.assertEqual(fire.Fire(fn2, '--nothing'), (False, {}))
# In the next one, nothing=True, but it errors because thing has no value.
self.assertEqual(fire.Fire(fn2, '--nothing=True'), None)
self.assertEqual(fire.Fire(fn2, '--nothing --nothing=True'),
(False, {'nothing': True}))
def fn3(arg, **kwargs):
return arg, kwargs
self.assertEqual(fire.Fire(fn3, '--arg=value --thing'),
('value', {'thing': True}))
self.assertEqual(fire.Fire(fn3, '--arg=value --nothing'),
('value', {'thing': False}))
self.assertEqual(fire.Fire(fn3, '--arg=value --nonothing'),
('value', {'nothing': False}))
def testTraceFlag(self):
self.assertIsInstance(
fire.Fire(tc.BoolConverter, 'as-bool True -- --trace'), trace.FireTrace)
self.assertIsInstance(
fire.Fire(tc.BoolConverter, 'as-bool True -- -t'), trace.FireTrace)
self.assertIsInstance(
fire.Fire(tc.BoolConverter, '-- --trace'), trace.FireTrace)
def testHelpFlag(self):
self.assertIsNone(fire.Fire(tc.BoolConverter, 'as-bool True -- --help'))
self.assertIsNone(fire.Fire(tc.BoolConverter, 'as-bool True -- -h'))
self.assertIsNone(fire.Fire(tc.BoolConverter, '-- --help'))
def testHelpFlagAndTraceFlag(self):
self.assertIsInstance(
fire.Fire(tc.BoolConverter, 'as-bool True -- --help --trace'),
trace.FireTrace)
self.assertIsInstance(
fire.Fire(tc.BoolConverter, 'as-bool True -- -h -t'), trace.FireTrace)
self.assertIsInstance(
fire.Fire(tc.BoolConverter, '-- -h --trace'), trace.FireTrace)
def testTabCompletionNoName(self):
with self.assertRaises(ValueError):
fire.Fire(tc.NoDefaults, '-- --completion')
def testTabCompletion(self):
completion_script = fire.Fire(tc.NoDefaults, '-- --completion', name='c')
self.assertIn('double', completion_script)
self.assertIn('triple', completion_script)
def testTabCompletionWithDict(self):
actions = {'multiply': lambda a, b: a * b}
completion_script = fire.Fire(actions, '-- --completion', name='actCLI')
self.assertIn('actCLI', completion_script)
self.assertIn('multiply', completion_script)
def testBasicSeparator(self):
# '-' is the default separator.
self.assertEqual(fire.Fire(tc.MixedDefaults, 'identity + _'), ('+', '_'))
self.assertEqual(fire.Fire(tc.MixedDefaults, 'identity _ + -'), ('_', '+'))
# If we change the separator we can use '-' as an argument.
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'identity - _ -- --separator &'),
('-', '_'))
# The separator triggers a function call, but there aren't enough arguments.
self.assertEqual(fire.Fire(tc.MixedDefaults, 'identity - _ +'), None)
def testExtraSeparators(self):
self.assertEqual(
fire.Fire(tc.ReturnsObj, 'get-obj arg1 arg2 - - as-bool True'), True)
self.assertEqual(
fire.Fire(tc.ReturnsObj, 'get-obj arg1 arg2 - - - as-bool True'), True)
def testSeparatorForChaining(self):
# Without a separator all args are consumed by get_obj.
self.assertIsInstance(
fire.Fire(tc.ReturnsObj, 'get-obj arg1 arg2 as-bool True'),
tc.BoolConverter)
# With a separator only the preceding args are consumed by get_obj.
self.assertEqual(
fire.Fire(tc.ReturnsObj, 'get-obj arg1 arg2 - as-bool True'), True)
self.assertEqual(
fire.Fire(tc.ReturnsObj,
'get-obj arg1 arg2 & as-bool True -- --separator &'),
True)
self.assertEqual(
fire.Fire(tc.ReturnsObj,
'get-obj arg1 $$ as-bool True -- --separator $$'),
True)
def testFloatForExpectedInt(self):
self.assertEqual(
fire.Fire(tc.MixedDefaults, 'sum --alpha 2.2 --beta 3.0'), 8.2)
self.assertEqual(
fire.Fire(tc.NumberDefaults, 'integer_reciprocal --divisor 5.0'), 0.2)
self.assertEqual(
fire.Fire(tc.NumberDefaults, 'integer_reciprocal 4.0'), 0.25)
def testClassInstantiation(self):
self.assertIsInstance(fire.Fire(tc.InstanceVars, '--arg1=a1 --arg2=a2'),
tc.InstanceVars)
# Cannot instantiate a class with positional args by default.
self.assertIsNone(fire.Fire(tc.InstanceVars, 'a1 a2'))
def testTraceErrors(self):
# Class needs additional value but runs out of args.
self.assertIsNone(fire.Fire(tc.InstanceVars, 'a1'))
self.assertIsNone(fire.Fire(tc.InstanceVars, '--arg1=a1'))
# Routine needs additional value but runs out of args.
self.assertIsNone(fire.Fire(tc.InstanceVars, 'a1 a2 - run b1'))
self.assertIsNone(
fire.Fire(tc.InstanceVars, '--arg1=a1 --arg2=a2 - run b1'))
# Extra args cannot be consumed.
self.assertIsNone(fire.Fire(tc.InstanceVars, 'a1 a2 - run b1 b2 b3'))
self.assertIsNone(
fire.Fire(tc.InstanceVars, '--arg1=a1 --arg2=a2 - run b1 b2 b3'))
# Cannot find member to access.
self.assertIsNone(fire.Fire(tc.InstanceVars, 'a1 a2 - jog'))
self.assertIsNone(fire.Fire(tc.InstanceVars, '--arg1=a1 --arg2=a2 - jog'))
if __name__ == '__main__':
unittest.main()
|
# -*-coding:utf-8-*-
from flask import Flask
__author__ = 'ZeroLoo'
|
import http.client
import urllib.request, urllib.parse, urllib.error
import urllib.request, urllib.error, urllib.parse
import re
import csv
from http.cookiejar import CookieJar
class pyGTrends(object):
"""
Google Trends API
Recommended usage:
from csv import DictReader
r = pyGTrends(username, password)
r.download_report(('pants', 'skirt'))
d = DictReader(r.csv().split('\n'))
"""
def __init__(self, username, password):
"""
provide the login and password used to connect to Google Trends
all immutable system variables are also defined here
"""
self.login_params = {
"continue": 'http://www.google.com/trends',
"PersistentCookie": "yes",
"Email": username,
"Passwd": password,
}
self.headers = [("Referrer", "https://www.google.com/accounts/ServiceLoginBoxAuth"),
("Content-type", "application/x-www-form-urlencoded"),
('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.21 (KHTML, like Gecko) Chrome/19.0.1042.0 Safari/535.21'),
("Accept", "text/plain")]
self.url_ServiceLoginBoxAuth = 'https://accounts.google.com/ServiceLoginBoxAuth'
self.url_Export = 'http://www.google.com/trends/viz'
self.url_CookieCheck = 'https://www.google.com/accounts/CheckCookie?chtml=LoginDoneHtml'
self.header_dictionary = {}
self._connect()
def _connect(self):
"""
connect to Google Trends
"""
self.cj = CookieJar()
self.opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(self.cj))
self.opener.addheaders = self.headers
galx = re.compile('<input type="hidden" name="GALX" value="(?P<galx>[a-zA-Z0-9_-]+)">')
resp = self.opener.open(self.url_ServiceLoginBoxAuth).read().decode('utf-8')
m = galx.search(resp)
if not m:
raise Exception("Cannot parse GALX out of login page")
self.login_params['GALX'] = m.group('galx')
params = urllib.parse.urlencode(self.login_params)
self.opener.open(self.url_ServiceLoginBoxAuth, params.encode('utf-8'))
self.opener.open(self.url_CookieCheck)
def download_report(self, keywords, date='all', geo='all', geor='all', graph = 'all_csv', sort=0, scale=0, sa='N'):
"""
download a specific report
date, geo, geor, graph, sort, scale and sa
are all Google Trends specific ways to slice the data
"""
if not isinstance(keywords, (list, tuple)):
keywords = [keywords]
params = urllib.parse.urlencode({
'q': ",".join(keywords),
'date': date,
'graph': graph,
'geo': geo,
'geor': geor,
'sort': str(sort),
'scale': str(scale),
'sa': sa
})
self.raw_data = self.opener.open('http://www.google.com/trends/viz?' + params).read().decode('utf-8')
if 'You must be signed in to export data from Google Trends' in self.raw_data:
raise Exception(self.raw_data)
def csv(self, section="main", as_list=False):
"""
Returns a CSV of a specific segment of the data.
Available segments include Main, Language, City and Region.
"""
if section == "main":
section = ("Week","Year","Day","Month")
else:
section = (section,)
segments = self.raw_data.split('\n\n\n')
for s in segments:
if s.partition(',')[0] in section:
if as_list:
return [line for line in csv.reader(s.split('\n'))]
else:
return s
raise Exception("Could not find requested section")
|
import platform, sys, os, subprocess
import psutil
from app.api.models.LXDModule import LXDModule
import logging
def readInstanceDetails():
instanceDetails = ("Python Version: {}".format(platform.python_version()))
instanceDetails +=("\nPython Path: {}".format(' '.join(path for path in sys.path)))
instanceDetails +=("\nLXD Version: {}".format(getLXDInfo()['environment']['server_version']))
instanceDetails +=("\nLXD Status: {}".format(getLXDInfo()['api_status']))
instanceDetails +=("\nOS: {}".format(platform.platform()))
instanceDetails +=("\nLXDUI Path: {}".format(sys.path[0]))
instanceDetails +=("\nCPU Count: {}".format(getProcessorDetails()))
instanceDetails +=("\nMemory: {}MB".format(getMemory()))
instanceDetails +=("\nDisk used percent: {}".format(getDiskDetails()))
logging.info(instanceDetails)
def getLXDInfo():
try:
info = LXDModule().config()
return info
except:
return {
'environment': {
'server_version': 'N/A'
},
'api_status': 'N/A'
}
def getMemory():
return int(psutil.virtual_memory().total / (1024*1024))
def getProcessorDetails():
return psutil.cpu_count()
def getDiskDetails():
return psutil.disk_usage('/').percent
|
import json
filename = "num_predileto.txt"
try:
numero = int(input("What is your favorite number? "))
except ValueError:
print("Você digitou um valor incorreto.")
else:
with open(filename, "w") as f:
json.dump(numero, f)
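# Illustrative counterpart (not in the original script): reading the saved
# favorite number back from the same JSON file. FileNotFoundError is handled in
# case the write above was skipped because of invalid input.
try:
    with open(filename) as f:
        numero_salvo = json.load(f)
except FileNotFoundError:
    print("No saved number found.")
else:
    print(f"Your favorite number is {numero_salvo}.")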
|
#AUTOGENERATED! DO NOT EDIT! File to edit: dev/07_data.block.ipynb (unless otherwise specified).
__all__ = ['TransformBlock', 'CategoryBlock', 'MultiCategoryBlock', 'DataBlock']
#Cell
from ..torch_basics import *
from ..test import *
from .core import *
from .load import *
from .external import *
from .transforms import *
#Cell
class TransformBlock():
"A basic wrapper that links defaults transforms for the data block API"
def __init__(self, type_tfms=None, item_tfms=None, batch_tfms=Cuda, dl_type=None, dbunch_kwargs=None):
self.type_tfms = L(type_tfms)
self.item_tfms = ToTensor + L(item_tfms)
self.batch_tfms = Cuda + L(batch_tfms)
self.dl_type,self.dbunch_kwargs = dl_type,({} if dbunch_kwargs is None else dbunch_kwargs)
#Cell
def CategoryBlock(vocab=None, add_na=False):
"`TransformBlock` for single-label categorical targets"
return TransformBlock(type_tfms=Categorize(vocab=vocab, add_na=add_na))
#Cell
def MultiCategoryBlock(encoded=False, vocab=None, add_na=False):
"`TransformBlock` for multi-label categorical targets"
tfm = EncodedMultiCategorize(vocab=vocab) if encoded else [MultiCategorize(vocab=vocab, add_na=add_na), OneHotEncode]
return TransformBlock(type_tfms=tfm)
#Cell
from inspect import isfunction,ismethod
#Cell
def _merge_tfms(*tfms):
"Group the `tfms` in a single list, removing duplicates (from the same class) and instantiating"
g = groupby(concat(*tfms), lambda o:
o if isinstance(o, type) else o.__qualname__ if (isfunction(o) or ismethod(o)) else o.__class__)
return L(v[-1] for k,v in g.items()).map(instantiate)
#Cell
@docs
@funcs_kwargs
class DataBlock():
"Generic container to quickly build `DataSource` and `DataBunch`"
get_x=get_items=splitter=get_y = None
dl_type = TfmdDL
_methods = 'get_items splitter get_y get_x'.split()
def __init__(self, blocks=None, dl_type=None, getters=None, n_inp=None, **kwargs):
blocks = L(getattr(self,'blocks',(TransformBlock,TransformBlock)) if blocks is None else blocks)
blocks = L(b() if callable(b) else b for b in blocks)
self.default_type_tfms = blocks.attrgot('type_tfms', L())
self.default_item_tfms = _merge_tfms(*blocks.attrgot('item_tfms', L()))
self.default_batch_tfms = _merge_tfms(*blocks.attrgot('batch_tfms', L()))
for t in blocks:
if getattr(t, 'dl_type', None) is not None: self.dl_type = t.dl_type
if dl_type is not None: self.dl_type = dl_type
self.databunch = delegates(self.dl_type.__init__)(self.databunch)
self.dbunch_kwargs = merge(*blocks.attrgot('dbunch_kwargs', {}))
self.n_inp,self.getters = n_inp,L(getters)
if getters is not None: assert self.get_x is None and self.get_y is None
assert not kwargs
def datasource(self, source, type_tfms=None):
self.source = source
items = (self.get_items or noop)(source)
if isinstance(items,tuple):
items = L(items).zip()
labellers = [itemgetter(i) for i in range_of(self.default_type_tfms)]
else: labellers = [noop] * len(self.default_type_tfms)
splits = (self.splitter or noop)(items)
if self.get_x: labellers[0] = self.get_x
if self.get_y: labellers[1] = self.get_y
if self.getters: labellers = self.getters
if type_tfms is None: type_tfms = [L() for t in self.default_type_tfms]
type_tfms = L([self.default_type_tfms, type_tfms, labellers]).map_zip(
lambda tt,tfm,l: L(l) + _merge_tfms(tt, tfm))
return DataSource(items, tfms=type_tfms, splits=splits, dl_type=self.dl_type, n_inp=self.n_inp)
def databunch(self, source, path='.', type_tfms=None, item_tfms=None, batch_tfms=None, **kwargs):
dsrc = self.datasource(source, type_tfms=type_tfms)
item_tfms = _merge_tfms(self.default_item_tfms, item_tfms)
batch_tfms = _merge_tfms(self.default_batch_tfms, batch_tfms)
kwargs = {**self.dbunch_kwargs, **kwargs}
return dsrc.databunch(path=path, after_item=item_tfms, after_batch=batch_tfms, **kwargs)
_docs = dict(datasource="Create a `Datasource` from `source` with `type_tfms`",
databunch="Create a `DataBunch` from `source` with `item_tfms` and `batch_tfms`")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import setuptools
with open('README.md', 'r') as fp:
long_description = fp.read()
pos = long_description.find('# Development')
if pos > -1:
long_description = long_description[:pos]
setuptools.setup(
name='qri',
version='0.1.5',
author='Dustin Long',
author_email='dustmop@qri.io',
description='qri python client',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/qri-io/qri-python',
packages=setuptools.find_packages(),
install_requires=[
'pandas==1.0.0',
'Markdown==3.2.2',
'requests==2.24.0',
],
classifiers=[
'Programming Language :: Python',
'License :: OSI Approved :: MIT License',
],
python_requires='>=3.6'
)
|
def test_something():
assert 1 == 1
|
from slick_reporting.views import SampleReportView
from .models import OrderLine
class MonthlyProductSales(SampleReportView):
report_model = OrderLine
date_field = 'date_placed' # or 'order__date_placed'
group_by = 'product'
columns = ['name', 'sku']
time_series_pattern = 'monthly'
time_series_columns = ['__total_quantity__']
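# Hedged wiring sketch (not part of the original snippet), assuming
# SampleReportView follows the usual Django class-based-view pattern: the report
# above would typically be exposed through a URLconf entry like the one below.
# The URL path and name are placeholders.
#
# from django.urls import path
#
# urlpatterns = [
#     path('reports/monthly-product-sales/',
#          MonthlyProductSales.as_view(),
#          name='monthly-product-sales'),
# ]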
|
#!/usr/bin/env python3
"""
Synopsis: utilities/generate_schema.py > lib/schema.py
This routine pulls the current table definitions from the csv2 database and writes the
schema to stdout. To use the schema definitions:
from lib.schema import <view_or_table_name_1>, <view_or_table_name_2>, ...
"""
from subprocess import Popen, PIPE
from tempfile import mkdtemp
import json
import os
import sys
import yaml
REMOVE_BRACKETS = str.maketrans('()', ' ')
def main(args):
"""
This does everything:
o Writes the schema header to stdout.
o Retrieves the list of tables from the csv2 database.
o Then for each table:
- Resets the variable _stdout to just the table header.
- Retrieves the column list for the table.
- Then for each column:
+ Appends the column definition to _stdout.
- Appends the table footer to _stdout.
- Writes the table definition to stdout.
"""
gvar = {}
fd = open('/etc/cloudscheduler/cloudscheduler.yaml')
gvar['csv2_config'] = yaml.full_load(fd.read())
fd.close()
# schema_na_path now points to the same file as the original schema_path, so half
# of this code can probably be removed since it rewrites the same file. Before
# dropping the first (sqlalchemy) pass, check whether it computes anything that
# the second pass reuses.
gvar['cmd_path'] = os.path.abspath(args[0])
gvar['cmd_path_stat'] = os.stat(gvar['cmd_path'])
gvar['path_info'] = gvar['cmd_path'].split('/')
gvar['ix'] = gvar['path_info'].index('cloudscheduler')
gvar['schema_path'] = '%s/lib/schema.py' % '/'.join(gvar['path_info'][:gvar['ix']+1])
gvar['schema_na_path'] = '%s/lib/schema.py' % '/'.join(gvar['path_info'][:gvar['ix']+1])
gvar['fd'] = open(gvar['schema_path'], 'w')
gvar['schema_na'] = {}
_p1 = Popen(
[
'mysql',
'-u%s' % gvar['csv2_config']['database']['db_user'],
'-p%s' % gvar['csv2_config']['database']['db_password'],
'-h%s' % gvar['csv2_config']['database']['db_host'],
'-e',
'show tables;',
gvar['csv2_config']['database']['db_name']
],
stdout=PIPE,
stderr=PIPE
)
_p2 = Popen(
[
'awk',
'!/Tables_in_csv2/ {print $1}'
],
stdin=_p1.stdout,
stdout=PIPE,
stderr=PIPE
)
stdout, stderr = _p2.communicate()
if _p2.returncode != 0:
print('Failed to retrieve table list.')
exit(1)
gvar['fd'].write(
"if 'Table' not in locals() and 'Table' not in globals():\n" + \
" from sqlalchemy import Table, Column, Float, Integer, String, MetaData, ForeignKey\n" + \
" metadata = MetaData()\n\n"
)
tables = stdout.decode('ascii').split()
for table in tables:
_stdout = ["%s = Table('%s', metadata,\n" % (table, table)]
gvar['schema_na'][table] = {'keys': [], 'columns': {}}
_p1 = Popen(
[
'mysql',
'-u%s' % gvar['csv2_config']['database']['db_user'],
'-p%s' % gvar['csv2_config']['database']['db_password'],
'-h%s' % gvar['csv2_config']['database']['db_host'],
'-e',
'show columns from %s;' % table,
gvar['csv2_config']['database']['db_name']
],
stdout=PIPE,
stderr=PIPE
)
_p2 = Popen(
[
'awk',
'!/^+/'
],
stdin=_p1.stdout,
stdout=PIPE,
stderr=PIPE
)
stdout, stderr = _p2.communicate()
if _p2.returncode != 0:
print('Failed to retrieve table columns.')
exit(1)
columns = stdout.decode('ascii').split("\n")
for _ix in range(1, len(columns)):
_w = columns[_ix].split()
if len(_w) > 2:
_stdout.append(" Column('%s'," % _w[0])
# gvar['schema_na'][table]['columns'][_w[0]] = []
if _w[1][:5] == 'char(' or \
_w[1][:8] == 'varchar(':
_w2 = _w[1].translate(REMOVE_BRACKETS).split()
_stdout.append(" String(%s)" % _w2[1])
gvar['schema_na'][table]['columns'][_w[0]] = {'type': 'str', 'len': _w2[1], 'nulls': _w[2]}
elif _w[1][:4] == 'int(' or \
_w[1][:6] == 'bigint' or \
_w[1][:7] == 'decimal' or \
_w[1][:8] == 'smallint' or \
_w[1][:7] == 'tinyint':
_stdout.append(" Integer")
gvar['schema_na'][table]['columns'][_w[0]] = {'type': 'int'}
elif _w[1] == 'text' or \
_w[1][:4] == 'date' or \
_w[1][:8] == 'datetime' or \
_w[1][:4] == 'time' or \
_w[1][:9] == 'timestamp' or \
_w[1] == 'tinytext' or \
_w[1] == 'longtext' or \
_w[1] == 'mediumtext':
_stdout.append(" String")
gvar['schema_na'][table]['columns'][_w[0]] = {'type': 'str', 'nulls': _w[2]}
elif _w[1][:7] == 'double' or \
_w[1][:5] == 'float':
_stdout.append(" Float")
gvar['schema_na'][table]['columns'][_w[0]] = {'type': 'float'}
else:
print('Table %s, unknown data type for column: %s' % (table, columns[_ix]))
exit(1)
if len(_w) > 3 and _w[3] == 'PRI':
_stdout.append(", primary_key=True")
gvar['schema_na'][table]['keys'].append(_w[0])
if _ix < len(columns) - 2:
_stdout.append("),\n")
else:
_stdout.append(")\n )\n")
gvar['fd'].write('%s\n' % ''.join(_stdout))
gvar['fd'].close()
gvar['fd'] = open(gvar['schema_na_path'], 'w')
gvar['fd'].write('schema = {\n')
tix = 0
for table in sorted(gvar['schema_na']):
gvar['fd'].write(' "%s": {\n "keys": [\n' % table)
ix = 0
for key in gvar['schema_na'][table]['keys']:
if ix < len(gvar['schema_na'][table]['keys'])-1:
gvar['fd'].write(' "%s",\n' % key)
else:
gvar['fd'].write(' "%s"\n' % key)
ix += 1
gvar['fd'].write(' ],\n "columns": {\n')
ix = 0
for column in gvar['schema_na'][table]['columns']:
if ix < len(gvar['schema_na'][table]['columns'])-1:
gvar['fd'].write(' "%s": %s,\n' % (column, json.dumps(gvar['schema_na'][table]['columns'][column])))
else:
gvar['fd'].write(' "%s": %s\n' % (column, json.dumps(gvar['schema_na'][table]['columns'][column])))
ix += 1
if tix < len(gvar['schema_na'])-1:
gvar['fd'].write(' }\n },\n')
else:
gvar['fd'].write(' }\n }\n }\n')
tix += 1
gvar['fd'].close()
_p1 = Popen(
[
'chown',
'%s.%s' % (gvar['cmd_path_stat'].st_uid, gvar['cmd_path_stat'].st_gid),
gvar['schema_path']
]
)
_p1.communicate()
if __name__ == "__main__":
main(sys.argv)
|
import string
import torch
from torch.nn import CrossEntropyLoss
from torch.nn import CTCLoss
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
from torchsummary import summary
from tqdm import tqdm
from cnn_seq2seq import ConvSeq2Seq
from cnn_seq2seq import Decoder
from cnn_seq2seq import Encoder
from cnn_seq2seq_att import ConvSeq2SeqAtt
from crnn import CRNN
from data_utils import FakeTextImageGenerator
from utils import labels_to_text
from utils import text_to_labels
def train(path=None):
dataset = FakeTextImageGenerator(batch_size=16).iter()
criterion = CTCLoss(reduction="mean", zero_infinity=True)
net = CRNN(nclass=100).float()
optimizer = optim.Adam(net.parameters(), lr=0.001)
if path:
checkpoint = torch.load(path)
net.load_state_dict(checkpoint["model_state_dict"])
optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
epoch = checkpoint["epoch"]
loss = checkpoint["loss"]
print(f"model current epoch: {epoch} with loss: {loss}")
# loop over the dataset multiple times
for epoch in range(1, 1000):
running_loss = 0.0
loop = tqdm(range(100))
for i in loop:
data = next(dataset)
images = data["the_inputs"]
labels = data["the_labels"]
input_length = data["input_length"]
label_length = data["label_length"]
targets = data["targets"]
# print("target", targets)
# print("target l", targets.size())
# print("label_l", label_length)
# print("label_l l", label_length.size())
# print("pred_l", input_length)
# print("pred_l l", input_length.size())
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(images.float())
# print(outputs[8, 0, :])
# print(outputs[:, 0, :])
# print(outputs.size())
loss = criterion(outputs, labels, input_length, label_length)
# print(loss.item())
loss.backward()
optimizer.step()
running_loss += loss.item()
loop.set_postfix(epoch=epoch, loss=(running_loss / (i + 1)))
# print(f"Epoch: {epoch} | Loss: {running_loss/100}")
torch.save(
{
"epoch": epoch,
"model_state_dict": net.state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
"loss": running_loss,
},
"checkpoint5.pt",
)
print("Finished Training")
def train_cs2s(path=None):
alphabet = string.printable
nclass = len(alphabet)
writer = SummaryWriter()
dataset = FakeTextImageGenerator(batch_size=4).iter()
criterion = CrossEntropyLoss(ignore_index=97)
encoder = Encoder(512, 512, 1, 0)
decoder = Decoder(512, 100, 100, 1, 0)
net = ConvSeq2Seq(encoder, decoder, nclass=nclass).float()
optimizer = optim.Adam(net.parameters(), lr=0.003)
if path:
net2 = CRNN(nclass=100).float()
checkpoint = torch.load(path)
net2.load_state_dict(checkpoint["model_state_dict"])
# optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
# epoch = checkpoint["epoch"]
# loss = checkpoint["loss"]
# print(f"model current epoch: {epoch} with loss: {loss}")
print(net2)
net.conv1.load_state_dict(net2.conv1.state_dict())
net.conv2.load_state_dict(net2.conv2.state_dict())
net.conv3.load_state_dict(net2.conv3.state_dict())
net.conv4.load_state_dict(net2.conv4.state_dict())
net.conv5.load_state_dict(net2.conv5.state_dict())
net.conv6.load_state_dict(net2.conv6.state_dict())
net.conv7.load_state_dict(net2.conv7.state_dict())
net.train()
# loop over the dataset multiple times
step = 0
for epoch in range(1, 1000):
running_loss = 0.0
loop = tqdm(range(100))
for i in loop:
data = next(dataset)
images = data["the_inputs"]
labels = data["the_labels"]
input_length = data["input_length"]
label_length = data["label_length"]
targets = data["targets"]
# print("target", targets)
# print("target l", targets.size())
# print("label_l", label_length)
# print("label_l l", label_length.size())
# print("pred_l", input_length)
# print("pred_l l", input_length.size())
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(images.float(), labels, 0.5)
# permute batchsize and seq_len dim to match labels when using .view(-1, output.size()[2])
outputs = outputs.permute(1, 0, 2)
# print(outputs[8, 0, :])
# print(outputs[:, 0, :])
# print(outputs.size())
# print(labels.size())
output_argmax = outputs.argmax(2)
# print(output_argmax.view(-1))
# print(labels.reshape(-1))
loss = criterion(outputs.reshape(-1, 100), labels.reshape(-1))
writer.add_scalar("loss", loss.item(), step)
step += 1
loss.backward()
# torch.nn.utils.clip_grad_norm_(net.parameters(), 1)
optimizer.step()
running_loss += loss.item()
loop.set_postfix(epoch=epoch, Loss=(running_loss / (i + 1)))
# print(f"Epoch: {epoch} | Loss: {running_loss/100}")
torch.save(
{
"epoch": epoch,
"model_state_dict": net.state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
"loss": running_loss,
},
"cs2s_good.pt",
)
torch.save(net, "model_test_pretrained.pt")
print("Finished Training")
def train_cs2satt(path=None):
writer = SummaryWriter()
dataset = FakeTextImageGenerator(batch_size=8).iter()
criterion = CrossEntropyLoss(ignore_index=97)
net = ConvSeq2SeqAtt(nclass=100).float()
optimizer = optim.Adam(net.parameters(), lr=3e-4)
if path:
checkpoint = torch.load(path)
net.load_state_dict(checkpoint["model_state_dict"])
optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
epoch = checkpoint["epoch"]
loss = checkpoint["loss"]
print(f"model current epoch: {epoch} with loss: {loss}")
net.train()
# loop over the dataset multiple times
step = 0
for epoch in range(1, 1000):
running_loss = 0.0
loop = tqdm(range(100))
for i in loop:
data = next(dataset)
images = data["the_inputs"]
labels = data["the_labels"]
input_length = data["input_length"]
label_length = data["label_length"]
targets = data["targets"]
# print("target", targets)
# print("target l", targets.size())
# print("label_l", label_length)
# print("label_l l", label_length.size())
# print("pred_l", input_length)
# print("pred_l l", input_length.size())
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(images.float(), labels, 0.5)
# permute batchsize and seq_len dim to match labels when using .view(-1, output.size()[2])
outputs = outputs.permute(1, 0, 2)
# print(outputs[8, 0, :])
# print(outputs[:, 0, :])
# print(outputs.size())
# print(labels.size())
output_argmax = outputs.argmax(2)
# print(output_argmax.view(-1))
# print(labels.reshape(-1))
loss = criterion(outputs.reshape(-1, 100), labels.reshape(-1))
# print(loss.item())
writer.add_scalar("loss", loss.item(), step)
step += 1
loss.backward()
torch.nn.utils.clip_grad_norm_(net.parameters(), 1)
optimizer.step()
running_loss += loss.item()
loop.set_postfix(epoch=epoch, Loss=(running_loss / (i + 1)))
print(f"Epoch: {epoch} | Loss: {running_loss/100}")
torch.save(
{
"epoch": epoch,
"model_state_dict": net.state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
"loss": running_loss,
},
"cs2satt_good.pt",
)
# torch.save(net, "model_test_pretrained.pt")
print("Finished Training")
if __name__ == "__main__":
train_cs2satt("cs2satt_good.pt")
|
import os
import pytest
import yaml
from tests import AsyncMock
from asyncio import Future
from app.utility.file_decryptor import decrypt
@pytest.mark.usefixtures(
'init_base_world'
)
class TestFileService:
def test_save_file(self, loop, file_svc, tmp_path):
filename = "test_file.txt"
payload = b'These are the file contents.'
# Save temporary test file
loop.run_until_complete(file_svc.save_file(filename, payload, tmp_path, encrypt=False))
file_location = tmp_path / filename
# Read file contents from saved file
assert os.path.isfile(file_location)
with open(file_location, "r") as file_contents:
    assert payload.decode("utf-8") == file_contents.read()
def test_create_exfil_sub_directory(self, loop, file_svc):
exfil_dir_name = 'unit-testing-Rocks'
new_dir = loop.run_until_complete(file_svc.create_exfil_sub_directory(exfil_dir_name))
assert os.path.isdir(new_dir)
os.rmdir(new_dir)
def test_read_write_result_file(self, tmpdir, file_svc):
link_id = '12345'
output = 'output testing unit'
# write output data
file_svc.write_result_file(link_id=link_id, output=output, location=tmpdir)
# read output data
output_data = file_svc.read_result_file(link_id=link_id, location=tmpdir)
assert output_data == output
def test_pack_file(self, loop, mocker, tmpdir, file_svc, data_svc):
payload = 'unittestpayload'
payload_content = b'content'
new_payload_content = b'new_content'
packer_name = 'test'
# create temp files
file = tmpdir.join(payload)
file.write(payload_content)
# start mocking up methods
packer = mocker.Mock(return_value=Future())
packer.return_value = packer
packer.pack = AsyncMock(return_value=(payload, new_payload_content))
data_svc.locate = AsyncMock(return_value=[])
module = mocker.Mock()
module.Packer = packer
file_svc.packers[packer_name] = module
file_svc.data_svc = data_svc
file_svc.read_file = AsyncMock(return_value=(payload, payload_content))
file_path, content, display_name = loop.run_until_complete(file_svc.get_file(headers=dict(file='%s:%s' % (packer_name, payload))))
packer.pack.assert_called_once()
assert payload == file_path
assert content == new_payload_content
def test_upload_file(self, loop, file_svc):
upload_dir = loop.run_until_complete(file_svc.create_exfil_sub_directory('test-upload'))
upload_filename = 'uploadedfile.txt'
upload_content = b'this is a test upload file'
loop.run_until_complete(file_svc.save_file(upload_filename, upload_content, upload_dir, encrypt=False))
uploaded_file_path = os.path.join(upload_dir, upload_filename)
assert os.path.isfile(uploaded_file_path)
with open(uploaded_file_path, 'rb') as file:
written_data = file.read()
assert written_data == upload_content
os.remove(uploaded_file_path)
os.rmdir(upload_dir)
def test_encrypt_upload(self, loop, file_svc):
upload_dir = loop.run_until_complete(file_svc.create_exfil_sub_directory('test-encrypted-upload'))
upload_filename = 'encryptedupload.txt'
upload_content = b'this is a test upload file'
loop.run_until_complete(file_svc.save_file(upload_filename, upload_content, upload_dir))
uploaded_file_path = os.path.join(upload_dir, upload_filename)
decrypted_file_path = upload_filename + '_decrypted'
config_to_use = 'conf/default.yml'
with open(config_to_use, encoding='utf-8') as conf:
config = list(yaml.load_all(conf, Loader=yaml.FullLoader))[0]
decrypt(uploaded_file_path, config, output_file=decrypted_file_path)
assert os.path.isfile(decrypted_file_path)
with open(decrypted_file_path, 'rb') as decrypted_file:
decrypted_data = decrypted_file.read()
assert decrypted_data == upload_content
os.remove(uploaded_file_path)
os.remove(decrypted_file_path)
os.rmdir(upload_dir)
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# cbc_decode.py
#
# Minimal program that applies a block cipher to a message
# in CBC (Cipher Block Chaining) mode, in which each block is
# XOR-ed with the ciphertext of the previous block before
# being encrypted.
#
# In our case, a block corresponds to one byte, and the encryption
# algorithm is an exclusive OR with a fixed 8-bit key.
#
# Instructions:
#
# - create the file codice.bin as described in cbc_encode.py
# - decrypt the ciphertext with the command:
# python cbc_decode.py codice.bin 154 decodifica.txt
# - check that decodifica.txt and messaggio.txt are identical.
#
# Note: this code is for demonstration purposes only.
###################
#
# Import packages
#
import sys
######################
#
# Read the input data (ciphertext and key)
#
f = open(sys.argv[1], 'r')
c = f.read()
f.close()
k = int(sys.argv[2])
#########################
#
# Decrypt the ciphertext
#
m = ''
c0 = 0
for i in range(len(c)):
v = ord(c[i])
m = m + chr((v ^ k) ^ c0)
c0 = v
##########################
#
# Write the decrypted message
#
f = open(sys.argv[3], 'w')
f.write(m)
f.close()
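# Illustrative counterpart (not part of the original file): the matching
# encryption step described in the header comment, i.e. each byte is XOR-ed with
# the previous ciphertext byte and then with the key
# (c_i = (m_i XOR c_{i-1}) XOR k, with c_0 = 0). See cbc_encode.py for the
# original version this is meant to mirror.
def cbc_encode(message, key):
    ciphertext = ''
    prev = 0
    for ch in message:
        v = (ord(ch) ^ prev) ^ key
        ciphertext = ciphertext + chr(v)
        prev = v
    return ciphertext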
|
import asyncio
from typing import TYPE_CHECKING
from uvicorn.config import Config
if TYPE_CHECKING: # pragma: no cover
from uvicorn.server import ServerState
async def handle_http(
reader: asyncio.StreamReader,
writer: asyncio.StreamWriter,
server_state: "ServerState",
config: Config,
) -> None:
# Run transport/protocol session from streams.
#
# This is a bit fiddly, so let me explain why we do this in the first place.
#
# This was introduced to switch to the asyncio streams API while retaining our
# existing protocols-based code.
#
# The aim was to:
# * Make it easier to support alternative async libraries (all of which expose
# a streams API, rather than anything similar to asyncio's transports and
# protocols) while keeping the change footprint (and risk) at a minimum.
# * Keep a "fast track" for asyncio that's as efficient as possible, by reusing
# our asyncio-optimized protocols-based implementation.
#
# See: https://github.com/encode/uvicorn/issues/169
# See: https://github.com/encode/uvicorn/pull/869
# Use a future to coordinate between the protocol and this handler task.
# https://docs.python.org/3/library/asyncio-protocol.html#connecting-existing-sockets
loop = asyncio.get_event_loop()
connection_lost = loop.create_future()
# Switch the protocol from the stream reader to our own HTTP protocol class.
protocol = config.http_protocol_class( # type: ignore[call-arg, operator]
config=config,
server_state=server_state,
on_connection_lost=lambda: connection_lost.set_result(True),
)
transport = writer.transport
transport.set_protocol(protocol)
# Asyncio stream servers don't `await` handler tasks (like the one we're currently
# running), so we must make sure exceptions that occur in protocols but outside the
# ASGI cycle (e.g. bugs) are properly retrieved and logged.
# Vanilla asyncio handles exceptions properly out-of-the-box, but uvloop doesn't.
# So we need to attach a callback to handle exceptions ourselves for that case.
# (It's not easy to know which loop we're effectively running on, so we attach the
# callback in all cases. In practice it won't be called on vanilla asyncio.)
task = asyncio.current_task()
assert task is not None
@task.add_done_callback
def retrieve_exception(task: asyncio.Task) -> None:
exc = task.exception()
if exc is None:
return
loop.call_exception_handler(
{
"message": "Fatal error in server handler",
"exception": exc,
"transport": transport,
"protocol": protocol,
}
)
# Hang up the connection so the client doesn't wait forever.
transport.close()
# Kick off the HTTP protocol.
protocol.connection_made(transport)
# Pass any data already in the read buffer.
# The assumption here is that we haven't read any data off the stream reader
# yet: all data that the client might have already sent since the connection has
# been established is in the `_buffer`.
data = reader._buffer # type: ignore
if data:
protocol.data_received(data)
# Let the transport run in the background. When closed, this future will complete
# and we'll exit here.
await connection_lost
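# Hedged wiring sketch (not part of this module): roughly how a stream server
# could hand connections to the handler above. It assumes ServerState() can be
# built with its defaults and that an ASGI app is importable as "app:app";
# uvicorn's own Server class performs this wiring with more care.
#
# import functools
# from uvicorn.server import ServerState
#
# async def serve() -> None:
#     config = Config("app:app")
#     config.load()  # resolves http_protocol_class and loads the ASGI app
#     handler = functools.partial(
#         handle_http, server_state=ServerState(), config=config
#     )
#     server = await asyncio.start_server(handler, host="127.0.0.1", port=8000)
#     async with server:
#         await server.serve_forever()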
|
"""Test for the appeaser strategy."""
import axelrod
from .test_player import TestPlayer
C, D = axelrod.Actions.C, axelrod.Actions.D
class TestAppeaser(TestPlayer):
name = "Appeaser"
player = axelrod.Appeaser
expected_classifier = {
'memory_depth': float('inf'), # Depends on internal memory.
'stochastic': False,
'makes_use_of': set(),
'inspects_source': False,
'manipulates_source': False,
'manipulates_state': False
}
def test_strategy(self):
"""Starts by cooperating."""
self.first_play_test(C)
def test_effect_of_strategy(self):
P1 = axelrod.Appeaser()
P2 = axelrod.Cooperator()
self.assertEqual(P1.strategy(P2), C)
self.responses_test([C], [C], [C, C, C])
self.responses_test([C, D, C, D], [C, C, D], [D])
self.responses_test([C, D, C, D, C], [C, C, D, D], [C])
self.responses_test([C, D, C, D, C, D], [C, C, D, D, D], [D])
|
#!/usr/bin/env python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Custom style
plt.style.use('scientific')
# absolute tolerances for chimera
absolutes = np.array([0.67, 1080000, 0.2, 0.15848931924611134])
# load in gryffin runs with Naive score as objective
df_naive = pd.read_pickle('Optimization/runs/gryffin_runs_naive.pkl')
# make the plot
fig, axes = plt.subplots(nrows=4, ncols=1, sharex=True, figsize=(8, 10))
sns.lineplot(x='eval', y='peak_score', data=df_naive, ax=axes[0], label='Naive Score Included')
axes[0].axhline(absolutes[0], ls='--', linewidth=2, c='k', alpha=0.6)
axes[0].fill_between(df_naive['eval'], absolutes[0], np.amin(df_naive['peak_score']), color='#8C9196', alpha=0.25)
axes[0].set_ylim(0.25, 0.9)
axes[0].set_ylabel('Peak score ', fontsize=15)
axes[0].tick_params(labelsize=13)
axes[0].legend(loc='lower right', ncol=1, fontsize=15)
sns.lineplot(x='eval', y='naive_score', data=df_naive, ax=axes[1])
axes[1].set_yscale('log')
axes[1].axhline(absolutes[1], ls='--', linewidth=2, c='k', alpha=0.6)
axes[1].fill_between(df_naive['eval'], absolutes[1], np.amax(df_naive['naive_score']), color='#8C9196', alpha=0.25)
axes[1].set_ylim(np.amin(df_naive['naive_score']), np.amax(df_naive['naive_score']))
axes[1].set_ylabel('Naive score \n$( \\$ \\cdot (mol \\ target)^{-1}$)', fontsize=15)
axes[1].tick_params(labelsize=13)
sns.lineplot(x='eval', y='spectral_overlap', data=df_naive, ax=axes[2])
axes[2].axhline(absolutes[2], ls='--', linewidth=2, c='k', alpha=0.6)
axes[2].fill_between(df_naive['eval'], absolutes[2], np.amax(df_naive['spectral_overlap']), color='#8C9196', alpha=0.25)
axes[2].set_ylim(0., 0.3)
axes[2].set_ylabel('Spectral \noverlap', fontsize=15)
axes[2].tick_params(labelsize=13)
sns.lineplot(x='eval', y='fluo_rate', data=df_naive, ax=axes[3])
axes[3].axhline(absolutes[3], ls='--', linewidth=2, c='k', alpha=0.6)
axes[3].fill_between(df_naive['eval'], absolutes[3], np.amin(df_naive['fluo_rate']), color='#8C9196', alpha=0.25)
axes[3].set_ylim(0., 0.6)
axes[3].set_ylabel('Fluorescence \nrate (ns$^{-1}$)', fontsize=15)
axes[3].tick_params(labelsize=13)
axes[3].set_xlabel('Number of evaluations', fontsize=15)
for ax in axes:
ax.set_xlim(0, 500)
plt.tight_layout()
plt.savefig('Figure_S18.png', dpi=300)
plt.show()
|
from src.json2df import PubChemBioAssayJsonConverter
c = PubChemBioAssayJsonConverter("./examples", "PUBCHEM400.json")
df = c.get_all_results()
c.save_df(df, "./examples")
c.get_description("./examples")
|
## code simplified from the dca package
import os
import numpy as np
import scanpy.api as sc
import keras
from keras.layers import Input, Dense, Dropout, Activation, BatchNormalization
from keras.models import Model
from keras.objectives import mean_squared_error
from keras import backend as K
import tensorflow as tf
from .loss import NB
from .layers import ConstantDispersionLayer, ColWiseMultLayer
MeanAct = lambda x: tf.clip_by_value(K.exp(x), 1e-5, 1e6)
DispAct = lambda x: tf.clip_by_value(tf.nn.softplus(x), 1e-4, 1e4)
class Autoencoder():
def __init__(self,
input_size,
output_size=None,
hidden_size=(64, 32, 64),
hidden_dropout=0.,
input_dropout=0.,
batchnorm=True,
activation='relu',
init='glorot_uniform',
nonmissing_indicator = None,
debug = False):
self.input_size = input_size
self.output_size = output_size
self.hidden_size = hidden_size
self.hidden_dropout = hidden_dropout
self.input_dropout = input_dropout
self.batchnorm = batchnorm
self.activation = activation
self.init = init
self.loss = None
self.extra_models = {}
self.model = None
self.input_layer = None
self.sf_layer = None
self.debug = debug
self.nonmissing_indicator = nonmissing_indicator
if self.output_size is None:
self.output_size = input_size
if isinstance(self.hidden_dropout, list):
assert len(self.hidden_dropout) == len(self.hidden_size)
else:
self.hidden_dropout = [self.hidden_dropout]*len(self.hidden_size)
def build(self):
self.input_layer = Input(shape=(self.input_size,), name='count')
self.sf_layer = Input(shape=(1,), name='size_factors')
last_hidden = self.input_layer
if self.input_dropout > 0.0:
last_hidden = Dropout(self.input_dropout, name='input_dropout')(last_hidden)
for i, (hid_size, hid_drop) in enumerate(zip(self.hidden_size, self.hidden_dropout)):
center_idx = int(np.floor(len(self.hidden_size) / 2.0))
if i == center_idx:
layer_name = 'center'
stage = 'center' # let downstream know where we are
elif i < center_idx:
layer_name = 'enc%s' % i
stage = 'encoder'
else:
layer_name = 'dec%s' % (i-center_idx)
stage = 'decoder'
last_hidden = Dense(hid_size, activation=None, kernel_initializer=self.init,
name=layer_name)(last_hidden)
if self.batchnorm:
last_hidden = BatchNormalization(center=True, scale=False)(last_hidden)
### TODO: check why scale = False
last_hidden = Activation(self.activation, name='%s_act'%layer_name)(last_hidden)
if hid_drop > 0.0:
last_hidden = Dropout(hid_drop, name='%s_drop'%layer_name)(last_hidden)
self.decoder_output = last_hidden
self.build_output()
def build_output(self):
## For Gaussian loss
self.loss = mean_squared_error
mean = Dense(self.output_size, activation=MeanAct, kernel_initializer=self.init,
name='mean')(self.decoder_output)
output = ColWiseMultLayer(name='output')([mean, self.sf_layer])
# keep unscaled output as an extra model
self.extra_models['mean_norm'] = Model(inputs=self.input_layer, outputs=mean)
self.model = Model(inputs=[self.input_layer, self.sf_layer], outputs=output)
######## ADD WEIGHTS ###########
def load_weights(self, filename):
self.model.load_weights(filename)
def predict(self, adata, colnames=None, dimreduce=True, reconstruct=True, error=True):
res = {}
colnames = adata.var_names.values if colnames is None else colnames
rownames = adata.obs_names.values
# print('Calculating reconstructions...')
res['mean_norm'] = self.extra_models['mean_norm'].predict(adata.X)
return res
class NBConstantDispAutoencoder(Autoencoder):
def build_output(self):
mean = Dense(self.output_size, activation=MeanAct, kernel_initializer=self.init,
name='mean')(self.decoder_output)
# Plug in dispersion parameters via fake dispersion layer
disp = ConstantDispersionLayer(name='dispersion')
mean = disp(mean)
output = ColWiseMultLayer(name='output')([mean, self.sf_layer])
nb = NB(disp.theta_exp, nonmissing_indicator = self.nonmissing_indicator)
self.extra_models['dispersion'] = lambda :K.function([], [nb.theta])([])[0].squeeze()
self.extra_models['mean_norm'] = Model(inputs=self.input_layer, outputs=mean)
self.model = Model(inputs=[self.input_layer, self.sf_layer], outputs=output)
def predict(self, adata, colnames=None, **kwargs):
colnames = adata.var_names.values if colnames is None else colnames
rownames = adata.obs_names.values
res = super().predict(adata, colnames=colnames, **kwargs)
res['dispersion'] = self.extra_models['dispersion']()
return res
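# Hedged usage sketch (not part of the original module): building the Gaussian
# autoencoder defined above and fitting it on a toy count matrix. The random
# data and the simple size-factor normalisation are placeholders meant only to
# show the expected inputs ([counts, size_factors]) and target (counts).
#
# ae = Autoencoder(input_size=200, hidden_size=(64, 32, 64))
# ae.build()
# ae.model.compile(optimizer='adam', loss=ae.loss)
# counts = np.random.poisson(1.0, size=(500, 200)).astype('float32')
# size_factors = counts.sum(axis=1, keepdims=True) / counts.sum(axis=1).mean()
# ae.model.fit([counts, size_factors], counts, epochs=2, batch_size=32)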
|
from flask_wtf import FlaskForm
from wtforms import (
StringField,
PasswordField,
SubmitField,
SelectMultipleField,
BooleanField,
)
try:
from wtforms.fields import EmailField
except ImportError:
from wtforms.fields.html5 import EmailField
from wtforms.validators import DataRequired, Length, Email, Regexp, EqualTo
from wtforms import ValidationError
from seamm_datastore.database.models import User, Group
def _validate_group(self, field):
if Group.query.filter(Group.name == field.data).first():
raise ValidationError(
f"Group name '{field.data}' already in use. Please pick a different group "
"name."
)
def _validate_user_delete(self, field):
raise ValidationError("Input username does not match user ID.")
def _validate_group_delete(self, field):
raise ValidationError("Input group name does not match group ID.")
def _validate_username(self, field):
if User.query.filter(User.username == field.data).first():
raise ValidationError(
f"Username {field.data} already in use. Please pick a different username"
)
def _validate_email(self, field):
if User.query.filter(User.email == field.data).first():
raise ValidationError(
f"Email address {field.data} already in use. Please pick a different email "
"address."
)
def _password_none_or_usual(self, field):
"""
This validator is for the manage user form. Either the password is not changed
(len 0), or the password is changed and should meet the usual length requirement.
"""
if 0 < len(field.data) < 7:
raise ValidationError("Passwords must be at least 7 characters in length.")
# Common username field
_username = StringField(
"Username",
validators=[
_validate_username,
DataRequired(),
Length(3, 64),
Regexp(
"^[A-Za-z][A-Za-z0-9_.]*$",
0,
"Usernames must have only letters, numbers, dots or " "underscores",
),
],
)
class CreateUsernamePasswordForm(FlaskForm):
"""
A subform for creating a new username and password.
"""
username = _username
password2 = PasswordField("Confirm password", validators=[DataRequired()])
password = PasswordField(
"Password",
validators=[
DataRequired(),
Length(min=7),
EqualTo("password2", message="Passwords must match."),
],
)
class EditUsernamePasswordForm(FlaskForm):
"""
A subform for editing username and password.
"""
username = _username
password = PasswordField(
"Password",
validators=[
_password_none_or_usual,
EqualTo("password2", message="Passwords must match."),
],
)
password2 = PasswordField("Confirm Password")
class ContactInformationForm(FlaskForm):
"""
A form for adding or updating contact information.
"""
first_name = StringField("First Name", validators=[Length(2, 64)])
last_name = StringField("Last Name", validators=[Length(2, 64)])
email = EmailField(
"Email Address",
validators=[
DataRequired(),
Email(),
_validate_email,
],
)
class CreateUserForm(CreateUsernamePasswordForm, ContactInformationForm):
"""
Form for adding or updating a user
"""
roles = SelectMultipleField("User Roles", choices=[])
groups = SelectMultipleField("User Groups", choices=[])
submit = SubmitField("Create New User")
class ManageUserFormAdmin(EditUsernamePasswordForm, ContactInformationForm):
"""
Form for adding or updating a user
"""
roles = SelectMultipleField("User Roles", choices=[])
groups = SelectMultipleField("User Groups", choices=[])
submit = SubmitField("Update User Information")
class EditGroupForm(FlaskForm):
"""
Form for adding or editing a group
"""
group_name = StringField(
"Group Name", validators=[Length(2, 64), DataRequired(), _validate_group]
)
group_members = SelectMultipleField("Group Members", choices=[])
submit = SubmitField("Submit")
class DeleteUserForm(FlaskForm):
"""
Form for deleting a user.
"""
username = _username
confirm = BooleanField("Confirm")
submit = SubmitField("Delete User")
class DeleteGroupForm(FlaskForm):
"""
Form for deleting a user.
"""
group_name = StringField("Group Name", validators=[Length(2, 64), DataRequired()])
confirm = BooleanField("Confirm")
submit = SubmitField("Delete Group")
|
from collections import namedtuple
from .api import APIItems
# Represents a CIE 1931 XY coordinate pair.
XYPoint = namedtuple("XYPoint", ["x", "y"])
# Represents the Gamut of a light.
GamutType = namedtuple("GamutType", ["red", "green", "blue"])
class Lights(APIItems):
"""Represents Hue Lights.
https://developers.meethue.com/documentation/lights-api
"""
def __init__(self, logger, raw, v2_resources, request):
super().__init__(logger, raw, v2_resources, request, "lights", Light)
class Light:
"""Represents a Hue light."""
ITEM_TYPE = "lights"
def __init__(self, id, raw, v2_resources, request):
self.id = id
self.raw = raw
self._request = request
@property
def uniqueid(self):
return self.raw["uniqueid"]
@property
def manufacturername(self):
return self.raw["manufacturername"]
@property
def modelid(self):
return self.raw["modelid"]
@property
def productname(self):
# productname added in Bridge API 1.24 (published 03/05/2018)
return self.raw.get("productname")
@property
def name(self):
return self.raw["name"]
@property
def state(self):
return self.raw["state"]
@property
def type(self):
return self.raw["type"]
@property
def swversion(self):
"""Software version of the light."""
return self.raw["swversion"]
@property
def swupdatestate(self):
"""Software update state of the light."""
return self.raw.get("swupdate", {}).get("state")
@property
def controlcapabilities(self):
"""Capabilities that the light has to control it."""
return self.raw.get("capabilities", {}).get("control", {})
@property
def colorgamuttype(self):
"""The color gamut type of the light."""
light_spec = self.controlcapabilities
return light_spec.get("colorgamuttype", "None")
@property
def colorgamut(self):
"""The color gamut information of the light."""
try:
light_spec = self.controlcapabilities
gtup = tuple([XYPoint(*x) for x in light_spec["colorgamut"]])
color_gamut = GamutType(*gtup)
except KeyError:
color_gamut = None
return color_gamut
def process_update_event(self, update):
state = dict(self.state)
if color := update.get("color"):
state["xy"] = [color["xy"]["x"], color["xy"]["y"]]
if ct := update.get("color_temperature"):
state["ct"] = ct["mirek"]
if "on" in update:
state["on"] = update["on"]["on"]
if dimming := update.get("dimming"):
state["bri"] = int(dimming["brightness"] / 100 * 254)
state["reachable"] = True
self.raw = {**self.raw, "state": state}
async def set_state(
self,
on=None,
bri=None,
hue=None,
sat=None,
xy=None,
ct=None,
alert=None,
effect=None,
transitiontime=None,
bri_inc=None,
sat_inc=None,
hue_inc=None,
ct_inc=None,
xy_inc=None,
):
"""Change state of a light."""
data = {
key: value
for key, value in {
"on": on,
"bri": bri,
"hue": hue,
"sat": sat,
"xy": xy,
"ct": ct,
"alert": alert,
"effect": effect,
"transitiontime": transitiontime,
"bri_inc": bri_inc,
"sat_inc": sat_inc,
"hue_inc": hue_inc,
"ct_inc": ct_inc,
"xy_inc": xy_inc,
}.items()
if value is not None
}
await self._request("put", "lights/{}/state".format(self.id), json=data)
|
import app
gameApp = app.app()
gameApp.Run()
|
# Copyright 2019-2020 The Lux Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Callable, Union
from lux.vis.Clause import Clause
from lux.utils.utils import check_import_lux_widget
import lux
import warnings
class Vis:
"""
Vis Object represents a collection of fully fleshed out specifications required for data fetching and visualization.
"""
def __init__(self, intent, source=None, title="", score=0.0):
self._intent = intent # user's original intent to Vis
self._inferred_intent = intent # re-written, expanded version of user's original intent
self._source = source # original data attached to the Vis
self._vis_data = None # processed data for Vis (e.g., selected, aggregated, binned)
self._code = None
self._mark = ""
self._min_max = {}
self._postbin = None
self.title = title
self.score = score
self.refresh_source(self._source)
def __repr__(self):
all_clause = all([isinstance(unit, lux.Clause) for unit in self._inferred_intent])
if all_clause:
filter_intents = None
channels, additional_channels = [], []
for clause in self._inferred_intent:
if hasattr(clause, "value"):
if clause.value != "":
filter_intents = clause
if hasattr(clause, "attribute"):
if clause.attribute != "":
if clause.aggregation != "" and clause.aggregation is not None:
attribute = f"{clause._aggregation_name.upper()}({clause.attribute})"
elif clause.bin_size > 0:
attribute = f"BIN({clause.attribute})"
else:
attribute = clause.attribute
if clause.channel == "x":
channels.insert(0, [clause.channel, attribute])
elif clause.channel == "y":
channels.insert(1, [clause.channel, attribute])
elif clause.channel != "":
additional_channels.append([clause.channel, attribute])
channels.extend(additional_channels)
str_channels = ""
for channel in channels:
str_channels += f"{channel[0]}: {channel[1]}, "
if filter_intents:
return f"<Vis ({str_channels[:-2]} -- [{filter_intents.attribute}{filter_intents.filter_op}{filter_intents.value}]) mark: {self._mark}, score: {self.score} >"
else:
return f"<Vis ({str_channels[:-2]}) mark: {self._mark}, score: {self.score} >"
else:
# When Vis not compiled (e.g., when self._source not populated), print original intent
return f"<Vis ({str(self._intent)}) mark: {self._mark}, score: {self.score} >"
@property
def data(self):
return self._vis_data
@property
def code(self):
return self._code
@property
def mark(self):
return self._mark
@property
def min_max(self):
return self._min_max
@property
def intent(self):
return self._intent
@intent.setter
def intent(self, intent: List[Clause]) -> None:
self.set_intent(intent)
def set_intent(self, intent: List[Clause]) -> None:
"""
Sets the intent of the Vis and refresh the source based on the new intent
Parameters
----------
intent : List[Clause]
Query specifying the desired VisList
"""
self._intent = intent
self.refresh_source(self._source)
def _repr_html_(self):
from IPython.display import display
check_import_lux_widget()
import luxwidget
if self.data is None:
raise Exception(
"No data is populated in Vis. In order to generate data required for the vis, use the 'refresh_source' function to populate the Vis with a data source (e.g., vis.refresh_source(df))."
)
else:
from lux.core.frame import LuxDataFrame
widget = luxwidget.LuxWidget(
currentVis=LuxDataFrame.current_vis_to_JSON([self]),
recommendations=[],
intent="",
message="",
)
display(widget)
def get_attr_by_attr_name(self, attr_name):
return list(filter(lambda x: x.attribute == attr_name, self._inferred_intent))
def get_attr_by_channel(self, channel):
spec_obj = list(
filter(
lambda x: x.channel == channel and x.value == "" if hasattr(x, "channel") else False,
self._inferred_intent,
)
)
return spec_obj
def get_attr_by_data_model(self, dmodel, exclude_record=False):
if exclude_record:
return list(
filter(
lambda x: x.data_model == dmodel and x.value == ""
if x.attribute != "Record" and hasattr(x, "data_model")
else False,
self._inferred_intent,
)
)
else:
return list(
filter(
lambda x: x.data_model == dmodel and x.value == ""
if hasattr(x, "data_model")
else False,
self._inferred_intent,
)
)
def get_attr_by_data_type(self, dtype):
return list(
filter(
lambda x: x.data_type == dtype and x.value == "" if hasattr(x, "data_type") else False,
self._inferred_intent,
)
)
def remove_filter_from_spec(self, value):
new_intent = list(filter(lambda x: x.value != value, self._inferred_intent))
self.set_intent(new_intent)
def remove_column_from_spec(self, attribute, remove_first: bool = False):
"""
Removes an attribute from the Vis's clause
Parameters
----------
attribute : str
attribute to be removed
remove_first : bool, optional
Boolean flag to determine whether to remove all instances of the attribute or only one (first) instance, by default False
"""
if not remove_first:
new_inferred = list(filter(lambda x: x.attribute != attribute, self._inferred_intent))
self._inferred_intent = new_inferred
self._intent = new_inferred
elif remove_first:
new_inferred = []
skip_check = False
for i in range(0, len(self._inferred_intent)):
if self._inferred_intent[i].value == "": # clause is type attribute
column_spec = []
column_names = self._inferred_intent[i].attribute
# if only one variable in a column, columnName results in a string and not a list so
# you need to differentiate the cases
if isinstance(column_names, list):
for column in column_names:
if (column != attribute) or skip_check:
column_spec.append(column)
elif remove_first:
remove_first = True
new_inferred.append(Clause(column_spec))
else:
if column_names != attribute or skip_check:
new_inferred.append(Clause(attribute=column_names))
elif remove_first:
skip_check = True
else:
new_inferred.append(self._inferred_intent[i])
self._intent = new_inferred
self._inferred_intent = new_inferred
def to_Altair(self, standalone=False) -> str:
"""
Generate minimal Altair code to visualize the Vis
Parameters
----------
standalone : bool, optional
Flag to determine if outputted code uses user-defined variable names or can be run independently, by default False
Returns
-------
str
String version of the Altair code. Need to print out the string to apply formatting.
"""
from lux.vislib.altair.AltairRenderer import AltairRenderer
renderer = AltairRenderer(output_type="Altair")
self._code = renderer.create_vis(self, standalone)
return self._code
def to_matplotlib(self) -> str:
"""
Generate minimal Matplotlib code to visualize the Vis
Returns
-------
str
String version of the Matplotlib code. Need to print out the string to apply formatting.
"""
from lux.vislib.matplotlib.MatplotlibRenderer import MatplotlibRenderer
renderer = MatplotlibRenderer(output_type="matplotlib")
self._code = renderer.create_vis(self)
return self._code
def to_matplotlib_code(self) -> str:
"""
Generate minimal Matplotlib code to visualize the Vis
Returns
-------
str
String version of the Matplotlib code. Need to print out the string to apply formatting.
"""
from lux.vislib.matplotlib.MatplotlibRenderer import MatplotlibRenderer
renderer = MatplotlibRenderer(output_type="matplotlib_code")
self._code = renderer.create_vis(self)
return self._code
def to_VegaLite(self, prettyOutput=True) -> Union[dict, str]:
"""
Generate minimal Vega-Lite code to visualize the Vis
Returns
-------
Union[dict,str]
String or Dictionary of the VegaLite JSON specification
"""
import json
from lux.vislib.altair.AltairRenderer import AltairRenderer
renderer = AltairRenderer(output_type="VegaLite")
self._code = renderer.create_vis(self)
if prettyOutput:
return (
"** Remove this comment -- Copy Text Below to Vega Editor(vega.github.io/editor) to visualize and edit **\n"
+ json.dumps(self._code, indent=2)
)
else:
return self._code
def to_code(self, language="vegalite", **kwargs):
"""
Export Vis object to code specification
Parameters
----------
language : str, optional
choice of target language to produce the visualization code in, by default "vegalite"
Returns
-------
spec:
visualization specification corresponding to the Vis object
"""
if language == "vegalite":
return self.to_VegaLite(**kwargs)
elif language == "altair":
return self.to_Altair(**kwargs)
elif language == "matplotlib":
return self.to_matplotlib()
elif language == "matplotlib_code":
return self.to_matplotlib_code()
else:
warnings.warn(
"Unsupported plotting backend. Lux currently only support 'altair', 'vegalite', or 'matplotlib'",
stacklevel=2,
)
def refresh_source(self, ldf): # -> Vis:
"""
Loading the source data into the Vis by instantiating the specification and
populating the Vis based on the source data, effectively "materializing" the Vis.
Parameters
----------
ldf : LuxDataframe
Input Dataframe to be attached to the Vis
Returns
-------
Vis
Complete Vis with fully-specified fields
See Also
--------
lux.Vis.VisList.refresh_source
Note
----
Function derives a new _inferred_intent by instantiating the intent specification on the new data
"""
if ldf is not None:
from lux.processor.Parser import Parser
from lux.processor.Validator import Validator
from lux.processor.Compiler import Compiler
self.check_not_vislist_intent()
ldf.maintain_metadata()
self._source = ldf
self._inferred_intent = Parser.parse(self._intent)
Validator.validate_intent(self._inferred_intent, ldf)
vlist = [Compiler.compile_vis(ldf, self)]
lux.config.executor.execute(vlist, ldf)
# Copying properties over since we can not redefine `self` within class function
if len(vlist) > 0:
vis = vlist[0]
self.title = vis.title
self._mark = vis._mark
self._inferred_intent = vis._inferred_intent
self._vis_data = vis.data
self._min_max = vis._min_max
self._postbin = vis._postbin
Compiler.compile_vis(ldf, self)
lux.config.executor.execute([self], ldf)
def check_not_vislist_intent(self):
syntaxMsg = (
"The intent that you specified corresponds to more than one visualization. "
"Please replace the Vis constructor with VisList to generate a list of visualizations. "
"For more information, see: https://lux-api.readthedocs.io/en/latest/source/guide/vis.html#working-with-collections-of-visualization-with-vislist"
)
for i in range(len(self._intent)):
clause = self._intent[i]
if isinstance(clause, str):
if "|" in clause or "?" in clause:
raise TypeError(syntaxMsg)
if isinstance(clause, list):
raise TypeError(syntaxMsg)
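# Hedged usage sketch (not part of the original module): building a Vis from an
# intent and a dataframe, then exporting code via the public methods defined
# above. The column names and dataframe are placeholders, and it assumes that
# importing lux leaves the dataframe compatible with refresh_source().
#
# import pandas as pd
# df = pd.DataFrame({"Horsepower": [130, 165, 150], "MilesPerGallon": [18, 15, 16]})
# vis = Vis(["Horsepower", "MilesPerGallon"], source=df)
# print(vis.to_code(language="vegalite"))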
|
"""
Module: 'flowlib.m5mqtt' on M5 FlowUI v1.4.0-beta
"""
# MCU: (sysname='esp32', nodename='esp32', release='1.11.0', version='v1.11-284-g5d8e1c867 on 2019-08-30', machine='ESP32 module with ESP32')
# Stubber: 1.3.1 - updated
from typing import Any
class M5mqtt:
""""""
def _daemonTask(self, *argv) -> Any:
pass
def _msg_deal(self, *argv) -> Any:
pass
def _on_data(self, *argv) -> Any:
pass
def on_connect(self, *argv) -> Any:
pass
def publish(self, *argv) -> Any:
pass
def start(self, *argv) -> Any:
pass
def subscribe(self, *argv) -> Any:
pass
def unsubscribe(self, *argv) -> Any:
pass
class MQTTClient:
""""""
def _clean_sock_buffer(self, *argv) -> Any:
pass
def _recv_len(self, *argv) -> Any:
pass
def _send_str(self, *argv) -> Any:
pass
def check_msg(self, *argv) -> Any:
pass
def connect(self, *argv) -> Any:
pass
def disconnect(self, *argv) -> Any:
pass
def lock_msg_rec(self, *argv) -> Any:
pass
def ping(self, *argv) -> Any:
pass
def publish(self, *argv) -> Any:
pass
def set_block(self, *argv) -> Any:
pass
def set_callback(self, *argv) -> Any:
pass
def set_last_will(self, *argv) -> Any:
pass
def socket_connect(self, *argv) -> Any:
pass
def subscribe(self, *argv) -> Any:
pass
def topic_get(self, *argv) -> Any:
pass
def topic_msg_get(self, *argv) -> Any:
pass
def unlock_msg_rec(self, *argv) -> Any:
pass
def wait_msg(self, *argv) -> Any:
pass
_thread = None
def autoConnect():
pass
lcd = None
m5base = None
machine = None
def reconnect():
pass
time = None
wlan_sta = None
|