#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, MaiaEnvironment
from downward.reports.compare import ComparativeReport
from common_setup import IssueConfig, IssueExperiment, is_test_run
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue717-v2"]
CONFIGS = [
    IssueConfig(
        "lama-first-original", [], driver_options=["--alias", "lama-first"]),
    IssueConfig(
        "lama-first-new", [], driver_options=["--alias", "lama-first-new"]),
    IssueConfig(
        "lama-original", [], driver_options=["--alias", "seq-sat-lama-2011"]),
    IssueConfig(
        "lama-new", [], driver_options=["--alias", "seq-sat-lama-2011-new"]),
]
SUITE = [
'airport', 'assembly', 'barman-sat11-strips', 'barman-sat14-strips',
'blocks', 'cavediving-14-adl', 'childsnack-sat14-strips',
'citycar-sat14-adl', 'depot', 'driverlog', 'elevators-sat08-strips',
'elevators-sat11-strips', 'floortile-sat11-strips',
'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid',
'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98',
'maintenance-sat14-adl', 'miconic', 'miconic-fulladl',
'miconic-simpleadl', 'movie', 'mprime', 'mystery',
'nomystery-sat11-strips', 'openstacks', 'openstacks-sat08-adl',
'openstacks-sat08-strips', 'openstacks-sat11-strips',
'openstacks-sat14-strips', 'openstacks-strips', 'optical-telegraphs',
'parcprinter-08-strips', 'parcprinter-sat11-strips',
'parking-sat11-strips', 'parking-sat14-strips', 'pathways',
'pathways-noneg', 'pegsol-08-strips', 'pegsol-sat11-strips',
'philosophers', 'pipesworld-notankage', 'pipesworld-tankage',
'psr-large', 'psr-middle', 'psr-small', 'rovers', 'satellite',
'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule',
'sokoban-sat08-strips', 'sokoban-sat11-strips', 'storage',
'tetris-sat14-strips', 'thoughtful-sat14-strips',
'tidybot-sat11-strips', 'tpp', 'transport-sat08-strips',
'transport-sat11-strips', 'transport-sat14-strips', 'trucks',
'trucks-strips', 'visitall-sat11-strips', 'visitall-sat14-strips',
'woodworking-sat08-strips', 'woodworking-sat11-strips', 'zenotravel']
ENVIRONMENT = MaiaEnvironment(
priority=0, email="cedric.geissmann@unibas.ch")
if is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
algorithm_pairs = [
('issue717-v2-lama-first-original', 'issue717-v2-lama-first-new', 'Diff lama-first'),
('issue717-v2-lama-original', 'issue717-v2-lama-new', 'Diff lama')]
exp.add_report(ComparativeReport(
algorithm_pairs,
attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES))
exp.add_scatter_plot_step(attributes=["total_time", "memory"])
exp.run_steps()
import os
#import numpy as np
import pyqmc.mc as mc
import sys
import h5py
import jax
import jax.numpy as jnp
import numpy as np
from functools import partial
def limdrift(g, tau, acyrus=0.25):
"""
Use Cyrus Umrigar's algorithm to limit the drift near nodes.
Args:
g: a [nconf,ndim] vector
tau: time step
acyrus: the maximum magnitude
Returns:
        The vector with the drift limit applied and multiplied by the effective time step.
"""
tot = jnp.linalg.norm(g, axis=1) * acyrus
mask = tot > 1e-8
taueff = jnp.ones(tot.shape) * tau
taueff = jnp.where(
mask,
(jnp.sqrt(1 + 2 * tau * tot) - 1) / tot,
taueff
)
return g * taueff[:, jnp.newaxis]
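# A minimal sketch of the limiter's effect (illustrative numbers, not from a
# real run): a small gradient keeps an effective time step of ~tau, while a
# large gradient near a node is damped below tau.
#   g = jnp.array([[0.1, 0.0, 0.0], [500.0, 0.0, 0.0]])
#   limdrift(g, tau=0.02)  # row 0 ~ g * 0.02; row 1 uses a smaller taueff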
def limdrift_cutoff(g, tau, cutoff=1):
"""
Limit a vector to have a maximum magnitude of cutoff while maintaining direction
Args:
        g: a [nconf,ndim] vector
        tau: time step
        cutoff: the maximum magnitude
Returns:
The vector with the cut off applied and multiplied by tau.
"""
return mc.limdrift(g, cutoff) * tau
#@partial(jax.jit, static_argnums=(1,9,10,11))
def dmc_step(
key,
wf,
configs,
df,
weights,
tstep,
branchcut_start,
branchcut_stop,
eref,
accumulators,
ekey,
drift_limiter,
):
nconfig, nelec = configs.shape[0:2]
#wf.recompute(configs)
eloc = accumulators[ekey[0]](configs, wf)[ekey[1]].real
acc = jnp.zeros(nelec)
for e in range(nelec):
# Propose move
grad = drift_limiter(jnp.real(wf["gradient"](configs, e, configs[:, e]).T), tstep)
key, subkey = jax.random.split(key)
gauss = jax.random.normal(subkey, (nconfig, 3))*jnp.sqrt(tstep)
newepos = configs[:, e, :] + gauss + grad
#newepos = configs.make_irreducible(e, eposnew)
# Compute reverse move
new_grad = drift_limiter(jnp.real(wf["gradient"](configs, e, newepos).T), tstep)
forward = jnp.sum(gauss ** 2, axis=1)
backward = jnp.sum((gauss + grad + new_grad) ** 2, axis=1)
# forward = np.sum((configs[:, e, :] + grad - eposnew) ** 2, axis=1)
# backward = np.sum((eposnew + new_grad - configs[:, e, :]) ** 2, axis=1)
t_prob = jnp.exp(1 / (2 * tstep) * (forward - backward))
# Acceptance -- fixed-node: reject if wf changes sign
wfratio = wf["testvalue"](configs, e, newepos)
ratio = jnp.abs(wfratio) ** 2 * t_prob
if not wf["iscomplex"]:
ratio *= jnp.sign(wfratio)
key, subkey = jax.random.split(key)
accept = ratio > jax.random.uniform(subkey, (nconfig,))
# Update wave function
        proposed = configs.at[:, e, :].set(newepos)
configs = jnp.where(accept[:, jnp.newaxis, jnp.newaxis], proposed, configs)
#wf.updateinternals(e, newepos, mask=accept)
        acc = acc.at[e].set(jnp.mean(accept))
# weights
energydat = accumulators[ekey[0]](configs, wf)
elocnew = energydat[ekey[1]].real
tdamp = limit_timestep(
weights, elocnew, eloc, eref, branchcut_start, branchcut_stop
)
wmult = jnp.exp(-tstep * 0.5 * tdamp * (eloc + elocnew - 2 * eref))
wmult = jnp.where(wmult > 2.0, 2.0, wmult)
weights *= wmult
wavg = jnp.mean(weights)
avg = {}
for k, accumulator in accumulators.items():
dat = accumulator(configs, wf) if k != ekey[0] else energydat
for m, res in dat.items():
avg[k + m] = jnp.einsum("...i,i...->...", weights, res) / (
nconfig * wavg
)
avg["weight"] = wavg
avg["acceptance"] = jnp.mean(acc)
df.append(avg)
return df, configs, weights
def dmc_propagate(
key,
wf,
configs,
weights,
tstep,
branchcut_start,
branchcut_stop,
eref,
nsteps=5,
accumulators=None,
ekey=("energy", "total"),
drift_limiter=limdrift,
):
"""
Propagate DMC without branching
Args:
        wf: A Wave function-like object. This port accesses it as a dict with "gradient", "testvalue" and "iscomplex" entries, as well as anything (such as a laplacian) used by accumulators
configs: Configs object, (nconfig, nelec, 3) - initial coordinates to start calculation.
weights: (nconfig,) - initial weights to start calculation
tstep: Time step for move proposals. Introduces time step error.
nsteps: number of DMC steps to take
accumulators: A dictionary of functor objects that take in (coords,wf) and return a dictionary of quantities to be averaged. np.mean(quantity,axis=0) should give the average over configurations. If none, a default energy accumulator will be used.
        ekey: tuple of strings; energy is needed for DMC weights. Access total energy by accumulators[ekey[0]](configs, wf)[ekey[1]]
drift_limiter: a function that takes a gradient and a cutoff and returns an adjusted gradient
Returns: (df,coords,weights)
df: A list of dictionaries nstep long that contains all results from the accumulators.
coords: The final coordinates from this calculation.
weights: The final weights from this calculation
"""
assert accumulators is not None, "Need an energy accumulator for DMC"
df = []
for _ in range(nsteps):
key, subkey = jax.random.split(key)
df, configs, weights = dmc_step(
subkey,
wf,
configs,
df,
weights,
tstep,
branchcut_start,
branchcut_stop,
eref,
accumulators,
ekey,
drift_limiter,
)
df_ret = {}
weight = jnp.asarray([d["weight"] for d in df])
avg_weight = weight / jnp.mean(weight)
for k in df[0].keys():
df_ret[k] = jnp.mean(jnp.array([d[k] * w for d, w in zip(df, avg_weight)]), axis=0)
df_ret["weight"] = jnp.mean(weight)
return df_ret, configs, weights
def limit_timestep(weights, elocnew, elocold, eref, start, stop):
"""
Stabilizes weights by scaling down the effective tstep if the local energy is too far from eref.
Args:
weights: (nconfigs,) array
walker weights
elocnew: (nconfigs,) array
current local energy of each walker
elocold: (nconfigs,) array
previous local energy of each walker
eref: scalar
reference energy that fixes normalization
start: scalar
number of sigmas to start damping tstep
stop: scalar
number of sigmas where tstep becomes zero
Return:
tdamp: scalar
Damping factor to multiply timestep; always between 0 and 1. The damping factor is
1 if eref-eloc < branchcut_start*sigma,
0 if eref-eloc > branchcut_stop*sigma,
        decreases linearly in between.
"""
# JAX does not like this kind of stuff!
#if start is None or stop is None:
# return 1
#assert (
# stop > start
#), "stabilize weights requires stop>start. Invalid stop={0}, start={1}".format(
# stop, start
#)
eloc = jnp.stack([elocnew, elocold])
fbet = jnp.amax(eref - eloc, axis=0)
    return jnp.clip(1 - (fbet - start) / (stop - start), 0, 1)
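# Worked example (illustrative numbers): with start=3 and stop=6, a walker whose
# worst deviation fbet = max(eref - eloc) equals 4.5 gets
# tdamp = 1 - (4.5 - 3) / (6 - 3) = 0.5, i.e. half the nominal time step.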
def branch(key, configs, weights):
"""
Perform branching on a set of walkers by stochastic reconfiguration
Walkers are resampled with probability proportional to the weights, and the new weights are all set to be equal to the average weight.
Args:
configs: (nconfig,nelec,3) walker coordinates
weights: (nconfig,) walker weights
Returns:
configs: resampled walker configurations
weights: (nconfig,) all weights are equal to average weight
"""
nconfig = configs.shape[0]
wtot = jnp.sum(weights)
probability = jnp.cumsum(weights / wtot)
key, subkey = jax.random.split(key)
base = jax.random.uniform(subkey)
newinds = jnp.searchsorted(probability, (base + jnp.arange(nconfig) / nconfig) % 1.0)
configs = configs[newinds]
weights = jnp.ones((nconfig, ))*wtot/nconfig
return configs, weights
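# Sketch of the resampling (illustrative numbers): with weights [0.4, 0.4, 0.4, 2.8],
# wtot = 4 and the cumulative probabilities are [0.1, 0.2, 0.3, 1.0]; the evenly
# spaced grid (base + i/nconfig) % 1 then lands on the heavy walker about 3 times
# out of 4, and every resampled walker restarts with weight wtot/nconfig = 1.0.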
def dmc_file(hdf_file, data, attr, configs, weights):
import pyqmc.hdftools as hdftools
npdata = jax.tree_util.tree_map(np.asarray, data)
if hdf_file is not None:
with h5py.File(hdf_file, "a") as hdf:
if "configs" not in hdf.keys():
hdftools.setup_hdf(hdf, npdata, attr)
hdf.create_dataset(
"configs",
configs.shape,
chunks=True,
maxshape=(None, *configs.shape[1:]),
)
if "weights" not in hdf.keys():
hdf.create_dataset("weights", weights.shape)
hdftools.append_hdf(hdf, npdata)
hdf["configs"].resize(configs.shape)
hdf["configs"][...] = configs
hdf["weights"][:] = weights
def rundmc(
key,
wf,
configs,
weights=None,
tstep=0.01,
nsteps=1000,
branchtime=5,
stepoffset=0,
branchcut_start=3,
branchcut_stop=6,
drift_limiter=limdrift,
verbose=False,
accumulators=None,
ekey=("energy", "total"),
propagate=dmc_propagate,
feedback=1.0,
hdf_file=None,
client=None,
npartitions=None,
**kwargs,
):
"""
Run DMC
Args:
        wf: A Wave function-like object. This port accesses it as a dict with "gradient", "testvalue" and "iscomplex" entries, as well as anything (such as a laplacian) used by accumulators
configs: (nconfig, nelec, 3) - initial coordinates to start calculation.
weights: (nconfig,) - initial weights to start calculation, defaults to uniform.
nsteps: number of DMC steps to take
tstep: Time step for move proposals. Introduces time step error.
branchtime: number of steps to take between branching
accumulators: A dictionary of functor objects that take in (coords,wf) and return a dictionary of quantities to be averaged. np.mean(quantity,axis=0) should give the average over configurations. If none, a default energy accumulator will be used.
        ekey: tuple of strings; energy is needed for DMC weights. Access total energy by accumulators[ekey[0]](configs, wf)[ekey[1]]
verbose: Print out step information
drift_limiter: a function that takes a gradient and a cutoff and returns an adjusted gradient
stepoffset: If continuing a run, what to start the step numbering at.
Returns: (df,coords,weights)
df: A list of dictionaries nstep long that contains all results from the accumulators.
coords: The final coordinates from this calculation.
weights: The final weights from this calculation
"""
# Restart from HDF file
if hdf_file is not None and os.path.isfile(hdf_file):
with h5py.File(hdf_file, "r") as hdf:
stepoffset = hdf["step"][-1] + 1
configs.load_hdf(hdf)
weights = jnp.array(hdf["weights"])
eref = hdf["eref"][-1]
esigma = hdf["esigma"][-1]
if verbose:
print("Restarted calculation")
else:
warmup = 2
key, subkey = jax.random.split(key)
df, configs = mc.vmc(
subkey,
wf,
configs,
accumulators=accumulators,
client=client,
npartitions=npartitions,
verbose=verbose,
)
en = df[ekey[0] + ekey[1]][warmup:]
eref = jnp.mean(en).real
esigma = jnp.sqrt(jnp.var(en) * jnp.mean(df["nconfig"]))
if verbose:
print("eref start", eref, "esigma", esigma)
nconfig = configs.shape[0]
if weights is None:
weights = jnp.ones(nconfig)
npropagate = int(jnp.ceil(nsteps / branchtime))
df = []
for step in range(npropagate):
key, subkey = jax.random.split(key)
df_, configs, weights = dmc_propagate(
subkey,
wf,
configs,
weights,
tstep,
branchcut_start * esigma,
branchcut_stop * esigma,
eref=eref,
nsteps=branchtime,
accumulators=accumulators,
ekey=ekey,
drift_limiter=drift_limiter,
**kwargs,
)
df_["eref"] = eref
df_["step"] = step + stepoffset
df_["esigma"] = esigma
df_["tstep"] = tstep
df_["weight_std"] = jnp.std(weights)
df_["nsteps"] = branchtime
dmc_file(hdf_file, df_, {}, configs, weights)
# print(df_)
df.append(df_)
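        # Population control: lower eref when the mean weight drifts above 1 and
        # raise it when it drops below, steering the total weight back toward nconfig.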
eref = df_[ekey[0] + ekey[1]] - feedback * jnp.log(jnp.mean(weights))
key, subkey = jax.random.split(key)
configs, weights = branch(subkey, configs, weights)
if verbose:
print(
"energy",
df_[ekey[0] + ekey[1]],
"eref",
df_["eref"],
"sigma(w)",
df_["weight_std"],
)
df_ret = {}
for k in df[0].keys():
df_ret[k] = jnp.asarray([d[k] for d in df])
return df_ret, configs, weights
# PRIMITIVE DATA TYPES
# str - string
# bool - boolean
# int - integer
# float
# COMPLEX DATA TYPES
# list
# dict
attendees = ['sara', 'alex', 'justin', 'ryan']
for attendee in attendees:
# print(attendee)
pass
# print(attendees[0])
# key = value
employees = {
'sara': 'csa',
    'alex': 'it support tech',
'justin': 'software ninja',
'ryan': 'numbers nerd',
'robot': str(1)
}
# print(employees['alex'])
# print(employees.get('ilya', 'russian spy'))
# for employee_id in employees:
# print(employee_id + ' - ' + employees[employee_id])
# for key, value in employees.items():
# print(key + ' - ' + value)
# CASTING
# str + int = runtime exception
# SCOPE
# white space in Python defines scope
# block of code associated with a control structure
# def my_method():
# temp = 1
# print(temp)
# CONTROL STRUCTURES
# --for loops--
# for {variable_name} in <collection>:
# <action>
# --logical--
# if <bool>:
# pass
# elif <bool>:
# pass
# else:
# pass
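# A concrete run of the if/elif/else pattern above, using the attendees list
# defined earlier (prints 'book the small room' for 4 attendees):
head_count = len(attendees)
if head_count > 10:
    print('book the big room')
elif head_count > 2:
    print('book the small room')
else:
    print('meet at a desk')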
# -- exception handling --
# try:
# <action>
# except [error_type]:
# <handle error>
# try:
# employees['iyla']
# except KeyError:
# print('call alex to add access')
# print('in exception')
# except Exception:
# print('here')
# else:
# print('else')
# --assignment--
# =
# --comparisons--
# == -> equals
# != -> not equals
# > -> greater than
# >= -> greater than or equal
# < -> less than
# <= -> less than or equal
"""
PRACTICE: print each letter in a given string
"""
name = 'justin'
# for char in name:
# print(char)
"""
PRACTICE: create a function that takes an input,
then prints each character of the input
"""
def print_char(input_name):
for char in input_name:
print(char)
# print_char(name)
"""
PRACTICE: create a function that takes two inputs,
then returns True/False depending on whether the first
input is contained within the second input
"""
text_value = 'some input'
def search_string(search_value, text_value):
return search_value in text_value
print(search_string('a', text_value)) # False
print(search_string('s', text_value)) # True
print(search_string('S', text_value)) # False
from __future__ import absolute_import
try:
# use relative import for installed modules
from .vtkIOParallelXMLPython import *
except ImportError:
# during build and testing, the modules will be elsewhere,
# e.g. in lib directory or Release/Debug config directories
from vtkIOParallelXMLPython import *
# This module contains abstractions for the input stream. You don't have to
# look further; there is no pretty code here.
#
# We define two classes here.
#
# Mark(source, line, column)
# It's just a record and its only use is producing nice error messages.
# Parser does not use it for any other purposes.
#
# Reader(source, data)
# Reader determines the encoding of `data` and converts it to unicode.
# Reader provides the following methods and attributes:
# reader.peek(length=1) - return the next `length` characters
# reader.forward(length=1) - move the current position forward by `length` characters.
# reader.index - the number of the current character.
# reader.line, reader.column - the line and the column of the current character.
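# A minimal usage sketch (illustrative, not part of the module):
#   reader = Reader('key: value\n')
#   reader.peek()       # 'k'
#   reader.forward(4)   # step past 'key:'
#   reader.peek()       # ' '
#   reader.get_mark()   # Mark with name/line/column for error messages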
__all__ = ['Reader', 'ReaderError']
from .error import YAMLError, Mark
import codecs, re
class ReaderError(YAMLError):
def __init__(self, name, position, character, encoding, reason):
self.name = name
self.character = character
self.position = position
self.encoding = encoding
self.reason = reason
def __str__(self):
if isinstance(self.character, bytes):
return "'%s' codec can't decode byte #x%02x: %s\n" \
" in \"%s\", position %d" \
% (self.encoding, ord(self.character), self.reason,
self.name, self.position)
else:
return "unacceptable character #x%04x: %s\n" \
" in \"%s\", position %d" \
% (self.character, self.reason,
self.name, self.position)
class Reader(object):
# Reader:
# - determines the data encoding and converts it to a unicode string,
# - checks if characters are in allowed range,
# - adds '\0' to the end.
# Reader accepts
# - a `bytes` object,
# - a `str` object,
# - a file-like object with its `read` method returning `str`,
# - a file-like object with its `read` method returning `unicode`.
# Yeah, it's ugly and slow.
def __init__(self, stream):
self.name = None
self.stream = None
self.stream_pointer = 0
self.eof = True
self.buffer = ''
self.pointer = 0
self.raw_buffer = None
self.raw_decode = None
self.encoding = None
self.index = 0
self.line = 0
self.column = 0
if isinstance(stream, str):
self.name = "<unicode string>"
self.check_printable(stream)
self.buffer = stream+'\0'
elif isinstance(stream, bytes):
self.name = "<byte string>"
self.raw_buffer = stream
self.determine_encoding()
else:
self.stream = stream
self.name = getattr(stream, 'name', "<file>")
self.eof = False
self.raw_buffer = None
self.determine_encoding()
def peek(self, index=0):
try:
return self.buffer[self.pointer+index]
except IndexError:
self.update(index+1)
return self.buffer[self.pointer+index]
def prefix(self, length=1):
if self.pointer+length >= len(self.buffer):
self.update(length)
return self.buffer[self.pointer:self.pointer+length]
def forward(self, length=1):
if self.pointer+length+1 >= len(self.buffer):
self.update(length+1)
while length:
ch = self.buffer[self.pointer]
self.pointer += 1
self.index += 1
if ch in '\n\x85\u2028\u2029' \
or (ch == '\r' and self.buffer[self.pointer] != '\n'):
self.line += 1
self.column = 0
elif ch != '\uFEFF':
self.column += 1
length -= 1
def get_mark(self):
if self.stream is None:
return Mark(self.name, self.index, self.line, self.column,
self.buffer, self.pointer)
else:
return Mark(self.name, self.index, self.line, self.column,
None, None)
def determine_encoding(self):
while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2):
self.update_raw()
if isinstance(self.raw_buffer, bytes):
if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
self.raw_decode = codecs.utf_16_le_decode
self.encoding = 'utf-16-le'
elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
self.raw_decode = codecs.utf_16_be_decode
self.encoding = 'utf-16-be'
else:
self.raw_decode = codecs.utf_8_decode
self.encoding = 'utf-8'
self.update(1)
NON_PRINTABLE = re.compile('[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]')
def check_printable(self, data):
match = self.NON_PRINTABLE.search(data)
if match:
character = match.group()
position = self.index+(len(self.buffer)-self.pointer)+match.start()
raise ReaderError(self.name, position, ord(character),
'unicode', "special characters are not allowed")
def update(self, length):
if self.raw_buffer is None:
return
self.buffer = self.buffer[self.pointer:]
self.pointer = 0
while len(self.buffer) < length:
if not self.eof:
self.update_raw()
if self.raw_decode is not None:
try:
data, converted = self.raw_decode(self.raw_buffer,
'strict', self.eof)
except UnicodeDecodeError as exc:
character = self.raw_buffer[exc.start]
if self.stream is not None:
position = self.stream_pointer-len(self.raw_buffer)+exc.start
else:
position = exc.start
raise ReaderError(self.name, position, character,
exc.encoding, exc.reason)
else:
data = self.raw_buffer
converted = len(data)
self.check_printable(data)
self.buffer += data
self.raw_buffer = self.raw_buffer[converted:]
if self.eof:
self.buffer += '\0'
self.raw_buffer = None
break
def update_raw(self, size=4096):
data = self.stream.read(size)
if self.raw_buffer is None:
self.raw_buffer = data
else:
self.raw_buffer += data
self.stream_pointer += len(data)
if not data:
self.eof = True
#try:
# import psyco
# psyco.bind(Reader)
#except ImportError:
# pass
from rest_framework import viewsets, request
from rest_framework.response import Response
from rest_framework.decorators import action
from posthog.models import Event, Filter
from posthog.utils import request_to_date_query, dict_from_cursor_fetchall
from django.db.models import OuterRef
from django.db import connection
from typing import Optional
from django.db.models.expressions import Window
from django.db.models.functions import Lag
from django.db.models import F, Q
import json
# At the moment, paths don't support users changing distinct_ids midway through.
# See: https://github.com/PostHog/posthog/issues/185
class PathsViewSet(viewsets.ViewSet):
def _event_subquery(self, event: str, key: str):
return Event.objects.filter(pk=OuterRef(event)).values(key)[:1]
def _determine_path_type(self, request):
requested_type = request.GET.get('type', None)
# Default
event: Optional[str] = "$pageview"
event_filter = {"event":event}
path_type = "properties->> \'$current_url\'"
start_comparator = "{} ~".format(path_type)
# determine requested type
if requested_type:
if requested_type == "$screen":
event = "$screen"
event_filter = {"event":event}
path_type = "properties->> \'$screen_name\'"
start_comparator = "{} ~".format(path_type)
elif requested_type == "$autocapture":
event = "$autocapture"
event_filter = {"event":event}
path_type = "tag_name_source"
start_comparator = "group_id ="
elif requested_type == "custom_event":
event = None
event_filter = {}
path_type = "event"
start_comparator = "event ="
return event, path_type, event_filter, start_comparator
@action(methods=['GET'], detail=False)
def elements(self, request: request.Request):
team = request.user.team_set.get()
all_events = Event.objects.filter(team=team, event="$autocapture")
all_events_SQL, sql_params = all_events.query.sql_with_params()
        elements_readable = '\
SELECT tag_name_source as name, group_id as id FROM (SELECT \'<\' || e."tag_name" || \'> \' || e."text" as tag_name_source, e."text" as text_source, e.group_id FROM "posthog_element" e\
JOIN ( SELECT group_id, MIN("posthog_element"."order") as minOrder FROM "posthog_element" GROUP BY group_id) e2 ON e.order = e2.minOrder AND e.group_id = e2.group_id) as element\
JOIN (SELECT id, hash, count FROM posthog_elementgroup as g JOIN (SELECT count(*), elements_hash from ({}) as a group by elements_hash) as e on g.hash = e.elements_hash) as outer_group ON element.group_id = outer_group.id where text_source <> \'\' order by count DESC limit 20\
'.format(all_events_SQL)
cursor = connection.cursor()
        cursor.execute(elements_readable, sql_params)
rows = dict_from_cursor_fetchall(cursor)
return Response(rows)
def _apply_start_point(self, start_comparator: str, query_string: str, start_point:str) -> str:
marked = '\
SELECT *, CASE WHEN {} \'{}\' THEN timestamp ELSE NULL END as mark from ({}) as sessionified\
'.format(start_comparator, start_point, query_string)
marked_plus = '\
SELECT *, MIN(mark) OVER (\
PARTITION BY distinct_id\
, session ORDER BY timestamp\
) AS max from ({}) as marked order by session\
'.format(marked)
sessionified = '\
SELECT * FROM ({}) as something where timestamp >= max \
'.format(marked_plus)
return sessionified
def _add_elements(self, query_string: str) -> str:
element = 'SELECT \'<\'|| e."tag_name" || \'> \' || e."text" as tag_name_source, e."text" as text_source FROM "posthog_element" e JOIN \
( SELECT group_id, MIN("posthog_element"."order") as minOrder FROM "posthog_element" GROUP BY group_id) e2 ON e.order = e2.minOrder AND e.group_id = e2.group_id where e.group_id = v2.group_id'
element_group = 'SELECT g."id" as group_id FROM "posthog_elementgroup" g where v1."elements_hash" = g."hash"'
sessions_sql = 'SELECT * FROM ({}) as v1 JOIN LATERAL ({}) as v2 on true JOIN LATERAL ({}) as v3 on true'.format(query_string, element_group, element)
return sessions_sql
# FIXME: Timestamp is timezone aware timestamp, date range uses naive date.
# To avoid unexpected results should convert date range to timestamps with timezone.
def list(self, request):
team = request.user.team_set.get()
resp = []
date_query = request_to_date_query(request.GET, exact=False)
event, path_type, event_filter, start_comparator = self._determine_path_type(request)
properties = request.GET.get('properties')
start_point = request.GET.get('start')
sessions = Event.objects.add_person_id(team.pk).filter(
team=team,
**(event_filter),
**date_query
)\
.filter(~Q(event__in=['$autocapture', '$pageview', '$identify', '$pageleave']) if event is None else Q())\
.filter(Filter(data={'properties': json.loads(properties)}).properties_to_Q(team_id=team.pk) if properties else Q())\
.annotate(previous_timestamp=Window(
expression=Lag('timestamp', default=None),
partition_by=F('distinct_id'),
order_by=F('timestamp').asc()
))
sessions_sql, sessions_sql_params = sessions.query.sql_with_params()
if event == "$autocapture":
sessions_sql = self._add_elements(query_string=sessions_sql)
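        # Annotate each event with new_session = 1 when 30 minutes or more have
        # elapsed since the same distinct_id's previous event (or there is none).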
events_notated = '\
SELECT *, CASE WHEN EXTRACT(\'EPOCH\' FROM (timestamp - previous_timestamp)) >= (60 * 30) OR previous_timestamp IS NULL THEN 1 ELSE 0 END AS new_session\
FROM ({}) AS inner_sessions\
'.format(sessions_sql)
sessionified = '\
SELECT events_notated.*, SUM(new_session) OVER (\
ORDER BY distinct_id\
,timestamp\
) AS session\
FROM ({}) as events_notated\
'.format(events_notated)
if start_point:
sessionified = self._apply_start_point(start_comparator=start_comparator, query_string=sessionified, start_point=start_point)
final = '\
SELECT {} as path_type, id, sessionified.session\
,ROW_NUMBER() OVER (\
PARTITION BY distinct_id\
,session ORDER BY timestamp\
) AS event_number\
FROM ({}) as sessionified\
'.format(path_type, sessionified)
counts = '\
SELECT event_number || \'_\' || path_type as target_event, id as target_id, LAG(event_number || \'_\' || path_type, 1) OVER (\
PARTITION BY session\
) AS source_event , LAG(id, 1) OVER (\
PARTITION BY session\
) AS source_id from \
({}) as final\
where event_number <= 4\
'.format(final)
cursor = connection.cursor()
cursor.execute('\
SELECT source_event, target_event, MAX(target_id), MAX(source_id), count(*) from ({}) as counts\
where source_event is not null and target_event is not null\
group by source_event, target_event order by count desc limit 20\
'.format(counts), sessions_sql_params)
rows = cursor.fetchall()
for row in rows:
resp.append({
'source': row[0],
'target': row[1],
'target_id': row[2],
'source_id': row[3],
'value': row[4]
})
resp = sorted(resp, key=lambda x: x['value'], reverse=True)
return Response(resp)
"""Preprocessing of MALDI-TOF spectra."""
from .generic import SubsetPeaksTransformer
from .normalization import TotalIonCurrentNormalizer
from .normalization import ScaleNormalizer
from .topological import TopologicalPeakFiltering
__all__ = [
'ScaleNormalizer',
'SubsetPeaksTransformer',
'TopologicalPeakFiltering',
'TotalIonCurrentNormalizer'
]
# -*- coding: utf-8 -*-
"""Command line scripts to launch a `Q2rCalculation` for testing and demonstration purposes."""
from aiida.cmdline.params import options as options_core
from aiida.cmdline.params import types
from aiida.cmdline.utils import decorators
import click
from . import cmd_launch
from ..utils import launch, options
@cmd_launch.command('q2r')
@options_core.CODE(required=True, type=types.CodeParamType(entry_point='quantumespresso.q2r'))
@options_core.CALCULATION(required=True)
@options.MAX_NUM_MACHINES()
@options.MAX_WALLCLOCK_SECONDS()
@options.WITH_MPI()
@options.DAEMON()
@decorators.with_dbenv()
def launch_calculation(code, calculation, max_num_machines, max_wallclock_seconds, with_mpi, daemon):
"""Run a Q2rCalculation."""
from aiida.plugins import CalculationFactory
from aiida_quantumespresso.utils.resources import get_default_options
# Check that the parent calculation node comes from quantumespresso.ph.
# I cannot move this check into the option declaration, because CalcJobNode is not subclassed by the specific
# calculation plugins (only Process is), and there is no feature yet to filter by the associated process_type.
expected_process_type = 'aiida.calculations:quantumespresso.ph'
if calculation.process_type != expected_process_type:
raise click.BadParameter(
f'input calculation node has process_type: {calculation.process_type}; should be {expected_process_type}'
)
inputs = {
'code': code,
'parent_folder': calculation.outputs.remote_folder,
'metadata': {
'options': get_default_options(max_num_machines, max_wallclock_seconds, with_mpi),
}
}
launch.launch_process(CalculationFactory('quantumespresso.q2r'), daemon, **inputs)
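# Example invocation (a sketch; assumes a configured `q2r` code and the pk of a
# finished quantumespresso.ph calculation, both placeholders):
#   aiida-quantumespresso calculation launch q2r -X q2r@localhost -C <PH_CALC_PK>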
# Copyright 2021 Christophe Bedard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from launch.actions import SetEnvironmentVariable
from tracetools_test.case import TraceTestCase
from tracetools_trace.tools import tracepoints as tp
class TestPubSub(TraceTestCase):
def __init__(self, *args) -> None:
super().__init__(
*args,
session_name_prefix='session-test-pub-sub',
events_ros=[
tp.rmw_publisher_init,
tp.rcl_publisher_init,
tp.rmw_publish,
tp.rcl_publish,
tp.rclcpp_publish,
tp.rmw_subscription_init,
tp.rcl_subscription_init,
tp.rclcpp_subscription_init,
tp.rclcpp_subscription_callback_added,
tp.callback_start,
tp.callback_end,
],
package='test_tracetools',
nodes=['test_ping', 'test_pong'],
# Need rmw_cyclonedds_cpp for the rmw instrumentation
additional_actions=SetEnvironmentVariable('RMW_IMPLEMENTATION', 'rmw_cyclonedds_cpp'),
)
def test_all(self):
# Check events as set
self.assertEventsSet(self._events_ros)
# Get publisher init events & publisher handles of test topics
rmw_pub_init_events = self.get_events_with_name(tp.rmw_publisher_init)
rmw_sub_init_events = self.get_events_with_name(tp.rmw_subscription_init)
publisher_init_events = self.get_events_with_name(tp.rcl_publisher_init)
ping_publisher_init_events = self.get_events_with_field_value(
'topic_name',
'/ping',
publisher_init_events,
)
pong_publisher_init_events = self.get_events_with_field_value(
'topic_name',
'/pong',
publisher_init_events,
)
self.assertNumEventsEqual(ping_publisher_init_events, 1)
self.assertNumEventsEqual(pong_publisher_init_events, 1)
ping_publisher_init_event = ping_publisher_init_events[0]
pong_publisher_init_event = pong_publisher_init_events[0]
ping_pub_handle = self.get_field(ping_publisher_init_event, 'publisher_handle')
ping_rmw_pub_handle = self.get_field(ping_publisher_init_event, 'rmw_publisher_handle')
pong_pub_handle = self.get_field(pong_publisher_init_event, 'publisher_handle')
pong_rmw_pub_handle = self.get_field(pong_publisher_init_event, 'rmw_publisher_handle')
# Find corresponding rmw_pub_init events
ping_rmw_pub_init_events = self.get_events_with_field_value(
'rmw_publisher_handle',
ping_rmw_pub_handle,
rmw_pub_init_events,
)
pong_rmw_pub_init_events = self.get_events_with_field_value(
'rmw_publisher_handle',
pong_rmw_pub_handle,
rmw_pub_init_events,
)
self.assertNumEventsEqual(ping_rmw_pub_init_events, 1)
self.assertNumEventsEqual(pong_rmw_pub_init_events, 1)
ping_rmw_pub_init_event = ping_rmw_pub_init_events[0]
pong_rmw_pub_init_event = pong_rmw_pub_init_events[0]
# Check publisher init order (rmw then rcl)
self.assertEventOrder([
ping_rmw_pub_init_event,
ping_publisher_init_event,
])
self.assertEventOrder([
pong_rmw_pub_init_event,
pong_publisher_init_event,
])
# Get corresponding rmw/rcl/rclcpp publish events for ping & pong
rcl_publish_events = self.get_events_with_name(tp.rcl_publish)
ping_rcl_pub_events = self.get_events_with_field_value(
'publisher_handle',
ping_pub_handle,
rcl_publish_events,
)
pong_rcl_pub_events = self.get_events_with_field_value(
'publisher_handle',
pong_pub_handle,
rcl_publish_events,
)
self.assertNumEventsEqual(ping_rcl_pub_events, 1)
self.assertNumEventsEqual(pong_rcl_pub_events, 1)
ping_rcl_pub_event = ping_rcl_pub_events[0]
pong_rcl_pub_event = pong_rcl_pub_events[0]
rclcpp_publish_events = self.get_events_with_name(tp.rclcpp_publish)
rmw_publish_events = self.get_events_with_name(tp.rmw_publish)
ping_pub_message = self.get_field(ping_rcl_pub_event, 'message')
pong_pub_message = self.get_field(pong_rcl_pub_event, 'message')
ping_rclcpp_pub_events = self.get_events_with_field_value(
'message',
ping_pub_message,
rclcpp_publish_events,
)
pong_rclcpp_pub_events = self.get_events_with_field_value(
'message',
pong_pub_message,
rclcpp_publish_events,
)
ping_rmw_pub_events = self.get_events_with_field_value(
'message',
ping_pub_message,
rmw_publish_events,
)
pong_rmw_pub_events = self.get_events_with_field_value(
'message',
pong_pub_message,
rmw_publish_events,
)
self.assertNumEventsEqual(ping_rclcpp_pub_events, 1)
self.assertNumEventsEqual(pong_rclcpp_pub_events, 1)
self.assertNumEventsEqual(ping_rmw_pub_events, 1)
self.assertNumEventsEqual(pong_rmw_pub_events, 1)
ping_rclcpp_pub_event = ping_rclcpp_pub_events[0]
pong_rclcpp_pub_event = pong_rclcpp_pub_events[0]
ping_rmw_pub_event = ping_rmw_pub_events[0]
pong_rmw_pub_event = pong_rmw_pub_events[0]
# Get subscription init events & subscription handles of test topics
rcl_subscription_init_events = self.get_events_with_name(tp.rcl_subscription_init)
ping_rcl_subscription_init_events = self.get_events_with_field_value(
'topic_name',
'/ping',
rcl_subscription_init_events,
)
pong_rcl_subscription_init_events = self.get_events_with_field_value(
'topic_name',
'/pong',
rcl_subscription_init_events,
)
self.assertNumEventsEqual(ping_rcl_subscription_init_events, 1)
self.assertNumEventsEqual(pong_rcl_subscription_init_events, 1)
ping_rcl_subscription_init_event = ping_rcl_subscription_init_events[0]
pong_rcl_subscription_init_event = pong_rcl_subscription_init_events[0]
ping_sub_handle = self.get_field(ping_rcl_subscription_init_event, 'subscription_handle')
ping_rmw_sub_handle = self.get_field(
ping_rcl_subscription_init_event, 'rmw_subscription_handle')
pong_sub_handle = self.get_field(pong_rcl_subscription_init_event, 'subscription_handle')
pong_rmw_sub_handle = self.get_field(
pong_rcl_subscription_init_event, 'rmw_subscription_handle')
# Find corresponding rmw_sub_init events
ping_rmw_sub_init_events = self.get_events_with_field_value(
'rmw_subscription_handle',
ping_rmw_sub_handle,
rmw_sub_init_events,
)
pong_rmw_sub_init_events = self.get_events_with_field_value(
'rmw_subscription_handle',
pong_rmw_sub_handle,
rmw_sub_init_events,
)
self.assertNumEventsEqual(ping_rmw_sub_init_events, 1)
self.assertNumEventsEqual(pong_rmw_sub_init_events, 1)
ping_rmw_sub_init_event = ping_rmw_sub_init_events[0]
pong_rmw_sub_init_event = pong_rmw_sub_init_events[0]
# Get corresponding subscription objects
rclcpp_subscription_init_events = self.get_events_with_name(
tp.rclcpp_subscription_init,
)
ping_rclcpp_subscription_init_events = self.get_events_with_field_value(
'subscription_handle',
ping_sub_handle,
rclcpp_subscription_init_events,
)
pong_rclcpp_subscription_init_events = self.get_events_with_field_value(
'subscription_handle',
pong_sub_handle,
rclcpp_subscription_init_events,
)
self.assertNumEventsEqual(ping_rclcpp_subscription_init_events, 1)
self.assertNumEventsEqual(pong_rclcpp_subscription_init_events, 1)
ping_rclcpp_subscription_init_event = ping_rclcpp_subscription_init_events[0]
pong_rclcpp_subscription_init_event = pong_rclcpp_subscription_init_events[0]
ping_sub_object = self.get_field(ping_rclcpp_subscription_init_event, 'subscription')
pong_sub_object = self.get_field(pong_rclcpp_subscription_init_event, 'subscription')
# Get corresponding subscription callback objects
rclcpp_subscription_callback_events = self.get_events_with_name(
tp.rclcpp_subscription_callback_added,
)
ping_rclcpp_subscription_callback_events = self.get_events_with_field_value(
'subscription',
ping_sub_object,
rclcpp_subscription_callback_events,
)
pong_rclcpp_subscription_callback_events = self.get_events_with_field_value(
'subscription',
pong_sub_object,
rclcpp_subscription_callback_events,
)
self.assertNumEventsEqual(ping_rclcpp_subscription_callback_events, 1)
self.assertNumEventsEqual(pong_rclcpp_subscription_callback_events, 1)
ping_rclcpp_subscription_callback_event = ping_rclcpp_subscription_callback_events[0]
pong_rclcpp_subscription_callback_event = pong_rclcpp_subscription_callback_events[0]
ping_callback_object = self.get_field(ping_rclcpp_subscription_callback_event, 'callback')
pong_callback_object = self.get_field(pong_rclcpp_subscription_callback_event, 'callback')
# Check subscription init order
self.assertEventOrder([
ping_rmw_sub_init_event,
ping_rcl_subscription_init_event,
ping_rclcpp_subscription_init_event,
ping_rclcpp_subscription_callback_event,
])
self.assertEventOrder([
pong_rmw_sub_init_event,
pong_rcl_subscription_init_event,
pong_rclcpp_subscription_init_event,
pong_rclcpp_subscription_callback_event,
])
# Get corresponding callback start/end events
callback_start_events = self.get_events_with_name(tp.callback_start)
callback_end_events = self.get_events_with_name(tp.callback_end)
ping_callback_start_events = self.get_events_with_field_value(
'callback',
ping_callback_object,
callback_start_events,
)
pong_callback_start_events = self.get_events_with_field_value(
'callback',
pong_callback_object,
callback_start_events,
)
ping_callback_end_events = self.get_events_with_field_value(
'callback',
ping_callback_object,
callback_end_events,
)
pong_callback_end_events = self.get_events_with_field_value(
'callback',
pong_callback_object,
callback_end_events,
)
self.assertNumEventsEqual(ping_callback_start_events, 1)
self.assertNumEventsEqual(pong_callback_start_events, 1)
self.assertNumEventsEqual(ping_callback_end_events, 1)
self.assertNumEventsEqual(pong_callback_end_events, 1)
ping_callback_start_event = ping_callback_start_events[0]
pong_callback_start_event = pong_callback_start_events[0]
ping_callback_end_event = ping_callback_end_events[0]
pong_callback_end_event = pong_callback_end_events[0]
# Check pub/sub order:
# * /ping pub rclcpp_publish
# * /ping pub rcl_publish
# * /ping pub rmw_publish
# * /ping sub callback_start
# * /pong pub rclcpp_publish
# * /pong pub rcl_publish
# * /pong pub rmw_publish
# ...
# * /ping sub callback_end
# ... we shouldn't necessarily expect the /pong callback to start
# before the /ping callback has ended
# * /pong sub callback_start
# * /pong sub callback_end
self.assertEventOrder([
ping_rclcpp_pub_event,
ping_rcl_pub_event,
ping_rmw_pub_event,
ping_callback_start_event,
pong_rclcpp_pub_event,
pong_rcl_pub_event,
pong_rmw_pub_event,
ping_callback_end_event,
])
self.assertEventOrder([
pong_rclcpp_pub_event,
pong_rcl_pub_event,
pong_rmw_pub_event,
pong_callback_start_event,
pong_callback_end_event,
])
if __name__ == '__main__':
unittest.main()
"""Test suite for our JSON utilities.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# stdlib
import json
from base64 import decodestring
# third party
import nose.tools as nt
# our own
from IPython.testing import decorators as dec
from ..jsonutil import json_clean, encode_images
from ..py3compat import unicode_to_str, str_to_bytes
#-----------------------------------------------------------------------------
# Test functions
#-----------------------------------------------------------------------------
def test():
# list of input/expected output. Use None for the expected output if it
# can be the same as the input.
pairs = [(1, None), # start with scalars
(1.0, None),
('a', None),
(True, None),
(False, None),
(None, None),
# complex numbers for now just go to strings, as otherwise they
# are unserializable
(1j, '1j'),
# Containers
([1, 2], None),
((1, 2), [1, 2]),
(set([1, 2]), [1, 2]),
(dict(x=1), None),
({'x': 1, 'y':[1,2,3], '1':'int'}, None),
# More exotic objects
((x for x in range(3)), [0, 1, 2]),
(iter([1, 2]), [1, 2]),
]
for val, jval in pairs:
if jval is None:
jval = val
out = json_clean(val)
# validate our cleanup
nt.assert_equal(out, jval)
# and ensure that what we return, indeed encodes cleanly
json.loads(json.dumps(out))
@dec.parametric
def test_encode_images():
# invalid data, but the header and footer are from real files
pngdata = b'\x89PNG\r\n\x1a\nblahblahnotactuallyvalidIEND\xaeB`\x82'
jpegdata = b'\xff\xd8\xff\xe0\x00\x10JFIFblahblahjpeg(\xa0\x0f\xff\xd9'
fmt = {
'image/png' : pngdata,
'image/jpeg' : jpegdata,
}
encoded = encode_images(fmt)
for key, value in fmt.iteritems():
# encoded has unicode, want bytes
decoded = decodestring(encoded[key].encode('ascii'))
yield nt.assert_equal(decoded, value)
encoded2 = encode_images(encoded)
yield nt.assert_equal(encoded, encoded2)
b64_str = {}
for key, encoded in encoded.iteritems():
b64_str[key] = unicode_to_str(encoded)
encoded3 = encode_images(b64_str)
yield nt.assert_equal(encoded3, b64_str)
for key, value in fmt.iteritems():
# encoded3 has str, want bytes
decoded = decodestring(str_to_bytes(encoded3[key]))
yield nt.assert_equal(decoded, value)
def test_lambda():
jc = json_clean(lambda : 1)
assert isinstance(jc, str)
assert '<lambda>' in jc
json.dumps(jc)
def test_exception():
bad_dicts = [{1:'number', '1':'string'},
{True:'bool', 'True':'string'},
]
for d in bad_dicts:
nt.assert_raises(ValueError, json_clean, d)
import requests
from urllib.parse import urlencode
from urllib.request import urlopen
from urllib.error import HTTPError
import re
import json
from base64 import b64encode
def get_playlists(spotify_url):
with open('MY_SECRETS.json', 'r') as f:
spotify_key = json.load(f)['SPOTIFY_KEY']
playlist_id = spotify_url.split('/')[-1].split('?')[0]
r = requests.get(f"https://api.spotify.com/v1/playlists/{playlist_id}", headers={'Authorization': f'Bearer {spotify_key}'})
if r.status_code == 400 or r.status_code == 401:
raise TypeError('Invalid Spotify Token')
returned_tracks = {}
playlist_name = r.json()['name']
r = requests.get(f"https://api.spotify.com/v1/playlists/{playlist_id}/tracks", headers={'Authorization': f'Bearer {spotify_key}'})
data = r.json()
tracks = data['items']
while data['next']:
r = requests.get(data['next'], headers={'Authorization': f'Bearer {spotify_key}'})
data = r.json()
tracks = tracks + data['items']
for track in tracks:
song_name = track['track']['name']
artists = []
for artist in track['track']['artists']:
artists.append(artist['name'])
artist_name = ' '.join(artists)
try:
query_string = urlencode({'search_query': artist_name + ' ' + song_name})
htm_content = urlopen('http://www.youtube.com/results?' + query_string)
search_results = re.findall(r'/watch\?v=(.{11})', htm_content.read().decode())
returned_tracks.update({f'{song_name}': f'http://www.youtube.com/watch?v={search_results[0]}'})
except HTTPError:
print(f'Couldn\'t download "{song_name}", continuing')
continue
return playlist_name, returned_tracks
def get_access_token():
with open('MY_SECRETS.json', 'r') as f:
load_file = json.load(f)
spotify_client_id = load_file['spotify_client_id']
spotify_client_secret = load_file['spotify_client_secret']
headers = {
'Authorization': f'Basic {b64encode(f"{spotify_client_id}:{spotify_client_secret}".encode()).decode()}',
}
data = {
'grant_type': 'client_credentials'
}
r = requests.post('https://accounts.spotify.com/api/token', headers=headers, data=data)
token = r.json()['access_token']
updated_dict = {
"spotify_client_id": f"{spotify_client_id}",
"spotify_client_secret": f"{spotify_client_secret}",
"SPOTIFY_KEY": token
}
with open('MY_SECRETS.json', 'w') as f:
json.dump(updated_dict, f)
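# Minimal usage sketch (assumes MY_SECRETS.json sits in the working directory
# with spotify_client_id and spotify_client_secret filled in):
#   get_access_token()  # refreshes the stored SPOTIFY_KEY
#   name, tracks = get_playlists('https://open.spotify.com/playlist/<PLAYLIST_ID>')
#   # `tracks` maps each song name to the first matching YouTube watch URL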
# -*- coding: utf-8 -*-
from collective.solr.interfaces import ISolrConnectionManager
from collective.solr.interfaces import IZCMLSolrConnectionConfig
from collective.solr.local import getLocal
from collective.solr.local import setLocal
from collective.solr.solr import SolrConnection
from collective.solr.utils import getConfig
from collective.solr.utils import isActive
from six.moves.http_client import CannotSendRequest
from six.moves.http_client import ResponseNotReady
from logging import getLogger
from socket import error
from zope.component import queryUtility
from zope.interface import implementer
from plone.registry.interfaces import IRegistry
from zope.component import getUtility
import six
logger = getLogger("collective.solr.manager")
marker = object()
@implementer(IZCMLSolrConnectionConfig)
class ZCMLSolrConnectionConfig(object):
"""Connection values that can be configured through zcml"""
def __init__(self, host, port, base):
self.host = "%s:%d" % (host, port)
self.base = base
@implementer(ISolrConnectionManager)
class SolrConnectionManager(object):
""" a thread-local connection manager for solr """
lock = False
def __init__(self, active=None):
if isinstance(active, bool):
self.setHost(active=active)
def setHost(self, active=False, host="localhost", port=8983, base="/solr/plone"):
""" set connection parameters """
config = getConfig()
config.active = active
config.host = six.text_type(host)
config.port = port
config.base = six.text_type(base)
self.closeConnection(clearSchema=True)
def closeConnection(self, clearSchema=False):
""" close the current connection, if any """
logger.debug("closing connection")
conn = getLocal("connection")
if conn is not None:
conn.close()
setLocal("connection", None)
if clearSchema:
setLocal("schema", None)
def getConnection(self):
""" returns an existing connection or opens one """
if not isActive():
return None
conn = getLocal("connection")
if conn is not None:
return conn
zcmlconfig = queryUtility(IZCMLSolrConnectionConfig)
registry = getUtility(IRegistry)
config_host = registry["collective.solr.host"]
if zcmlconfig is not None:
# use connection parameters defined in zcml...
logger.debug("opening connection to %s", zcmlconfig.host)
conn = SolrConnection(
host=zcmlconfig.host, solrBase=zcmlconfig.base, persistent=True
)
setLocal("connection", conn)
elif config_host is not None:
# otherwise use connection parameters defined in control panel...
config_port = registry["collective.solr.port"]
config_base = registry["collective.solr.base"]
host = "%s:%d" % (config_host, config_port)
logger.debug("opening connection to %s", host)
conn = SolrConnection(host=host, solrBase=config_base, persistent=True)
setLocal("connection", conn)
return conn
def getSchema(self):
""" returns the currently used schema or fetches it """
schema = getLocal("schema")
if schema is None:
conn = self.getConnection()
if conn is not None:
logger.debug("getting schema from solr")
self.setSearchTimeout()
try:
schema = conn.get_schema()
setLocal("schema", schema)
except (error, CannotSendRequest, ResponseNotReady):
logger.exception("exception while getting schema")
return schema
def setTimeout(self, timeout, lock=marker):
""" set the timeout on the current (or to be opened) connection
to the given value """
update = not self.lock # update if not locked...
if lock is not marker:
self.lock = bool(lock)
update = True # ...or changed
logger.debug("%ssetting timeout lock", lock and "" or "re")
if update:
conn = self.getConnection()
if conn is not None:
logger.debug("setting timeout to %s", timeout)
conn.setTimeout(timeout)
def setIndexTimeout(self):
""" set the timeout on the current (or to be opened) connection
to the value specified for indexing operations """
registry = getUtility(IRegistry)
index_timeout = registry["collective.solr.index_timeout"]
self.setTimeout(index_timeout or None)
def setSearchTimeout(self):
""" set the timeout on the current (or to be opened) connection
to the value specified for search operations """
registry = getUtility(IRegistry)
search_timeout = registry["collective.solr.search_timeout"]
self.setTimeout(search_timeout or None)
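# Minimal usage sketch (illustrative; in practice the manager is registered as a
# utility and the connection settings come from the registry or zcml):
#   manager = SolrConnectionManager(active=True)
#   conn = manager.getConnection()
#   schema = manager.getSchema()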
import sys, os, shutil
import h5py
import time
import io
import random
import tempfile
from tqdm import tqdm
from absl import app, flags, logging
from ray.util.multiprocessing import Pool
import gcsfs
import numpy as np
from pathlib import Path
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.linear_model import SGDClassifier
import torchtext
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import ReduceLROnPlateau
import torch.nn as nn
from transformers import BertTokenizer, BertModel, BertForSequenceClassification
import opacus
from privatekube.experiments.datasets import (
EventLevelDataset,
split_review_batch,
UserTimeLevelDataset,
select_blocks_by_timeframe,
)
from privatekube.experiments.utils import (
build_flags,
flags_to_dict,
load_yaml,
results_to_dict,
save_yaml,
save_model,
binary_accuracy,
multiclass_accuracy,
epoch_time,
)
from privatekube.privacy.text import build_public_vocab
from privatekube.privacy.rdp import (
compute_noise_from_target_epsilon,
ALPHAS,
compute_rdp_sgm,
)
import models
DEFAULT_DATA_PATH = Path(__file__).resolve().parent.parent.parent.joinpath("data")
# Define default args
dataset_args = {
"n_blocks": 200,
"max_text_len": 140,
"vocab_size": 10_000,
"n_blocks_test": 200,
}
input_path_args = {
"dataset_dir": "",
"dataset_monofile": "",
"block_counts": str(DEFAULT_DATA_PATH.joinpath("block_counts.yaml")),
"emb_path": str(DEFAULT_DATA_PATH.joinpath(".vector_cache")),
}
model_args = {
"task": "product",
"model": "bow",
"embedding_dim": 100,
"hidden_dim_1": 240,
"hidden_dim_2": 195,
"hidden_dim": 100,
"dropout": 0.25,
}
training_args = {
"device": "cuda",
"learning_rate": 0.01,
"dp": 0,
"dp_eval": 0,
"user_level": 0,
"epsilon": 5.0,
"delta": 1e-5,
"n_epochs": 15,
"batch_size": 64,
"virtual_batch_multiplier": 2,
"adaptive_batch_size": 1,
"noise": -1.0,
"timeframe_days": 0,
"learning_rate_scheduler": 1,
"dynamic_clipping": 0,
"max_grad_norm": 1.0,
"per_layer_clipping": 0,
"n_workers": 6,
"non_dp_batch_size": 256,
}
output_args = {
"log_path": "",
"model_path": "",
"metrics_path": "",
}
build_flags(dataset_args, model_args, training_args, input_path_args, output_args)
FLAGS = flags.FLAGS
np.random.seed(0)
def build_split_dataset():
block_dir = tempfile.mkdtemp()
test_block_dir = tempfile.mkdtemp()
if FLAGS.dataset_dir[0:5] == "gs://":
os.system(
"gcloud auth activate-service-account --key-file=$GOOGLE_APPLICATION_CREDENTIALS"
)
fs = gcsfs.GCSFileSystem(
project=os.get_env("GCP_PROJECT"), token="google_default"
) # Get the local Gcloud token
logging.info("Listing bucket files.")
all_blocks = list(
map(
lambda blob: os.path.basename(blob["name"]),
fs.listdir(FLAGS.dataset_dir),
)
)
logging.info(f"Got {len(all_blocks)} blocks.")
logging.warning(f"The evaluation set is not fixed.")
elif FLAGS.dataset_dir == "":
logging.info("Listing the block names.")
all_blocks = list(load_yaml(FLAGS.block_counts).keys())
else:
all_blocks = os.listdir(FLAGS.dataset_dir)
logging.info(f"Selecting {FLAGS.n_blocks_test} test blocks (fixed randomness).")
test_blocks = np.random.choice(all_blocks, FLAGS.n_blocks_test, replace=False)
for tb in test_blocks:
all_blocks.remove(tb)
# Use every user to the maximum.
def sort_by_user(block_name):
if block_name.endswith(".h5"):
block_name = block_name[: -len(".h5")]
name = block_name.split("-")
user_slice = int(name[1])
return user_slice
logging.info(
f"Selecting as few users as possible.\n Pseudorandom and deterministic (hashed user ids)."
)
selected_blocks = sorted(all_blocks, key=sort_by_user)[0 : FLAGS.n_blocks]
if FLAGS.dataset_dir[0:5] == "gs://":
pool = Pool()
bucket_path = FLAGS.dataset_dir
def download_datasource(block_name):
block_path = os.path.join(bucket_path, block_name)
dest = os.path.join(block_dir, block_name)
os.system(f"gsutil cp {block_path} {dest}")
return
logging.warning("Downloading the blocks in parallel.")
b = pool.map(download_datasource, selected_blocks)
pool.close()
pool.join()
block_names = None
test_block_names = None
elif FLAGS.dataset_dir == "":
block_dir = None
test_block_dir = None
block_names = selected_blocks
test_block_names = test_blocks
else:
for b in selected_blocks:
os.symlink(os.path.join(FLAGS.dataset_dir, b), os.path.join(block_dir, b))
for b in test_blocks:
os.symlink(
os.path.join(FLAGS.dataset_dir, b), os.path.join(test_block_dir, b)
)
block_names = None
test_block_names = None
# Store for the logs
FLAGS.dataset_dir = block_dir
if not FLAGS.dataset_monofile:
if FLAGS.model == "bert":
from_h5 = DEFAULT_DATA_PATH.joinpath("reviews.h5")
else:
from_h5 = DEFAULT_DATA_PATH.joinpath("reviews_custom_vocab.h5")
else:
from_h5 = FLAGS.dataset_monofile
if FLAGS.dp and FLAGS.user_level:
train_data = UserTimeLevelDataset(
blocks_dir=block_dir,
timeframe=FLAGS.timeframe_days * 86400,
from_h5=from_h5,
block_names=block_names,
)
else:
train_data = EventLevelDataset(
blocks_dir=block_dir,
from_h5=from_h5,
block_names=block_names,
)
test_data = EventLevelDataset(
blocks_dir=test_block_dir,
from_h5=from_h5,
block_names=test_block_names,
)
test_data, valid_data = test_data.split([0.75, 0.25])
logging.info(f"Test size: {len(test_data)}\n Valid size: {len(valid_data)}")
# Values from the preprocessing
# (max text len doesn't matter here)
text_field = torchtext.data.Field(
batch_first=True,
use_vocab=True,
init_token="<bos>",
eos_token="<eos>",
pad_token="<pad>",
unk_token="<unk>",
include_lengths=True,
)
build_public_vocab(
text_field,
max_size=FLAGS.vocab_size - 4,
vectors=f"glove.6B.{FLAGS.embedding_dim}d",
unk_init=torch.Tensor.normal_,
vectors_cache=FLAGS.emb_path,
)
return train_data, test_data, valid_data, text_field
def compute_optimal_batch_size(real_batch_size, dataset_len):
logging.info(
f"Computing the optimal batch size. Dataset {dataset_len}, real batch {real_batch_size}"
)
    # Under-approximate the optimal batch size
optimal_batch_size = int(np.sqrt(dataset_len))
if optimal_batch_size <= real_batch_size:
return optimal_batch_size, 0
else:
return (real_batch_size, optimal_batch_size // real_batch_size)
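# Worked example: dataset_len=10_000 and real_batch_size=64 give
# int(sqrt(10_000)) = 100 > 64, so the function returns (64, 100 // 64) = (64, 1):
# keep batches of 64 and accumulate 1 virtual step per real step.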
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def build_model(text_field):
INPUT_DIM = len(text_field.vocab)
word_embeddings = text_field.vocab.vectors
PAD_IDX = text_field.vocab.stoi[text_field.pad_token]
UNK_IDX = text_field.vocab.stoi[text_field.unk_token]
if FLAGS.task == "sentiment":
output_dim = 1
elif FLAGS.task == "product":
output_dim = 11
if FLAGS.model == "lstm":
model = models.LSTMClassifier(
batch_size=FLAGS.batch_size,
output_size=output_dim,
hidden_size=FLAGS.hidden_dim,
vocab_size=INPUT_DIM,
embedding_length=FLAGS.embedding_dim,
weights=word_embeddings,
dropout=FLAGS.dropout,
dp=FLAGS.dp,
)
elif FLAGS.model == "bow":
model = models.NBOW(
input_dim=word_embeddings.shape[0],
emb_dim=FLAGS.embedding_dim,
output_dim=output_dim,
pad_idx=PAD_IDX,
word_embeddings=word_embeddings,
)
elif FLAGS.model == "feedforward":
model = models.FeedforwardModel(
vocab_size=INPUT_DIM,
embedding_dim=FLAGS.embedding_dim,
pad_idx=PAD_IDX,
H_1=FLAGS.hidden_dim_1,
H_2=FLAGS.hidden_dim_2,
D_out=output_dim,
word_embeddings=word_embeddings,
)
elif FLAGS.model == "bert":
# The dataset has been preprocessed with the bert tokenizer, so the indices should be correct
logging.info(f"Pad and unk index {PAD_IDX, UNK_IDX}")
model = models.FineTunedBert.build_new(output_dim=output_dim)
logging.info(
f"Model {FLAGS.model} has {count_parameters(model)} trainable parameters."
)
# Bert has its own pretrained embeddings
return model
pretrained_embeddings = text_field.vocab.vectors
model.embedding.weight.data.copy_(pretrained_embeddings)
model.embedding.weight.data[UNK_IDX] = torch.zeros(FLAGS.embedding_dim)
model.embedding.weight.data[PAD_IDX] = torch.zeros(FLAGS.embedding_dim)
logging.info(
f"Model {FLAGS.model} has {count_parameters(model)} trainable parameters."
)
return model
def train(model, iterator, optimizer, criterion, accuracy_fn):
epoch_loss = 0
epoch_acc = 0
model.train()
optimizer.zero_grad()
for i, batch in enumerate(tqdm(iterator)):
# batch = batch.to(FLAGS.device)
if FLAGS.task == "sentiment":
data, label = split_review_batch(
batch,
label_feature="binary_rating",
max_text_len=FLAGS.max_text_len,
include_len=True,
vocab_size=FLAGS.vocab_size,
custom_vocab=(FLAGS.model != "bert"),
)
text_lengths, text = data
elif FLAGS.task == "product":
text, label = split_review_batch(
batch,
label_feature="category",
max_text_len=FLAGS.max_text_len,
vocab_size=FLAGS.vocab_size,
custom_vocab=(FLAGS.model != "bert"),
)
text = text.to(device=FLAGS.device, dtype=torch.long)
label = (
label.to(device=FLAGS.device, dtype=torch.long)
if FLAGS.task == "product"
else label.to(device=FLAGS.device, dtype=torch.float)
)
if FLAGS.model == "lstm":
hidden = model.init_hidden(batch_size=len(batch))
if isinstance(hidden, tuple):
hidden = (
hidden[0].to(FLAGS.device),
hidden[1].to(FLAGS.device),
)
else:
hidden = hidden.to(FLAGS.device)
outputs = model(text, hidden)
elif FLAGS.model == "bert":
PAD_IDX = 0
inputs = {
"input_ids": text,
"labels": label,
"attention_mask": torch.where(
text == PAD_IDX, torch.zeros_like(text), torch.ones_like(text)
),
}
# logging.info(f"Inputs {inputs}")
# The model outputs loss, logits
outputs = model(**inputs)[1]
# logging.info(f"Outputs {outputs}")
else:
outputs = model(text)
# logging.info(f"Outputs {outputs}")
if FLAGS.task == "sentiment":
outputs = outputs.squeeze(1)
loss = criterion(outputs, label)
acc = accuracy_fn(outputs.detach(), label)
loss.backward()
if FLAGS.dp and FLAGS.virtual_batch_multiplier > 1:
            # NOTE: step is not called at every minibatch, so the RDP accountant needs to know this
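            # With a real batch of B and multiplier k, the k-th call below takes
            # one DP step whose clipping and noise are calibrated for the logical
            # batch of k * B samples accumulated through virtual_step() (the same
            # k * B batch size handed to the PrivacyEngine in main()).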
if (i + 1) % FLAGS.virtual_batch_multiplier == 0 or (i + 1) == len(
iterator
):
# For the (virtual_batch_multiplier)th batch, call a clip-noise-step
optimizer.step()
optimizer.zero_grad()
else:
# For the first (virtual_batch_multiplier - 1) batches, just accumulate the gradients
optimizer.virtual_step()
else:
# Regular optimizer step (either non-DP or DP with no virtual step)
optimizer.step()
optimizer.zero_grad()
epoch_loss += loss.item()
# epoch_loss += loss.detach().item()
epoch_acc += acc.item()
return epoch_loss / len(iterator), epoch_acc / len(iterator)
def evaluate(model, iterator, criterion, accuracy_fn):
epoch_loss = 0
epoch_acc = 0
model.eval()
with torch.no_grad():
for batch in iterator:
# batch = batch.to(FLAGS.device)
if FLAGS.task == "sentiment":
data, label = split_review_batch(
batch,
label_feature="binary_rating",
max_text_len=FLAGS.max_text_len,
include_len=True,
vocab_size=FLAGS.vocab_size,
custom_vocab=(FLAGS.model != "bert"),
)
text_lengths, text = data
elif FLAGS.task == "product":
text, label = split_review_batch(
batch,
label_feature="category",
max_text_len=FLAGS.max_text_len,
vocab_size=FLAGS.vocab_size,
custom_vocab=(FLAGS.model != "bert"),
)
text = text.to(device=FLAGS.device, dtype=torch.long)
label = (
label.to(device=FLAGS.device, dtype=torch.long)
if FLAGS.task == "product"
else label.to(device=FLAGS.device, dtype=torch.float)
)
if FLAGS.model == "lstm":
hidden = model.init_hidden(batch_size=len(batch))
if isinstance(hidden, tuple):
hidden = (
hidden[0].to(FLAGS.device),
hidden[1].to(FLAGS.device),
)
else:
hidden = hidden.to(FLAGS.device)
outputs = model(text, hidden)
elif FLAGS.model == "bert":
PAD_IDX = 0
inputs = {
"input_ids": text,
"labels": label,
"attention_mask": torch.where(
text == PAD_IDX, torch.zeros_like(text), torch.ones_like(text)
),
}
outputs = model(**inputs)[1]
else:
outputs = model(text)
if FLAGS.task == "sentiment":
outputs = outputs.squeeze(1)
# print(f"Training. Outputs: {outputs}, labels: {batch.label}")
loss = criterion(outputs, label)
acc = accuracy_fn(outputs, label)
epoch_loss += loss.item()
epoch_acc += acc.item()
return epoch_loss / len(iterator), epoch_acc / len(iterator)
def train_validate(
train_data, valid_data, model, optimizer, criterion, accuracy_fn, scheduler
):
validation_accuracy_epochs = []
validation_loss_epochs = []
training_loss_epochs = []
training_accuracy_epochs = []
logging.info(f"n workers: {FLAGS.n_workers}")
train_iterator = torch.utils.data.DataLoader(
train_data,
batch_size=FLAGS.batch_size,
shuffle=True,
num_workers=FLAGS.n_workers,
drop_last=True,
)
valid_iterator = torch.utils.data.DataLoader(
valid_data,
batch_size=FLAGS.batch_size,
shuffle=True,
num_workers=FLAGS.n_workers,
drop_last=False,
)
criterion = criterion.to(FLAGS.device)
best_valid_loss = float("inf")
for epoch in range(FLAGS.n_epochs):
start_time = time.time()
logging.info(f"Starting epoch {epoch + 1}.")
train_loss, train_acc = train(
model, train_iterator, optimizer, criterion, accuracy_fn
)
valid_loss, valid_acc = evaluate(model, valid_iterator, criterion, accuracy_fn)
end_time = time.time()
epoch_mins, epoch_secs = epoch_time(start_time, end_time)
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
torch.save(model.state_dict(), "tut2-model.pt")
logging.info(f"Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s")
logging.info(
f"\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%"
)
scheduler.step(train_loss)
logging.info(
f"\t Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_acc*100:.2f}%"
)
validation_accuracy_epochs.append(valid_acc)
validation_loss_epochs.append(valid_loss)
training_loss_epochs.append(train_loss)
training_accuracy_epochs.append(train_acc)
return (
training_loss_epochs,
training_accuracy_epochs,
validation_loss_epochs,
validation_accuracy_epochs,
)
def main(argv):
start_time = time.time()
# Convert flags for the epsilon = -1 shortcut
if FLAGS.dp and FLAGS.epsilon < 0 and FLAGS.noise < 0:
FLAGS.dp = False
# No multiprocessing for large datasets (save RAM)
if FLAGS.n_blocks > 50_000:
logging.info(f"Large dataset, we use a single thread for the loader.")
FLAGS.n_workers = 0
# Build the dataset, either event level or user level
train_data, test_data, valid_data, text_field = build_split_dataset()
logging.info(
f"Number of samples for training: {len(train_data)}, validation: {len(valid_data)} and testing: {len(test_data)}"
)
# Adapt the batch size and the virtual step size, unless it has been specified manually
if FLAGS.dp and FLAGS.adaptive_batch_size and FLAGS.virtual_batch_multiplier <= 0:
FLAGS.batch_size, FLAGS.virtual_batch_multiplier = compute_optimal_batch_size(
FLAGS.batch_size, len(train_data)
)
logging.info(
f"Using real batch {FLAGS.batch_size} with multiplier {FLAGS.virtual_batch_multiplier}"
)
if not FLAGS.dp:
FLAGS.batch_size = FLAGS.non_dp_batch_size
# Prepare the model and optimizer
model = build_model(text_field).to(FLAGS.device)
logging.info(f"Number of trainable parameters: {count_parameters(model)}")
# optimizer = optim.Adam(model.parameters())
optimizer = optim.AdamW(model.parameters(), lr=FLAGS.learning_rate, eps=1e-8)
scheduler = ReduceLROnPlateau(optimizer, mode="min", patience=3)
# train_it = torch.utils.data.DataLoader(
# train_data,
# batch_size=2048,
# shuffle=False,
# num_workers=FLAGS.n_workers,
# drop_last=False,
# )
# counts = {}
# for i in range(11):
# counts[i] = 0
# for b in train_it:
# for cat in b[:, 3]:
# counts[int(cat)] += 1
# s = sum(counts.values())
# for cat, count in counts.items():
# counts[cat] = count / s
# logging.info(counts)
    if FLAGS.task == "sentiment":
        criterion = nn.BCEWithLogitsLoss().to(FLAGS.device)
        accuracy_fn = binary_accuracy
    elif FLAGS.task == "product":
        # Empirical class frequencies from the preprocessed dataset
        # (computed by the commented-out counting loop above):
        # automotive: 0.03036145803296712
        # books: 0.41258122723567553
        # cds: 0.012897189083383703
        # clothing: 0.2025265712144095
        # games: 0.031613111956201506
        # groceries: 0.01949595483554337
        # home: 0.119920985593197
        # movies: 0.0484712255807162
        # pets: 0.03665525816121956
        # sports: 0.04961580907019007
        # tools: 0.035861209236496445
        # criterion = nn.CrossEntropyLoss(
        #     weight=torch.Tensor(
        #         [0.05, 0.035, 0.03, 0.035, 0.05, 0.02, 0.12, 0.01, 0.03, 0.20, 0.41]
        #     )
        # )
        criterion = nn.CrossEntropyLoss()
        accuracy_fn = multiclass_accuracy
# Plug Opacus if DP training is activated
if FLAGS.dp:
if FLAGS.noise >= 0:
logging.info(f"User-provided noise: {FLAGS.noise}.")
else:
logging.info("Computing noise for the given parameters.")
FLAGS.noise = compute_noise_from_target_epsilon(
target_epsilon=FLAGS.epsilon,
target_delta=FLAGS.delta,
epochs=FLAGS.n_epochs,
batch_size=FLAGS.batch_size * FLAGS.virtual_batch_multiplier
if FLAGS.virtual_batch_multiplier > 0
else FLAGS.batch_size,
dataset_size=len(train_data),
alphas=ALPHAS,
)
logging.info(f"Noise computed from RDP budget: {FLAGS.noise}.")
# NOTE: when user-level DP is activated, the training dataset __len__ method returns
# the number of users, and the DataLoader calls the batch-of-user method that overrides
# the regular __getitem__ method
# WARNING: fishy non-DP adaptive clipping
privacy_engine = opacus.PrivacyEngine(
module=model,
batch_size=FLAGS.batch_size * FLAGS.virtual_batch_multiplier
if FLAGS.virtual_batch_multiplier > 0
else FLAGS.batch_size,
sample_size=len(train_data),
alphas=ALPHAS,
noise_multiplier=FLAGS.noise,
max_grad_norm=FLAGS.max_grad_norm,
experimental=bool(FLAGS.dynamic_clipping),
clipping_method=FLAGS.dynamic_clipping,
clip_per_layer=bool(FLAGS.per_layer_clipping),
)
privacy_engine.attach(optimizer)
# Do the actual training
t = time.time()
(
training_loss_epochs,
training_accuracy_epochs,
validation_loss_epochs,
validation_accuracy_epochs,
) = train_validate(
train_data, valid_data, model, optimizer, criterion, accuracy_fn, scheduler
)
training_time = time.time() - t
if FLAGS.dp:
epsilon_consumed, best_alpha = optimizer.privacy_engine.get_privacy_spent(
FLAGS.delta
)
epsilon_consumed = float(epsilon_consumed)
best_alpha = float(best_alpha)
logging.info(f"Best alpha: {best_alpha}")
rdp_epsilons_consumed = (
optimizer.privacy_engine.get_renyi_divergence()
* optimizer.privacy_engine.steps
).tolist()
logging.info(f"RDP budget consumed: {rdp_epsilons_consumed} for orders.")
# Identical to planned budget when we don't have early stopping
# rdp_epsilon_planned = compute_rdp_sgm(
# epochs=FLAGS.n_epochs,
# batch_size=FLAGS.batch_size * FLAGS.virtual_batch_multiplier
# if FLAGS.virtual_batch_multiplier > 0
# else FLAGS.batch_size,
# dataset_size=len(train_data),
# noise=FLAGS.noise,
# alphas=ALPHAS,
# )
# logging.info(f"Planned RDP budget: {rdp_epsilon_planned}")
else:
epsilon_consumed = None
rdp_epsilons_consumed = None
best_alpha = None
# Evaluate the model (non-DP evaluation here)
testing_size = len(test_data)
test_iterator = torch.utils.data.DataLoader(
test_data,
batch_size=FLAGS.batch_size,
shuffle=True,
num_workers=FLAGS.n_workers,
drop_last=False,
)
final_loss, final_accuracy = evaluate(model, test_iterator, criterion, accuracy_fn)
# Collect the metrics and the logs
logs = {
"training_time": training_time,
"total_time": time.time() - start_time,
"test_size": testing_size,
"n_trainable_parameters": count_parameters(model),
}
# Update the logs with the training data
if isinstance(train_data, UserTimeLevelDataset):
logs["train_size"] = train_data.get_n_events()
logs["n_train_users"] = len(train_data)
else:
logs["train_size"] = len(train_data)
logs.update(
flags_to_dict(dataset_args, model_args, training_args)
) # Dump the configuration flags
metrics = {
"accuracy": final_accuracy,
"training_loss_epochs": training_loss_epochs,
"training_accuracy_epochs": training_accuracy_epochs,
"validation_loss_epochs": validation_loss_epochs,
"validation_accuracy_epochs": validation_accuracy_epochs,
"loss": final_loss,
"epsilon": epsilon_consumed,
"target_epsilon": FLAGS.epsilon,
"alphas": ALPHAS,
"rdp_epsilons": rdp_epsilons_consumed,
"best_alpha": best_alpha,
# "dataset_files": os.listdir(FLAGS.dataset_dir),
}
    # Save the outputs, or fold the metrics into the logs.
    # (Separating them is not useful for our experiments.)
if FLAGS.metrics_path != "":
save_yaml(FLAGS.metrics_path, metrics)
logging.info(f"Saved metrics: {FLAGS.metrics_path}")
else:
logging.info("Metrics not saved but concatenated to the logs.")
logs.update(metrics)
if FLAGS.log_path != "":
save_yaml(FLAGS.log_path, logs)
logging.info(f"Saved logs: {FLAGS.log_path}")
if FLAGS.model_path != "":
save_model(FLAGS.model_path, model)
logging.info(f"Saved model: {FLAGS.model_path}")
logging.info(logs)
logging.info(metrics)
if __name__ == "__main__":
app.run(main)
|
"""
Provides a cross-platform way to figure out the system uptime.
Should work on damned near any operating system you can realistically expect
to be asked to write Python code for.
If this module is invoked as a stand-alone script, it will print the current
uptime in a human-readable format, or display an error message if it can't,
to standard output.
This file was forked from the uptime project: https://github.com/Cairnarvon/uptime
Copyright (c) 2012, Koen Crolla, All rights reserved.
"""
import os
import sys
import time
import ctypes
import struct
import typing as tp
import xonsh.platform as xp
import xonsh.lazyimps as xlimps
import xonsh.lazyasd as xl
_BOOTTIME: tp.Optional[float] = None
def _uptime_osx():
"""Returns the uptime on mac / darwin."""
global _BOOTTIME
bt = xlimps.macutils.sysctlbyname(b"kern.boottime", return_str=False)
if len(bt) == 4:
bt = struct.unpack_from("@hh", bt)
elif len(bt) == 8:
bt = struct.unpack_from("@ii", bt)
elif len(bt) == 16:
bt = struct.unpack_from("@qq", bt)
else:
raise ValueError("length of boot time not understood: " + repr(bt))
bt = bt[0] + bt[1] * 1e-6
if bt == 0.0:
return None
_BOOTTIME = bt
return time.time() - bt
def _uptime_linux():
"""Returns uptime in seconds or None, on Linux."""
# With procfs
try:
with open("/proc/uptime") as f:
up = float(f.readline().split()[0])
return up
except (OSError, ValueError):
pass
buf = ctypes.create_string_buffer(128) # 64 suffices on 32-bit, whatever.
if xp.LIBC.sysinfo(buf) < 0:
return None
up = struct.unpack_from("@l", buf.raw)[0]
if up < 0:
up = None
return up
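# Illustrative example (values are hypothetical): on a host whose
# /proc/uptime reads "12345.67 67890.12", _uptime_linux() returns 12345.67,
# the seconds elapsed since boot; the second field (idle time) is ignored.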
def _boottime_linux():
"""A way to figure out the boot time directly on Linux."""
global _BOOTTIME
try:
with open("/proc/stat") as f:
for line in f:
if line.startswith("btime"):
_BOOTTIME = float(line.split()[1])
return _BOOTTIME
except (OSError, IndexError):
return None
def _uptime_amiga():
"""Returns uptime in seconds or None, on AmigaOS."""
global _BOOTTIME
try:
_BOOTTIME = os.stat("RAM:").st_ctime
return time.time() - _BOOTTIME
except (NameError, OSError):
return None
def _uptime_beos():
"""Returns uptime in seconds on None, on BeOS/Haiku."""
if not hasattr(xp.LIBC, "system_time"):
return None
xp.LIBC.system_time.restype = ctypes.c_int64
return xp.LIBC.system_time() / 1000000.0
def _uptime_bsd():
"""Returns uptime in seconds or None, on BSD (including OS X)."""
global _BOOTTIME
if not hasattr(xp.LIBC, "sysctlbyname"):
# Not BSD.
return None
# Determine how much space we need for the response.
sz = ctypes.c_uint(0)
xp.LIBC.sysctlbyname("kern.boottime", None, ctypes.byref(sz), None, 0)
if sz.value != struct.calcsize("@LL"):
# Unexpected, let's give up.
return None
# For real now.
buf = ctypes.create_string_buffer(sz.value)
xp.LIBC.sysctlbyname("kern.boottime", buf, ctypes.byref(sz), None, 0)
sec, usec = struct.unpack_from("@LL", buf.raw)
    # OS X disagrees about what that second value is.
if usec > 1000000:
usec = 0.0
_BOOTTIME = sec + usec / 1000000.0
up = time.time() - _BOOTTIME
if up < 0:
up = None
return up
def _uptime_minix():
"""Returns uptime in seconds or None, on MINIX."""
try:
with open("/proc/uptime") as f:
up = float(f.read())
return up
except (OSError, ValueError):
return None
def _uptime_plan9():
"""Returns uptime in seconds or None, on Plan 9."""
# Apparently Plan 9 only has Python 2.2, which I'm not prepared to
# support. Maybe some Linuxes implement /dev/time, though, someone was
# talking about it somewhere.
try:
# The time file holds one 32-bit number representing the sec-
# onds since start of epoch and three 64-bit numbers, repre-
# senting nanoseconds since start of epoch, clock ticks, and
# clock frequency.
# -- cons(3)
with open("/dev/time") as f:
s, ns, ct, cf = f.read().split()
return float(ct) / float(cf)
except (OSError, ValueError):
return None
def _uptime_solaris():
"""Returns uptime in seconds or None, on Solaris."""
global _BOOTTIME
try:
kstat = ctypes.CDLL("libkstat.so")
except (AttributeError, OSError):
return None
# kstat doesn't have uptime, but it does have boot time.
# Unfortunately, getting at it isn't perfectly straightforward.
# First, let's pretend to be kstat.h
# Constant
KSTAT_STRLEN = 31 # According to every kstat.h I could find.
# Data structures
class anon_union(ctypes.Union):
# The ``value'' union in kstat_named_t actually has a bunch more
# members, but we're only using it for boot_time, so we only need
# the padding and the one we're actually using.
_fields_ = [("c", ctypes.c_char * 16), ("time", ctypes.c_int)]
class kstat_named_t(ctypes.Structure):
_fields_ = [
("name", ctypes.c_char * KSTAT_STRLEN),
("data_type", ctypes.c_char),
("value", anon_union),
]
# Function signatures
kstat.kstat_open.restype = ctypes.c_void_p
kstat.kstat_lookup.restype = ctypes.c_void_p
kstat.kstat_lookup.argtypes = [
ctypes.c_void_p,
ctypes.c_char_p,
ctypes.c_int,
ctypes.c_char_p,
]
kstat.kstat_read.restype = ctypes.c_int
kstat.kstat_read.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
kstat.kstat_data_lookup.restype = ctypes.POINTER(kstat_named_t)
kstat.kstat_data_lookup.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
# Now, let's do something useful.
# Initialise kstat control structure.
kc = kstat.kstat_open()
if not kc:
return None
# We're looking for unix:0:system_misc:boot_time.
    ksp = kstat.kstat_lookup(kc, b"unix", 0, b"system_misc")
if ksp and kstat.kstat_read(kc, ksp, None) != -1:
        data = kstat.kstat_data_lookup(ksp, b"boot_time")
if data:
_BOOTTIME = data.contents.value.time
# Clean-up.
kstat.kstat_close(kc)
if _BOOTTIME is not None:
return time.time() - _BOOTTIME
return None
def _uptime_syllable():
"""Returns uptime in seconds or None, on Syllable."""
global _BOOTTIME
try:
_BOOTTIME = os.stat("/dev/pty/mst/pty0").st_mtime
return time.time() - _BOOTTIME
except (NameError, OSError):
return None
def _uptime_windows():
"""
Returns uptime in seconds or None, on Windows. Warning: may return
incorrect answers after 49.7 days on versions older than Vista.
"""
if hasattr(xp.LIBC, "GetTickCount64"):
# Vista/Server 2008 or later.
xp.LIBC.GetTickCount64.restype = ctypes.c_uint64
return xp.LIBC.GetTickCount64() / 1000.0
if hasattr(xp.LIBC, "GetTickCount"):
# WinCE and Win2k or later; gives wrong answers after 49.7 days.
xp.LIBC.GetTickCount.restype = ctypes.c_uint32
return xp.LIBC.GetTickCount() / 1000.0
return None
@xl.lazyobject
def _UPTIME_FUNCS():
return {
"amiga": _uptime_amiga,
"aros12": _uptime_amiga,
"beos5": _uptime_beos,
"cygwin": _uptime_linux,
"darwin": _uptime_osx,
"haiku1": _uptime_beos,
"linux": _uptime_linux,
"linux-armv71": _uptime_linux,
"linux2": _uptime_linux,
"minix3": _uptime_minix,
"sunos5": _uptime_solaris,
"syllable": _uptime_syllable,
"win32": _uptime_windows,
"wince": _uptime_windows,
}
def uptime():
"""Returns uptime in seconds if even remotely possible, or None if not."""
if _BOOTTIME is not None:
return time.time() - _BOOTTIME
up = _UPTIME_FUNCS.get(sys.platform, _uptime_bsd)()
if up is None:
up = (
_uptime_bsd()
or _uptime_plan9()
or _uptime_linux()
or _uptime_windows()
or _uptime_solaris()
or _uptime_beos()
or _uptime_amiga()
or _uptime_syllable()
or _uptime_osx()
)
return up
def boottime():
"""Returns boot time if remotely possible, or None if not."""
global _BOOTTIME
if _BOOTTIME is None:
up = uptime()
if up is None:
return None
_BOOTTIME = time.time() - up
return _BOOTTIME
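# Minimal usage sketch (assumes this module is importable as ``uptime``):
#
#   import uptime
#   up = uptime.uptime()
#   if up is not None:
#       print("up %.0f seconds, booted at %s" % (up, uptime.boottime()))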
|
import numpy as np
from . import dtypes, nputils, utils
from .duck_array_ops import _dask_or_eager_func, count, fillna, isnull, where_method
from .pycompat import dask_array_type
try:
import dask.array as dask_array
except ImportError:
dask_array = None
def _replace_nan(a, val):
"""
replace nan in a by val, and returns the replaced array and the nan
position
"""
mask = isnull(a)
return where_method(val, mask, a), mask
def _maybe_null_out(result, axis, mask, min_count=1):
"""
xarray version of pandas.core.nanops._maybe_null_out
"""
if hasattr(axis, "__len__"): # if tuple or list
        raise ValueError(
            "min_count is not available for reduction with more than one dimension."
        )
if axis is not None and getattr(result, "ndim", False):
null_mask = (mask.shape[axis] - mask.sum(axis) - min_count) < 0
if null_mask.any():
dtype, fill_value = dtypes.maybe_promote(result.dtype)
result = result.astype(dtype)
result[null_mask] = fill_value
elif getattr(result, "dtype", None) not in dtypes.NAT_TYPES:
null_mask = mask.size - mask.sum()
if null_mask < min_count:
result = np.nan
return result
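# Illustrative example (not from the test suite): with
# a = np.array([np.nan, np.nan, 1.0]) only one valid element remains, so
# nansum(a, min_count=2) masks the result to NaN, while min_count=1 keeps 1.0.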
def _nan_argminmax_object(func, fill_value, value, axis=None, **kwargs):
""" In house nanargmin, nanargmax for object arrays. Always return integer
type
"""
valid_count = count(value, axis=axis)
value = fillna(value, fill_value)
data = _dask_or_eager_func(func)(value, axis=axis, **kwargs)
# TODO This will evaluate dask arrays and might be costly.
if (valid_count == 0).any():
raise ValueError("All-NaN slice encountered")
return data
def _nan_minmax_object(func, fill_value, value, axis=None, **kwargs):
""" In house nanmin and nanmax for object array """
valid_count = count(value, axis=axis)
filled_value = fillna(value, fill_value)
data = getattr(np, func)(filled_value, axis=axis, **kwargs)
if not hasattr(data, "dtype"): # scalar case
data = fill_value if valid_count == 0 else data
# we've computed a single min, max value of type object.
# don't let np.array turn a tuple back into an array
return utils.to_0d_object_array(data)
return where_method(data, valid_count != 0)
def nanmin(a, axis=None, out=None):
if a.dtype.kind == "O":
return _nan_minmax_object("min", dtypes.get_pos_infinity(a.dtype), a, axis)
module = dask_array if isinstance(a, dask_array_type) else nputils
return module.nanmin(a, axis=axis)
def nanmax(a, axis=None, out=None):
if a.dtype.kind == "O":
return _nan_minmax_object("max", dtypes.get_neg_infinity(a.dtype), a, axis)
module = dask_array if isinstance(a, dask_array_type) else nputils
return module.nanmax(a, axis=axis)
def nanargmin(a, axis=None):
if a.dtype.kind == "O":
fill_value = dtypes.get_pos_infinity(a.dtype)
return _nan_argminmax_object("argmin", fill_value, a, axis=axis)
module = dask_array if isinstance(a, dask_array_type) else nputils
return module.nanargmin(a, axis=axis)
def nanargmax(a, axis=None):
if a.dtype.kind == "O":
fill_value = dtypes.get_neg_infinity(a.dtype)
return _nan_argminmax_object("argmax", fill_value, a, axis=axis)
module = dask_array if isinstance(a, dask_array_type) else nputils
return module.nanargmax(a, axis=axis)
def nansum(a, axis=None, dtype=None, out=None, min_count=None):
a, mask = _replace_nan(a, 0)
result = _dask_or_eager_func("sum")(a, axis=axis, dtype=dtype)
if min_count is not None:
return _maybe_null_out(result, axis, mask, min_count)
else:
return result
def _nanmean_ddof_object(ddof, value, axis=None, dtype=None, **kwargs):
    """In-house nanmean. The ddof argument is used by _nanvar_object."""
valid_count = count(value, axis=axis)
value = fillna(value, 0)
# As dtype inference is impossible for object dtype, we assume float
# https://github.com/dask/dask/issues/3162
if dtype is None and value.dtype.kind == "O":
        dtype = value.dtype if value.dtype.kind in "cf" else float
data = _dask_or_eager_func("sum")(value, axis=axis, dtype=dtype, **kwargs)
data = data / (valid_count - ddof)
return where_method(data, valid_count != 0)
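# Sketch of the ddof mechanics (illustrative): for value = [1.0, 3.0, nan]
# with axis=None, valid_count is 2, so ddof=0 divides the summed 4.0 by 2
# (mean 2.0), while ddof=1 divides by (2 - 1) = 1, which is what
# _nanvar_object relies on for the unbiased variance estimate.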
def nanmean(a, axis=None, dtype=None, out=None):
if a.dtype.kind == "O":
return _nanmean_ddof_object(0, a, axis=axis, dtype=dtype)
if isinstance(a, dask_array_type):
return dask_array.nanmean(a, axis=axis, dtype=dtype)
return np.nanmean(a, axis=axis, dtype=dtype)
def nanmedian(a, axis=None, out=None):
return _dask_or_eager_func("nanmedian", eager_module=nputils)(a, axis=axis)
def _nanvar_object(value, axis=None, ddof=0, keepdims=False, **kwargs):
value_mean = _nanmean_ddof_object(
ddof=0, value=value, axis=axis, keepdims=True, **kwargs
)
squared = (value.astype(value_mean.dtype) - value_mean) ** 2
return _nanmean_ddof_object(ddof, squared, axis=axis, keepdims=keepdims, **kwargs)
def nanvar(a, axis=None, dtype=None, out=None, ddof=0):
if a.dtype.kind == "O":
return _nanvar_object(a, axis=axis, dtype=dtype, ddof=ddof)
return _dask_or_eager_func("nanvar", eager_module=nputils)(
a, axis=axis, dtype=dtype, ddof=ddof
)
def nanstd(a, axis=None, dtype=None, out=None, ddof=0):
return _dask_or_eager_func("nanstd", eager_module=nputils)(
a, axis=axis, dtype=dtype, ddof=ddof
)
def nanprod(a, axis=None, dtype=None, out=None, min_count=None):
a, mask = _replace_nan(a, 1)
result = _dask_or_eager_func("nanprod")(a, axis=axis, dtype=dtype, out=out)
if min_count is not None:
return _maybe_null_out(result, axis, mask, min_count)
else:
return result
def nancumsum(a, axis=None, dtype=None, out=None):
return _dask_or_eager_func("nancumsum", eager_module=nputils)(
a, axis=axis, dtype=dtype
)
def nancumprod(a, axis=None, dtype=None, out=None):
return _dask_or_eager_func("nancumprod", eager_module=nputils)(
a, axis=axis, dtype=dtype
)
|
import random
import os
def move_all(data_type, shape):
dirpath = os.path.join(data_type, shape)
os.makedirs(dirpath, exist_ok=True)
for filename in os.listdir(shape):
if filename.endswith('.png'):
os.rename(os.path.join(shape, filename),
os.path.join(data_type, shape, filename))
def move_data(data_type, shape, count):
dirpath = os.path.join(data_type, shape)
os.makedirs(dirpath, exist_ok=True)
for x in random.sample(range(1, 3700), count):
filename = '{}.png'.format(x)
os.rename(os.path.join(shape, filename),
os.path.join(data_type, shape, filename))
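# The calls below carve out a fixed split per shape class: move_data sends
# "count" randomly sampled images (picked from indices 1..3699) to
# train/<shape>, and move_all then sweeps every remaining .png into
# test/<shape>.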
move_data('train', 'circle', 3000)
move_data('train', 'square', 3000)
move_data('train', 'star', 3000)
move_data('train', 'triangle', 3000)
move_all('test', 'circle')
move_all('test', 'square')
move_all('test', 'star')
move_all('test', 'triangle')
|
import torch
#from imsitu_encoder_verbq import imsitu_encoder
from imsitu_encoder_roleqverbq_embdhz import imsitu_encoder
from imsitu_loader import imsitu_loader_roleq_updated
from imsitu_scorer_log import imsitu_scorer
import json
import model_verbq_working
import os
import utils
import time
import random
def train(model, train_loader, dev_loader, traindev_loader, optimizer, scheduler, max_epoch, model_dir, encoder, gpu_mode, clip_norm, lr_max, model_name, args, eval_frequency=4):
model.train()
train_loss = 0
total_steps = 0
print_freq = 400
dev_score_list = []
time_all = time.time()
    if model.gpu_mode >= 0:
        ngpus = 2
        device_array = list(range(ngpus))
        pmodel = torch.nn.DataParallel(model, device_ids=device_array)
    else:
        pmodel = model
top1 = imsitu_scorer(encoder, 1, 3)
top5 = imsitu_scorer(encoder, 5, 3)
for epoch in range(max_epoch):
        # Batch shapes: batch*3*H*W images, batch*504 verbs, batch*6*190 role
        # questions, batch*3*6*label_count labels.
mx = len(train_loader)
for i, (id, img, verb, labels) in enumerate(train_loader):
#print("epoch{}-{}/{} batches\r".format(epoch,i+1,mx)) ,
total_steps += 1
if gpu_mode >= 0:
img = torch.autograd.Variable(img.cuda())
verb = torch.autograd.Variable(verb.cuda())
labels = torch.autograd.Variable(labels.cuda())
else:
img = torch.autograd.Variable(img)
verb = torch.autograd.Variable(verb)
labels = torch.autograd.Variable(labels)
verb_predict, loss = pmodel(img, verb, labels)
            if gpu_mode >= 0:
                loss.mean().backward()
            else:
                loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), clip_norm)
optimizer.step()
optimizer.zero_grad()
train_loss += float(loss.mean())
top1.add_point_verb_only_eval(id, verb_predict, verb)
top5.add_point_verb_only_eval(id, verb_predict, verb)
if total_steps % print_freq == 0:
top1_a = top1.get_average_results()
top5_a = top5.get_average_results()
print ("{},{},{}, {} , {}, loss = {:.2f}, avg loss = {:.2f}"
.format(total_steps-1,epoch,i, utils.format_dict(top1_a, "{:.2f}", "1-"),
utils.format_dict(top5_a,"{:.2f}","5-"), loss.mean().item(),
train_loss / ((total_steps-1)%eval_frequency) ))
if total_steps % eval_frequency == 0:
top1, top5, val_loss = eval(model, dev_loader, encoder, gpu_mode)
model.train()
top1_avg = top1.get_average_results()
top5_avg = top5.get_average_results()
avg_score = top1_avg["verb"] + top1_avg["value"] + top1_avg["value-all"] + top5_avg["verb"] + \
top5_avg["value"] + top5_avg["value-all"]
avg_score /= 8
print ('Dev {} average :{:.2f} {} {}'.format(total_steps-1, avg_score*100,
utils.format_dict(top1_avg,'{:.2f}', '1-'),
utils.format_dict(top5_avg, '{:.2f}', '5-')))
dev_score_list.append(avg_score)
max_score = max(dev_score_list)
if max_score == dev_score_list[-1]:
torch.save(model.state_dict(), model_dir + "/{}_verbq_iter0_change.model".format( model_name))
print ('New best model saved! {0}'.format(max_score))
print('current train loss', train_loss)
train_loss = 0
top1 = imsitu_scorer(encoder, 1, 3)
top5 = imsitu_scorer(encoder, 5, 3)
del verb_predict, loss, img, verb, labels
print('Epoch ', epoch, ' completed!')
scheduler.step()
def eval(model, dev_loader, encoder, gpu_mode, write_to_file = False):
model.eval()
val_loss = 0
print ('evaluating model...')
top1 = imsitu_scorer(encoder, 1, 3, write_to_file)
top5 = imsitu_scorer(encoder, 5, 3)
with torch.no_grad():
mx = len(dev_loader)
for i, (img_id, img, verb, labels) in enumerate(dev_loader):
#print("{}/{} batches\r".format(i+1,mx)) ,
'''im_data = torch.squeeze(im_data,0)
im_info = torch.squeeze(im_info,0)
gt_boxes = torch.squeeze(gt_boxes,0)
num_boxes = torch.squeeze(num_boxes,0)
verb = torch.squeeze(verb,0)
roles = torch.squeeze(roles,0)
labels = torch.squeeze(labels,0)'''
if gpu_mode >= 0:
img = torch.autograd.Variable(img.cuda())
verb = torch.autograd.Variable(verb.cuda())
labels = torch.autograd.Variable(labels.cuda())
else:
img = torch.autograd.Variable(img)
verb = torch.autograd.Variable(verb)
labels = torch.autograd.Variable(labels)
verb_predict, _= model(img, verb, labels)
top1.add_point_verb_only_eval(img_id, verb_predict, verb)
top5.add_point_verb_only_eval(img_id, verb_predict, verb)
del img, verb, labels
    # Loss is not computed in verb-only evaluation, so report 0 for it.
return top1, top5, 0
def main():
import argparse
parser = argparse.ArgumentParser(description="imsitu VSRL. Training, evaluation and prediction.")
parser.add_argument("--gpuid", default=-1, help="put GPU id > -1 in GPU mode", type=int)
#parser.add_argument("--command", choices = ["train", "eval", "resume", 'predict'], required = True)
parser.add_argument('--resume_training', action='store_true', help='Resume training from the model [resume_model]')
parser.add_argument('--resume_model', type=str, default='', help='The model we resume')
parser.add_argument('--verb_module', type=str, default='', help='pretrained verb module')
parser.add_argument('--role_module', type=str, default='', help='pretrained role module')
parser.add_argument('--train_role', action='store_true', help='cnn fix, verb fix, role train from the scratch')
parser.add_argument('--finetune_verb', action='store_true', help='cnn fix, verb finetune, role train from the scratch')
parser.add_argument('--finetune_cnn', action='store_true', help='cnn finetune, verb finetune, role train from the scratch')
parser.add_argument('--output_dir', type=str, default='./trained_models', help='Location to output the model')
parser.add_argument('--evaluate', action='store_true', help='Only use the testing mode')
parser.add_argument('--test', action='store_true', help='Only use the testing mode')
parser.add_argument('--dataset_folder', type=str, default='./imSitu', help='Location of annotations')
parser.add_argument('--imgset_dir', type=str, default='./resized_256', help='Location of original images')
parser.add_argument('--frcnn_feat_dir', type=str, help='Location of output from detectron')
#todo: train role module separately with gt verbs
args = parser.parse_args()
batch_size = 640
#lr = 5e-6
lr = 0.0001
lr_max = 5e-4
lr_gamma = 0.1
lr_step = 15
clip_norm = 0.5
weight_decay = 1e-4
n_epoch = 500
n_worker = 3
#dataset_folder = 'imSitu'
#imgset_folder = 'resized_256'
dataset_folder = args.dataset_folder
imgset_folder = args.imgset_dir
    print('model spec: top-down attention with role q')
train_set = json.load(open(dataset_folder + "/updated_train_new.json"))
imsitu_roleq = json.load(open("imsitu_data/imsitu_questions_prev.json"))
verb_templates = json.load(open("imsitu_data/verb_questions_template_new.json"))
encoder = imsitu_encoder(train_set, imsitu_roleq, verb_templates)
model = model_verbq_working.BaseModel(encoder, args.gpuid)
# To group up the features
    cnn_features, role_features = utils.group_features_noun(model)
train_set = imsitu_loader_roleq_updated(imgset_folder, train_set, encoder, model.train_preprocess())
train_loader = torch.utils.data.DataLoader(train_set, batch_size=4, shuffle=True, num_workers=n_worker)
dev_set = json.load(open(dataset_folder +"/dev.json"))
dev_set = imsitu_loader_roleq_updated(imgset_folder, dev_set, encoder, model.dev_preprocess())
dev_loader = torch.utils.data.DataLoader(dev_set, batch_size=4, shuffle=True, num_workers=n_worker)
test_set = json.load(open(dataset_folder +"/test.json"))
test_set = imsitu_loader_roleq_updated(imgset_folder, test_set, encoder, model.dev_preprocess())
test_loader = torch.utils.data.DataLoader(test_set, batch_size=64, shuffle=True, num_workers=n_worker)
traindev_set = json.load(open(dataset_folder +"/dev.json"))
traindev_set = imsitu_loader_roleq_updated(imgset_folder, traindev_set, encoder, model.dev_preprocess())
traindev_loader = torch.utils.data.DataLoader(traindev_set, batch_size=8, shuffle=True, num_workers=n_worker)
#utils.load_net(args.verb_module, [model.verb_module])
#utils.load_net(args.role_module, [model.role_module])
model_name = 'train_full'
if not os.path.exists(args.output_dir):
os.mkdir(args.output_dir)
torch.manual_seed(1234)
if args.gpuid >= 0:
#print('GPU enabled')
model.cuda()
torch.cuda.manual_seed(1234)
torch.backends.cudnn.deterministic = True
optimizer = torch.optim.Adamax([
{'params': cnn_features, 'lr': 5e-5},
{'params': role_features}
], lr=1e-3)
#optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
#scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=lr_step, gamma=lr_gamma)
#gradient clipping, grad check
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)
if args.evaluate:
top1, top5, val_loss = eval(model, dev_loader, encoder, args.gpuid, write_to_file = True)
top1_avg = top1.get_average_results()
top5_avg = top5.get_average_results()
avg_score = top1_avg["verb"] + top1_avg["value"] + top1_avg["value-all"] + top5_avg["verb"] + \
top5_avg["value"] + top5_avg["value-all"] + top5_avg["value*"] + top5_avg["value-all*"]
avg_score /= 8
print ('Dev average :{:.2f} {} {}'.format( avg_score*100,
utils.format_dict(top1_avg,'{:.2f}', '1-'),
utils.format_dict(top5_avg, '{:.2f}', '5-')))
#write results to csv file
role_dict = top1.role_dict
fail_val_all = top1.value_all_dict
pass_val_dict = top1.vall_all_correct
with open('role_pred_data.json', 'w') as fp:
json.dump(role_dict, fp, indent=4)
with open('fail_val_all.json', 'w') as fp:
json.dump(fail_val_all, fp, indent=4)
with open('pass_val_all.json', 'w') as fp:
json.dump(pass_val_dict, fp, indent=4)
print('Writing predictions to file completed !')
elif args.test:
top1, top5, val_loss = eval(model, test_loader, encoder, args.gpuid, write_to_file = True)
top1_avg = top1.get_average_results()
top5_avg = top5.get_average_results()
avg_score = top1_avg["verb"] + top1_avg["value"] + top1_avg["value-all"] + top5_avg["verb"] + \
top5_avg["value"] + top5_avg["value-all"] + top5_avg["value*"] + top5_avg["value-all*"]
avg_score /= 8
print ('Test average :{:.2f} {} {}'.format( avg_score*100,
utils.format_dict(top1_avg,'{:.2f}', '1-'),
utils.format_dict(top5_avg, '{:.2f}', '5-')))
else:
print('Model training started!')
train(model, train_loader, dev_loader, traindev_loader, optimizer, scheduler, n_epoch, args.output_dir, encoder, args.gpuid, clip_norm, lr_max, model_name, args)
if __name__ == "__main__":
main()
|
import pytest
from app import create_app
@pytest.fixture
def request_header_secret():
return "dev"
@pytest.fixture
def request_body_positive():
return {"query": "I am having a great day!"}
@pytest.fixture
def request_body_negative():
return {"query": "I am feeling sad today"}
@pytest.fixture
def http_error_METHOD_NOT_ALLOWED():
return 405
@pytest.fixture
def http_error_BAD_REQUEST():
return 400
@pytest.fixture
def http_OK():
return 200
@pytest.fixture
def flask_client():
app = create_app()
with app.test_client() as client:
yield client
## TESTS
#########
# Index/ Health Check Test
def test_health_check(flask_client):
res = flask_client.get("/")
assert b"up & running" in res.data
## OK REQUESTS Tests
####################
def test_predict_positive(flask_client, http_OK, request_body_positive, request_header_secret):
res = flask_client.post("/predict", json=request_body_positive, headers={"Secret-Key": request_header_secret})
assert res.status_code == http_OK
assert b"POSITIVE" in res.data
def test_predict_negative(flask_client, http_OK, request_body_negative, request_header_secret):
res = flask_client.post("/predict", json=request_body_negative, headers={"Secret-Key": request_header_secret})
assert res.status_code == http_OK
assert b"NEGATIVE" in res.data
## BAD REQUESTS Tests
####################
def test_GET_instead_POST(flask_client, http_error_METHOD_NOT_ALLOWED, request_header_secret):
res = flask_client.get("/predict", json={"query": ""}, headers={"Secret-Key": request_header_secret})
assert res.status_code == http_error_METHOD_NOT_ALLOWED
## Body
def test_None_body(flask_client, http_error_BAD_REQUEST, request_header_secret):
res = flask_client.post("/predict", json=None, headers={"Secret-Key": request_header_secret})
assert res.status_code == http_error_BAD_REQUEST
def test_empty_body(flask_client, http_error_BAD_REQUEST, request_header_secret):
res = flask_client.post("/predict", json={}, headers={"Secret-Key": request_header_secret})
assert res.status_code == http_error_BAD_REQUEST
## Query
def test_none_query(flask_client, http_error_BAD_REQUEST, request_header_secret):
res = flask_client.post("/predict", json={"query": None}, headers={"Secret-Key": request_header_secret})
assert res.status_code == http_error_BAD_REQUEST
def test_empty_query(flask_client, http_error_BAD_REQUEST, request_header_secret):
res = flask_client.post("/predict", json={"query": ""}, headers={"Secret-Key": request_header_secret})
assert res.status_code == http_error_BAD_REQUEST
def test_non_string_numerical(flask_client, http_error_BAD_REQUEST, request_header_secret):
res = flask_client.post("/predict", json={"query": 456123}, headers={"Secret-Key": request_header_secret})
assert res.status_code == http_error_BAD_REQUEST
def test_non_string_object(flask_client, http_error_BAD_REQUEST, request_header_secret):
res = flask_client.post("/predict", json={"query": ["I am happy"]}, headers={"Secret-Key": request_header_secret})
assert res.status_code == http_error_BAD_REQUEST
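# Minimal invocation sketch (assumes this file is collected by pytest and
# that create_app() checks the "Secret-Key" header against the "dev" secret
# used by the fixtures above):
#
#   pytest -q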
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, Simon Dodsley (simon@purestorage.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: purefb_s3user
version_added: '2.8'
short_description: Create or delete FlashBlade Object Store account users
description:
- Create or delete object store account users on a Pure Storage FlashBlade.
author:
- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
options:
state:
description:
- Create or delete object store account user
default: present
choices: [ absent, present ]
type: str
name:
description:
- The name of object store user
type: str
account:
description:
- The name of object store account associated with user
type: str
access_key:
description:
- Create secret access key.
- Key can be exposed using the I(debug) module
type: bool
default: true
extends_documentation_fragment:
- purestorage.fb
'''
EXAMPLES = r'''
- name: Create object store user (with access ID and key) foo in account bar
purefb_s3user:
name: foo
account: bar
fb_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
- debug:
var: ansible_facts.fb_s3user
- name: Delete object store user foo in account bar
purefb_s3user:
name: foo
account: bar
state: absent
fb_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
'''
RETURN = r'''
'''
HAS_PURITY_FB = True
try:
from purity_fb import ObjectStoreAccessKey
except ImportError:
HAS_PURITY_FB = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pure import get_blade, purefb_argument_spec
MIN_REQUIRED_API_VERSION = '1.3'
def get_s3acc(module, blade):
"""Return Object Store Account or None"""
s3acc = None
accts = blade.object_store_accounts.list_object_store_accounts()
for acct in range(0, len(accts.items)):
if accts.items[acct].name == module.params['account']:
s3acc = accts.items[acct]
return s3acc
def get_s3user(module, blade):
"""Return Object Store Account or None"""
full_user = module.params['account'] + "/" + module.params['name']
s3user = None
s3users = blade.object_store_users.list_object_store_users()
for user in range(0, len(s3users.items)):
if s3users.items[user].name == full_user:
s3user = s3users.items[user]
return s3user
def update_s3user(module, blade):
"""Update Object Store User"""
changed = False
s3user_facts = {}
user = module.params['account'] + "/" + module.params['name']
if module.params['access_key']:
try:
result = blade.object_store_access_keys.create_object_store_access_keys(
object_store_access_key=ObjectStoreAccessKey(user={'name': user}))
s3user_facts['fb_s3user'] = {'user': user,
'access_key': result.items[0].secret_access_key,
'access_id': result.items[0].name}
except Exception:
delete_s3user(module, blade)
module.fail_json(msg='Object Store User {0}: Creation failed'.format(user))
changed = True
module.exit_json(changed=changed, ansible_facts=s3user_facts)
def create_s3user(module, blade):
"""Create Object Store Account"""
s3user_facts = {}
changed = False
user = module.params['account'] + "/" + module.params['name']
try:
blade.object_store_users.create_object_store_users(names=[user])
if module.params['access_key']:
try:
result = blade.object_store_access_keys.create_object_store_access_keys(
object_store_access_key=ObjectStoreAccessKey(user={'name': user}))
s3user_facts['fb_s3user'] = {'user': user,
'access_key': result.items[0].secret_access_key,
'access_id': result.items[0].name}
except Exception:
delete_s3user(module, blade)
module.fail_json(msg='Object Store User {0}: Creation failed'.format(user))
changed = True
except Exception:
module.fail_json(msg='Object Store User {0}: Creation failed'.format(user))
module.exit_json(changed=changed, ansible_facts=s3user_facts)
def delete_s3user(module, blade):
"""Delete Object Store Account"""
changed = False
user = module.params['account'] + "/" + module.params['name']
try:
blade.object_store_users.delete_object_store_users(names=[user])
changed = True
except Exception:
        module.fail_json(msg='Object Store User {0}: Deletion failed'.format(module.params['name']))
module.exit_json(changed=changed)
def main():
argument_spec = purefb_argument_spec()
argument_spec.update(dict(
name=dict(required=True, type='str'),
account=dict(required=True, type='str'),
        access_key=dict(default=True, type='bool'),
state=dict(default='present', choices=['present', 'absent']),
))
module = AnsibleModule(argument_spec,
supports_check_mode=False)
if not HAS_PURITY_FB:
module.fail_json(msg='purity_fb sdk is required for this module')
state = module.params['state']
blade = get_blade(module)
versions = blade.api_version.list_versions().versions
if MIN_REQUIRED_API_VERSION not in versions:
module.fail_json(msg='FlashBlade REST version not supported. Minimum version required: {0}'.format(MIN_REQUIRED_API_VERSION))
s3acc = get_s3acc(module, blade)
if not s3acc:
module.fail_json(msg='Object Store Account {0} does not exist'.format(module.params['account']))
s3user = get_s3user(module, blade)
if state == 'absent' and s3user:
delete_s3user(module, blade)
elif state == 'present' and s3user:
update_s3user(module, blade)
elif not s3user and state == 'present':
create_s3user(module, blade)
else:
module.exit_json(changed=False)
if __name__ == '__main__':
main()
|
# Code generated by protoc-gen-twirp_python v7.1.0, DO NOT EDIT.
# source: service.proto
try:
import httplib
from urllib2 import Request, HTTPError, urlopen
except ImportError:
import http.client as httplib
from urllib.request import Request, urlopen
from urllib.error import HTTPError
import json
from google.protobuf import symbol_database as _symbol_database
import sys
_sym_db = _symbol_database.Default()
class TwirpException(httplib.HTTPException):
def __init__(self, code, message, meta):
self.code = code
self.message = message
self.meta = meta
super(TwirpException, self).__init__(message)
@classmethod
def from_http_err(cls, err):
try:
jsonerr = json.load(err)
code = jsonerr["code"]
msg = jsonerr["msg"]
meta = jsonerr.get("meta")
if meta is None:
meta = {}
except:
code = "internal"
msg = "Error from intermediary with HTTP status code {} {}".format(
err.code, httplib.responses[err.code],
)
meta = {}
return cls(code, msg, meta)
class HaberdasherClient(object):
"""
A Haberdasher makes hats for clients.
"""
def __init__(self, server_address):
"""Creates a new client for the Haberdasher service.
Args:
server_address: The address of the server to send requests to, in
the full protocol://host:port form.
"""
if sys.version_info[0] > 2:
self.__target = server_address
else:
self.__target = server_address.encode('ascii')
self.__service_name = "twirp.internal.twirptest.Haberdasher"
def __make_request(self, body, full_method):
req = Request(
url=self.__target + "/twirp" + full_method,
data=body,
headers={"Content-Type": "application/protobuf"},
)
try:
resp = urlopen(req)
except HTTPError as err:
raise TwirpException.from_http_err(err)
return resp.read()
def make_hat(self, size):
"""
MakeHat produces a hat of mysterious, randomly-selected color!
"""
serialize = _sym_db.GetSymbol("twirp.internal.twirptest.Size").SerializeToString
deserialize = _sym_db.GetSymbol("twirp.internal.twirptest.Hat").FromString
full_method = "/{}/{}".format(self.__service_name, "MakeHat")
body = serialize(size)
resp_str = self.__make_request(body=body, full_method=full_method)
return deserialize(resp_str)
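# Minimal usage sketch (assumes the generated service_pb2 module has been
# imported so that the Size/Hat messages are registered in the default
# symbol database):
#
#   client = HaberdasherClient("http://localhost:8080")
#   hat = client.make_hat(size_message)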
|
#!/usr/bin/python
"""
Robert Ramsay <robert.alan.ramsay@gmail.com>
Packing your Dropbox
When you're working with petabytes of data, you have to store files wherever they can fit. All of us here at Dropbox are always searching for more ways to efficiently pack data into smaller and more manageable chunks. The fun begins when you bend the rules a little bit and visualize it in two dimensions.
You'll be given a list of rectangular "files" that you'll need to pack into as small a "Dropbox" as possible. The dimensions of each file will be specified by a tuple (width, height), both of which will be integers. The output of your function should be the area of the smallest rectangular Dropbox that can enclose all of them without any overlap. Files can be rotated 90(deg) if it helps. Bonus points if you can draw pictures of the winning configurations along the way. While drawing pictures, any files sharing dimensions should be considered identical/interchangeable.
Input
Your program must read a small integer N (1 <= N <= 100) from stdin representing the maximum number of files to consider, followed by the width and height of each file, one per line.
Output
Output should be simply be the area of the smallest containing Dropbox. If you want to print pretty pictures, send that to stderr. Only the output on stdout will be judged.
Sample Input
3
8 8
4 3
3 4
Sample Output
88
"""
#from __future__ import print_function
import sys
class DropBox:
w = 0
h = 0
x = 0
y = 0
    def __init__(self, vector=None, w=0, h=0):
if vector:
self.w, self.h = vector
else:
self.w = w
self.h = h
def rotate(self):
t = self.w
self.w = self.h
self.h = t
def align(self):
if self.w > self.h:
self.rotate()
return self.h
#free space = (lowest left x, lowest left y, width, height)
def fit(size, free, box):
x, y, w, h = free
box.x = x
box.y = y
if h < box.h and w < box.w:
# Our box will not fit inside the current freespace.
size = (size[0]+box.w-w, size[1]+box.h-h)
x += box.w
w = 0
h = box.h
elif w < box.w:
size = (size[0] + box.w - w, size[1])
w = box.w
y += box.h
h -= box.h
elif h < box.h:
x += box.w
w -= box.w
    else:
        # The box already fits in the current free space; place it as-is and
        # consume its width. (Rotating a box that already fits can only
        # inflate the enclosing Dropbox; placing it directly reproduces the
        # documented sample area of 88.)
        x += box.w
        w -= box.w
free = (x, y, w, h)
return size, free
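# Hand-checked trace of fit() on the sample input in the docstring: the boxes
# sort to [8x8, 3x4, 3x4]. The 8x8 grows the Dropbox to (8, 8); the first 3x4
# widens it to (11, 8) and leaves a 3x4 free strip, which the second 3x4
# fills exactly, so pack() returns 11 * 8 = 88, matching the sample output.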
def pretty(boxes,w,h):
'''Pretty print the list of boxes'''
print >> sys.stderr, str(w) + 'x' + str(h) + ':'
graph = [[' ' for l in range(h+1)] for m in range(w+1)]
for box in boxes:
try:
# Vertices
graph[box.x][box.y] = '+'
graph[box.x+box.w][box.y] = '+'
graph[box.x][box.y+box.h] = '+'
graph[box.x+box.w][box.y+box.h] = '+'
# Edges
for x in range(box.x+1, box.x+box.w):
graph[x][box.y] = '|'
graph[x][box.y+box.h] = '|'
for y in range(box.y+1, box.y+box.h):
graph[box.x][y] = '-'
graph[box.x+box.w][y] = '-'
except Exception as e:
print >> sys.stderr, "Box (", box.x, box.y, box.w, box.h, ") is outside bounds (", w, h,")"
raise e
print >> sys.stderr, '\n'.join([''.join(row) for row in graph])
def pack(boxes):
    # Align all the boxes and sort them by height, largest to smallest.
boxes.sort(key=lambda box: box.align(), reverse=True)
size = (0, 0)
#free = (left, lower, width, height)
free = (0, 0, 0, 0)
for box in boxes:
size, free = fit(size, free, box)
pretty(boxes, size[0], size[1])
return size[0]*size[1]
class DropNode:
left = None # Left Edge is the parent.
vertex = None # We can store at most one Box
right = None # Right Edge is the child.
direction = [1,0] # direction is the identity ray
    def __init__(self, vertex=None, left=None, right=None):
self.vertex = vertex
self.left = left
if self.left:
self.left.right = self
self.right = right
if self.right:
w = self.right.width()
h = self.right.height()
if self.vertex.w > self.vertex.h:
# An increase in width costs less than an increase in height
# if width is already greater.
self.direction = [0,1]
if w < h:
self.right.rotate()
else:
self.direction = [0,1]
if h < w:
self.right.rotate()
self.right.left = self
def rotate(self):
self.direction.reverse()
if self.vertex:
self.vertex.rotate()
if self.right:
self.right.rotate()
def width(self):
w = 0
if self.vertex is not None:
w = self.vertex.w
if self.right is not None:
if self.direction[0]:
w += self.right.width()
return w
def height(self):
h = 0
if self.vertex is not None:
h = self.vertex.h
if self.right is not None:
if self.direction[1]:
h += self.right.height()
return h
def packtree(node, boxes):
'''This is a recursive pack algorithm, similar to a binary search
tree.'''
if node is None:
node = DropNode()
if not boxes: # Stack empty.
while node.left:
node = node.left
return node # Return root
if node is None: #RootNode
print >> sys.stderr, "root node", boxes[-1]
return packtree(DropNode(boxes.pop(0)), boxes)
if node.vertex is None: # Not sure if I agree with this.
print >> sys.stderr, "curious"
node.vertex = boxes.pop()
return packtree(node, boxes)
# Make comparisons simpler
left = (max(boxes[0].w, boxes[0].h), min(boxes[0].w, boxes[0].h))
w = node.width()
h = node.height()
right = (max(w, h), min(w, h))
print >> sys.stderr, "left", left, "right", right,
if left[0] > right[0]:
print >> sys.stderr, "insert left"
if node.left:
return packtree(node.left, boxes)
else:
return packtree(DropNode(boxes.pop(0),None,node), boxes)
#if left[0] < right[1]:
# print >> sys.stderr, "insert right"
# if node.right:
# return packtree(node.right, boxes)
# else:
# return packtree(DropNode(boxes.pop(0),node),boxes)
print >> sys.stderr, "insert middle"
return packtree(DropNode(boxes.pop(0), node.left, node), boxes)
def prettytree(tree):
'''Pretty print the list of boxes'''
w = tree.width()
h = tree.height()
print >> sys.stderr, str(w) + 'x' + str(h) + ':'
graph = [[' ' for l in range(h+1)] for m in range(w+1)]
vx = 0
vy = 0
i = 0
node = tree
while node.right:
i += 1
print >> sys.stderr, '.',
if node.vertex is None:
print >> sys.stderr, "Empty Vertex"
node = node.right
continue
try:
vw = tree.vertex.w
vh = tree.vertex.h
# Vertices
graph[vx][vy] = '+'
graph[vx+vw][vy] = '+'
graph[vx][vy+vh] = '+'
graph[vx+vw][vy+vh] = '+'
# Edges
for x in range(vx+1, vx+vw):
graph[x][vy] = '|'
graph[x][vy+vh] = '|'
for y in range(vy+1, vy+vh):
graph[vx][y] = '-'
graph[vx+vw][y] = '-'
vx += tree.direction[0]*vw
vy += tree.direction[1]*vh
except Exception as e:
raise e
node = node.right
print >> sys.stderr
print >> sys.stderr, '\n'.join([''.join(row) for row in graph])
if __name__ == '__main__':
import sys
    inp = raw_input()  # Number of boxes
    try:
        boxcount = int(inp)
        if boxcount < 1 or boxcount > 100:
            raise ValueError(inp)
    except ValueError:
        sys.exit("Box count must be between 1 and 100 (inclusive)")
boxes = []
for i in range(boxcount):
inp = raw_input('') #Box: width height
box = DropBox()
try:
w, h = inp.split(" ")
box.w = int(w)
box.h = int(h)
        except ValueError:
            sys.exit("Box definition should be integers separated "
                     "by whitespace")
boxes.append(box)
print(pack(boxes))
sys.exit()
|
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
"""
Basic progressbar example script
adapted for use on MagTag.
"""
import time
import board
import displayio
import digitalio
from adafruit_progressbar.progressbar import HorizontalProgressBar
# use built in display (PyPortal, PyGamer, PyBadge, CLUE, etc.)
# see guide for setting up external displays (TFT / OLED breakouts, RGB matrices, etc.)
# https://learn.adafruit.com/circuitpython-display-support-using-displayio/display-and-display-bus
display = board.DISPLAY
time.sleep(display.time_to_refresh)
# B/up button will be used to increase the progress
up_btn = digitalio.DigitalInOut(board.BUTTON_B)
up_btn.direction = digitalio.Direction.INPUT
up_btn.pull = digitalio.Pull.UP
# C/down button will be used to decrease the progress
down_btn = digitalio.DigitalInOut(board.BUTTON_C)
down_btn.direction = digitalio.Direction.INPUT
down_btn.pull = digitalio.Pull.UP
# Make the display context
splash = displayio.Group()
display.show(splash)
# set progress bar width and height relative to board's display
BAR_WIDTH = display.width - 40
BAR_HEIGHT = 30
x = display.width // 2 - BAR_WIDTH // 2
y = display.height // 3
# Create a new progress_bar object at (x, y)
progress_bar = HorizontalProgressBar(
(x, y),
(BAR_WIDTH, BAR_HEIGHT),
bar_color=0xFFFFFF,
outline_color=0xAAAAAA,
fill_color=0x777777,
)
# Append progress_bar to the splash group
splash.append(progress_bar)
# Get a pseudo-random starting value (derived from uptime) within our min/max range
current_progress = time.monotonic() % 101
print(current_progress)
progress_bar.value = current_progress
# refresh the display
display.refresh()
value_incrementor = 3
prev_up = up_btn.value
prev_down = down_btn.value
while True:
cur_up = up_btn.value
cur_down = down_btn.value
do_refresh = False
# if up_btn was just pressed down
if not cur_up and prev_up:
current_progress += value_incrementor
# Wrap if we get over the maximum value
if current_progress > progress_bar.maximum:
current_progress = progress_bar.minimum
do_refresh = True
if not cur_down and prev_down:
current_progress -= value_incrementor
# Wrap if we get below the minimum value
if current_progress < progress_bar.minimum:
current_progress = progress_bar.maximum
do_refresh = True
if do_refresh:
print(current_progress)
progress_bar.value = current_progress
time.sleep(display.time_to_refresh)
display.refresh()
time.sleep(display.time_to_refresh)
prev_up = cur_up
prev_down = cur_down
|
#!/usr/bin/env python
"""NbConvert is a utility for conversion of .ipynb files.
Command-line interface for the NbConvert conversion utility.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import logging
import sys
import os
import glob
from jupyter_core.application import JupyterApp, base_aliases, base_flags
from traitlets.config import catch_config_error, Configurable
from traitlets import (
Unicode, List, Instance, DottedObjectName, Type, Bool,
default, observe,
)
from traitlets.utils.importstring import import_item
from .exporters.base import get_export_names, get_exporter
from nbconvert import exporters, preprocessors, writers, postprocessors, __version__
from .utils.base import NbConvertBase
from .utils.exceptions import ConversionException
from .utils.io import unicode_stdin_stream
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class DottedOrNone(DottedObjectName):
"""A string holding a valid dotted object name in Python, such as A.b3._c
Also allows for None type.
"""
default_value = u''
def validate(self, obj, value):
if value is not None and len(value) > 0:
return super(DottedOrNone, self).validate(obj, value)
else:
return value
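# For example, DottedOrNone accepts a dotted path such as
# u'nbconvert.postprocessors.serve.ServePostProcessor' as well as an empty
# string or None (used for the optional postprocessor_class trait below).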
nbconvert_aliases = {}
nbconvert_aliases.update(base_aliases)
nbconvert_aliases.update({
'to' : 'NbConvertApp.export_format',
'template' : 'TemplateExporter.template_file',
'writer' : 'NbConvertApp.writer_class',
'post': 'NbConvertApp.postprocessor_class',
'output': 'NbConvertApp.output_base',
'output-dir': 'FilesWriter.build_directory',
'reveal-prefix': 'SlidesExporter.reveal_url_prefix',
'nbformat': 'NotebookExporter.nbformat_version',
})
nbconvert_flags = {}
nbconvert_flags.update(base_flags)
nbconvert_flags.update({
'execute' : (
{'ExecutePreprocessor' : {'enabled' : True}},
"Execute the notebook prior to export."
),
'allow-errors' : (
{'ExecutePreprocessor' : {'allow_errors' : True}},
("Continue notebook execution even if one of the cells throws "
"an error and include the error message in the cell output "
"(the default behaviour is to abort conversion). This flag "
"is only relevant if '--execute' was specified, too.")
),
'stdin' : (
{'NbConvertApp' : {
'from_stdin' : True,
}
},
"read a single notebook file from stdin. Write the resulting notebook with default basename 'notebook.*'"
),
'stdout' : (
{'NbConvertApp' : {'writer_class' : "StdoutWriter"}},
"Write notebook output to stdout instead of files."
),
'inplace' : (
{
'NbConvertApp' : {
'use_output_suffix' : False,
'export_format' : 'notebook',
},
'FilesWriter' : {'build_directory': ''},
},
"""Run nbconvert in place, overwriting the existing notebook (only
relevant when converting to notebook format)"""
),
'clear-output' : (
{
'NbConvertApp' : {
'use_output_suffix' : False,
'export_format' : 'notebook',
},
'FilesWriter' : {'build_directory': ''},
'ClearOutputPreprocessor' : {'enabled' : True},
},
"""Clear output of current file and save in place,
overwriting the existing notebook. """
),
'no-prompt' : (
{'TemplateExporter' : {
'exclude_input_prompt' : True,
'exclude_output_prompt' : True,
}
},
"Exclude input and output prompts from converted document."
),
'no-input' : (
{'TemplateExporter' : {
'exclude_output_prompt' : True,
'exclude_input': True,
}
},
"""Exclude input cells and output prompts from converted document.
This mode is ideal for generating code-free reports."""
),
})
class NbConvertApp(JupyterApp):
"""Application used to convert from notebook file type (``*.ipynb``)"""
version = __version__
name = 'jupyter-nbconvert'
aliases = nbconvert_aliases
flags = nbconvert_flags
@default('log_level')
def _log_level_default(self):
return logging.INFO
classes = List()
@default('classes')
def _classes_default(self):
classes = [NbConvertBase]
for pkg in (exporters, preprocessors, writers, postprocessors):
for name in dir(pkg):
cls = getattr(pkg, name)
if isinstance(cls, type) and issubclass(cls, Configurable):
classes.append(cls)
return classes
description = Unicode(
u"""This application is used to convert notebook files (*.ipynb)
to various other formats.
WARNING: THE COMMANDLINE INTERFACE MAY CHANGE IN FUTURE RELEASES.""")
    output_base = Unicode('', help='''Overwrite base name used for output files.
            Can only be used when converting one notebook at a time.
            ''').tag(config=True)
use_output_suffix = Bool(
True,
help="""Whether to apply a suffix prior to the extension (only relevant
when converting to notebook format). The suffix is determined by
the exporter, and is usually '.nbconvert'."""
).tag(config=True)
output_files_dir = Unicode('{notebook_name}_files',
help='''Directory to copy extra files (figures) to.
'{notebook_name}' in the string will be converted to notebook
basename'''
).tag(config=True)
examples = Unicode(u"""
The simplest way to use nbconvert is
> jupyter nbconvert mynotebook.ipynb
which will convert mynotebook.ipynb to the default format (probably HTML).
You can specify the export format with `--to`.
Options include {formats}.
> jupyter nbconvert --to latex mynotebook.ipynb
Both HTML and LaTeX support multiple output templates. LaTeX includes
'base', 'article' and 'report'. HTML includes 'basic' and 'full'. You
can specify the flavor of the format used.
> jupyter nbconvert --to html --template basic mynotebook.ipynb
You can also pipe the output to stdout, rather than a file
> jupyter nbconvert mynotebook.ipynb --stdout
PDF is generated via latex
> jupyter nbconvert mynotebook.ipynb --to pdf
You can get (and serve) a Reveal.js-powered slideshow
> jupyter nbconvert myslides.ipynb --to slides --post serve
Multiple notebooks can be given at the command line in a couple of
different ways:
> jupyter nbconvert notebook*.ipynb
> jupyter nbconvert notebook1.ipynb notebook2.ipynb
or you can specify the notebooks list in a config file, containing::
c.NbConvertApp.notebooks = ["my_notebook.ipynb"]
> jupyter nbconvert --config mycfg.py
""".format(formats=get_export_names()))
# Writer specific variables
writer = Instance('nbconvert.writers.base.WriterBase',
help="""Instance of the writer class used to write the
results of the conversion.""", allow_none=True)
writer_class = DottedObjectName('FilesWriter',
help="""Writer class used to write the
results of the conversion""").tag(config=True)
writer_aliases = {'fileswriter': 'nbconvert.writers.files.FilesWriter',
'debugwriter': 'nbconvert.writers.debug.DebugWriter',
'stdoutwriter': 'nbconvert.writers.stdout.StdoutWriter'}
writer_factory = Type(allow_none=True)
@observe('writer_class')
def _writer_class_changed(self, change):
new = change['new']
if new.lower() in self.writer_aliases:
new = self.writer_aliases[new.lower()]
self.writer_factory = import_item(new)
# Post-processor specific variables
postprocessor = Instance('nbconvert.postprocessors.base.PostProcessorBase',
help="""Instance of the PostProcessor class used to write the
results of the conversion.""", allow_none=True)
postprocessor_class = DottedOrNone(
help="""PostProcessor class used to write the
results of the conversion"""
).tag(config=True)
postprocessor_aliases = {'serve': 'nbconvert.postprocessors.serve.ServePostProcessor'}
postprocessor_factory = Type(None, allow_none=True)
@observe('postprocessor_class')
def _postprocessor_class_changed(self, change):
new = change['new']
if new.lower() in self.postprocessor_aliases:
new = self.postprocessor_aliases[new.lower()]
if new:
self.postprocessor_factory = import_item(new)
ipywidgets_base_url = Unicode("https://unpkg.com/",
help="URL base for ipywidgets package").tag(config=True)
export_format = Unicode(
'html',
allow_none=False,
help="""The export format to be used, either one of the built-in formats
{formats}
or a dotted object name that represents the import path for an
`Exporter` class""".format(formats=get_export_names())
).tag(config=True)
notebooks = List([], help="""List of notebooks to convert.
Wildcards are supported.
Filenames passed positionally will be added to the list.
"""
).tag(config=True)
from_stdin = Bool(False, help="read a single notebook from stdin.").tag(config=True)
@catch_config_error
def initialize(self, argv=None):
"""Initialize application, notebooks, writer, and postprocessor"""
self.init_syspath()
super(NbConvertApp, self).initialize(argv)
self.init_notebooks()
self.init_writer()
self.init_postprocessor()
def init_syspath(self):
"""Add the cwd to the sys.path ($PYTHONPATH)"""
sys.path.insert(0, os.getcwd())
def init_notebooks(self):
"""Construct the list of notebooks.
If notebooks are passed on the command-line,
they override (rather than add) notebooks specified in config files.
Glob each notebook to replace notebook patterns with filenames.
"""
# Specifying notebooks on the command-line overrides (rather than
# adds) the notebook list
if self.extra_args:
patterns = self.extra_args
else:
patterns = self.notebooks
# Use glob to replace all the notebook patterns with filenames.
filenames = []
for pattern in patterns:
# Use glob to find matching filenames. Allow the user to convert
# notebooks without having to type the extension.
globbed_files = glob.glob(pattern)
globbed_files.extend(glob.glob(pattern + '.ipynb'))
if not globbed_files:
self.log.warning("pattern %r matched no files", pattern)
for filename in globbed_files:
                if filename not in filenames:
filenames.append(filename)
self.notebooks = filenames
def init_writer(self):
"""Initialize the writer (which is stateless)"""
self._writer_class_changed({ 'new': self.writer_class })
self.writer = self.writer_factory(parent=self)
if hasattr(self.writer, 'build_directory') and self.writer.build_directory != '':
self.use_output_suffix = False
def init_postprocessor(self):
"""Initialize the postprocessor (which is stateless)"""
self._postprocessor_class_changed({'new': self.postprocessor_class})
if self.postprocessor_factory:
self.postprocessor = self.postprocessor_factory(parent=self)
def start(self):
"""Run start after initialization process has completed"""
super(NbConvertApp, self).start()
self.convert_notebooks()
def init_single_notebook_resources(self, notebook_filename):
"""Step 1: Initialize resources
This initializes the resources dictionary for a single notebook.
Returns
-------
dict
resources dictionary for a single notebook that MUST include the following keys:
- config_dir: the location of the Jupyter config directory
- unique_key: the notebook name
- output_files_dir: a directory where output files (not
including the notebook itself) should be saved
"""
basename = os.path.basename(notebook_filename)
notebook_name = basename[:basename.rfind('.')]
if self.output_base:
# strip duplicate extension from output_base, to avoid Basename.ext.ext
if getattr(self.exporter, 'file_extension', False):
base, ext = os.path.splitext(self.output_base)
if ext == self.exporter.file_extension:
self.output_base = base
notebook_name = self.output_base
self.log.debug("Notebook name is '%s'", notebook_name)
# first initialize the resources we want to use
resources = {}
resources['config_dir'] = self.config_dir
resources['unique_key'] = notebook_name
output_files_dir = (self.output_files_dir
.format(notebook_name=notebook_name))
resources['output_files_dir'] = output_files_dir
resources['ipywidgets_base_url'] = self.ipywidgets_base_url
return resources
def export_single_notebook(self, notebook_filename, resources, input_buffer=None):
"""Step 2: Export the notebook
Exports the notebook to a particular format according to the specified
exporter. This function returns the output and (possibly modified)
resources from the exporter.
Parameters
----------
notebook_filename : str
name of notebook file.
resources : dict
input_buffer :
readable file-like object returning unicode.
if not None, notebook_filename is ignored
Returns
-------
output
dict
resources (possibly modified)
"""
try:
if input_buffer is not None:
output, resources = self.exporter.from_file(input_buffer, resources=resources)
else:
output, resources = self.exporter.from_filename(notebook_filename, resources=resources)
except ConversionException:
self.log.error("Error while converting '%s'", notebook_filename, exc_info=True)
self.exit(1)
return output, resources
def write_single_notebook(self, output, resources):
"""Step 3: Write the notebook to file
This writes output from the exporter to file using the specified writer.
It returns the results from the writer.
Parameters
----------
output :
resources : dict
resources for a single notebook including name, config directory
and directory to save output
Returns
-------
file
results from the specified writer output of exporter
"""
if 'unique_key' not in resources:
raise KeyError("unique_key MUST be specified in the resources, but it is not")
notebook_name = resources['unique_key']
if self.use_output_suffix and not self.output_base:
notebook_name += resources.get('output_suffix', '')
write_results = self.writer.write(
output, resources, notebook_name=notebook_name)
return write_results
def postprocess_single_notebook(self, write_results):
"""Step 4: Post-process the written file
Only used if a postprocessor has been specified. After the
converted notebook is written to a file in Step 3, this post-processes
the notebook.
"""
# Post-process if post processor has been defined.
if hasattr(self, 'postprocessor') and self.postprocessor:
self.postprocessor(write_results)
def convert_single_notebook(self, notebook_filename, input_buffer=None):
"""Convert a single notebook.
Performs the following steps:
1. Initialize notebook resources
2. Export the notebook to a particular format
3. Write the exported notebook to file
4. (Maybe) postprocess the written file
Parameters
----------
notebook_filename : str
input_buffer :
If input_buffer is not None, conversion is done and the buffer is
used as source into a file basenamed by the notebook_filename
argument.
"""
if input_buffer is None:
self.log.info("Converting notebook %s to %s", notebook_filename, self.export_format)
else:
self.log.info("Converting notebook into %s", self.export_format)
resources = self.init_single_notebook_resources(notebook_filename)
output, resources = self.export_single_notebook(notebook_filename, resources, input_buffer=input_buffer)
write_results = self.write_single_notebook(output, resources)
self.postprocess_single_notebook(write_results)
def convert_notebooks(self):
"""Convert the notebooks in the self.notebook traitlet """
# check that the output base isn't specified if there is more than
# one notebook to convert
if self.output_base != '' and len(self.notebooks) > 1:
self.log.error(
"""
UsageError: --output flag or `NbConvertApp.output_base` config option
cannot be used when converting multiple notebooks.
"""
)
self.exit(1)
# initialize the exporter
cls = get_exporter(self.export_format)
self.exporter = cls(config=self.config)
# no notebooks to convert!
if len(self.notebooks) == 0 and not self.from_stdin:
self.print_help()
sys.exit(-1)
# convert each notebook
if not self.from_stdin:
for notebook_filename in self.notebooks:
self.convert_single_notebook(notebook_filename)
else:
input_buffer = unicode_stdin_stream()
# default name when conversion from stdin
self.convert_single_notebook("notebook.ipynb", input_buffer=input_buffer)
#-----------------------------------------------------------------------------
# Main entry point
#-----------------------------------------------------------------------------
main = launch_new_instance = NbConvertApp.launch_instance
|
#!/usr/bin/env python3
from pathlib import Path
def get_file(fname):
return Path(__file__).resolve().parent / fname
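# Illustrative use (filename made up): get_file("config.yaml") returns an
# absolute Path to a config.yaml sitting next to this script.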
|
def bytes_to_human(n):
symbols = ('KB', 'MB', 'GB', 'TB', 'PB', 'EB')
prefix = {}
for i, s in enumerate(symbols):
prefix[s] = 1 << (i + 1) * 10
for s in reversed(symbols):
if n >= prefix[s]:
value = float(n) / prefix[s]
return '%.1f%s' % (value, s)
return '%sB' % n
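# Illustrative values (computed from the 1024-based table built above):
#   bytes_to_human(512)       -> '512B'
#   bytes_to_human(10000)     -> '9.8KB'
#   bytes_to_human(100001221) -> '95.4MB'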
|
import spartan
from spartan import expr, core
import numpy as np
from sys import stderr
def qr(Y):
''' Compute the thin qr factorization of a matrix.
Factor the matrix Y as QR, where Q is orthonormal and R is
upper-triangular.
Parameters
----------
Y: Spartan array of shape (M, K).
Notes
----------
Y'Y must fit in memory. Y is a Spartan array of shape (M, K).
Since this QR decomposition is mainly used in Stochastic SVD,
K will be the rank of the matrix of shape (M, N) and the assumption
is that the rank K should be far less than M or N.
Returns
-------
Q : Spartan array of shape (M, K).
R : Numpy array of shape (K, K).
'''
  # Since K should be far less than M, the matrix multiplication is the
  # bottleneck rather than the local Cholesky decomposition or the inverse
  # of R, so we only parallelize the matrix multiplication. If K were very
  # large we could use Spartan's Cholesky decomposition, but for now the
  # numpy version works fine.
# YTY = Y'Y. YTY has shape of (K, K).
YTY = expr.dot(expr.transpose(Y), Y).optimized().glom()
# Do cholesky decomposition and get R.
R = np.linalg.cholesky(YTY).T
# Find the inverse of R
inv_R = np.linalg.inv(R)
# Q = Y * inv(R)
Q = expr.dot(Y, inv_R).optimized().evaluate()
return Q, R
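# A minimal NumPy-only sketch of the same Cholesky-QR idea (no Spartan
# required; names below are illustrative):
#   Y = np.random.rand(1000, 10)            # M >> K
#   R = np.linalg.cholesky(Y.T.dot(Y)).T    # upper-triangular, R'R = Y'Y
#   Q = Y.dot(np.linalg.inv(R))             # orthonormal columns
#   assert np.allclose(Q.dot(R), Y)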
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Server.samba_base_folder'
db.add_column(u'servers_server', 'samba_base_folder',
self.gf('django.db.models.fields.CharField')(default='', max_length=255, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Server.samba_base_folder'
db.delete_column(u'servers_server', 'samba_base_folder')
models = {
u'servers.server': {
'Meta': {'object_name': 'Server'},
'external_hostname_for_vms_creation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'external_interface': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'external_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'is_proxmox': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_vm': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'keymanger_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ngnix_server': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ngnixed_server_set'", 'null': 'True', 'to': u"orm['servers.Server']"}),
'proxmox_node_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'samba_base_folder': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'samba_management': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ssh_connection_string_from_backup': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'ssh_connection_string_from_gestion': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'vm_host': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'server_set'", 'null': 'True', 'to': u"orm['servers.Server']"})
},
u'servers.serveruser': {
'Meta': {'object_name': 'ServerUser'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'server': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['servers.Server']"})
},
u'servers.sshkey': {
'Meta': {'object_name': 'SshKey'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.TextField', [], {}),
'server': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['servers.Server']"}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['servers']
|
# -*- coding: utf8 -*-
from ssh_config import ConfigParser
from exceptions import StormValueError
from operator import itemgetter
import getpass
__version__ = '0.5.2'
class Storm(object):
def __init__(self, ssh_config_file=None):
self.ssh_config = ConfigParser(ssh_config_file)
self.ssh_config.load()
def add_entry(self, name, host, user, port, id_file, custom_options=[]):
if self.is_host_in(name):
raise StormValueError('{0} is already in your sshconfig. use storm edit command to modify.'.format(name))
options = self.get_options(host, user, port, id_file, custom_options)
self.ssh_config.add_host(name, options)
self.ssh_config.write_to_ssh_config()
return True
def edit_entry(self, name, host, user, port, id_file, custom_options=[]):
if not self.is_host_in(name):
            raise StormValueError('{0} doesn\'t exist in your sshconfig. use storm add command to add.'.format(name))
options = self.get_options(host, user, port, id_file, custom_options)
self.ssh_config.update_host(name, options)
self.ssh_config.write_to_ssh_config()
return True
def delete_entry(self, name):
self.ssh_config.delete_host(name)
self.ssh_config.write_to_ssh_config()
return True
def list_entries(self, order=False, only_servers=False):
config_data = self.ssh_config.config_data
# required for the web api.
        if only_servers:
            # Filter into a new list; deleting entries while enumerating
            # the same list skips items.
            config_data = [value for value in config_data
                           if not value.get("type") or value.get("type") == 'entry']
if order:
config_data = sorted(config_data, key=itemgetter("host"))
return config_data
def delete_all_entries(self):
self.ssh_config.delete_all_hosts()
return True
def search_host(self, search_string):
results = self.ssh_config.search_host(search_string)
formatted_results = []
for host_entry in results:
formatted_results.append(" {0} -> {1}@{2}:{3}\n".format(
host_entry.get("host"),
host_entry.get("options").get("user", getpass.getuser()),
host_entry.get("options").get("hostname"),
host_entry.get("options").get("port", 22),
))
return formatted_results
def get_options(self, host, user, port, id_file, custom_options):
options = {
'hostname': host,
'user': user,
'port': port,
}
if id_file:
options.update({
'identityfile': id_file,
})
if len(custom_options) > 0:
for custom_option in custom_options:
if '=' in custom_option:
key, value = custom_option.split("=")[0:2]
options.update({
key: value,
})
return options
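    # Illustrative call (values made up):
    #   get_options("example.com", "deploy", 22, None, ["StrictHostKeyChecking=no"])
    #   -> {'hostname': 'example.com', 'user': 'deploy', 'port': 22,
    #       'StrictHostKeyChecking': 'no'}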
def is_host_in(self, host):
for host_ in self.ssh_config.config_data:
if host_.get("host") == host:
return True
return False
|
from __future__ import absolute_import
from __future__ import print_function
from functools import wraps
from django.core.cache import cache as djcache
from django.core.cache import caches
from django.conf import settings
from django.db.models import Q
from django.core.cache.backends.base import BaseCache
from typing import Any, Callable, Dict, Iterable, List, Optional, Union, TypeVar
from zerver.lib.utils import statsd, statsd_key, make_safe_digest
import subprocess
import time
import base64
import random
import sys
import os
import os.path
import hashlib
import six
from six import text_type
if False:
from zerver.models import UserProfile, Realm, Message
# These modules have to be imported for type annotations but
# they cannot be imported at runtime due to cyclic dependency.
FuncT = TypeVar('FuncT', bound=Callable[..., Any])
remote_cache_time_start = 0.0
remote_cache_total_time = 0.0
remote_cache_total_requests = 0
def get_remote_cache_time():
# type: () -> float
return remote_cache_total_time
def get_remote_cache_requests():
# type: () -> int
return remote_cache_total_requests
def remote_cache_stats_start():
# type: () -> None
global remote_cache_time_start
remote_cache_time_start = time.time()
def remote_cache_stats_finish():
# type: () -> None
global remote_cache_total_time
global remote_cache_total_requests
global remote_cache_time_start
remote_cache_total_requests += 1
remote_cache_total_time += (time.time() - remote_cache_time_start)
def get_or_create_key_prefix():
# type: () -> text_type
if settings.TEST_SUITE:
# This sets the prefix mostly for the benefit of the JS tests.
# The Python tests overwrite KEY_PREFIX on each test.
return u'test_suite:%s:' % (text_type(os.getpid()),)
# directory `var` should exist in production
subprocess.check_call(["mkdir", "-p", os.path.join(settings.DEPLOY_ROOT, "var")])
filename = os.path.join(settings.DEPLOY_ROOT, "var", "remote_cache_prefix")
try:
fd = os.open(filename, os.O_CREAT | os.O_EXCL | os.O_RDWR, 0o444)
random_hash = hashlib.sha256(text_type(random.getrandbits(256)).encode('utf-8')).digest()
prefix = base64.b16encode(random_hash)[:32].decode('utf-8').lower() + ':'
# This does close the underlying file
with os.fdopen(fd, 'w') as f:
f.write(prefix + "\n")
except OSError:
# The file already exists
tries = 1
while tries < 10:
with open(filename, 'r') as f:
prefix = f.readline()[:-1]
if len(prefix) == 33:
break
tries += 1
prefix = ''
time.sleep(0.5)
if not prefix:
print("Could not read remote cache key prefix file")
sys.exit(1)
return prefix
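# The generated prefix is 32 lowercase hex characters plus a trailing colon,
# e.g. u'0f3a...9c:' (digits here are made up for illustration).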
KEY_PREFIX = get_or_create_key_prefix() # type: text_type
def bounce_key_prefix_for_testing(test_name):
# type: (text_type) -> None
global KEY_PREFIX
KEY_PREFIX = test_name + u':' + text_type(os.getpid()) + u':'
def get_cache_backend(cache_name):
# type: (Optional[str]) -> BaseCache
if cache_name is None:
return djcache
return caches[cache_name]
def cache_with_key(keyfunc, cache_name=None, timeout=None, with_statsd_key=None):
# type: (Any, Optional[str], Optional[int], Optional[str]) -> Any
# This function can't be typed perfectly because returning a generic function
# isn't supported in mypy - https://github.com/python/mypy/issues/1551.
"""Decorator which applies Django caching to a function.
Decorator argument is a function which computes a cache key
from the original function's arguments. You are responsible
for avoiding collisions with other uses of this decorator or
other uses of caching."""
def decorator(func):
# type: (Callable[..., Any]) -> (Callable[..., Any])
@wraps(func)
def func_with_caching(*args, **kwargs):
# type: (*Any, **Any) -> Callable[..., Any]
key = keyfunc(*args, **kwargs)
val = cache_get(key, cache_name=cache_name)
extra = ""
if cache_name == 'database':
extra = ".dbcache"
if with_statsd_key is not None:
metric_key = with_statsd_key
else:
metric_key = statsd_key(key)
status = "hit" if val is not None else "miss"
statsd.incr("cache%s.%s.%s" % (extra, metric_key, status))
# Values are singleton tuples so that we can distinguish
# a result of None from a missing key.
if val is not None:
return val[0]
val = func(*args, **kwargs)
cache_set(key, val, cache_name=cache_name, timeout=timeout)
return val
return func_with_caching
return decorator
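# Illustrative use (key function and lookup are hypothetical):
#   @cache_with_key(lambda user_id: u"demo:user:%s" % (user_id,), timeout=3600)
#   def fetch_user(user_id):
#       ...  # expensive database lookup, cached under demo:user:<id>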
def cache_set(key, val, cache_name=None, timeout=None):
# type: (text_type, Any, Optional[str], Optional[int]) -> None
remote_cache_stats_start()
cache_backend = get_cache_backend(cache_name)
cache_backend.set(KEY_PREFIX + key, (val,), timeout=timeout)
remote_cache_stats_finish()
def cache_get(key, cache_name=None):
# type: (text_type, Optional[str]) -> Any
remote_cache_stats_start()
cache_backend = get_cache_backend(cache_name)
ret = cache_backend.get(KEY_PREFIX + key)
remote_cache_stats_finish()
return ret
def cache_get_many(keys, cache_name=None):
# type: (List[text_type], Optional[str]) -> Dict[text_type, Any]
keys = [KEY_PREFIX + key for key in keys]
remote_cache_stats_start()
ret = get_cache_backend(cache_name).get_many(keys)
remote_cache_stats_finish()
return dict([(key[len(KEY_PREFIX):], value) for key, value in ret.items()])
def cache_set_many(items, cache_name=None, timeout=None):
# type: (Dict[text_type, Any], Optional[str], Optional[int]) -> None
new_items = {}
for key in items:
new_items[KEY_PREFIX + key] = items[key]
items = new_items
remote_cache_stats_start()
get_cache_backend(cache_name).set_many(items, timeout=timeout)
remote_cache_stats_finish()
def cache_delete(key, cache_name=None):
# type: (text_type, Optional[str]) -> None
remote_cache_stats_start()
get_cache_backend(cache_name).delete(KEY_PREFIX + key)
remote_cache_stats_finish()
def cache_delete_many(items, cache_name=None):
# type: (Iterable[text_type], Optional[str]) -> None
remote_cache_stats_start()
get_cache_backend(cache_name).delete_many(
KEY_PREFIX + item for item in items)
remote_cache_stats_finish()
# Required Arguments are as follows:
# * object_ids: The list of object ids to look up
# * cache_key_function: object_id => cache key
# * query_function: [object_ids] => [objects from database]
# Optional keyword arguments:
# * setter: Function to call before storing items to cache (e.g. compression)
# * extractor: Function to call on items returned from cache
# (e.g. decompression). Should be the inverse of the setter
# function.
# * id_fetcher: Function mapping an object from database => object_id
# (in case we're using a key more complex than obj.id)
# * cache_transformer: Function mapping an object from database =>
# value for cache (in case the values that we're caching are some
# function of the objects, not the objects themselves)
ObjKT = TypeVar('ObjKT', int, text_type)
ItemT = Any # https://github.com/python/mypy/issues/1721
CompressedItemT = Any # https://github.com/python/mypy/issues/1721
def generic_bulk_cached_fetch(cache_key_function, # type: Callable[[ObjKT], text_type]
query_function, # type: Callable[[List[ObjKT]], Iterable[Any]]
object_ids, # type: Iterable[ObjKT]
extractor=lambda obj: obj, # type: Callable[[CompressedItemT], ItemT]
setter=lambda obj: obj, # type: Callable[[ItemT], CompressedItemT]
id_fetcher=lambda obj: obj.id, # type: Callable[[Any], ObjKT]
cache_transformer=lambda obj: obj # type: Callable[[Any], ItemT]
):
# type: (...) -> Dict[ObjKT, Any]
cache_keys = {} # type: Dict[ObjKT, text_type]
for object_id in object_ids:
cache_keys[object_id] = cache_key_function(object_id)
cached_objects = cache_get_many([cache_keys[object_id]
for object_id in object_ids])
for (key, val) in cached_objects.items():
cached_objects[key] = extractor(cached_objects[key][0])
needed_ids = [object_id for object_id in object_ids if
cache_keys[object_id] not in cached_objects]
db_objects = query_function(needed_ids)
items_for_remote_cache = {} # type: Dict[text_type, Any]
for obj in db_objects:
key = cache_keys[id_fetcher(obj)]
item = cache_transformer(obj)
items_for_remote_cache[key] = (setter(item),)
cached_objects[key] = item
if len(items_for_remote_cache) > 0:
cache_set_many(items_for_remote_cache)
return dict((object_id, cached_objects[cache_keys[object_id]]) for object_id in object_ids
if cache_keys[object_id] in cached_objects)
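# Illustrative call, pairing this helper with a key function defined below
# (the query_function shown is a hypothetical ORM lookup):
#   profiles_by_id = generic_bulk_cached_fetch(
#       cache_key_function=user_profile_by_id_cache_key,
#       query_function=lambda ids: UserProfile.objects.filter(id__in=ids),
#       object_ids=[1, 2, 3])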
def cache(func):
# type: (FuncT) -> FuncT
"""Decorator which applies Django caching to a function.
Uses a key based on the function's name, filename, and
the repr() of its arguments."""
func_uniqifier = '%s-%s' % (func.__code__.co_filename, func.__name__) # type: ignore # https://github.com/python/mypy/issues/1923
@wraps(func)
def keyfunc(*args, **kwargs):
# type: (*Any, **Any) -> str
# Django complains about spaces because memcached rejects them
key = func_uniqifier + repr((args, kwargs))
return key.replace('-', '--').replace(' ', '-s')
return cache_with_key(keyfunc)(func)
def display_recipient_cache_key(recipient_id):
# type: (int) -> text_type
return u"display_recipient_dict:%d" % (recipient_id,)
def user_profile_by_email_cache_key(email):
# type: (text_type) -> text_type
# See the comment in zerver/lib/avatar_hash.py:gravatar_hash for why we
# are proactively encoding email addresses even though they will
# with high likelihood be ASCII-only for the foreseeable future.
return u'user_profile_by_email:%s' % (make_safe_digest(email.strip()),)
def user_profile_by_id_cache_key(user_profile_id):
# type: (int) -> text_type
return u"user_profile_by_id:%s" % (user_profile_id,)
# TODO: Refactor these cache helpers into another file that can import
# models.py so that python3-style type annotations can also work.
def cache_save_user_profile(user_profile):
# type: (UserProfile) -> None
cache_set(user_profile_by_id_cache_key(user_profile.id), user_profile, timeout=3600*24*7)
active_user_dict_fields = ['id', 'full_name', 'short_name', 'email', 'is_realm_admin', 'is_bot'] # type: List[str]
def active_user_dicts_in_realm_cache_key(realm):
# type: (Realm) -> text_type
return u"active_user_dicts_in_realm:%s" % (realm.id,)
active_bot_dict_fields = ['id', 'full_name', 'short_name',
'email', 'default_sending_stream__name',
'default_events_register_stream__name',
'default_all_public_streams', 'api_key',
'bot_owner__email', 'avatar_source'] # type: List[str]
def active_bot_dicts_in_realm_cache_key(realm):
# type: (Realm) -> text_type
return u"active_bot_dicts_in_realm:%s" % (realm.id,)
def get_stream_cache_key(stream_name, realm):
# type: (text_type, Union[Realm, int]) -> text_type
from zerver.models import Realm
if isinstance(realm, Realm):
realm_id = realm.id
else:
realm_id = realm
return u"stream_by_realm_and_name:%s:%s" % (
realm_id, make_safe_digest(stream_name.strip().lower()))
def delete_user_profile_caches(user_profiles):
# type: (Iterable[UserProfile]) -> None
keys = []
for user_profile in user_profiles:
keys.append(user_profile_by_email_cache_key(user_profile.email))
keys.append(user_profile_by_id_cache_key(user_profile.id))
cache_delete_many(keys)
# Called by models.py to flush the user_profile cache whenever we save
# a user_profile object
def flush_user_profile(sender, **kwargs):
# type: (Any, **Any) -> None
user_profile = kwargs['instance']
delete_user_profile_caches([user_profile])
# Invalidate our active_users_in_realm info dict if any user has changed
# the fields in the dict or become (in)active
if kwargs.get('update_fields') is None or \
len(set(active_user_dict_fields + ['is_active']) & set(kwargs['update_fields'])) > 0:
cache_delete(active_user_dicts_in_realm_cache_key(user_profile.realm))
# Invalidate our active_bots_in_realm info dict if any bot has
# changed the fields in the dict or become (in)active
if user_profile.is_bot and (kwargs['update_fields'] is None or
(set(active_bot_dict_fields + ['is_active']) &
set(kwargs['update_fields']))):
cache_delete(active_bot_dicts_in_realm_cache_key(user_profile.realm))
# Invalidate realm-wide alert words cache if any user in the realm has changed
# alert words
if kwargs.get('update_fields') is None or "alert_words" in kwargs['update_fields']:
cache_delete(realm_alert_words_cache_key(user_profile.realm))
# Called by models.py to flush various caches whenever we save
# a Realm object. The main tricky thing here is that Realm info is
# generally cached indirectly through user_profile objects.
def flush_realm(sender, **kwargs):
# type: (Any, **Any) -> None
realm = kwargs['instance']
users = realm.get_active_users()
delete_user_profile_caches(users)
if realm.deactivated:
cache_delete(active_user_dicts_in_realm_cache_key(realm))
cache_delete(active_bot_dicts_in_realm_cache_key(realm))
cache_delete(realm_alert_words_cache_key(realm))
def realm_alert_words_cache_key(realm):
# type: (Realm) -> text_type
return u"realm_alert_words:%s" % (realm.domain,)
# Called by models.py to flush the stream cache whenever we save a stream
# object.
def flush_stream(sender, **kwargs):
# type: (Any, **Any) -> None
from zerver.models import UserProfile
stream = kwargs['instance']
items_for_remote_cache = {}
items_for_remote_cache[get_stream_cache_key(stream.name, stream.realm)] = (stream,)
cache_set_many(items_for_remote_cache)
    if (kwargs.get('update_fields') is None or 'name' in kwargs['update_fields']) and \
            UserProfile.objects.filter(
                Q(default_sending_stream=stream) |
                Q(default_events_register_stream=stream)
            ).exists():
        cache_delete(active_bot_dicts_in_realm_cache_key(stream.realm))
# TODO: Rename to_dict_cache_key_id and to_dict_cache_key
def to_dict_cache_key_id(message_id, apply_markdown):
# type: (int, bool) -> text_type
return u'message_dict:%d:%d' % (message_id, apply_markdown)
def to_dict_cache_key(message, apply_markdown):
# type: (Message, bool) -> text_type
return to_dict_cache_key_id(message.id, apply_markdown)
def flush_message(sender, **kwargs):
# type: (Any, **Any) -> None
message = kwargs['instance']
cache_delete(to_dict_cache_key(message, False))
cache_delete(to_dict_cache_key(message, True))
|
#! /usr/bin/env python3
from SWEET import *
from mule.postprocessing.JobsData import *
from mule.postprocessing.JobsDataConsolidate import *
from mule.plotting.Plotting import *
import sys
sys.path.append('../')
import pretty_plotting as pp
sys.path.pop()
#
# Load data
#
j = JobsData('job_bench_*', verbosity=0)
#
# Create groups
#
groups = ['runtime.timestepping_method']
c = JobsDataConsolidate(j)
job_groups = c.create_groups(groups)
print("Groups:")
for key, g in job_groups.items():
print(key)
tagname_x = 'parallelization.num_threads_per_rank'
tagname_y = 'output.simulation_benchmark_timings.main_timestepping'
#
# Make ready for plotting
#
d = JobsData_GroupsPlottingScattered(
job_groups,
tagname_x,
tagname_y
)
data_plotting = d.get_data_float()
# Make pretty
for key, data in data_plotting.items():
data['label'] = pp.get_pretty_name(key)
#
# Plot!
#
p = Plotting_ScatteredData()
p.plot(
data_plotting = data_plotting,
xlabel = "Number of threads",
ylabel = "Wallclock time (seconds)",
title = "Wallclock time",
outfile = "output_threads_vs_wallclock_time.pdf"
)
#
# Scalability
#
for key, values in data_plotting.items():
label = key
x_values = values['x_values']
y_values = values['y_values']
    # Basis for scalability (number of threads)
    basis_scalability = 1.0
    # Index of the x value used as the scalability basis; note that
    # list.index raises ValueError rather than returning None.
    try:
        i = x_values.index(basis_scalability)
    except ValueError:
        raise Exception("Scalability basis not found")
# Convert to scalability
values['y_values'] = [y_values[i]/y for y in y_values]
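    # e.g. a baseline of 10 s at 1 thread and 2.5 s at 4 threads yields a
    # scalability value of 10/2.5 = 4.0 for the 4-thread point.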
p.plot(
data_plotting,
xlabel="Number of threads",
ylabel="Scalability",
title = "Scalability",
outfile="output_threads_vs_scalability.pdf"
)
|
import setuptools
import re
with open("README.md", "r") as fh:
long_description = fh.read()
version = re.search(
    r'^__version__\s*=\s*"(.*)"',
    open('mrtopo/__main__.py').read(),
    re.M
).group(1)
setuptools.setup(
name='mrtopo',
version=version,
packages=setuptools.find_packages(),
url='https://github.com/FaizChishtie/mrtopo',
license='MIT',
author='faizchishtie',
author_email='faizchishtie@gmail.com',
description='Mutate Mininet topology files with MrTopo',
python_requires='>=3.0',
entry_points={'console_scripts': ['mrtopo = mrtopo.cli:cli']},
long_description=long_description,
long_description_content_type='text/markdown',
install_requires=[
'mininet', 'click'
],
keywords='topology network startup',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Utilities',
'Typing :: Typed',
],
)
|
from setuptools import setup
def get_version(filename):
"""
Parse the value of the __version__ var from a Python source file
without running/importing the file.
"""
import re
version_pattern = r"^ *__version__ *= *['\"](\d+\.\d+\.\d+)['\"] *$"
match = re.search(version_pattern, open(filename).read(), re.MULTILINE)
assert match, ("No version found in file: {!r} matching pattern: {!r}"
.format(filename, version_pattern))
return match.group(1)
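# For example, a file containing the line __version__ = "1.2.3" yields "1.2.3".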
setup(
name="jsonlogging",
description="jsonlogging provides structured log output from the "
"logging module in JSON format",
author="Hal Blackburn",
author_email="hwtb2@cam.ac.uk",
url="https://github.com/ucamhal/ravenpy",
version=get_version("jsonlogging/__init__.py"),
packages=["jsonlogging"],
license="BSD",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python",
"Topic :: Software Development",
"Topic :: System :: Logging"
],
long_description=open("README.md").read(),
test_suite="jsonlogging.tests.test_all",
tests_require="mock >= 1.0.0, < 2.0.0"
)
|
"""Plotting module for SymPy.
A plot is represented by the ``Plot`` class that contains a reference to the
backend and a list of the data series to be plotted. The data series are
instances of classes meant to simplify getting points and meshes from SymPy
expressions. ``plot_backends`` is a dictionary with all the backends.
This module gives only the essential. For all the fancy stuff use directly
the backend. You can get the backend wrapper for every plot from the
``_backend`` attribute. Moreover the data series classes have various useful
methods like ``get_points``, ``get_meshes``, etc., that may
be useful if you wish to use another plotting library.
Especially if you need publication ready graphs and this module is not enough
for you - just get the ``_backend`` attribute and add whatever you want
directly to it. In the case of matplotlib (the common way to graph data in
python) just copy ``_backend.fig`` which is the figure and ``_backend.ax``
which is the axis and work on them as you would on any other matplotlib object.
Simplicity of code takes much greater importance than performance. Do not use it
if you care at all about performance. A new backend instance is initialized
every time you call ``show()`` and the old one is left to the garbage collector.
"""
from collections.abc import Callable
from sympy.core.basic import Basic
from sympy.core.containers import Tuple
from sympy.core.expr import Expr
from sympy.core.function import arity, Function
from sympy.core.symbol import (Dummy, Symbol)
from sympy.core.sympify import sympify
from sympy.external import import_module
from sympy.printing.latex import latex
from sympy.utilities.exceptions import sympy_deprecation_warning
from sympy.utilities.iterables import is_sequence
from .experimental_lambdify import (vectorized_lambdify, lambdify)
# N.B.
# When changing the minimum module version for matplotlib, please change
# the same in the `SymPyDocTestFinder`` in `sympy/testing/runtests.py`
# Backend specific imports - textplot
from sympy.plotting.textplot import textplot
# Global variable
# Set to False when running tests / doctests so that the plots don't show.
_show = True
def unset_show():
"""
Disable show(). For use in the tests.
"""
global _show
_show = False
def _str_or_latex(label):
if isinstance(label, Basic):
return latex(label, mode='inline')
return str(label)
##############################################################################
# The public interface
##############################################################################
class Plot:
"""The central class of the plotting module.
Explanation
===========
For interactive work the function ``plot`` is better suited.
This class permits the plotting of SymPy expressions using numerous
backends (matplotlib, textplot, the old pyglet module for sympy, Google
charts api, etc).
The figure can contain an arbitrary number of plots of SymPy expressions,
lists of coordinates of points, etc. Plot has a private attribute _series that
contains all data series to be plotted (expressions for lines or surfaces,
lists of points, etc (all subclasses of BaseSeries)). Those data series are
instances of classes not imported by ``from sympy import *``.
The customization of the figure is on two levels. Global options that
concern the figure as a whole (eg title, xlabel, scale, etc) and
per-data series options (eg name) and aesthetics (eg. color, point shape,
line type, etc.).
The difference between options and aesthetics is that an aesthetic can be
a function of the coordinates (or parameters in a parametric plot). The
supported values for an aesthetic are:
- None (the backend uses default values)
- a constant
- a function of one variable (the first coordinate or parameter)
- a function of two variables (the first and second coordinate or
parameters)
- a function of three variables (only in nonparametric 3D plots)
Their implementation depends on the backend so they may not work in some
backends.
If the plot is parametric and the arity of the aesthetic function permits
it the aesthetic is calculated over parameters and not over coordinates.
If the arity does not permit calculation over parameters the calculation is
done over coordinates.
Only cartesian coordinates are supported for the moment, but you can use
the parametric plots to plot in polar, spherical and cylindrical
coordinates.
The arguments for the constructor Plot must be subclasses of BaseSeries.
Any global option can be specified as a keyword argument.
The global options for a figure are:
- title : str
- xlabel : str or Symbol
- ylabel : str or Symbol
- zlabel : str or Symbol
- legend : bool
- xscale : {'linear', 'log'}
- yscale : {'linear', 'log'}
- axis : bool
- axis_center : tuple of two floats or {'center', 'auto'}
- xlim : tuple of two floats
- ylim : tuple of two floats
- aspect_ratio : tuple of two floats or {'auto'}
- autoscale : bool
- margin : float in [0, 1]
- backend : {'default', 'matplotlib', 'text'} or a subclass of BaseBackend
- size : optional tuple of two floats, (width, height); default: None
The per data series options and aesthetics are:
There are none in the base series. See below for options for subclasses.
Some data series support additional aesthetics or options:
ListSeries, LineOver1DRangeSeries, Parametric2DLineSeries,
Parametric3DLineSeries support the following:
Aesthetics:
- line_color : string, or float, or function, optional
Specifies the color for the plot, which depends on the backend being
used.
For example, if ``MatplotlibBackend`` is being used, then
Matplotlib string colors are acceptable ("red", "r", "cyan", "c", ...).
Alternatively, we can use a float number `0 < color < 1` wrapped in a
string (for example, `line_color="0.5"`) to specify grayscale colors.
Alternatively, we can specify a function returning a single
float value: this will be used to apply a color-loop (for example,
`line_color=lambda x: math.cos(x)`).
Note that by setting line_color, it would be applied simultaneously
to all the series.
    Options:
- label : str
- steps : bool
- integers_only : bool
SurfaceOver2DRangeSeries, ParametricSurfaceSeries support the following:
    Aesthetics:
- surface_color : function which returns a float.
"""
def __init__(self, *args,
title=None, xlabel=None, ylabel=None, zlabel=None, aspect_ratio='auto',
xlim=None, ylim=None, axis_center='auto', axis=True,
xscale='linear', yscale='linear', legend=False, autoscale=True,
margin=0, annotations=None, markers=None, rectangles=None,
fill=None, backend='default', size=None, **kwargs):
super().__init__()
# Options for the graph as a whole.
# The possible values for each option are described in the docstring of
# Plot. They are based purely on convention, no checking is done.
self.title = title
self.xlabel = xlabel
self.ylabel = ylabel
self.zlabel = zlabel
self.aspect_ratio = aspect_ratio
self.axis_center = axis_center
self.axis = axis
self.xscale = xscale
self.yscale = yscale
self.legend = legend
self.autoscale = autoscale
self.margin = margin
self.annotations = annotations
self.markers = markers
self.rectangles = rectangles
self.fill = fill
# Contains the data objects to be plotted. The backend should be smart
# enough to iterate over this list.
self._series = []
self._series.extend(args)
# The backend type. On every show() a new backend instance is created
# in self._backend which is tightly coupled to the Plot instance
# (thanks to the parent attribute of the backend).
if isinstance(backend, str):
self.backend = plot_backends[backend]
elif (type(backend) == type) and issubclass(backend, BaseBackend):
self.backend = backend
else:
raise TypeError(
"backend must be either a string or a subclass of BaseBackend")
is_real = \
lambda lim: all(getattr(i, 'is_real', True) for i in lim)
is_finite = \
lambda lim: all(getattr(i, 'is_finite', True) for i in lim)
# reduce code repetition
def check_and_set(t_name, t):
if t:
if not is_real(t):
raise ValueError(
"All numbers from {}={} must be real".format(t_name, t))
if not is_finite(t):
raise ValueError(
"All numbers from {}={} must be finite".format(t_name, t))
setattr(self, t_name, (float(t[0]), float(t[1])))
self.xlim = None
check_and_set("xlim", xlim)
self.ylim = None
check_and_set("ylim", ylim)
self.size = None
check_and_set("size", size)
def show(self):
# TODO move this to the backend (also for save)
if hasattr(self, '_backend'):
self._backend.close()
self._backend = self.backend(self)
self._backend.show()
def save(self, path):
if hasattr(self, '_backend'):
self._backend.close()
self._backend = self.backend(self)
self._backend.save(path)
def __str__(self):
series_strs = [('[%d]: ' % i) + str(s)
for i, s in enumerate(self._series)]
return 'Plot object containing:\n' + '\n'.join(series_strs)
def __getitem__(self, index):
return self._series[index]
    def __setitem__(self, index, *args):
        if len(args) == 1 and isinstance(args[0], BaseSeries):
            # Store the series itself rather than the 1-tuple of args.
            self._series[index] = args[0]
def __delitem__(self, index):
del self._series[index]
def append(self, arg):
"""Adds an element from a plot's series to an existing plot.
Examples
========
Consider two ``Plot`` objects, ``p1`` and ``p2``. To add the
second plot's first series object to the first, use the
``append`` method, like so:
.. plot::
:format: doctest
:include-source: True
>>> from sympy import symbols
>>> from sympy.plotting import plot
>>> x = symbols('x')
>>> p1 = plot(x*x, show=False)
>>> p2 = plot(x, show=False)
>>> p1.append(p2[0])
>>> p1
Plot object containing:
[0]: cartesian line: x**2 for x over (-10.0, 10.0)
[1]: cartesian line: x for x over (-10.0, 10.0)
>>> p1.show()
See Also
========
extend
"""
if isinstance(arg, BaseSeries):
self._series.append(arg)
else:
raise TypeError('Must specify element of plot to append.')
def extend(self, arg):
"""Adds all series from another plot.
Examples
========
Consider two ``Plot`` objects, ``p1`` and ``p2``. To add the
second plot to the first, use the ``extend`` method, like so:
.. plot::
:format: doctest
:include-source: True
>>> from sympy import symbols
>>> from sympy.plotting import plot
>>> x = symbols('x')
>>> p1 = plot(x**2, show=False)
>>> p2 = plot(x, -x, show=False)
>>> p1.extend(p2)
>>> p1
Plot object containing:
[0]: cartesian line: x**2 for x over (-10.0, 10.0)
[1]: cartesian line: x for x over (-10.0, 10.0)
[2]: cartesian line: -x for x over (-10.0, 10.0)
>>> p1.show()
"""
if isinstance(arg, Plot):
self._series.extend(arg._series)
elif is_sequence(arg):
self._series.extend(arg)
else:
raise TypeError('Expecting Plot or sequence of BaseSeries')
class PlotGrid:
"""This class helps to plot subplots from already created SymPy plots
in a single figure.
Examples
========
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> from sympy import symbols
>>> from sympy.plotting import plot, plot3d, PlotGrid
>>> x, y = symbols('x, y')
>>> p1 = plot(x, x**2, x**3, (x, -5, 5))
>>> p2 = plot((x**2, (x, -6, 6)), (x, (x, -5, 5)))
>>> p3 = plot(x**3, (x, -5, 5))
>>> p4 = plot3d(x*y, (x, -5, 5), (y, -5, 5))
Plotting vertically in a single line:
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> PlotGrid(2, 1, p1, p2)
PlotGrid object containing:
Plot[0]:Plot object containing:
[0]: cartesian line: x for x over (-5.0, 5.0)
[1]: cartesian line: x**2 for x over (-5.0, 5.0)
[2]: cartesian line: x**3 for x over (-5.0, 5.0)
Plot[1]:Plot object containing:
[0]: cartesian line: x**2 for x over (-6.0, 6.0)
[1]: cartesian line: x for x over (-5.0, 5.0)
Plotting horizontally in a single line:
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> PlotGrid(1, 3, p2, p3, p4)
PlotGrid object containing:
Plot[0]:Plot object containing:
[0]: cartesian line: x**2 for x over (-6.0, 6.0)
[1]: cartesian line: x for x over (-5.0, 5.0)
Plot[1]:Plot object containing:
[0]: cartesian line: x**3 for x over (-5.0, 5.0)
Plot[2]:Plot object containing:
[0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
Plotting in a grid form:
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> PlotGrid(2, 2, p1, p2, p3, p4)
PlotGrid object containing:
Plot[0]:Plot object containing:
[0]: cartesian line: x for x over (-5.0, 5.0)
[1]: cartesian line: x**2 for x over (-5.0, 5.0)
[2]: cartesian line: x**3 for x over (-5.0, 5.0)
Plot[1]:Plot object containing:
[0]: cartesian line: x**2 for x over (-6.0, 6.0)
[1]: cartesian line: x for x over (-5.0, 5.0)
Plot[2]:Plot object containing:
[0]: cartesian line: x**3 for x over (-5.0, 5.0)
Plot[3]:Plot object containing:
[0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
"""
def __init__(self, nrows, ncolumns, *args, show=True, size=None, **kwargs):
"""
Parameters
==========
nrows :
The number of rows that should be in the grid of the
required subplot.
ncolumns :
The number of columns that should be in the grid
of the required subplot.
nrows and ncolumns together define the required grid.
Arguments
=========
A list of predefined plot objects entered in a row-wise sequence
i.e. plot objects which are to be in the top row of the required
grid are written first, then the second row objects and so on
Keyword arguments
=================
show : Boolean
The default value is set to ``True``. Set show to ``False`` and
the function will not display the subplot. The returned instance
of the ``PlotGrid`` class can then be used to save or display the
plot by calling the ``save()`` and ``show()`` methods
respectively.
size : (float, float), optional
A tuple in the form (width, height) in inches to specify the size of
the overall figure. The default value is set to ``None``, meaning
the size will be set by the default backend.
"""
self.nrows = nrows
self.ncolumns = ncolumns
self._series = []
self.args = args
for arg in args:
self._series.append(arg._series)
self.backend = DefaultBackend
self.size = size
if show:
self.show()
def show(self):
if hasattr(self, '_backend'):
self._backend.close()
self._backend = self.backend(self)
self._backend.show()
def save(self, path):
if hasattr(self, '_backend'):
self._backend.close()
self._backend = self.backend(self)
self._backend.save(path)
def __str__(self):
plot_strs = [('Plot[%d]:' % i) + str(plot)
for i, plot in enumerate(self.args)]
return 'PlotGrid object containing:\n' + '\n'.join(plot_strs)
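# A minimal sketch (illustrative, not part of the API above) of the
# deferred-display workflow described in ``__init__``: pass ``show=False``,
# then save or show later. ``p1``/``p2`` stand for existing ``Plot``
# instances and the file name is hypothetical.
#
#     grid = PlotGrid(2, 1, p1, p2, show=False)
#     grid.save('grid.png')   # render to file via the backend
#     grid.show()             # or display interactively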
##############################################################################
# Data Series
##############################################################################
#TODO more general way to calculate aesthetics (see get_color_array)
### The base class for all series
class BaseSeries:
"""Base class for the data objects containing stuff to be plotted.
Explanation
===========
The backend should check if it supports the data series that it's given
(e.g. TextBackend supports only LineOver1DRangeSeries).
It's the backend's responsibility to know how to use the class of
data series that it's given.
Some data series classes are grouped (using a class attribute like is_2Dline)
according to the API they present (based only on convention). The backend is
not obliged to use that API (e.g. LineOver1DRangeSeries belongs to the
is_2Dline group and presents the get_points method, but the
TextBackend does not use the get_points method).
"""
# Some flags follow. The rationale for using flags instead of checking base
# classes is that setting multiple flags is simpler than multiple
# inheritance.
is_2Dline = False
# Some of the backends expect:
# - get_points returning 1D np.arrays list_x, list_y
# - get_color_array returning 1D np.array (done in Line2DBaseSeries)
# with the colors calculated at the points from get_points
is_3Dline = False
# Some of the backends expect:
# - get_points returning 1D np.arrays list_x, list_y, list_z
# - get_color_array returning 1D np.array (done in Line2DBaseSeries)
# with the colors calculated at the points from get_points
is_3Dsurface = False
# Some of the backends expect:
# - get_meshes returning mesh_x, mesh_y, mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
is_contour = False
# Some of the backends expect:
# - get_meshes returning mesh_x, mesh_y, mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
is_implicit = False
# Some of the backends expect:
# - get_meshes returning mesh_x (1D array), mesh_y (1D array),
# mesh_z (2D np.array)
# - get_points an alias for get_meshes
# Different from is_contour as the colormap in backend will be
# different
is_parametric = False
# The calculation of aesthetics expects:
# - get_parameter_points returning one or two np.arrays (1D or 2D)
# used for calculating aesthetics
def __init__(self):
super().__init__()
@property
def is_3D(self):
flags3D = [
self.is_3Dline,
self.is_3Dsurface
]
return any(flags3D)
@property
def is_line(self):
flagslines = [
self.is_2Dline,
self.is_3Dline
]
return any(flagslines)
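# A minimal sketch (illustrative) of dispatching on the flag API described
# above rather than on concrete classes (``LineOver1DRangeSeries`` is
# defined later in this module):
#
#     >>> from sympy.abc import x
#     >>> s = LineOver1DRangeSeries(x**2, (x, 0, 1))
#     >>> s.is_2Dline and s.is_line and not s.is_3D
#     True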
### 2D lines
class Line2DBaseSeries(BaseSeries):
"""A base class for 2D lines.
- adding the label, steps and only_integers options
- making is_2Dline true
- defining get_segments and get_color_array
"""
is_2Dline = True
_dim = 2
def __init__(self):
super().__init__()
self.label = None
self.steps = False
self.only_integers = False
self.line_color = None
def get_data(self):
""" Return lists of coordinates for plotting the line.
Returns
=======
x: list
List of x-coordinates
y: list
List of y-coordinates
z: list
List of z-coordinates in case of Parametric3DLineSeries
"""
np = import_module('numpy')
points = self.get_points()
if self.steps is True:
if len(points) == 2:
x = np.array((points[0], points[0])).T.flatten()[1:]
y = np.array((points[1], points[1])).T.flatten()[:-1]
points = (x, y)
else:
x = np.repeat(points[0], 3)[2:]
y = np.repeat(points[1], 3)[:-2]
z = np.repeat(points[2], 3)[1:-1]
points = (x, y, z)
return points
def get_segments(self):
sympy_deprecation_warning(
"""
The Line2DBaseSeries.get_segments() method is deprecated.
Instead, use the MatplotlibBackend.get_segments() method, or use
the get_points() or get_data() methods.
""",
deprecated_since_version="1.9",
active_deprecations_target="deprecated-get-segments")
np = import_module('numpy')
points = type(self).get_data(self)
points = np.ma.array(points).T.reshape(-1, 1, self._dim)
return np.ma.concatenate([points[:-1], points[1:]], axis=1)
def get_color_array(self):
np = import_module('numpy')
c = self.line_color
if hasattr(c, '__call__'):
f = np.vectorize(c)
nargs = arity(c)
if nargs == 1 and self.is_parametric:
x = self.get_parameter_points()
return f(centers_of_segments(x))
else:
variables = list(map(centers_of_segments, self.get_points()))
if nargs == 1:
return f(variables[0])
elif nargs == 2:
return f(*variables[:2])
else: # only if the line is 3D (otherwise raises an error)
return f(*variables)
else:
return c*np.ones(self.nb_of_points)
class List2DSeries(Line2DBaseSeries):
"""Representation for a line consisting of list of points."""
def __init__(self, list_x, list_y):
np = import_module('numpy')
super().__init__()
self.list_x = np.array(list_x)
self.list_y = np.array(list_y)
self.label = 'list'
def __str__(self):
return 'list plot'
def get_points(self):
return (self.list_x, self.list_y)
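# Illustrative sketch: a ``List2DSeries`` simply echoes back its stored
# coordinate arrays, which a backend then draws as a polyline.
#
#     >>> s = List2DSeries([0, 1, 2], [0, 1, 4])
#     >>> xs, ys = s.get_points()   # two numpy arrays of the given points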
class LineOver1DRangeSeries(Line2DBaseSeries):
"""Representation for a line consisting of a SymPy expression over a range."""
def __init__(self, expr, var_start_end, **kwargs):
super().__init__()
self.expr = sympify(expr)
self.label = kwargs.get('label', None) or self.expr
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.adaptive = kwargs.get('adaptive', True)
self.depth = kwargs.get('depth', 12)
self.line_color = kwargs.get('line_color', None)
self.xscale = kwargs.get('xscale', 'linear')
def __str__(self):
return 'cartesian line: %s for %s over %s' % (
str(self.expr), str(self.var), str((self.start, self.end)))
def get_points(self):
""" Return lists of coordinates for plotting. Depending on the
`adaptive` option, this function will either use an adaptive algorithm
or it will uniformly sample the expression over the provided range.
Returns
=======
x: list
List of x-coordinates
y: list
List of y-coordinates
Explanation
===========
The adaptive sampling is done by recursively checking if three
points are almost collinear. If they are not collinear, then more
points are added between those points.
References
==========
.. [1] Adaptive polygonal approximation of parametric curves,
Luiz Henrique de Figueiredo.
"""
if self.only_integers or not self.adaptive:
return self._uniform_sampling()
else:
f = lambdify([self.var], self.expr)
x_coords = []
y_coords = []
np = import_module('numpy')
def sample(p, q, depth):
""" Samples recursively if three points are almost collinear.
For depth < 6, points are added irrespective of whether they
satisfy the collinearity condition. The maximum depth allowed
is ``self.depth`` (12 by default).
"""
# Randomly sample to avoid aliasing.
random = 0.45 + np.random.rand() * 0.1
if self.xscale == 'log':
xnew = 10**(np.log10(p[0]) + random * (np.log10(q[0]) -
np.log10(p[0])))
else:
xnew = p[0] + random * (q[0] - p[0])
ynew = f(xnew)
new_point = np.array([xnew, ynew])
# Maximum depth
if depth > self.depth:
x_coords.append(q[0])
y_coords.append(q[1])
# Sample irrespective of whether the line is flat up to a
# depth of 6. We are not using linspace to avoid aliasing.
elif depth < 6:
sample(p, new_point, depth + 1)
sample(new_point, q, depth + 1)
# Sample ten points if complex values are encountered
# at both ends. If there is a real value in between, then
# sample those points further.
elif p[1] is None and q[1] is None:
if self.xscale == 'log':
xarray = np.logspace(p[0], q[0], 10)
else:
xarray = np.linspace(p[0], q[0], 10)
yarray = list(map(f, xarray))
if not all(y is None for y in yarray):
for i in range(len(yarray) - 1):
if not (yarray[i] is None and yarray[i + 1] is None):
sample([xarray[i], yarray[i]],
[xarray[i + 1], yarray[i + 1]], depth + 1)
# Sample further if one of the end points is None (i.e. a
# complex value) or the three points are not almost collinear.
elif (p[1] is None or q[1] is None or new_point[1] is None
or not flat(p, new_point, q)):
sample(p, new_point, depth + 1)
sample(new_point, q, depth + 1)
else:
x_coords.append(q[0])
y_coords.append(q[1])
f_start = f(self.start)
f_end = f(self.end)
x_coords.append(self.start)
y_coords.append(f_start)
sample(np.array([self.start, f_start]),
np.array([self.end, f_end]), 0)
return (x_coords, y_coords)
def _uniform_sampling(self):
np = import_module('numpy')
if self.only_integers is True:
if self.xscale == 'log':
list_x = np.logspace(int(self.start), int(self.end),
num=int(self.end) - int(self.start) + 1)
else:
list_x = np.linspace(int(self.start), int(self.end),
num=int(self.end) - int(self.start) + 1)
else:
if self.xscale == 'log':
list_x = np.logspace(self.start, self.end, num=self.nb_of_points)
else:
list_x = np.linspace(self.start, self.end, num=self.nb_of_points)
f = vectorized_lambdify([self.var], self.expr)
list_y = f(list_x)
return (list_x, list_y)
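# Sketch of the two sampling modes above (illustrative). With
# ``adaptive=False`` the expression is evaluated on a fixed grid of
# ``nb_of_points`` samples; in the default adaptive mode the number of
# returned points depends on the curvature of the expression.
#
#     >>> from sympy.abc import x
#     >>> s = LineOver1DRangeSeries(x**2, (x, 0, 2), adaptive=False,
#     ...                           nb_of_points=5)
#     >>> xs, ys = s.get_points()   # 5 uniformly spaced samples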
class Parametric2DLineSeries(Line2DBaseSeries):
"""Representation for a line consisting of two parametric SymPy expressions
over a range."""
is_parametric = True
def __init__(self, expr_x, expr_y, var_start_end, **kwargs):
super().__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.label = kwargs.get('label', None) or \
Tuple(self.expr_x, self.expr_y)
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.adaptive = kwargs.get('adaptive', True)
self.depth = kwargs.get('depth', 12)
self.line_color = kwargs.get('line_color', None)
def __str__(self):
return 'parametric cartesian line: (%s, %s) for %s over %s' % (
str(self.expr_x), str(self.expr_y), str(self.var),
str((self.start, self.end)))
def get_parameter_points(self):
np = import_module('numpy')
return np.linspace(self.start, self.end, num=self.nb_of_points)
def _uniform_sampling(self):
param = self.get_parameter_points()
fx = vectorized_lambdify([self.var], self.expr_x)
fy = vectorized_lambdify([self.var], self.expr_y)
list_x = fx(param)
list_y = fy(param)
return (list_x, list_y)
def get_points(self):
""" Return lists of coordinates for plotting. Depending on the
`adaptive` option, this function will either use an adaptive algorithm
or it will uniformly sample the expression over the provided range.
Returns
=======
x: list
List of x-coordinates
y: list
List of y-coordinates
Explanation
===========
The adaptive sampling is done by recursively checking if three
points are almost collinear. If they are not collinear, then more
points are added between those points.
References
==========
.. [1] Adaptive polygonal approximation of parametric curves,
Luiz Henrique de Figueiredo.
"""
if not self.adaptive:
return self._uniform_sampling()
f_x = lambdify([self.var], self.expr_x)
f_y = lambdify([self.var], self.expr_y)
x_coords = []
y_coords = []
def sample(param_p, param_q, p, q, depth):
""" Samples recursively if three points are almost collinear.
For depth < 6, points are added irrespective of whether they
satisfy the collinearity condition. The maximum depth allowed
is ``self.depth`` (12 by default).
"""
# Randomly sample to avoid aliasing.
np = import_module('numpy')
random = 0.45 + np.random.rand() * 0.1
param_new = param_p + random * (param_q - param_p)
xnew = f_x(param_new)
ynew = f_y(param_new)
new_point = np.array([xnew, ynew])
# Maximum depth
if depth > self.depth:
x_coords.append(q[0])
y_coords.append(q[1])
# Sample irrespective of whether the line is flat up to a
# depth of 6. We are not using linspace to avoid aliasing.
elif depth < 6:
sample(param_p, param_new, p, new_point, depth + 1)
sample(param_new, param_q, new_point, q, depth + 1)
# Sample ten points if complex values are encountered
# at both ends. If there is a real value in between, then
# sample those points further.
elif ((p[0] is None and q[0] is None) or
(p[1] is None and q[1] is None)):
param_array = np.linspace(param_p, param_q, 10)
x_array = list(map(f_x, param_array))
y_array = list(map(f_y, param_array))
if not all(x is None and y is None
for x, y in zip(x_array, y_array)):
for i in range(len(y_array) - 1):
if ((x_array[i] is not None and y_array[i] is not None) or
(x_array[i + 1] is not None and y_array[i + 1] is not None)):
point_a = [x_array[i], y_array[i]]
point_b = [x_array[i + 1], y_array[i + 1]]
sample(param_array[i], param_array[i + 1], point_a,
point_b, depth + 1)
# Sample further if one of the end points is None (i.e. a complex
# value) or the three points are not almost collinear.
elif (p[0] is None or p[1] is None
or q[1] is None or q[0] is None
or not flat(p, new_point, q)):
sample(param_p, param_new, p, new_point, depth + 1)
sample(param_new, param_q, new_point, q, depth + 1)
else:
x_coords.append(q[0])
y_coords.append(q[1])
f_start_x = f_x(self.start)
f_start_y = f_y(self.start)
start = [f_start_x, f_start_y]
f_end_x = f_x(self.end)
f_end_y = f_y(self.end)
end = [f_end_x, f_end_y]
x_coords.append(f_start_x)
y_coords.append(f_start_y)
sample(self.start, self.end, start, end, 0)
return x_coords, y_coords
### 3D lines
class Line3DBaseSeries(Line2DBaseSeries):
"""A base class for 3D lines.
Most of the stuff is derived from Line2DBaseSeries."""
is_2Dline = False
is_3Dline = True
_dim = 3
def __init__(self):
super().__init__()
class Parametric3DLineSeries(Line3DBaseSeries):
"""Representation for a 3D line consisting of three parametric SymPy
expressions and a range."""
is_parametric = True
def __init__(self, expr_x, expr_y, expr_z, var_start_end, **kwargs):
super().__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.expr_z = sympify(expr_z)
self.label = kwargs.get('label', None) or \
Tuple(self.expr_x, self.expr_y, self.expr_z)
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.line_color = kwargs.get('line_color', None)
self._xlim = None
self._ylim = None
self._zlim = None
def __str__(self):
return '3D parametric cartesian line: (%s, %s, %s) for %s over %s' % (
str(self.expr_x), str(self.expr_y), str(self.expr_z),
str(self.var), str((self.start, self.end)))
def get_parameter_points(self):
np = import_module('numpy')
return np.linspace(self.start, self.end, num=self.nb_of_points)
def get_points(self):
np = import_module('numpy')
param = self.get_parameter_points()
fx = vectorized_lambdify([self.var], self.expr_x)
fy = vectorized_lambdify([self.var], self.expr_y)
fz = vectorized_lambdify([self.var], self.expr_z)
list_x = fx(param)
list_y = fy(param)
list_z = fz(param)
list_x = np.array(list_x, dtype=np.float64)
list_y = np.array(list_y, dtype=np.float64)
list_z = np.array(list_z, dtype=np.float64)
list_x = np.ma.masked_invalid(list_x)
list_y = np.ma.masked_invalid(list_y)
list_z = np.ma.masked_invalid(list_z)
self._xlim = (np.amin(list_x), np.amax(list_x))
self._ylim = (np.amin(list_y), np.amax(list_y))
self._zlim = (np.amin(list_z), np.amax(list_z))
return list_x, list_y, list_z
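# Illustrative sketch: a helix as a 3D parametric line. The returned
# arrays are float64 and masked wherever evaluation is non-finite.
#
#     >>> from sympy import cos, sin
#     >>> from sympy.abc import u
#     >>> s = Parametric3DLineSeries(cos(u), sin(u), u, (u, 0, 10))
#     >>> xs, ys, zs = s.get_points()   # three masked 1D arrays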
### Surfaces
class SurfaceBaseSeries(BaseSeries):
"""A base class for 3D surfaces."""
is_3Dsurface = True
def __init__(self):
super().__init__()
self.surface_color = None
def get_color_array(self):
np = import_module('numpy')
c = self.surface_color
if isinstance(c, Callable):
f = np.vectorize(c)
nargs = arity(c)
if self.is_parametric:
variables = list(map(centers_of_faces, self.get_parameter_meshes()))
if nargs == 1:
return f(variables[0])
elif nargs == 2:
return f(*variables)
variables = list(map(centers_of_faces, self.get_meshes()))
if nargs == 1:
return f(variables[0])
elif nargs == 2:
return f(*variables[:2])
else:
return f(*variables)
else:
if isinstance(self, SurfaceOver2DRangeSeries):
return c*np.ones(min(self.nb_of_points_x, self.nb_of_points_y))
else:
return c*np.ones(min(self.nb_of_points_u, self.nb_of_points_v))
class SurfaceOver2DRangeSeries(SurfaceBaseSeries):
"""Representation for a 3D surface consisting of a SymPy expression and 2D
range."""
def __init__(self, expr, var_start_end_x, var_start_end_y, **kwargs):
super().__init__()
self.expr = sympify(expr)
self.var_x = sympify(var_start_end_x[0])
self.start_x = float(var_start_end_x[1])
self.end_x = float(var_start_end_x[2])
self.var_y = sympify(var_start_end_y[0])
self.start_y = float(var_start_end_y[1])
self.end_y = float(var_start_end_y[2])
self.nb_of_points_x = kwargs.get('nb_of_points_x', 50)
self.nb_of_points_y = kwargs.get('nb_of_points_y', 50)
self.surface_color = kwargs.get('surface_color', None)
self._xlim = (self.start_x, self.end_x)
self._ylim = (self.start_y, self.end_y)
def __str__(self):
return ('cartesian surface: %s for'
' %s over %s and %s over %s') % (
str(self.expr),
str(self.var_x),
str((self.start_x, self.end_x)),
str(self.var_y),
str((self.start_y, self.end_y)))
def get_meshes(self):
np = import_module('numpy')
mesh_x, mesh_y = np.meshgrid(np.linspace(self.start_x, self.end_x,
num=self.nb_of_points_x),
np.linspace(self.start_y, self.end_y,
num=self.nb_of_points_y))
f = vectorized_lambdify((self.var_x, self.var_y), self.expr)
mesh_z = f(mesh_x, mesh_y)
mesh_z = np.array(mesh_z, dtype=np.float64)
mesh_z = np.ma.masked_invalid(mesh_z)
self._zlim = (np.amin(mesh_z), np.amax(mesh_z))
return mesh_x, mesh_y, mesh_z
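# Sketch of the mesh contract above (illustrative): ``get_meshes``
# returns three 2D arrays of shape (nb_of_points_y, nb_of_points_x),
# with non-finite z values masked out.
#
#     >>> from sympy.abc import x, y
#     >>> s = SurfaceOver2DRangeSeries(x*y, (x, -1, 1), (y, -1, 1),
#     ...                              nb_of_points_x=20, nb_of_points_y=30)
#     >>> mx, my, mz = s.get_meshes()   # each of shape (30, 20)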
class ParametricSurfaceSeries(SurfaceBaseSeries):
"""Representation for a 3D surface consisting of three parametric SymPy
expressions and a range."""
is_parametric = True
def __init__(
self, expr_x, expr_y, expr_z, var_start_end_u, var_start_end_v,
**kwargs):
super().__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.expr_z = sympify(expr_z)
self.var_u = sympify(var_start_end_u[0])
self.start_u = float(var_start_end_u[1])
self.end_u = float(var_start_end_u[2])
self.var_v = sympify(var_start_end_v[0])
self.start_v = float(var_start_end_v[1])
self.end_v = float(var_start_end_v[2])
self.nb_of_points_u = kwargs.get('nb_of_points_u', 50)
self.nb_of_points_v = kwargs.get('nb_of_points_v', 50)
self.surface_color = kwargs.get('surface_color', None)
def __str__(self):
return ('parametric cartesian surface: (%s, %s, %s) for'
' %s over %s and %s over %s') % (
str(self.expr_x),
str(self.expr_y),
str(self.expr_z),
str(self.var_u),
str((self.start_u, self.end_u)),
str(self.var_v),
str((self.start_v, self.end_v)))
def get_parameter_meshes(self):
np = import_module('numpy')
return np.meshgrid(np.linspace(self.start_u, self.end_u,
num=self.nb_of_points_u),
np.linspace(self.start_v, self.end_v,
num=self.nb_of_points_v))
def get_meshes(self):
np = import_module('numpy')
mesh_u, mesh_v = self.get_parameter_meshes()
fx = vectorized_lambdify((self.var_u, self.var_v), self.expr_x)
fy = vectorized_lambdify((self.var_u, self.var_v), self.expr_y)
fz = vectorized_lambdify((self.var_u, self.var_v), self.expr_z)
mesh_x = fx(mesh_u, mesh_v)
mesh_y = fy(mesh_u, mesh_v)
mesh_z = fz(mesh_u, mesh_v)
mesh_x = np.array(mesh_x, dtype=np.float64)
mesh_y = np.array(mesh_y, dtype=np.float64)
mesh_z = np.array(mesh_z, dtype=np.float64)
mesh_x = np.ma.masked_invalid(mesh_x)
mesh_y = np.ma.masked_invalid(mesh_y)
mesh_z = np.ma.masked_invalid(mesh_z)
self._xlim = (np.amin(mesh_x), np.amax(mesh_x))
self._ylim = (np.amin(mesh_y), np.amax(mesh_y))
self._zlim = (np.amin(mesh_z), np.amax(mesh_z))
return mesh_x, mesh_y, mesh_z
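# Illustrative sketch: a torus as a parametric surface; the three
# meshes are evaluated over the (u, v) parameter grid.
#
#     >>> from sympy import cos, sin, pi
#     >>> from sympy.abc import u, v
#     >>> s = ParametricSurfaceSeries((2 + cos(v))*cos(u),
#     ...                             (2 + cos(v))*sin(u), sin(v),
#     ...                             (u, 0, 2*pi), (v, 0, 2*pi))
#     >>> mx, my, mz = s.get_meshes()   # each of shape (50, 50)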
### Contours
class ContourSeries(BaseSeries):
"""Representation for a contour plot."""
# The code is mostly repetition of SurfaceOver2DRange.
# Presently used in contour_plot function
is_contour = True
def __init__(self, expr, var_start_end_x, var_start_end_y):
super().__init__()
self.nb_of_points_x = 50
self.nb_of_points_y = 50
self.expr = sympify(expr)
self.var_x = sympify(var_start_end_x[0])
self.start_x = float(var_start_end_x[1])
self.end_x = float(var_start_end_x[2])
self.var_y = sympify(var_start_end_y[0])
self.start_y = float(var_start_end_y[1])
self.end_y = float(var_start_end_y[2])
self.get_points = self.get_meshes
self._xlim = (self.start_x, self.end_x)
self._ylim = (self.start_y, self.end_y)
def __str__(self):
return ('contour: %s for '
'%s over %s and %s over %s') % (
str(self.expr),
str(self.var_x),
str((self.start_x, self.end_x)),
str(self.var_y),
str((self.start_y, self.end_y)))
def get_meshes(self):
np = import_module('numpy')
mesh_x, mesh_y = np.meshgrid(np.linspace(self.start_x, self.end_x,
num=self.nb_of_points_x),
np.linspace(self.start_y, self.end_y,
num=self.nb_of_points_y))
f = vectorized_lambdify((self.var_x, self.var_y), self.expr)
return (mesh_x, mesh_y, f(mesh_x, mesh_y))
##############################################################################
# Backends
##############################################################################
class BaseBackend:
"""Base class for all backends. A backend represents the plotting library,
which implements the necessary functionalities in order to use SymPy
plotting functions.
How the plotting module works:
1. Whenever a plotting function is called, the provided expressions are
processed and a list of instances of the `BaseSeries` class is created,
containing the necessary information to plot the expressions (eg the
expression, ranges, series name, ...). Eventually, these objects will
generate the numerical data to be plotted.
2. A Plot object is instantiated, which stores the list of series and the
main attributes of the plot (eg axis labels, title, ...).
3. When the "show" command is executed, a new backend is instantiated,
which loops through each series object to generate and plot the
numerical data. The backend is also going to set the axis labels, title,
..., according to the values stored in the Plot instance.
The backend should check if it supports the data series that it's given
(e.g. TextBackend supports only LineOver1DRangeSeries).
It's the backend's responsibility to know how to use the class of data series
that it's given. Note that the current implementation of the `*Series`
classes is "matplotlib-centric": the numerical data returned by the
`get_points` and `get_meshes` methods is meant to be used directly by
Matplotlib. Therefore, the new backend will have to pre-process the
numerical data to make it compatible with the chosen plotting library.
Keep in mind that future SymPy versions may improve the `*Series` classes
in order to return numerical data that is not "matplotlib-centric"; hence,
if you code a new backend, it is your responsibility to check that it
still works on each SymPy release.
Please explore the `MatplotlibBackend` source code to understand how a
backend should be coded.
Methods
=======
In order to be used by SymPy plotting functions, a backend must implement
the following methods:
* `show(self)`: used to loop over the data series, generate the numerical
data, plot it and set the axis labels, title, ...
* `save(self, path)`: used to save the current plot to the specified file
path.
* `close(self)`: used to close the current plot backend (note: some plotting
libraries do not support this functionality; in that case, just raise a
warning).
See also
========
MatplotlibBackend
"""
def __init__(self, parent):
super().__init__()
self.parent = parent
def show(self):
raise NotImplementedError
def save(self, path):
raise NotImplementedError
def close(self):
raise NotImplementedError
# Don't have to check for the success of importing matplotlib in each case;
# we will only be using this backend if we can successfully import matplotlib
class MatplotlibBackend(BaseBackend):
""" This class implements the functionalities to use Matplotlib with SymPy
plotting functions.
"""
def __init__(self, parent):
super().__init__(parent)
self.matplotlib = import_module('matplotlib',
import_kwargs={'fromlist': ['pyplot', 'cm', 'collections']},
min_module_version='1.1.0', catch=(RuntimeError,))
self.plt = self.matplotlib.pyplot
self.cm = self.matplotlib.cm
self.LineCollection = self.matplotlib.collections.LineCollection
aspect = getattr(self.parent, 'aspect_ratio', 'auto')
if aspect != 'auto':
aspect = float(aspect[1]) / aspect[0]
if isinstance(self.parent, Plot):
nrows, ncolumns = 1, 1
series_list = [self.parent._series]
elif isinstance(self.parent, PlotGrid):
nrows, ncolumns = self.parent.nrows, self.parent.ncolumns
series_list = self.parent._series
self.ax = []
self.fig = self.plt.figure(figsize=parent.size)
for i, series in enumerate(series_list):
are_3D = [s.is_3D for s in series]
if any(are_3D) and not all(are_3D):
raise ValueError('The matplotlib backend cannot mix 2D and 3D.')
elif all(are_3D):
# mpl_toolkits.mplot3d is necessary for
# projection='3d'
mpl_toolkits = import_module('mpl_toolkits', # noqa
import_kwargs={'fromlist': ['mplot3d']})
self.ax.append(self.fig.add_subplot(nrows, ncolumns, i + 1, projection='3d', aspect=aspect))
elif not any(are_3D):
self.ax.append(self.fig.add_subplot(nrows, ncolumns, i + 1, aspect=aspect))
self.ax[i].spines['left'].set_position('zero')
self.ax[i].spines['right'].set_color('none')
self.ax[i].spines['bottom'].set_position('zero')
self.ax[i].spines['top'].set_color('none')
self.ax[i].xaxis.set_ticks_position('bottom')
self.ax[i].yaxis.set_ticks_position('left')
@staticmethod
def get_segments(x, y, z=None):
""" Convert two list of coordinates to a list of segments to be used
with Matplotlib's LineCollection.
Parameters
==========
x: list
List of x-coordinates
y: list
List of y-coordinates
z: list, optional
List of z-coordinates for a 3D line; omit for a 2D line.
"""
np = import_module('numpy')
if z is not None:
dim = 3
points = (x, y, z)
else:
dim = 2
points = (x, y)
points = np.ma.array(points).T.reshape(-1, 1, dim)
return np.ma.concatenate([points[:-1], points[1:]], axis=1)
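# Sketch of the segment layout (illustrative): N points become N - 1
# segments, each a pair of consecutive points, which is exactly what
# Matplotlib's LineCollection expects.
#
#     >>> segs = MatplotlibBackend.get_segments([0, 1, 2], [0, 1, 4])
#     >>> segs.shape   # (n_points - 1, 2, dim)
#     (2, 2, 2)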
def _process_series(self, series, ax, parent):
np = import_module('numpy')
mpl_toolkits = import_module(
'mpl_toolkits', import_kwargs={'fromlist': ['mplot3d']})
# XXX Workaround for matplotlib issue
# https://github.com/matplotlib/matplotlib/issues/17130
xlims, ylims, zlims = [], [], []
for s in series:
# Create the collections
if s.is_2Dline:
x, y = s.get_data()
if (isinstance(s.line_color, (int, float)) or
callable(s.line_color)):
segments = self.get_segments(x, y)
collection = self.LineCollection(segments)
collection.set_array(s.get_color_array())
ax.add_collection(collection)
else:
lbl = _str_or_latex(s.label)
line, = ax.plot(x, y, label=lbl, color=s.line_color)
elif s.is_contour:
ax.contour(*s.get_meshes())
elif s.is_3Dline:
x, y, z = s.get_data()
if (isinstance(s.line_color, (int, float)) or
callable(s.line_color)):
art3d = mpl_toolkits.mplot3d.art3d
segments = self.get_segments(x, y, z)
collection = art3d.Line3DCollection(segments)
collection.set_array(s.get_color_array())
ax.add_collection(collection)
else:
lbl = _str_or_latex(s.label)
ax.plot(x, y, z, label=lbl, color=s.line_color)
xlims.append(s._xlim)
ylims.append(s._ylim)
zlims.append(s._zlim)
elif s.is_3Dsurface:
x, y, z = s.get_meshes()
collection = ax.plot_surface(x, y, z,
cmap=getattr(self.cm, 'viridis', self.cm.jet),
rstride=1, cstride=1, linewidth=0.1)
if isinstance(s.surface_color, (float, int, Callable)):
color_array = s.get_color_array()
color_array = color_array.reshape(color_array.size)
collection.set_array(color_array)
else:
collection.set_color(s.surface_color)
xlims.append(s._xlim)
ylims.append(s._ylim)
zlims.append(s._zlim)
elif s.is_implicit:
points = s.get_raster()
if len(points) == 2:
# interval math plotting
x, y = _matplotlib_list(points[0])
ax.fill(x, y, facecolor=s.line_color, edgecolor='None')
else:
# use contourf or contour depending on whether it is
# an inequality or equality.
# XXX: ``contour`` plots multiple lines. Should be fixed.
ListedColormap = self.matplotlib.colors.ListedColormap
colormap = ListedColormap(["white", s.line_color])
xarray, yarray, zarray, plot_type = points
if plot_type == 'contour':
ax.contour(xarray, yarray, zarray, cmap=colormap,
label=_str_or_latex(s.label))
else:
ax.contourf(xarray, yarray, zarray, cmap=colormap,
label=_str_or_latex(s.label))
else:
raise NotImplementedError(
'{} is not supported in the SymPy plotting module '
'with matplotlib backend. Please report this issue.'
.format(s))
Axes3D = mpl_toolkits.mplot3d.Axes3D
if not isinstance(ax, Axes3D):
ax.autoscale_view(
scalex=ax.get_autoscalex_on(),
scaley=ax.get_autoscaley_on())
else:
# XXX Workaround for matplotlib issue
# https://github.com/matplotlib/matplotlib/issues/17130
if xlims:
xlims = np.array(xlims)
xlim = (np.amin(xlims[:, 0]), np.amax(xlims[:, 1]))
ax.set_xlim(xlim)
else:
ax.set_xlim([0, 1])
if ylims:
ylims = np.array(ylims)
ylim = (np.amin(ylims[:, 0]), np.amax(ylims[:, 1]))
ax.set_ylim(ylim)
else:
ax.set_ylim([0, 1])
if zlims:
zlims = np.array(zlims)
zlim = (np.amin(zlims[:, 0]), np.amax(zlims[:, 1]))
ax.set_zlim(zlim)
else:
ax.set_zlim([0, 1])
# Set global options.
# TODO The 3D stuff
# XXX The order of those is important.
if parent.xscale and not isinstance(ax, Axes3D):
ax.set_xscale(parent.xscale)
if parent.yscale and not isinstance(ax, Axes3D):
ax.set_yscale(parent.yscale)
if not isinstance(ax, Axes3D) or self.matplotlib.__version__ >= '1.2.0': # XXX in the distant future remove this check
ax.set_autoscale_on(parent.autoscale)
if parent.axis_center:
val = parent.axis_center
if isinstance(ax, Axes3D):
pass
elif val == 'center':
ax.spines['left'].set_position('center')
ax.spines['bottom'].set_position('center')
elif val == 'auto':
xl, xh = ax.get_xlim()
yl, yh = ax.get_ylim()
pos_left = ('data', 0) if xl*xh <= 0 else 'center'
pos_bottom = ('data', 0) if yl*yh <= 0 else 'center'
ax.spines['left'].set_position(pos_left)
ax.spines['bottom'].set_position(pos_bottom)
else:
ax.spines['left'].set_position(('data', val[0]))
ax.spines['bottom'].set_position(('data', val[1]))
if not parent.axis:
ax.set_axis_off()
if parent.legend:
if ax.legend():
ax.legend_.set_visible(parent.legend)
if parent.margin:
ax.set_xmargin(parent.margin)
ax.set_ymargin(parent.margin)
if parent.title:
ax.set_title(parent.title)
if parent.xlabel:
xlbl = _str_or_latex(parent.xlabel)
ax.set_xlabel(xlbl, position=(1, 0))
if parent.ylabel:
ylbl = _str_or_latex(parent.ylabel)
ax.set_ylabel(ylbl, position=(0, 1))
if isinstance(ax, Axes3D) and parent.zlabel:
zlbl = _str_or_latex(parent.zlabel)
ax.set_zlabel(zlbl, position=(0, 1))
if parent.annotations:
for a in parent.annotations:
ax.annotate(**a)
if parent.markers:
for marker in parent.markers:
# make a copy of the marker dictionary
# so that it doesn't get altered
m = marker.copy()
args = m.pop('args')
ax.plot(*args, **m)
if parent.rectangles:
for r in parent.rectangles:
rect = self.matplotlib.patches.Rectangle(**r)
ax.add_patch(rect)
if parent.fill:
ax.fill_between(**parent.fill)
# xlim and ylim should always be set last so that the plot limits
# don't get altered during the process.
if parent.xlim:
ax.set_xlim(parent.xlim)
if parent.ylim:
ax.set_ylim(parent.ylim)
def process_series(self):
"""
Iterates over every ``Plot`` object and further calls
_process_series()
"""
parent = self.parent
if isinstance(parent, Plot):
series_list = [parent._series]
else:
series_list = parent._series
for i, (series, ax) in enumerate(zip(series_list, self.ax)):
if isinstance(self.parent, PlotGrid):
parent = self.parent.args[i]
self._process_series(series, ax, parent)
def show(self):
self.process_series()
#TODO after fixing https://github.com/ipython/ipython/issues/1255
# you can uncomment the next line and remove the pyplot.show() call
#self.fig.show()
if _show:
self.fig.tight_layout()
self.plt.show()
else:
self.close()
def save(self, path):
self.process_series()
self.fig.savefig(path)
def close(self):
self.plt.close(self.fig)
class TextBackend(BaseBackend):
def __init__(self, parent):
super().__init__(parent)
def show(self):
if not _show:
return
if len(self.parent._series) != 1:
raise ValueError(
'The TextBackend supports only one graph per Plot.')
elif not isinstance(self.parent._series[0], LineOver1DRangeSeries):
raise ValueError(
'The TextBackend supports only expressions over a 1D range')
else:
ser = self.parent._series[0]
textplot(ser.expr, ser.start, ser.end)
def close(self):
pass
class DefaultBackend(BaseBackend):
def __new__(cls, parent):
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
return MatplotlibBackend(parent)
else:
return TextBackend(parent)
plot_backends = {
'matplotlib': MatplotlibBackend,
'text': TextBackend,
'default': DefaultBackend
}
##############################################################################
# Finding the centers of line segments or mesh faces
##############################################################################
def centers_of_segments(array):
np = import_module('numpy')
return np.mean(np.vstack((array[:-1], array[1:])), 0)
def centers_of_faces(array):
np = import_module('numpy')
return np.mean(np.dstack((array[:-1, :-1],
array[1:, :-1],
array[:-1, 1:],
array[:-1, :-1],
)), 2)
def flat(x, y, z, eps=1e-3):
"""Checks whether three points are almost collinear"""
np = import_module('numpy')
# Workaround for plotting piecewise expressions (#8577):
# `lambdify` in `.experimental_lambdify` fails to return numerical
# values in some cases. A lower-level fix in `lambdify` is possible.
vector_a = (x - y).astype(np.float64)
vector_b = (z - y).astype(np.float64)
dot_product = np.dot(vector_a, vector_b)
vector_a_norm = np.linalg.norm(vector_a)
vector_b_norm = np.linalg.norm(vector_b)
cos_theta = dot_product / (vector_a_norm * vector_b_norm)
return abs(cos_theta + 1) < eps
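# Sketch of the collinearity test above (illustrative): for collinear
# points the two difference vectors are anti-parallel, so cos_theta is -1;
# any kink moves cos_theta away from -1.
#
#     >>> import numpy as np
#     >>> flat(np.array([0., 0.]), np.array([1., 1.]), np.array([2., 2.]))
#     True
#     >>> flat(np.array([0., 0.]), np.array([1., 1.]), np.array([2., 0.]))
#     False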
def _matplotlib_list(interval_list):
"""
Returns lists for matplotlib ``fill`` command from a list of bounding
rectangular intervals
"""
xlist = []
ylist = []
if len(interval_list):
for intervals in interval_list:
intervalx = intervals[0]
intervaly = intervals[1]
xlist.extend([intervalx.start, intervalx.start,
intervalx.end, intervalx.end, None])
ylist.extend([intervaly.start, intervaly.end,
intervaly.end, intervaly.start, None])
else:
#XXX Ugly hack. Matplotlib does not accept empty lists for ``fill``
xlist.extend((None, None, None, None))
ylist.extend((None, None, None, None))
return xlist, ylist
#### New API for plotting module ####
# TODO: Add color arrays for plots.
# TODO: Add more plotting options for 3d plots.
# TODO: Adaptive sampling for 3D plots.
def plot(*args, show=True, **kwargs):
"""Plots a function of a single variable as a curve.
Parameters
==========
args :
The first argument is the expression representing the function
of single variable to be plotted.
The last argument is a 3-tuple denoting the range of the free
variable. e.g. ``(x, 0, 5)``
Typical usage examples are as follows:
- Plotting a single expression with a single range.
``plot(expr, range, **kwargs)``
- Plotting a single expression with the default range (-10, 10).
``plot(expr, **kwargs)``
- Plotting multiple expressions with a single range.
``plot(expr1, expr2, ..., range, **kwargs)``
- Plotting multiple expressions with multiple ranges.
``plot((expr1, range1), (expr2, range2), ..., **kwargs)``
It is best practice to specify the range explicitly, because the
default range may change in the future if a more advanced default
range detection algorithm is implemented.
show : bool, optional
The default value is set to ``True``. Set show to ``False`` and
the function will not display the plot. The returned instance of
the ``Plot`` class can then be used to save or display the plot
by calling the ``save()`` and ``show()`` methods respectively.
line_color : string, or float, or function, optional
Specifies the color for the plot.
See ``Plot`` to see how to set color for the plots.
Note that ``line_color`` is applied simultaneously to all the series.
title : str, optional
Title of the plot. It is set to the latex representation of
the expression, if the plot has only one expression.
label : str, optional
The label of the expression in the plot. It will be used when
called with ``legend``. Default is the name of the expression.
e.g. ``sin(x)``
xlabel : str or expression, optional
Label for the x-axis.
ylabel : str or expression, optional
Label for the y-axis.
xscale : 'linear' or 'log', optional
Sets the scaling of the x-axis.
yscale : 'linear' or 'log', optional
Sets the scaling of the y-axis.
axis_center : (float, float), optional
Tuple of two floats denoting the coordinates of the center or
{'center', 'auto'}
xlim : (float, float), optional
Denotes the x-axis limits, ``(min, max)``.
ylim : (float, float), optional
Denotes the y-axis limits, ``(min, max)``.
annotations : list, optional
A list of dictionaries specifying the type of annotation
required. The keys in the dictionary should be equivalent
to the arguments of the matplotlib's annotate() function.
markers : list, optional
A list of dictionaries specifying the type of markers required.
The keys in the dictionary should be equivalent to the arguments
of the matplotlib's plot() function along with the marker
related keyword arguments.
rectangles : list, optional
A list of dictionaries specifying the dimensions of the
rectangles to be plotted. The keys in the dictionary should be
equivalent to the arguments of the matplotlib's
patches.Rectangle class.
fill : dict, optional
A dictionary specifying the type of color filling required in
the plot. The keys in the dictionary should be equivalent to the
arguments of the matplotlib's fill_between() function.
adaptive : bool, optional
The default value is set to ``True``. Set adaptive to ``False``
and specify ``nb_of_points`` if uniform sampling is required.
The plotting uses an adaptive algorithm which samples
recursively to accurately plot. The adaptive algorithm uses a
random point near the midpoint of two points that has to be
further sampled. Hence, the same plot can appear slightly
different across runs.
depth : int, optional
Recursion depth of the adaptive algorithm. A depth of value
``n`` samples a maximum of `2^{n}` points.
If the ``adaptive`` flag is set to ``False``, this will be
ignored.
nb_of_points : int, optional
Used when the ``adaptive`` is set to ``False``. The function
is uniformly sampled at ``nb_of_points`` number of points.
If the ``adaptive`` flag is set to ``True``, this will be
ignored.
size : (float, float), optional
A tuple in the form (width, height) in inches to specify the size of
the overall figure. The default value is set to ``None``, meaning
the size will be set by the default backend.
Examples
========
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> from sympy import symbols
>>> from sympy.plotting import plot
>>> x = symbols('x')
Single Plot
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> plot(x**2, (x, -5, 5))
Plot object containing:
[0]: cartesian line: x**2 for x over (-5.0, 5.0)
Multiple plots with single range.
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> plot(x, x**2, x**3, (x, -5, 5))
Plot object containing:
[0]: cartesian line: x for x over (-5.0, 5.0)
[1]: cartesian line: x**2 for x over (-5.0, 5.0)
[2]: cartesian line: x**3 for x over (-5.0, 5.0)
Multiple plots with different ranges.
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> plot((x**2, (x, -6, 6)), (x, (x, -5, 5)))
Plot object containing:
[0]: cartesian line: x**2 for x over (-6.0, 6.0)
[1]: cartesian line: x for x over (-5.0, 5.0)
No adaptive sampling.
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> plot(x**2, adaptive=False, nb_of_points=400)
Plot object containing:
[0]: cartesian line: x**2 for x over (-10.0, 10.0)
See Also
========
Plot, LineOver1DRangeSeries
"""
args = list(map(sympify, args))
free = set()
for a in args:
if isinstance(a, Expr):
free |= a.free_symbols
if len(free) > 1:
raise ValueError(
'The same variable should be used in all '
'univariate expressions being plotted.')
x = free.pop() if free else Symbol('x')
kwargs.setdefault('xlabel', x)
kwargs.setdefault('ylabel', Function('f')(x))
series = []
plot_expr = check_arguments(args, 1, 1)
series = [LineOver1DRangeSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
def plot_parametric(*args, show=True, **kwargs):
"""
Plots a 2D parametric curve.
Parameters
==========
args
Common specifications are:
- Plotting a single parametric curve with a range
``plot_parametric((expr_x, expr_y), range)``
- Plotting multiple parametric curves with the same range
``plot_parametric((expr_x, expr_y), ..., range)``
- Plotting multiple parametric curves with different ranges
``plot_parametric((expr_x, expr_y, range), ...)``
``expr_x`` is the expression representing $x$ component of the
parametric function.
``expr_y`` is the expression representing $y$ component of the
parametric function.
``range`` is a 3-tuple denoting the parameter symbol, start and
stop. For example, ``(u, 0, 5)``.
If the range is not specified, then a default range of (-10, 10)
is used.
However, if the arguments are specified as
``(expr_x, expr_y, range), ...``, you must specify the ranges
for each expression manually.
Default range may change in the future if a more advanced
algorithm is implemented.
adaptive : bool, optional
Specifies whether to use the adaptive sampling or not.
The default value is set to ``True``. Set adaptive to ``False``
and specify ``nb_of_points`` if uniform sampling is required.
depth : int, optional
The recursion depth of the adaptive algorithm. A depth of
value $n$ samples a maximum of $2^n$ points.
nb_of_points : int, optional
Used when the ``adaptive`` flag is set to ``False``.
Specifies the number of the points used for the uniform
sampling.
line_color : string, or float, or function, optional
Specifies the color for the plot.
See ``Plot`` to see how to set color for the plots.
Note that ``line_color`` is applied simultaneously to all the series.
label : str, optional
The label of the expression in the plot. It will be used when
called with ``legend``. Default is the name of the expression.
e.g. ``sin(x)``
xlabel : str, optional
Label for the x-axis.
ylabel : str, optional
Label for the y-axis.
xscale : 'linear' or 'log', optional
Sets the scaling of the x-axis.
yscale : 'linear' or 'log', optional
Sets the scaling of the y-axis.
axis_center : (float, float), optional
Tuple of two floats denoting the coordinates of the center or
{'center', 'auto'}
xlim : (float, float), optional
Denotes the x-axis limits, ``(min, max)``.
ylim : (float, float), optional
Denotes the y-axis limits, ``(min, max)``.
size : (float, float), optional
A tuple in the form (width, height) in inches to specify the size of
the overall figure. The default value is set to ``None``, meaning
the size will be set by the default backend.
Examples
========
.. plot::
:context: reset
:format: doctest
:include-source: True
>>> from sympy import plot_parametric, symbols, cos, sin
>>> u = symbols('u')
A parametric plot with a single expression:
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> plot_parametric((cos(u), sin(u)), (u, -5, 5))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-5.0, 5.0)
A parametric plot with multiple expressions with the same range:
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> plot_parametric((cos(u), sin(u)), (u, cos(u)), (u, -10, 10))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-10.0, 10.0)
[1]: parametric cartesian line: (u, cos(u)) for u over (-10.0, 10.0)
A parametric plot with multiple expressions with different ranges
for each curve:
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> plot_parametric((cos(u), sin(u), (u, -5, 5)),
... (cos(u), u, (u, -5, 5)))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-5.0, 5.0)
[1]: parametric cartesian line: (cos(u), u) for u over (-5.0, 5.0)
Notes
=====
The plotting uses an adaptive algorithm which samples recursively to
accurately plot the curve. The adaptive algorithm uses a random point
near the midpoint of two points that has to be further sampled.
Hence, repeating the same plot command can give slightly different
results because of the random sampling.
If there are multiple plots, then the same optional arguments are
applied to all the plots drawn in the same canvas. If you want to
set these options separately, you can index the returned ``Plot``
object and set it.
For example, when you specify ``line_color`` once, it would be
applied simultaneously to both series.
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> from sympy import pi
>>> expr1 = (u, cos(2*pi*u)/2 + 1/2)
>>> expr2 = (u, sin(2*pi*u)/2 + 1/2)
>>> p = plot_parametric(expr1, expr2, (u, 0, 1), line_color='blue')
If you want to specify the line color for the specific series, you
should index each item and apply the property manually.
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> p[0].line_color = 'red'
>>> p.show()
See Also
========
Plot, Parametric2DLineSeries
"""
args = list(map(sympify, args))
series = []
plot_expr = check_arguments(args, 2, 1)
series = [Parametric2DLineSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
def plot3d_parametric_line(*args, show=True, **kwargs):
"""
Plots a 3D parametric line plot.
Usage
=====
Single plot:
``plot3d_parametric_line(expr_x, expr_y, expr_z, range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots.
``plot3d_parametric_line((expr_x, expr_y, expr_z, range), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x`` : Expression representing the function along x.
``expr_y`` : Expression representing the function along y.
``expr_z`` : Expression representing the function along z.
``range``: ``(u, 0, 5)``, A 3-tuple denoting the range of the parameter
variable.
Keyword Arguments
=================
Arguments for ``Parametric3DLineSeries`` class.
``nb_of_points``: The range is uniformly sampled at ``nb_of_points``
points.
Aesthetics:
``line_color``: string, or float, or function, optional
Specifies the color for the plot.
See ``Plot`` to see how to set color for the plots.
Note that ``line_color`` is applied simultaneously to all the series.
``label``: str
The label of the plot. It will be used when called with ``legend=True``
to denote the function with the given label in the plot.
If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class.
``title`` : str. Title of the plot.
``size`` : (float, float), optional
A tuple in the form (width, height) in inches to specify the size of
the overall figure. The default value is set to ``None``, meaning
the size will be set by the default backend.
Examples
========
.. plot::
:context: reset
:format: doctest
:include-source: True
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot3d_parametric_line
>>> u = symbols('u')
Single plot.
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> plot3d_parametric_line(cos(u), sin(u), u, (u, -5, 5))
Plot object containing:
[0]: 3D parametric cartesian line: (cos(u), sin(u), u) for u over (-5.0, 5.0)
Multiple plots.
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> plot3d_parametric_line((cos(u), sin(u), u, (u, -5, 5)),
... (sin(u), u**2, u, (u, -5, 5)))
Plot object containing:
[0]: 3D parametric cartesian line: (cos(u), sin(u), u) for u over (-5.0, 5.0)
[1]: 3D parametric cartesian line: (sin(u), u**2, u) for u over (-5.0, 5.0)
See Also
========
Plot, Parametric3DLineSeries
"""
args = list(map(sympify, args))
series = []
plot_expr = check_arguments(args, 3, 1)
series = [Parametric3DLineSeries(*arg, **kwargs) for arg in plot_expr]
kwargs.setdefault("xlabel", "x")
kwargs.setdefault("ylabel", "y")
kwargs.setdefault("zlabel", "z")
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
def plot3d(*args, show=True, **kwargs):
"""
Plots a 3D surface plot.
Usage
=====
Single plot
``plot3d(expr, range_x, range_y, **kwargs)``
If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plot with the same range.
``plot3d(expr1, expr2, range_x, range_y, **kwargs)``
If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot3d((expr1, range_x, range_y), (expr2, range_x, range_y), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr`` : Expression representing the function of the two range variables.
``range_x``: (x, 0, 5), A 3-tuple denoting the range of the x
variable.
``range_y``: (y, 0, 5), A 3-tuple denoting the range of the y
variable.
Keyword Arguments
=================
Arguments for ``SurfaceOver2DRangeSeries`` class:
``nb_of_points_x``: int. The x range is sampled uniformly at
``nb_of_points_x`` points.
``nb_of_points_y``: int. The y range is sampled uniformly at
``nb_of_points_y`` points.
Aesthetics:
``surface_color``: Function which returns a float. Specifies the color for
the surface of the plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot.
``size`` : (float, float), optional
A tuple in the form (width, height) in inches to specify the size of the
overall figure. The default value is set to ``None``, meaning the size will
be set by the default backend.
Examples
========
.. plot::
:context: reset
:format: doctest
:include-source: True
>>> from sympy import symbols
>>> from sympy.plotting import plot3d
>>> x, y = symbols('x y')
Single plot
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> plot3d(x*y, (x, -5, 5), (y, -5, 5))
Plot object containing:
[0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
Multiple plots with same range
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> plot3d(x*y, -x*y, (x, -5, 5), (y, -5, 5))
Plot object containing:
[0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
[1]: cartesian surface: -x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
Multiple plots with different ranges.
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> plot3d((x**2 + y**2, (x, -5, 5), (y, -5, 5)),
... (x*y, (x, -3, 3), (y, -3, 3)))
Plot object containing:
[0]: cartesian surface: x**2 + y**2 for x over (-5.0, 5.0) and y over (-5.0, 5.0)
[1]: cartesian surface: x*y for x over (-3.0, 3.0) and y over (-3.0, 3.0)
See Also
========
Plot, SurfaceOver2DRangeSeries
"""
args = list(map(sympify, args))
series = []
plot_expr = check_arguments(args, 1, 2)
series = [SurfaceOver2DRangeSeries(*arg, **kwargs) for arg in plot_expr]
kwargs.setdefault("xlabel", series[0].var_x)
kwargs.setdefault("ylabel", series[0].var_y)
kwargs.setdefault("zlabel", Function('f')(series[0].var_x, series[0].var_y))
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
def plot3d_parametric_surface(*args, show=True, **kwargs):
"""
Plots a 3D parametric surface plot.
Explanation
===========
Single plot.
``plot3d_parametric_surface(expr_x, expr_y, expr_z, range_u, range_v, **kwargs)``
If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plots.
``plot3d_parametric_surface((expr_x, expr_y, expr_z, range_u, range_v), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x``: Expression representing the function along ``x``.
``expr_y``: Expression representing the function along ``y``.
``expr_z``: Expression representing the function along ``z``.
``range_u``: ``(u, 0, 5)``, A 3-tuple denoting the range of the ``u``
variable.
``range_v``: ``(v, 0, 5)``, A 3-tuple denoting the range of the ``v``
variable.
Keyword Arguments
=================
Arguments for ``ParametricSurfaceSeries`` class:
``nb_of_points_u``: int. The ``u`` range is sampled uniformly at
``nb_of_points_u`` points.
``nb_of_points_v``: int. The ``v`` range is sampled uniformly at
``nb_of_points_v`` points.
Aesthetics:
``surface_color``: Function which returns a float. Specifies the color for
the surface of the plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied for
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot.
``size`` : (float, float), optional
A tuple in the form (width, height) in inches to specify the size of the
overall figure. The default value is set to ``None``, meaning the size will
be set by the default backend.
Examples
========
.. plot::
:context: reset
:format: doctest
:include-source: True
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot3d_parametric_surface
>>> u, v = symbols('u v')
Single plot.
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> plot3d_parametric_surface(cos(u + v), sin(u - v), u - v,
... (u, -5, 5), (v, -5, 5))
Plot object containing:
[0]: parametric cartesian surface: (cos(u + v), sin(u - v), u - v) for u over (-5.0, 5.0) and v over (-5.0, 5.0)
See Also
========
Plot, ParametricSurfaceSeries
"""
args = list(map(sympify, args))
series = []
plot_expr = check_arguments(args, 3, 2)
series = [ParametricSurfaceSeries(*arg, **kwargs) for arg in plot_expr]
kwargs.setdefault("xlabel", "x")
kwargs.setdefault("ylabel", "y")
kwargs.setdefault("zlabel", "z")
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
def plot_contour(*args, show=True, **kwargs):
"""
Draws a contour plot of a function.
Usage
=====
Single plot
``plot_contour(expr, range_x, range_y, **kwargs)``
If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plot with the same range.
``plot_contour(expr1, expr2, range_x, range_y, **kwargs)``
If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot_contour((expr1, range_x, range_y), (expr2, range_x, range_y), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr`` : Expression representing the function of the two range variables.
``range_x``: (x, 0, 5), A 3-tuple denoting the range of the x
variable.
``range_y``: (y, 0, 5), A 3-tuple denoting the range of the y
variable.
Keyword Arguments
=================
Arguments for ``ContourSeries`` class:
``nb_of_points_x``: int. The x range is sampled uniformly at
``nb_of_points_x`` points.
``nb_of_points_y``: int. The y range is sampled uniformly at
``nb_of_points_y`` points.
Aesthetics:
``surface_color``: Function which returns a float. Specifies the color for
the surface of the plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot.
``size`` : (float, float), optional
A tuple in the form (width, height) in inches to specify the size of
the overall figure. The default value is set to ``None``, meaning
the size will be set by the default backend.
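Examples
========
A minimal example (illustrative; the textual output follows
``ContourSeries.__str__``):
.. plot::
:context: reset
:format: doctest
:include-source: True
>>> from sympy import symbols
>>> from sympy.plotting import plot_contour
>>> x, y = symbols('x y')
>>> plot_contour(x**2 + y**2, (x, -5, 5), (y, -5, 5))
Plot object containing:
[0]: contour: x**2 + y**2 for x over (-5.0, 5.0) and y over (-5.0, 5.0)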
See Also
========
Plot, ContourSeries
"""
args = list(map(sympify, args))
plot_expr = check_arguments(args, 1, 2)
series = [ContourSeries(*arg) for arg in plot_expr]
plot_contours = Plot(*series, **kwargs)
if len(plot_expr[0].free_symbols) > 2:
raise ValueError('Contour plots cannot have more than two variables.')
if show:
plot_contours.show()
return plot_contours
def check_arguments(args, expr_len, nb_of_free_symbols):
"""
Checks the arguments and converts into tuples of the
form (exprs, ranges).
Examples
========
.. plot::
:context: reset
:format: doctest
:include-source: True
>>> from sympy import cos, sin, symbols
>>> from sympy.plotting.plot import check_arguments
>>> x = symbols('x')
>>> check_arguments([cos(x), sin(x)], 2, 1)
[(cos(x), sin(x), (x, -10, 10))]
>>> check_arguments([x, x**2], 1, 1)
[(x, (x, -10, 10)), (x**2, (x, -10, 10))]
"""
if not args:
return []
if expr_len > 1 and isinstance(args[0], Expr):
# Multiple expressions same range.
# The arguments are tuples when the expression length is
# greater than 1.
if len(args) < expr_len:
raise ValueError("len(args) should not be less than expr_len")
for i in range(len(args)):
if isinstance(args[i], Tuple):
break
else:
i = len(args) + 1
exprs = Tuple(*args[:i])
free_symbols = list(set().union(*[e.free_symbols for e in exprs]))
if len(args) == expr_len + nb_of_free_symbols:
#Ranges given
plots = [exprs + Tuple(*args[expr_len:])]
else:
default_range = Tuple(-10, 10)
ranges = []
for symbol in free_symbols:
ranges.append(Tuple(symbol) + default_range)
for i in range(len(free_symbols) - nb_of_free_symbols):
ranges.append(Tuple(Dummy()) + default_range)
plots = [exprs + Tuple(*ranges)]
return plots
if isinstance(args[0], Expr) or (isinstance(args[0], Tuple) and
len(args[0]) == expr_len and
expr_len != 3):
            # We cannot handle the case of three expressions here, since a
            # 3-tuple of expressions cannot be distinguished from a range.
            # Series of plots with the same range.
for i in range(len(args)):
if isinstance(args[i], Tuple) and len(args[i]) != expr_len:
break
if not isinstance(args[i], Tuple):
args[i] = Tuple(args[i])
else:
i = len(args) + 1
exprs = args[:i]
assert all(isinstance(e, Expr) for expr in exprs for e in expr)
free_symbols = list(set().union(*[e.free_symbols for expr in exprs
for e in expr]))
if len(free_symbols) > nb_of_free_symbols:
raise ValueError("The number of free_symbols in the expression "
"is greater than %d" % nb_of_free_symbols)
if len(args) == i + nb_of_free_symbols and isinstance(args[i], Tuple):
ranges = Tuple(*[range_expr for range_expr in args[
i:i + nb_of_free_symbols]])
plots = [expr + ranges for expr in exprs]
return plots
else:
# Use default ranges.
default_range = Tuple(-10, 10)
ranges = []
for symbol in free_symbols:
ranges.append(Tuple(symbol) + default_range)
for i in range(nb_of_free_symbols - len(free_symbols)):
ranges.append(Tuple(Dummy()) + default_range)
ranges = Tuple(*ranges)
plots = [expr + ranges for expr in exprs]
return plots
elif isinstance(args[0], Tuple) and len(args[0]) == expr_len + nb_of_free_symbols:
# Multiple plots with different ranges.
for arg in args:
for i in range(expr_len):
if not isinstance(arg[i], Expr):
raise ValueError("Expected an expression, given %s" %
str(arg[i]))
for i in range(nb_of_free_symbols):
if not len(arg[i + expr_len]) == 3:
raise ValueError("The ranges should be a tuple of "
"length 3, got %s" % str(arg[i + expr_len]))
return args
|
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
from collections.abc import Iterable
import warnings
import inspect
import numpy as np
from qutip.qip.circuit_latex import _latex_compile
from qutip.qip.gates import *
from qutip.qip.qubits import qubit_states
__all__ = ['Gate', 'QubitCircuit']
class Gate(object):
"""
    Representation of a quantum gate, with its required parameters, and target
    and control qubits.
Parameters
----------
name : string
Gate name.
targets : list or int
Gate targets.
controls : list or int
Gate controls.
arg_value : float
        Argument value (phi).
arg_label : string
Label for gate representation.
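
    Examples
    --------
    A minimal illustrative example; the printed form follows ``Gate.__str__``:

    >>> g = Gate("CNOT", targets=[0], controls=[1])
    >>> print(g)
    Gate(CNOT, targets=[0], controls=[1])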
"""
def __init__(self, name, targets=None, controls=None, arg_value=None,
arg_label=None):
"""
Create a gate with specified parameters.
"""
self.name = name
self.targets = None
self.controls = None
if not isinstance(targets, Iterable) and targets is not None:
self.targets = [targets]
else:
self.targets = targets
if not isinstance(controls, Iterable) and controls is not None:
self.controls = [controls]
else:
self.controls = controls
for ind_list in [self.targets, self.controls]:
if isinstance(ind_list, Iterable):
                # np.int is deprecated; accept both Python and NumPy integers.
                all_integer = all(
                    [isinstance(ind, (int, np.integer)) for ind in ind_list])
if not all_integer:
raise ValueError("Index of a qubit must be an integer")
if name in ["SWAP", "ISWAP", "SQRTISWAP", "SQRTSWAP", "BERKELEY",
"SWAPalpha"]:
if (self.targets is None) or (len(self.targets) != 2):
raise ValueError("Gate %s requires two targets" % name)
if self.controls is not None:
raise ValueError("Gate %s cannot have a control" % name)
elif name in ["CNOT", "CSIGN", "CRX", "CRY", "CRZ"]:
if self.targets is None or len(self.targets) != 1:
raise ValueError("Gate %s requires one target" % name)
if self.controls is None or len(self.controls) != 1:
raise ValueError("Gate %s requires one control" % name)
elif name in ["SNOT", "RX", "RY", "RZ", "PHASEGATE"]:
if self.controls is not None:
raise ValueError("Gate %s does not take controls" % name)
elif name in ["RX", "RY", "RZ", "CPHASE", "SWAPalpha", "PHASEGATE",
"GLOBALPHASE", "CRX", "CRY", "CRZ"]:
if arg_value is None:
raise ValueError("Gate %s requires an argument value" % name)
self.arg_value = arg_value
self.arg_label = arg_label
def __str__(self):
s = "Gate(%s, targets=%s, controls=%s)" % (self.name,
self.targets,
self.controls)
return s
def __repr__(self):
return str(self)
def _repr_latex_(self):
return str(self)
_gate_name_to_label = {
'RX': r'R_x',
'RY': r'R_y',
'RZ': r'R_z',
'CRX': r'R_x',
'CRY': r'R_y',
'CRZ': r'R_z',
'SQRTNOT': r'\sqrt{\rm NOT}',
'SNOT': r'{\rm H}',
'PHASEGATE': r'{\rm PHASE}',
'CPHASE': r'{\rm R}',
'CNOT': r'{\rm CNOT}',
'CSIGN': r'{\rm Z}',
'BERKELEY': r'{\rm BERKELEY}',
'SWAPalpha': r'{\rm SWAPalpha}',
'SWAP': r'{\rm SWAP}',
'ISWAP': r'{i}{\rm SWAP}',
'SQRTSWAP': r'\sqrt{\rm SWAP}',
'SQRTISWAP': r'\sqrt{{i}\rm SWAP}',
'FREDKIN': r'{\rm FREDKIN}',
'TOFFOLI': r'{\rm TOFFOLI}',
'GLOBALPHASE': r'{\rm Ph}',
}
def _gate_label(name, arg_label):
if name in _gate_name_to_label:
gate_label = _gate_name_to_label[name]
else:
warnings.warn("Unknown gate %s" % name)
gate_label = name
if arg_label:
return r'%s(%s)' % (gate_label, arg_label)
else:
return r'%s' % gate_label
class QubitCircuit(object):
"""
Representation of a quantum program/algorithm, maintaining a sequence
of gates.
Parameters
----------
N : int
Number of qubits in the system.
user_gates : dict
Define a dictionary of the custom gates. See examples for detail.
input_states : list
        A list of strings such as '0', '+', 'A', 'Y'. Only used for LaTeX.
Examples
--------
>>> def user_gate():
... mat = np.array([[1., 0],
... [0., 1.j]])
... return Qobj(mat, dims=[[2], [2]])
    >>> qc = QubitCircuit(2, user_gates={"T": user_gate})
    >>> qc.add_gate("T", targets=[0])
"""
def __init__(self, N, input_states=None, output_states=None,
reverse_states=True, user_gates=None):
# number of qubits in the register
self.N = N
self.reverse_states = reverse_states
self.gates = []
self.U_list = []
self.input_states = [None for i in range(N)]
self.output_states = [None for i in range(N)]
if user_gates is None:
self.user_gates = {}
else:
if isinstance(user_gates, dict):
self.user_gates = user_gates
else:
                raise ValueError(
                    "`user_gates` takes a python dictionary of the form "
                    "{{str: gate_function}}, not {}".format(user_gates))
def add_state(self, state, targets=None, state_type="input"):
"""
        Add an input or output state to the circuit. By default all the input
and output states will be initialized to `None`. A particular state can
be added by specifying the state and the qubit where it has to be added
along with the type as input or output.
Parameters
----------
state: str
The state that has to be added. It can be any string such as `0`,
'+', "A", "Y"
targets: list
A list of qubit positions where the given state has to be added.
state_type: str
One of either "input" or "output". This specifies whether the state
to be added is an input or output.
default: "input"
"""
if state_type == "input":
for i in targets:
self.input_states[i] = state
if state_type == "output":
for i in targets:
self.output_states[i] = state
def add_gate(self, gate, targets=None, controls=None, arg_value=None,
arg_label=None, index=None):
"""
Adds a gate with specified parameters to the circuit.
Parameters
----------
gate: string or `Gate`
Gate name. If gate is an instance of `Gate`, parameters are
unpacked and added.
targets: list
Gate targets.
controls: list
Gate controls.
arg_value: float
            Argument value (phi).
arg_label: string
Label for gate representation.
index : list
Positions to add the gate.
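
        Examples
        --------
        A short illustrative example:

        >>> qc = QubitCircuit(2)
        >>> qc.add_gate("CNOT", targets=[1], controls=[0])
        >>> qc.add_gate("RX", targets=[0], arg_value=np.pi / 2,
        ...             arg_label=r"\pi/2")
        >>> [gate.name for gate in qc.gates]
        ['CNOT', 'RX']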
"""
if isinstance(gate, Gate):
name = gate.name
targets = gate.targets
controls = gate.controls
arg_value = gate.arg_value
arg_label = gate.arg_label
else:
name = gate
if index is None:
self.gates.append(Gate(name, targets=targets, controls=controls,
arg_value=arg_value, arg_label=arg_label))
else:
for position in index:
self.gates.insert(position, Gate(name, targets=targets,
controls=controls,
arg_value=arg_value,
arg_label=arg_label))
def add_1q_gate(self, name, start=0, end=None, qubits=None,
arg_value=None, arg_label=None):
"""
Adds a single qubit gate with specified parameters on a variable
number of qubits in the circuit. By default, it applies the given gate
to all the qubits in the register.
Parameters
----------
name : string
Gate name.
start : int
Starting location of qubits.
end : int
Last qubit for the gate.
qubits : list
Specific qubits for applying gates.
arg_value : float
            Argument value (phi).
arg_label : string
Label for gate representation.
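
        Examples
        --------
        Illustrative: apply a Hadamard (SNOT) to every qubit of a register.

        >>> qc = QubitCircuit(3)
        >>> qc.add_1q_gate("SNOT")
        >>> [gate.targets for gate in qc.gates]
        [[0], [1], [2]]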
"""
if name not in ["RX", "RY", "RZ", "SNOT", "SQRTNOT", "PHASEGATE"]:
raise ValueError("%s is not a single qubit gate" % name)
if qubits is not None:
for i in range(len(qubits)):
self.gates.append(Gate(name, targets=qubits[i], controls=None,
arg_value=arg_value,
arg_label=arg_label))
else:
            if end is None:
                end = self.N - 1
            # `end` is the last qubit the gate acts on, so make the range
            # inclusive; otherwise the final qubit would be skipped.
            for i in range(start, end + 1):
self.gates.append(Gate(name, targets=i, controls=None,
arg_value=arg_value,
arg_label=arg_label))
def add_circuit(self, qc, start=0):
"""
Adds a block of a qubit circuit to the main circuit.
Globalphase gates are not added.
Parameters
----------
qc : QubitCircuit
The circuit block to be added to the main circuit.
start : int
The qubit on which the first gate is applied.
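
        Examples
        --------
        Illustrative: embed a two-qubit block starting at qubit 1.

        >>> sub = QubitCircuit(2)
        >>> sub.add_gate("CNOT", targets=[1], controls=[0])
        >>> qc = QubitCircuit(3)
        >>> qc.add_circuit(sub, start=1)
        >>> [(gate.name, gate.targets, gate.controls) for gate in qc.gates]
        [('CNOT', [2], [1])]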
"""
if self.N - start < qc.N:
raise NotImplementedError("Targets exceed number of qubits.")
for gate in qc.gates:
if gate.name in ["RX", "RY", "RZ", "SNOT", "SQRTNOT", "PHASEGATE"]:
self.add_gate(gate.name, gate.targets[0] + start, None,
gate.arg_value, gate.arg_label)
elif gate.name in ["CPHASE", "CNOT", "CSIGN", "CRX", "CRY", "CRZ"]:
self.add_gate(gate.name, gate.targets[0] + start,
gate.controls[0] + start, gate.arg_value,
gate.arg_label)
elif gate.name in ["BERKELEY", "SWAPalpha", "SWAP", "ISWAP",
"SQRTSWAP", "SQRTISWAP"]:
self.add_gate(gate.name, None,
[gate.controls[0] + start,
gate.controls[1] + start], None, None)
elif gate.name in ["TOFFOLI"]:
self.add_gate(gate.name, gate.targets[0] + start,
[gate.controls[0] + start,
gate.controls[1] + start], None, None)
elif gate.name in ["FREDKIN"]:
self.add_gate(gate.name,
[gate.targets[0] + start,
gate.targets[1] + start],
gate.controls + start, None, None)
elif gate.name in self.user_gates:
self.add_gate(
gate.name, targets=gate.targets,
arg_value=gate.arg_value)
def remove_gate(self, index=None, end=None, name=None, remove="first"):
"""
Remove a gate from a specific index or between two indexes or the
first, last or all instances of a particular gate.
        Parameters
        ----------
        index : int
            Location of gate to be removed.
        end : int
            End of a range of gates to be removed (used together with
            ``index``).
        name : string
            Gate name to be removed.
        remove : string
            Whether the "first", "last" or "all" matching gates are removed.
"""
        # Bounds are checked against the number of gates, not the number of
        # qubits (self.N).
        if index is not None and index <= len(self.gates):
            if end is not None and end <= len(self.gates):
                # Pop the same position repeatedly; the list shrinks as we go.
                for i in range(end - index):
                    self.gates.pop(index)
            elif end is not None and end > len(self.gates):
                raise ValueError("End target exceeds number of gates.")
            else:
                self.gates.pop(index)
        elif name is not None and remove == "first":
            for gate in self.gates:
                if name == gate.name:
                    self.gates.remove(gate)
                    break
        elif name is not None and remove == "last":
            for gate in reversed(self.gates):
                if name == gate.name:
                    self.gates.remove(gate)
                    break
        elif name is not None and remove == "all":
            self.gates = [gate for gate in self.gates if gate.name != name]
        else:
            self.gates.pop()
def reverse_circuit(self):
"""
Reverse an entire circuit of unitary gates.
        Returns
        -------
        qc : QubitCircuit
            Return QubitCircuit of the gates for the qubit circuit in the
            reverse order.
        """
        # Pass reverse_states by keyword: the second positional argument of
        # QubitCircuit is input_states, not reverse_states.
        temp = QubitCircuit(self.N, reverse_states=self.reverse_states)
        for gate in reversed(self.gates):
            temp.add_gate(gate)
        return temp
def resolve_gates(self, basis=["CNOT", "RX", "RY", "RZ"]):
"""
        Decompose the gates of the circuit into an equivalent sequence of
        gates expressed in the specified basis, returned as a new circuit.
Parameters
----------
basis : list.
Basis of the resolved circuit.
Returns
-------
qc : QubitCircuit
Return QubitCircuit of resolved gates for the qubit circuit in the
desired basis.
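
        Examples
        --------
        An illustrative sketch: a Hadamard (SNOT) decomposed into rotations.

        >>> qc = QubitCircuit(2)
        >>> qc.add_gate("SNOT", targets=[0])
        >>> resolved = qc.resolve_gates(basis=["CNOT", "RX", "RY"])
        >>> [gate.name for gate in resolved.gates]
        ['GLOBALPHASE', 'RY', 'RX']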
"""
        qc_temp = QubitCircuit(self.N, reverse_states=self.reverse_states)
temp_resolved = []
basis_1q_valid = ["RX", "RY", "RZ"]
basis_2q_valid = ["CNOT", "CSIGN", "ISWAP", "SQRTSWAP", "SQRTISWAP"]
if isinstance(basis, list):
basis_1q = []
basis_2q = []
for gate in basis:
if gate in basis_2q_valid:
basis_2q.append(gate)
elif gate in basis_1q_valid:
basis_1q.append(gate)
else:
raise NotImplementedError(
"%s is not a valid basis gate" % gate)
            if len(basis_1q) == 1:
                raise ValueError("Not enough single-qubit gates in basis")
            elif len(basis_1q) == 0:
                basis_1q = ["RX", "RY", "RZ"]
        else:  # basis is a single two-qubit gate name given as a string
            basis_1q = ["RX", "RY", "RZ"]
            if basis in basis_2q_valid:
                basis_2q = [basis]
            else:
                raise ValueError("%s is not a valid two-qubit basis gate"
                                 % basis)
for gate in self.gates:
if gate.name == "RX":
temp_resolved.append(gate)
elif gate.name == "RY":
temp_resolved.append(gate)
elif gate.name == "RZ":
temp_resolved.append(gate)
elif gate.name == "SQRTNOT":
temp_resolved.append(Gate("GLOBALPHASE", None, None,
arg_value=np.pi / 4,
arg_label=r"\pi/4"))
temp_resolved.append(Gate("RX", gate.targets, None,
arg_value=np.pi / 2,
arg_label=r"\pi/2"))
elif gate.name == "SNOT":
temp_resolved.append(Gate("GLOBALPHASE", None, None,
arg_value=np.pi / 2,
arg_label=r"\pi/2"))
temp_resolved.append(Gate("RY", gate.targets, None,
arg_value=np.pi / 2,
arg_label=r"\pi/2"))
temp_resolved.append(Gate("RX", gate.targets, None,
arg_value=np.pi, arg_label=r"\pi"))
elif gate.name == "PHASEGATE":
temp_resolved.append(Gate("GLOBALPHASE", None, None,
arg_value=gate.arg_value / 2,
arg_label=gate.arg_label))
temp_resolved.append(Gate("RZ", gate.targets, None,
gate.arg_value, gate.arg_label))
            elif gate.name in basis_2q:
                # Gates already expressed in the 2q basis pass through as-is.
                temp_resolved.append(gate)
elif gate.name == "CPHASE":
raise NotImplementedError("Cannot be resolved in this basis")
elif gate.name == "CNOT":
temp_resolved.append(gate)
elif gate.name == "CSIGN":
temp_resolved.append(Gate("RY", gate.targets, None,
arg_value=np.pi / 2,
arg_label=r"\pi/2"))
temp_resolved.append(Gate("RX", gate.targets, None,
arg_value=np.pi, arg_label=r"\pi"))
temp_resolved.append(Gate("CNOT", gate.targets, gate.controls))
temp_resolved.append(Gate("RY", gate.targets, None,
arg_value=np.pi / 2,
arg_label=r"\pi/2"))
temp_resolved.append(Gate("RX", gate.targets, None,
arg_value=np.pi, arg_label=r"\pi"))
temp_resolved.append(Gate("GLOBALPHASE", None, None,
arg_value=np.pi, arg_label=r"\pi"))
elif gate.name == "BERKELEY":
raise NotImplementedError("Cannot be resolved in this basis")
elif gate.name == "SWAPalpha":
raise NotImplementedError("Cannot be resolved in this basis")
elif gate.name == "SWAP":
if "ISWAP" in basis_2q: # dealed with separately
temp_resolved.append(gate)
else:
temp_resolved.append(
Gate("CNOT", gate.targets[0], gate.targets[1]))
temp_resolved.append(
Gate("CNOT", gate.targets[1], gate.targets[0]))
temp_resolved.append(
Gate("CNOT", gate.targets[0], gate.targets[1]))
elif gate.name == "ISWAP":
temp_resolved.append(Gate("CNOT", gate.targets[0],
gate.targets[1]))
temp_resolved.append(Gate("CNOT", gate.targets[1],
gate.targets[0]))
temp_resolved.append(Gate("CNOT", gate.targets[0],
gate.targets[1]))
temp_resolved.append(Gate("RZ", gate.targets[0], None,
arg_value=np.pi / 2,
arg_label=r"\pi/2"))
temp_resolved.append(Gate("RZ", gate.targets[1], None,
arg_value=np.pi / 2,
arg_label=r"\pi/2"))
temp_resolved.append(Gate("RY", gate.targets[0], None,
arg_value=np.pi / 2,
arg_label=r"\pi/2"))
temp_resolved.append(Gate("RX", gate.targets, None,
arg_value=np.pi, arg_label=r"\pi"))
temp_resolved.append(Gate("CNOT", gate.targets[0],
gate.targets[1]))
temp_resolved.append(Gate("RY", gate.targets[0], None,
arg_value=np.pi / 2,
arg_label=r"\pi/2"))
temp_resolved.append(Gate("RX", gate.targets, None,
arg_value=np.pi, arg_label=r"\pi"))
temp_resolved.append(Gate("GLOBALPHASE", None, None,
arg_value=np.pi, arg_label=r"\pi"))
temp_resolved.append(Gate("GLOBALPHASE", None, None,
arg_value=np.pi / 2,
arg_label=r"\pi/2"))
elif gate.name == "SQRTSWAP":
raise NotImplementedError("Cannot be resolved in this basis")
elif gate.name == "SQRTISWAP":
raise NotImplementedError("Cannot be resolved in this basis")
elif gate.name == "FREDKIN":
temp_resolved.append(Gate("CNOT", gate.targets[0],
gate.targets[1]))
temp_resolved.append(Gate("CNOT", gate.targets[0],
gate.controls))
temp_resolved.append(Gate("RZ", gate.controls, None,
arg_value=np.pi / 8,
arg_label=r"\pi/8"))
temp_resolved.append(Gate("RZ", [gate.targets[0]], None,
arg_value=-np.pi / 8,
arg_label=r"-\pi/8"))
temp_resolved.append(Gate("CNOT", gate.targets[0],
gate.controls))
temp_resolved.append(Gate("GLOBALPHASE", None, None,
arg_value=np.pi / 2,
arg_label=r"\pi/2"))
temp_resolved.append(Gate("RY", gate.targets[1], None,
arg_value=np.pi / 2,
arg_label=r"\pi/2"))
temp_resolved.append(Gate("RY", gate.targets, None,
arg_value=-np.pi / 2,
arg_label=r"-\pi/2"))
temp_resolved.append(Gate("RZ", gate.targets, None,
arg_value=np.pi, arg_label=r"\pi"))
temp_resolved.append(Gate("RY", gate.targets, None,
arg_value=np.pi / 2,
arg_label=r"\pi/2"))
temp_resolved.append(Gate("RZ", gate.targets[0], None,
arg_value=np.pi / 8,
arg_label=r"\pi/8"))
temp_resolved.append(Gate("RZ", gate.targets[1], None,
arg_value=np.pi / 8,
arg_label=r"\pi/8"))
temp_resolved.append(Gate("CNOT", gate.targets[1],
gate.controls))
temp_resolved.append(Gate("RZ", gate.targets[1], None,
arg_value=-np.pi / 8,
arg_label=r"-\pi/8"))
temp_resolved.append(Gate("CNOT", gate.targets[1],
gate.targets[0]))
temp_resolved.append(Gate("RZ", gate.targets[1], None,
arg_value=np.pi / 8,
arg_label=r"\pi/8"))
temp_resolved.append(Gate("CNOT", gate.targets[1],
gate.controls))
temp_resolved.append(Gate("RZ", gate.targets[1], None,
arg_value=-np.pi / 8,
arg_label=r"-\pi/8"))
temp_resolved.append(Gate("CNOT", gate.targets[1],
gate.targets[0]))
temp_resolved.append(Gate("GLOBALPHASE", None, None,
arg_value=np.pi / 2,
arg_label=r"\pi/2"))
temp_resolved.append(Gate("RY", gate.targets[1], None,
arg_value=np.pi / 2,
arg_label=r"\pi/2"))
temp_resolved.append(Gate("RY", gate.targets, None,
arg_value=-np.pi / 2,
arg_label=r"-\pi/2"))
temp_resolved.append(Gate("RZ", gate.targets, None,
arg_value=np.pi, arg_label=r"\pi"))
temp_resolved.append(Gate("RY", gate.targets, None,
arg_value=np.pi / 2,
arg_label=r"\pi/2"))
temp_resolved.append(Gate("CNOT", gate.targets[0],
gate.targets[1]))
elif gate.name == "TOFFOLI":
temp_resolved.append(Gate("GLOBALPHASE", None, None,
arg_value=1 * np.pi / 8,
arg_label=r"\pi/8"))
temp_resolved.append(Gate("RZ", gate.controls[1], None,
arg_value=np.pi/2,
arg_label=r"\pi/2"))
temp_resolved.append(Gate("RZ", gate.controls[0], None,
arg_value=np.pi / 4,
arg_label=r"\pi/4"))
temp_resolved.append(Gate("CNOT", gate.controls[1],
gate.controls[0]))
temp_resolved.append(Gate("RZ", gate.controls[1], None,
arg_value=-np.pi / 4,
arg_label=r"-\pi/4"))
temp_resolved.append(Gate("CNOT", gate.controls[1],
gate.controls[0]))
temp_resolved.append(Gate("GLOBALPHASE", None, None,
arg_value=np.pi / 2,
arg_label=r"\pi/2"))
temp_resolved.append(Gate("RY", gate.targets, None,
arg_value=np.pi / 2,
arg_label=r"\pi/2"))
temp_resolved.append(Gate("RX", gate.targets, None,
arg_value=np.pi, arg_label=r"\pi"))
temp_resolved.append(Gate("RZ", gate.controls[1], None,
arg_value=-np.pi / 4,
arg_label=r"-\pi/4"))
temp_resolved.append(Gate("RZ", gate.targets, None,
arg_value=np.pi / 4,
arg_label=r"\pi/4"))
temp_resolved.append(Gate("CNOT", gate.targets,
gate.controls[0]))
temp_resolved.append(Gate("RZ", gate.targets, None,
arg_value=-np.pi / 4,
arg_label=r"-\pi/4"))
temp_resolved.append(Gate("CNOT", gate.targets,
gate.controls[1]))
temp_resolved.append(Gate("RZ", gate.targets, None,
arg_value=np.pi / 4,
arg_label=r"\pi/4"))
temp_resolved.append(Gate("CNOT", gate.targets,
gate.controls[0]))
temp_resolved.append(Gate("RZ", gate.targets, None,
arg_value=-np.pi / 4,
arg_label=r"-\pi/4"))
temp_resolved.append(Gate("CNOT", gate.targets,
gate.controls[1]))
temp_resolved.append(Gate("GLOBALPHASE", None, None,
arg_value=np.pi / 2,
arg_label=r"\pi/2"))
temp_resolved.append(Gate("RY", gate.targets, None,
arg_value=np.pi / 2,
arg_label=r"\pi/2"))
temp_resolved.append(Gate("RX", gate.targets, None,
arg_value=np.pi, arg_label=r"\pi"))
elif gate.name == "GLOBALPHASE":
temp_resolved.append(Gate(gate.name, gate.targets,
gate.controls,
gate.arg_value, gate.arg_label))
else:
raise NotImplementedError(
"Gate {} "
"cannot be resolved.".format(gate.name))
if "CSIGN" in basis_2q:
for gate in temp_resolved:
if gate.name == "CNOT":
qc_temp.gates.append(Gate("RY", gate.targets, None,
arg_value=-np.pi / 2,
arg_label=r"-\pi/2"))
qc_temp.gates.append(Gate("CSIGN", gate.targets,
gate.controls))
qc_temp.gates.append(Gate("RY", gate.targets, None,
arg_value=np.pi / 2,
arg_label=r"\pi/2"))
else:
qc_temp.gates.append(gate)
elif "ISWAP" in basis_2q:
for gate in temp_resolved:
if gate.name == "CNOT":
qc_temp.gates.append(Gate("GLOBALPHASE", None, None,
arg_value=np.pi / 4,
arg_label=r"\pi/4"))
qc_temp.gates.append(Gate("ISWAP", [gate.controls[0],
gate.targets[0]],
None))
qc_temp.gates.append(Gate("RZ", gate.targets, None,
arg_value=-np.pi / 2,
arg_label=r"-\pi/2"))
qc_temp.gates.append(Gate("RY", gate.controls, None,
arg_value=-np.pi / 2,
arg_label=r"-\pi/2"))
qc_temp.gates.append(Gate("RZ", gate.controls, None,
arg_value=np.pi / 2,
arg_label=r"\pi/2"))
qc_temp.gates.append(Gate("ISWAP", [gate.controls[0],
gate.targets[0]],
None))
qc_temp.gates.append(Gate("RY", gate.targets, None,
arg_value=-np.pi / 2,
arg_label=r"-\pi/2"))
qc_temp.gates.append(Gate("RZ", gate.targets, None,
arg_value=np.pi / 2,
arg_label=r"\pi/2"))
elif gate.name == "SWAP":
qc_temp.gates.append(Gate("GLOBALPHASE", None, None,
arg_value=np.pi / 4,
arg_label=r"\pi/4"))
qc_temp.gates.append(Gate("ISWAP", gate.targets, None))
qc_temp.gates.append(Gate("RX", gate.targets[0], None,
arg_value=-np.pi / 2,
arg_label=r"-\pi/2"))
qc_temp.gates.append(Gate("ISWAP", gate.targets, None))
qc_temp.gates.append(Gate("RX", gate.targets[1], None,
arg_value=-np.pi / 2,
arg_label=r"-\pi/2"))
qc_temp.gates.append(Gate("ISWAP", [gate.targets[1],
gate.targets[0]],
None))
qc_temp.gates.append(Gate("RX", gate.targets[0], None,
arg_value=-np.pi / 2,
arg_label=r"-\pi/2"))
else:
qc_temp.gates.append(gate)
elif "SQRTSWAP" in basis_2q:
for gate in temp_resolved:
if gate.name == "CNOT":
qc_temp.gates.append(Gate("RY", gate.targets, None,
arg_value=np.pi / 2,
arg_label=r"\pi/2"))
qc_temp.gates.append(Gate("SQRTSWAP", [gate.controls[0],
gate.targets[0]],
None))
qc_temp.gates.append(Gate("RZ", gate.controls, None,
arg_value=np.pi,
arg_label=r"\pi"))
qc_temp.gates.append(Gate("SQRTSWAP", [gate.controls[0],
gate.targets[0]],
None))
qc_temp.gates.append(Gate("RZ", gate.targets, None,
arg_value=-np.pi / 2,
arg_label=r"-\pi/2"))
qc_temp.gates.append(Gate("RY", gate.targets, None,
arg_value=-np.pi / 2,
arg_label=r"-\pi/2"))
qc_temp.gates.append(Gate("RZ", gate.controls, None,
arg_value=-np.pi / 2,
arg_label=r"-\pi/2"))
else:
qc_temp.gates.append(gate)
elif "SQRTISWAP" in basis_2q:
for gate in temp_resolved:
if gate.name == "CNOT":
qc_temp.gates.append(Gate("RY", gate.controls, None,
arg_value=-np.pi / 2,
arg_label=r"-\pi/2"))
qc_temp.gates.append(Gate("RX", gate.controls, None,
arg_value=np.pi / 2,
arg_label=r"\pi/2"))
qc_temp.gates.append(Gate("RX", gate.targets, None,
arg_value=-np.pi / 2,
arg_label=r"-\pi/2"))
qc_temp.gates.append(Gate("SQRTISWAP", [gate.controls[0],
gate.targets[0]],
None))
qc_temp.gates.append(Gate("RX", gate.controls, None,
arg_value=np.pi,
arg_label=r"\pi"))
qc_temp.gates.append(Gate("SQRTISWAP", [gate.controls[0],
gate.targets[0]],
None))
qc_temp.gates.append(Gate("RY", gate.controls, None,
arg_value=np.pi / 2,
arg_label=r"\pi/2"))
qc_temp.gates.append(Gate("GLOBALPHASE", None, None,
arg_value=np.pi / 4,
arg_label=r"\pi/4"))
qc_temp.gates.append(Gate("RZ", gate.controls, None,
arg_value=np.pi,
arg_label=r"\pi"))
qc_temp.gates.append(Gate("GLOBALPHASE", None, None,
arg_value=3 * np.pi / 2,
arg_label=r"3\pi/2"))
else:
qc_temp.gates.append(gate)
else:
qc_temp.gates = temp_resolved
if len(basis_1q) == 2:
temp_resolved = qc_temp.gates
qc_temp.gates = []
for gate in temp_resolved:
if gate.name == "RX" and "RX" not in basis_1q:
qc_temp.gates.append(Gate("RY", gate.targets, None,
arg_value=-np.pi / 2,
arg_label=r"-\pi/2"))
qc_temp.gates.append(Gate("RZ", gate.targets, None,
gate.arg_value, gate.arg_label))
qc_temp.gates.append(Gate("RY", gate.targets, None,
arg_value=np.pi / 2,
arg_label=r"\pi/2"))
elif gate.name == "RY" and "RY" not in basis_1q:
qc_temp.gates.append(Gate("RZ", gate.targets, None,
arg_value=-np.pi / 2,
arg_label=r"-\pi/2"))
qc_temp.gates.append(Gate("RX", gate.targets, None,
gate.arg_value, gate.arg_label))
qc_temp.gates.append(Gate("RZ", gate.targets, None,
arg_value=np.pi / 2,
arg_label=r"\pi/2"))
elif gate.name == "RZ" and "RZ" not in basis_1q:
qc_temp.gates.append(Gate("RX", gate.targets, None,
arg_value=-np.pi / 2,
arg_label=r"-\pi/2"))
qc_temp.gates.append(Gate("RY", gate.targets, None,
gate.arg_value, gate.arg_label))
qc_temp.gates.append(Gate("RX", gate.targets, None,
arg_value=np.pi / 2,
arg_label=r"\pi/2"))
else:
qc_temp.gates.append(gate)
return qc_temp
def adjacent_gates(self):
"""
Method to resolve two qubit gates with non-adjacent control/s or
target/s in terms of gates with adjacent interactions.
Returns
-------
qc : QubitCircuit
Return QubitCircuit of the gates for the qubit circuit with the
resolved non-adjacent gates.
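
        Examples
        --------
        An illustrative sketch: a CNOT between non-adjacent qubits 0 and 2 is
        bracketed by SWAPs so every interaction is between neighbours.

        >>> qc = QubitCircuit(3)
        >>> qc.add_gate("CNOT", targets=[0], controls=[2])
        >>> [gate.name for gate in qc.adjacent_gates().gates]
        ['SWAP', 'CNOT', 'SWAP']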
"""
temp = QubitCircuit(self.N, reverse_states=self.reverse_states)
swap_gates = ["SWAP", "ISWAP", "SQRTISWAP", "SQRTSWAP", "BERKELEY",
"SWAPalpha"]
for gate in self.gates:
if gate.name == "CNOT" or gate.name == "CSIGN":
start = min([gate.targets[0], gate.controls[0]])
end = max([gate.targets[0], gate.controls[0]])
i = start
while i < end:
if start + end - i - i == 1 and (end - start + 1) % 2 == 0:
# Apply required gate if control, target are adjacent
# to each other, provided |control-target| is even.
                        if end == gate.controls[0]:
                            temp.gates.append(Gate(gate.name, targets=[i],
                                                   controls=[i + 1]))
                        else:
                            temp.gates.append(Gate(gate.name, targets=[i + 1],
                                                   controls=[i]))
                        # Advance the index; without this the while loop
                        # would never terminate in this branch.
                        i += 1
elif (start + end - i - i == 2 and
(end - start + 1) % 2 == 1):
# Apply a swap between i and its adjacent gate, then
# the required gate if and then another swap if control
# and target have one qubit between them, provided
# |control-target| is odd.
temp.gates.append(Gate("SWAP", targets=[i, i + 1]))
if end == gate.controls[0]:
temp.gates.append(Gate(gate.name, targets=[i + 1],
controls=[i + 2]))
else:
temp.gates.append(Gate(gate.name, targets=[i + 2],
controls=[i + 1]))
temp.gates.append(Gate("SWAP", targets=[i, i + 1]))
i += 1
else:
# Swap the target/s and/or control with their adjacent
# qubit to bring them closer.
temp.gates.append(Gate("SWAP", targets=[i, i + 1]))
temp.gates.append(Gate("SWAP",
targets=[start + end - i - 1,
start + end - i]))
i += 1
elif gate.name in swap_gates:
start = min([gate.targets[0], gate.targets[1]])
end = max([gate.targets[0], gate.targets[1]])
i = start
while i < end:
                    if start + end - i - i == 1 and (end - start + 1) % 2 == 0:
                        temp.gates.append(Gate(gate.name, targets=[i, i + 1]))
                        # Advance the index so the while loop terminates.
                        i += 1
elif ((start + end - i - i) == 2 and
(end - start + 1) % 2 == 1):
temp.gates.append(Gate("SWAP", targets=[i, i + 1]))
temp.gates.append(
Gate(gate.name, targets=[i + 1, i + 2]))
temp.gates.append(Gate("SWAP", targets=[i, i + 1]))
i += 1
else:
temp.gates.append(Gate("SWAP", targets=[i, i + 1]))
temp.gates.append(Gate("SWAP",
targets=[start + end - i - 1,
start + end - i]))
i += 1
else:
raise NotImplementedError(
"`adjacent_gates` is not defined for "
"gate {}.".format(gate.name))
return temp
def propagators(self):
"""
Propagator matrix calculator for N qubits returning the individual
steps as unitary matrices operating from left to right.
Returns
-------
U_list : list
Return list of unitary matrices for the qubit circuit.
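
        Examples
        --------
        A short illustrative example:

        >>> qc = QubitCircuit(1)
        >>> qc.add_gate("SNOT", targets=[0])
        >>> U_list = qc.propagators()
        >>> len(U_list)
        1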
"""
self.U_list = []
for gate in self.gates:
if gate.name == "RX":
self.U_list.append(rx(gate.arg_value, self.N, gate.targets[0]))
elif gate.name == "RY":
self.U_list.append(ry(gate.arg_value, self.N, gate.targets[0]))
elif gate.name == "RZ":
self.U_list.append(rz(gate.arg_value, self.N, gate.targets[0]))
elif gate.name == "SQRTNOT":
self.U_list.append(sqrtnot(self.N, gate.targets[0]))
elif gate.name == "SNOT":
self.U_list.append(snot(self.N, gate.targets[0]))
elif gate.name == "PHASEGATE":
self.U_list.append(phasegate(gate.arg_value, self.N,
gate.targets[0]))
elif gate.name == "CRX":
self.U_list.append(controlled_gate(rx(gate.arg_value),
N=self.N,
control=gate.controls[0],
target=gate.targets[0]))
elif gate.name == "CRY":
self.U_list.append(controlled_gate(ry(gate.arg_value),
N=self.N,
control=gate.controls[0],
target=gate.targets[0]))
elif gate.name == "CRZ":
self.U_list.append(controlled_gate(rz(gate.arg_value),
N=self.N,
control=gate.controls[0],
target=gate.targets[0]))
elif gate.name == "CPHASE":
self.U_list.append(cphase(gate.arg_value, self.N,
gate.controls[0], gate.targets[0]))
elif gate.name == "CNOT":
self.U_list.append(cnot(self.N,
gate.controls[0], gate.targets[0]))
elif gate.name == "CSIGN":
self.U_list.append(csign(self.N,
gate.controls[0], gate.targets[0]))
elif gate.name == "BERKELEY":
self.U_list.append(berkeley(self.N, gate.targets))
elif gate.name == "SWAPalpha":
self.U_list.append(swapalpha(gate.arg_value, self.N,
gate.targets))
elif gate.name == "SWAP":
self.U_list.append(swap(self.N, gate.targets))
elif gate.name == "ISWAP":
self.U_list.append(iswap(self.N, gate.targets))
elif gate.name == "SQRTSWAP":
self.U_list.append(sqrtswap(self.N, gate.targets))
elif gate.name == "SQRTISWAP":
self.U_list.append(sqrtiswap(self.N, gate.targets))
elif gate.name == "FREDKIN":
self.U_list.append(fredkin(self.N, gate.controls[0],
gate.targets))
elif gate.name == "TOFFOLI":
self.U_list.append(toffoli(self.N, gate.controls,
gate.targets[0]))
elif gate.name == "GLOBALPHASE":
self.U_list.append(globalphase(gate.arg_value, self.N))
elif gate.name in self.user_gates:
if gate.controls is not None:
                    raise ValueError(
                        "A user defined gate {} takes only the "
                        "`targets` variable.".format(gate.name))
func = self.user_gates[gate.name]
para_num = len(inspect.getfullargspec(func)[0])
if para_num == 0:
oper = func()
elif para_num == 1:
oper = func(gate.arg_value)
                else:
                    raise ValueError(
                        "A gate function takes at most one parameter.")
self.U_list.append(expand_oper(oper, self.N, gate.targets))
else:
raise NotImplementedError(
"{} gate is an unknown gate.".format(gate.name))
return self.U_list
def latex_code(self):
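        """
        Generate LaTeX (qcircuit) code for drawing the circuit.

        Builds one column of qcircuit macros per gate and one row per qubit;
        ``_repr_png_``/``_repr_svg_`` compile the result via ``_latex_compile``.
        """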
rows = []
gates = self.gates
for gate in gates:
col = []
for n in range(self.N):
if gate.targets and n in gate.targets:
if len(gate.targets) > 1:
if gate.name == "SWAP":
col.append(r" \qswap \qwx ")
elif ((self.reverse_states and
n == max(gate.targets)) or
(not self.reverse_states and
n == min(gate.targets))):
col.append(r" \multigate{%d}{%s} " %
(len(gate.targets) - 1,
_gate_label(gate.name,
gate.arg_label)))
else:
col.append(r" \ghost{%s} " %
(_gate_label(gate.name,
gate.arg_label)))
elif gate.name == "CNOT":
col.append(r" \targ ")
elif gate.name == "TOFFOLI":
col.append(r" \targ ")
else:
col.append(r" \gate{%s} " %
_gate_label(gate.name, gate.arg_label))
elif gate.controls and n in gate.controls:
m = (gate.targets[0] - n) * (-1 if self.reverse_states
else 1)
col.append(r" \ctrl{%d} " % m)
elif (not gate.controls and not gate.targets):
# global gate
if ((self.reverse_states and n == self.N - 1) or
(not self.reverse_states and n == 0)):
col.append(r" \multigate{%d}{%s} " %
(self.N - 1,
_gate_label(gate.name, gate.arg_label)))
else:
col.append(r" \ghost{%s} " %
(_gate_label(gate.name, gate.arg_label)))
else:
col.append(r" \qw ")
col.append(r" \qw ")
rows.append(col)
input_states = ["\lstick{\ket{" + x + "}}" if x is not None
else "" for x in self.input_states]
code = ""
n_iter = (reversed(range(self.N)) if self.reverse_states
else range(self.N))
for n in n_iter:
code += r" & %s" % input_states[n]
for m in range(len(gates)):
code += r" & %s" % rows[m][n]
code += r" & \qw \\ " + "\n"
return code
def _repr_png_(self):
return _latex_compile(self.latex_code(), format="png")
def _repr_svg_(self):
return _latex_compile(self.latex_code(), format="svg")
@property
def png(self):
from IPython.display import Image
return Image(self._repr_png_(), embed=True)
@property
def svg(self):
from IPython.display import SVG
return SVG(self._repr_svg_())
def qasm(self):
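        """
        Return a simple qasm-style text listing of the circuit: one qubit
        declaration per line, followed by one line per gate with its targets
        and controls.
        """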
code = "# qasm code generated by QuTiP\n\n"
for n in range(self.N):
code += "\tqubit\tq%d\n" % n
code += "\n"
for gate in self.gates:
code += "\t%s\t" % gate.name
qtargets = ["q%d" %
t for t in gate.targets] if gate.targets else []
qcontrols = (["q%d" % c for c in gate.controls] if gate.controls
else [])
code += ",".join(qtargets + qcontrols)
code += "\n"
return code
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
OSMnx documentation build configuration file.
Created by sphinx-quickstart on Sun Feb 4 13:53:34 2018.
This file is execfile()d with the current directory set to its
containing dir.
Note that not all possible configuration values are present in this
autogenerated file.
All configuration values have a default; values that are commented out
serve to show the default.
If extensions (or modules to document with autodoc) are in another directory,
add these directories to sys.path here. If the directory is relative to the
documentation root, use os.path.abspath to make it absolute, like shown here.
"""
import os
import sys
# go up two levels from /docs/source to the package root
sys.path.insert(0, os.path.abspath("../.."))
# mock import these packages because readthedocs doesn't have them installed
autodoc_mock_imports = [
"dateutil",
"geopandas",
"matplotlib",
"matplotlib.cm",
"matplotlib.colors",
"matplotlib.pyplot",
"networkx",
"numpy",
"pandas",
"pyproj",
"requests",
"scipy",
"scipy.spatial",
"shapely",
"shapely.geometry",
"shapely.ops",
"sklearn",
"sklearn.neighbors",
]
# -- General configuration ------------------------------------------------
# General information about the project.
project = "OSMnx"
copyright = "2020, Geoff Boeing"
author = "Geoff Boeing"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
version = release = "0.15.1"
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.napoleon"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "default"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
"**": [
"relations.html", # needs 'show_related': True theme option to display
"searchbox.html",
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "OSMnxdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "OSMnx.tex", "OSMnx Documentation", "Geoff Boeing", "manual"),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "osmnx", "OSMnx Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"OSMnx",
"OSMnx Documentation",
author,
"OSMnx",
"Python for street networks.",
"Miscellaneous",
),
]
|
import os
import sys
from unittest import mock
import pytest
from praw.config import Config
from praw.exceptions import ClientException
class TestConfig:
@staticmethod
def _assert_config_read(environment, mock_config):
mock_instance = mock_config.return_value
Config.CONFIG = None # Force config file reload
prev_environment = {environment: None}
for env_name in ["APPDATA", "HOME", "XDG_CONFIG_HOME"]:
if env_name in os.environ:
prev_environment[env_name] = os.environ[env_name]
del os.environ[env_name]
os.environ[environment] = "/MOCK"
module_dir = os.path.dirname(sys.modules["praw"].__file__)
environ_path = os.path.join(
"/MOCK", ".config" if environment == "HOME" else "", "praw.ini"
)
locations = [
os.path.join(module_dir, "praw.ini"),
environ_path,
"praw.ini",
]
try:
Config._load_config()
mock_instance.read.assert_called_with(locations)
finally:
Config.CONFIG = None # Force config file reload
for env_name in prev_environment:
if prev_environment[env_name] is None:
del os.environ[env_name]
else:
os.environ[env_name] = prev_environment[env_name]
def test_check_for_updates__false(self):
for value in [False, "False", "other"]:
config = Config("DEFAULT", check_for_updates=value)
assert config.check_for_updates is False
def test_custom__extra_values_set(self):
config = Config("DEFAULT", user1="foo", user2="bar")
assert config.custom == {"user1": "foo", "user2": "bar"}
def test_custom__no_extra_values_set(self):
config = Config("DEFAULT")
assert config.custom == {}
def test_check_for_updates__true(self):
for value in [True, "1", "true", "YES", "on"]:
config = Config("DEFAULT", check_for_updates=value)
assert config.check_for_updates is True
@mock.patch("configparser.ConfigParser")
def test_load_ini_from_appdata(self, mock_config):
self._assert_config_read("APPDATA", mock_config)
@mock.patch("configparser.ConfigParser")
def test_load_ini_from_home(self, mock_config):
self._assert_config_read("HOME", mock_config)
@mock.patch("configparser.ConfigParser")
def test_load_ini_from_xdg_config_home(self, mock_config):
self._assert_config_read("XDG_CONFIG_HOME", mock_config)
@mock.patch("configparser.ConfigParser")
def test_load_ini_with_no_config_directory(self, mock_config):
mock_instance = mock_config.return_value
Config.CONFIG = None # Force config file reload
prev_environment = {}
for key in ["APPDATA", "HOME", "XDG_CONFIG_HOME"]:
if key in os.environ:
prev_environment[key] = os.environ[key]
del os.environ[key]
module_dir = os.path.dirname(sys.modules["praw"].__file__)
locations = [os.path.join(module_dir, "praw.ini"), "praw.ini"]
try:
Config._load_config()
mock_instance.read.assert_called_with(locations)
finally:
Config.CONFIG = None # Force config file reload
for key, value in prev_environment.items():
os.environ[key] = value
def test_short_url(self):
config = Config("DEFAULT")
assert config.short_url == "https://redd.it"
def test_short_url_not_defined(self):
config = Config("DEFAULT", short_url=None)
with pytest.raises(ClientException) as excinfo:
config.short_url
assert str(excinfo.value) == "No short domain specified."
def test_unset_value_has_useful_string_representation(self):
config = Config("DEFAULT", password=Config.CONFIG_NOT_SET)
assert str(config.password) == "NotSet"
class TestConfigInterpolation:
def test_no_interpolation(self):
Config.CONFIG = None # Force config file reload
with mock.patch.dict(
"os.environ",
{
"APPDATA": os.path.dirname(__file__),
"XDG_CONFIG_HOME": os.path.dirname(__file__),
},
):
config = Config("INTERPOLATION")
assert config.custom["basic_interpolation"] == "%(reddit_url)s"
assert config.custom["extended_interpolation"] == "${reddit_url}"
def test_basic_interpolation(self):
Config.CONFIG = None # Force config file reload
with mock.patch.dict(
"os.environ",
{
"APPDATA": os.path.dirname(__file__),
"XDG_CONFIG_HOME": os.path.dirname(__file__),
},
):
config = Config("INTERPOLATION", config_interpolation="basic")
assert config.custom["basic_interpolation"] == config.reddit_url
assert config.custom["extended_interpolation"] == "${reddit_url}"
def test_extended_interpolation(self):
Config.CONFIG = None # Force config file reload
with mock.patch.dict(
"os.environ",
{
"APPDATA": os.path.dirname(__file__),
"XDG_CONFIG_HOME": os.path.dirname(__file__),
},
):
config = Config("INTERPOLATION", config_interpolation="extended")
assert config.custom["basic_interpolation"] == "%(reddit_url)s"
assert config.custom["extended_interpolation"] == config.reddit_url
|
#! /usr/bin/env python
# Script to:
# 1 - check for cmsScimarkLaunch (infinite-loop) scripts
# 2 - kill them
# 3 - report their results using cmsScimarkParser.py
from __future__ import print_function
import subprocess,os,sys
def main():
#Use ps -ef to look for cmsScimarkLaunch processes
ps_stdouterr=subprocess.Popen("ps -efww|grep cmsScimarkLaunch|grep -v grep|grep -v 'sh -c'",shell=True,stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout
if ps_stdouterr:
ps_lines=ps_stdouterr.readlines()
#print ps_lines
if ps_lines:
            for line in ps_lines:
                # Popen pipes yield bytes under Python 3; decode before parsing.
                if isinstance(line, bytes):
                    line = line.decode()
                tokens = line.split()
                # Look up the PID
                PID = tokens[1]
                # Look up the cpu core
                core = tokens[9]
print("Found process:\n%s"%line[:-1]) #to eliminate the extra \n
#Kill the PID
print("Killing process with PID %s"%PID)
kill_stdouterr=subprocess.Popen("kill %s"%PID,shell=True,stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.read()
print(kill_stdouterr)
#Harvest the cmsScimark scores
#Look for the cmsScimark log:
if os.path.exists("cmsScimark_%s.log"%core):
#Create the results dir
mkdir_stdouterr=subprocess.Popen("mkdir cmsScimarkResults_cpu%s"%core,shell=True,stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.read()
print(mkdir_stdouterr)
#Execute the harvesting scrip cmsScimarkParser.py (it is in the release)
harvest_stdouterr=subprocess.Popen("cmsScimarkParser.py -i cmsScimark_%s.log -o cmsScimarkResults_cpu%s"%(core,core),shell=True,stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.read()
print(harvest_stdouterr)
else:
print("No cmsScimark_%s.log file was found for cpu%s, log might be in another directory!"%(core,core))
else:
print("No cmsScimarkLaunch processes found in the ps -ef output")
return 0
if __name__ == "__main__":
sys.exit(main())
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
#from pylab import plot, show, xlim,figure,hold, ylim,legend, boxplot, setup, axes
import seaborn as sns
# Is this a personal or work computer?
# Are you graphing for hood or no hood?
Computer = 'personal'  # 'personal' or 'work'
Hood_or_no = 'no_hood'  # 'no_hood' or 'hood'
# Which households to remove; make sure the list is in ascending order.
# If there is nothing to remove, use a placeholder of 1045 or higher.
Household_removal = [1045]
#Household_removal = Household_removal.sort(reverse=False)
Household_removal_NO_Hood_fuel_day_adult = [1045]
Household_removal_Hood_fuel_day_adult = [2020]
Household_removal_NO_Hood_PM = [1045]
Household_removal_Hood_PM = [2020]
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
if Hood_or_no == 'hood':
C_Place_holder = 2001
else:
C_Place_holder = 1001
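
# Illustrative refactoring sketch (not used by the branches below): every phase
# block repeats the same pd.read_csv pattern, so it could be factored into a
# helper. `base_dir` and `phase` are hypothetical parameters; the file-name
# pattern matches the literal paths used below.
def load_phase_summaries(base_dir, phase):
    """Load the Day 1 and Event 1 summary CSVs for one study phase, e.g. '2N'."""
    day = pd.read_csv("%s/%s/%s_Summary_Day_1_exact.csv"
                      % (base_dir, phase, phase), skiprows=2)
    event = pd.read_csv("%s/%s/%s_Summary_Event_1_exact.csv"
                        % (base_dir, phase, phase), skiprows=2)
    return day, event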
if Computer == 'personal' and Hood_or_no == 'no_hood':
# 1N
datafile_path_day_1N ="C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/1N/1N_Summary_Day_1_exact.csv"
Day_1N = pd.read_csv(datafile_path_day_1N, skiprows=2)
datafile_path_event_1N = "C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/1N/1N_Summary_Event_1_exact.csv"
Event_1N = pd.read_csv(datafile_path_event_1N, skiprows=2)
# there is no second exact in phase 1N
#1N Survey
datafile_path_survey_1N = "C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/1N/1N_1H_Survey_summary_.csv"
Filter_1n_survey = pd.read_csv(datafile_path_survey_1N, skiprows=0)
#print(Filter_1n_survey.iloc[0:40, :])
Survey_1N = Filter_1n_survey.iloc[0:40,:]
#24 hour Kitchen pm breakdown
data_file_path_24_PM_1N = "C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/1N/1N_24_hour_Kitchen_PM.csv"
Kit_PM_1N_24hr = pd.read_csv(data_file_path_24_PM_1N, skiprows=0)
#24 hour Fuel Removal breakdown
data_file_path_24_Fuel_1N = "C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/1N/1N_24_hour_Fuel_removal.csv"
Fuel_remove_1N_24hr = pd.read_csv(data_file_path_24_Fuel_1N, skiprows=0)
#2N
datafile_path_day_2N ="C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/2N/2N_Summary_Day_1_exact.csv"
Day_2N = pd.read_csv(datafile_path_day_2N, skiprows=2)
datafile_path_event_2N_1 ="C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/2N/2N_Summary_Event_1_exact.csv"
Event_2N_1 = pd.read_csv(datafile_path_event_2N_1, skiprows=2)
#2N second Exact
datafile_path_event_2N_2 ="C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/2N/2N_Summary_Event_2_exact.csv"
Event_2N_2 = pd.read_csv(datafile_path_event_2N_2, skiprows=2)
#2N Survey
datafile_path_survey_2N = "C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/2N/2N_Survey_summary_.csv"
Survey_2N = pd.read_csv(datafile_path_survey_2N, skiprows=0)
#24 hour Kitchen pm breakdown
data_file_path_24_PM_2N = "C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/2N/2N_24_hour_Kitchen_PM.csv"
Kit_PM_2N_24hr = pd.read_csv(data_file_path_24_PM_2N, skiprows=0)
#24 hour Fuel Removal breakdown
data_file_path_24_Fuel_2N = "C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/2N/2N_24_hour_Fuel_removal.csv"
Fuel_remove_2N_24hr = pd.read_csv(data_file_path_24_Fuel_2N, skiprows=0)
#3N
datafile_path_day_3N ="C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/3N/3N_Summary_Day_1_exact.csv"
Day_3N = pd.read_csv(datafile_path_day_3N, skiprows=2)
datafile_path_event_3N_1 ="C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/3N/3N_Summary_Event_1_exact.csv"
Event_3N_1 = pd.read_csv(datafile_path_event_3N_1, skiprows=2)
#3N second Exact
datafile_path_event_3N_2 ="C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/3N/3N_Summary_Event_2_exact.csv"
Event_3N_2 = pd.read_csv(datafile_path_event_3N_2, skiprows=2)
#3N Survey
datafile_path_survey_3N = "C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/3N/3N_Survey_summary_.csv"
Survey_3N = pd.read_csv(datafile_path_survey_3N, skiprows=0)
#24 hour Kitchen pm breakdown
data_file_path_24_PM_3N = "C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/3N/3N_24_hour_Kitchen_PM.csv"
Kit_PM_3N_24hr = pd.read_csv(data_file_path_24_PM_3N, skiprows=0)
#24 hour Fuel Removal breakdown
data_file_path_24_Fuel_3N = "C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/3N/3N_24_hour_Fuel_removal.csv"
Fuel_remove_3N_24hr = pd.read_csv(data_file_path_24_Fuel_3N, skiprows=0)
#4N
datafile_path_day_4N ="C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/4N/4N_Summary_Day_1_exact.csv"
Day_4N = pd.read_csv(datafile_path_day_4N, skiprows=2)
datafile_path_event_4N_1 ="C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/4N/4N_Summary_Event_1_exact.csv"
Event_4N_1 = pd.read_csv(datafile_path_event_4N_1, skiprows=2)
#4N second Exact
datafile_path_event_4N_2 ="C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/4N/4N_Summary_Event_2_exact.csv"
Event_4N_2 = pd.read_csv(datafile_path_event_4N_2, skiprows=2)
#4N Survey
datafile_path_survey_4N = "C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/4N/4N_Survey_summary_.csv"
Survey_4N = pd.read_csv(datafile_path_survey_4N, skiprows=0)
#24 hour Kitchen pm breakdown
data_file_path_24_PM_4N = "C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/4N/4N_24_hour_Kitchen_PM.csv"
Kit_PM_4N_24hr = pd.read_csv(data_file_path_24_PM_4N, skiprows=0)
#24 hour Fuel Removal breakdown
data_file_path_24_Fuel_4N = "C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/4N/4N_24_hour_Fuel_removal.csv"
Fuel_remove_4N_24hr = pd.read_csv(data_file_path_24_Fuel_4N, skiprows=0)
elif Computer == 'personal' and Hood_or_no == 'hood':
#1H
datafile_path_day_1H ="C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/1H/1H_Summary_Day_1_exact.csv"
Day_1H = pd.read_csv(datafile_path_day_1H, skiprows=2)
datafile_path_event_1H ="C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/1H/1H_Summary_Event_1_exact.csv"
Event_1H = pd.read_csv(datafile_path_event_1H, skiprows=2)
#there is no second exact in phase 1H
    # 1H Survey (rows ~40 onward are the hood portion; column 1 is the household number)
datafile_path_survey_1H = "C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/1N/1N_1H_Survey_summary_.csv"
Survey_1H = pd.read_csv(datafile_path_survey_1H, skiprows=40)
#24 hour Kitchen pm breakdown
data_file_path_24_PM_1H = "C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/1H/1H_24_hour_Kitchen_PM.csv"
Kit_PM_1H_24hr = pd.read_csv(data_file_path_24_PM_1H, skiprows=0)
#24 hour Fuel Removal breakdown
data_file_path_24_fuel_1H = "C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/1H/1H_24_hour_Fuel_removal.csv"
Fuel_remove_1H_24hr = pd.read_csv(data_file_path_24_fuel_1H, skiprows=0)
#2H
datafile_path_day_2H ="C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/2H/2H_Summary_Day_1_exact.csv"
Day_2H = pd.read_csv(datafile_path_day_2H, skiprows=2)
datafile_path_event_2H_1 ="C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/2H/2H_Summary_Event_1_exact.csv"
Event_2H_1 = pd.read_csv(datafile_path_event_2H_1, skiprows=2)
#2H second Exact
datafile_path_event_2H_2 ="C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/2H/2H_Summary_Event_2_exact.csv"
Event_2H_2 = pd.read_csv(datafile_path_event_2H_2, skiprows=2)
#2H survey
datafile_path_survey_2H = "C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/2H/2H_Survey_summary_.csv"
Survey_2H = pd.read_csv(datafile_path_survey_2H, skiprows=0)
#24 hour Kitchen pm breakdown
data_file_path_24_PM_2H = "C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/2H/2H_24_hour_Kitchen_PM.csv"
Kit_PM_2H_24hr = pd.read_csv(data_file_path_24_PM_2H, skiprows=0)
#24 hour Fuel Removal breakdown
data_file_path_24_fuel_2H = "C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/2H/2H_24_hour_Fuel_removal.csv"
Fuel_remove_2H_24hr = pd.read_csv(data_file_path_24_fuel_2H, skiprows=0)
#3H
datafile_path_day_3H ="C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/3H/3H_Summary_Day_1_exact.csv"
Day_3H = pd.read_csv(datafile_path_day_3H, skiprows=2)
    # Variable renamed from 3N to 3H to match the hood-phase file it loads.
    datafile_path_event_3H_1 = "C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/3H/3H_Summary_Event_1_exact.csv"
    Event_3H_1 = pd.read_csv(datafile_path_event_3H_1, skiprows=2)
#3H second Exact
datafile_path_event_3H_2 ="C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/3H/3H_Summary_Event_2_exact.csv"
Event_3H_2 = pd.read_csv(datafile_path_event_3H_2, skiprows=2)
#3H survey
datafile_path_survey_3H = "C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/3H/3H_Survey_summary_.csv"
Survey_3H = pd.read_csv(datafile_path_survey_3H, skiprows=0)
#24 hour Kitchen pm breakdown
data_file_path_24_PM_3H = "C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/3H/3H_24_hour_Kitchen_PM.csv"
Kit_PM_3H_24hr = pd.read_csv(data_file_path_24_PM_3H, skiprows=0)
#24 hour Fuel Removal breakdown
data_file_path_24_fuel_3H = "C:/Users/gvros/Desktop/Oregon State Masters/Work/OSU, CSC, CQC Project files/3H/3H_24_hour_Fuel_removal.csv"
Fuel_remove_3H_24hr = pd.read_csv(data_file_path_24_fuel_3H, skiprows=0)
# The 'work' computer reads from the Box-synced folder instead of local files
elif Computer == 'work' and Hood_or_no == 'no_hood':
# 1N for box file system
datafile_path_day_1N = "C:/Users/rossgra/Box/OSU, CSC, CQC Project files/1N/1N_Summary_Day_1_exact.csv"
Day_1N = pd.read_csv(datafile_path_day_1N, skiprows=2)
datafile_path_event_1N ="C:/Users/rossgra/Box/OSU, CSC, CQC Project files/1N/1N_Summary_Event_1_exact.csv"
Event_1N = pd.read_csv(datafile_path_event_1N, skiprows=2)
# there is no second exact in phase 1N
#1N Survey
datafile_path_survey_1N = "C:/Users/rossgra/Box/OSU, CSC, CQC Project files/1N/1N_1H_Survey_summary_.csv"
Filter_1n_survey = pd.read_csv(datafile_path_survey_1N, skiprows=0)
#print(Filter_1n_survey.iloc[0:40, :])
Survey_1N = Filter_1n_survey.iloc[0:40,:]
#24 hour Kitchen pm breakdown
data_file_path_24_PM_1N = "C:/Users/rossgra/Box/OSU, CSC, CQC Project files/1N/1N_24_hour_Kitchen_PM.csv"
Kit_PM_1N_24hr = pd.read_csv(data_file_path_24_PM_1N, skiprows=0)
#24 hour Fuel Removal breakdown
data_file_path_24_Fuel_1N = "C:/Users/rossgra/Box/OSU, CSC, CQC Project files/1N/1N_24_hour_Fuel_removal.csv"
Fuel_remove_1N_24hr = pd.read_csv(data_file_path_24_Fuel_1N, skiprows=0)
#2N
datafile_path_day_2N ="C:/Users/rossgra/Box/OSU, CSC, CQC Project files/2N/2N_Summary_Day_1_exact.csv"
Day_2N = pd.read_csv(datafile_path_day_2N, skiprows=2)
datafile_path_event_2N_1 ="C:/Users/rossgra/Box/OSU, CSC, CQC Project files/2N/2N_Summary_Event_1_exact.csv"
Event_2N_1 = pd.read_csv(datafile_path_event_2N_1, skiprows=2)
#2N second Exact
datafile_path_event_2N_2 ="C:/Users/rossgra/Box/OSU, CSC, CQC Project files/2N/2N_Summary_Event_2_exact.csv"
Event_2N_2 = pd.read_csv(datafile_path_event_2N_2, skiprows=2)
#2N Survey
datafile_path_survey_2N = "C:/Users/rossgra/Box/OSU, CSC, CQC Project files/2N/2N_Survey_summary_.csv"
Survey_2N = pd.read_csv(datafile_path_survey_2N, skiprows=0)
#24 hour Kitchen pm breakdown
data_file_path_24_PM_2N = "C:/Users/rossgra/Box/OSU, CSC, CQC Project files/2N/2N_24_hour_Kitchen_PM.csv"
Kit_PM_2N_24hr = pd.read_csv(data_file_path_24_PM_2N, skiprows=0)
#24 hour Fuel Removal breakdown
data_file_path_24_Fuel_2N = "C:/Users/rossgra/Box/OSU, CSC, CQC Project files/2N/2N_24_hour_Fuel_removal.csv"
Fuel_remove_2N_24hr = pd.read_csv(data_file_path_24_Fuel_2N, skiprows=0)
#3N
datafile_path_day_3N ="C:/Users/rossgra/Box/OSU, CSC, CQC Project files/3N/3N_Summary_Day_1_exact.csv"
Day_3N = pd.read_csv(datafile_path_day_3N, skiprows=2)
datafile_path_event_3N_1 ="C:/Users/rossgra/Box/OSU, CSC, CQC Project files/3N/3N_Summary_Event_1_exact.csv"
Event_3N_1 = pd.read_csv(datafile_path_event_3N_1, skiprows=2)
#3N second Exact
datafile_path_event_3N_2 ="C:/Users/rossgra/Box/OSU, CSC, CQC Project files/3N/3N_Summary_Event_2_exact.csv"
Event_3N_2 = pd.read_csv(datafile_path_event_3N_2, skiprows=2)
#3N survey
datafile_path_survey_3N = "C:/Users/rossgra/Box/OSU, CSC, CQC Project files/3N/3N_Survey_summary_.csv"
Survey_3N = pd.read_csv(datafile_path_survey_3N, skiprows=0)
#24 hour Kitchen pm breakdown
data_file_path_24_PM_3N = "C:/Users/rossgra/Box/OSU, CSC, CQC Project files/3N/3N_24_hour_Kitchen_PM.csv"
Kit_PM_3N_24hr = pd.read_csv(data_file_path_24_PM_3N, skiprows=0)
#24 hour Fuel Removal breakdown
data_file_path_24_Fuel_3N = "C:/Users/rossgra/Box/OSU, CSC, CQC Project files/3N/3N_24_hour_Fuel_removal.csv"
Fuel_remove_3N_24hr = pd.read_csv(data_file_path_24_Fuel_3N, skiprows=0)
#4N
datafile_path_day_4N ="C:/Users/rossgra/Box/OSU, CSC, CQC Project files/4N/4N_Summary_Day_1_exact.csv"
Day_4N = pd.read_csv(datafile_path_day_4N, skiprows=2)
datafile_path_event_4N_1 ="C:/Users/rossgra/Box/OSU, CSC, CQC Project files/4N/4N_Summary_Event_1_exact.csv"
Event_4N_1 = pd.read_csv(datafile_path_event_4N_1, skiprows=2)
#4N second Exact
datafile_path_event_4N_2 ="C:/Users/rossgra/Box/OSU, CSC, CQC Project files/4N/4N_Summary_Event_2_exact.csv"
Event_4N_2 = pd.read_csv(datafile_path_event_4N_2, skiprows=2)
#4N Survey
datafile_path_survey_4N = "C:/Users/rossgra/Box/OSU, CSC, CQC Project files/4N/4N_Survey_summary_.csv"
Survey_4N = pd.read_csv(datafile_path_survey_4N, skiprows=0)
#24 hour Kitchen pm breakdown
data_file_path_24_PM_4N = "C:/Users/rossgra/Box/OSU, CSC, CQC Project files/4N/4N_24_hour_Kitchen_PM.csv"
Kit_PM_4N_24hr = pd.read_csv(data_file_path_24_PM_4N, skiprows=0)
#24 hour Fuel Removal breakdown
data_file_path_24_Fuel_4N = "C:/Users/rossgra/Box/OSU, CSC, CQC Project files/4N/4N_24_hour_Fuel_removal.csv"
Fuel_remove_4N_24hr = pd.read_csv(data_file_path_24_Fuel_4N, skiprows=0)
else:
#1H
datafile_path_day_1H ="C:/Users/rossgra/Box/OSU, CSC, CQC Project files/1H/1H_Summary_Day_1_exact.csv"
Day_1H = pd.read_csv(datafile_path_day_1H, skiprows=2)
datafile_path_event_1H ="C:/Users/rossgra/Box/OSU, CSC, CQC Project files/1H/1H_Summary_Event_1_exact.csv"
Event_1H = pd.read_csv(datafile_path_event_1H, skiprows=2)
#there is no second exact in phase 1H
    #1H Survey (rows 40 and after are the Hood portion; column 1 is the household number)
datafile_path_survey_1H = "C:/Users/rossgra/Box/OSU, CSC, CQC Project files/1N/1N_1H_Survey_summary_.csv"
Survey_1H = pd.read_csv(datafile_path_survey_1H, skiprows=40)
#24 hour Kitchen pm breakdown
data_file_path_24_PM_1H = "C:/Users/rossgra/Box/OSU, CSC, CQC Project files/1H/1H_24_hour_Kitchen_PM.csv"
Kit_PM_1H_24hr = pd.read_csv(data_file_path_24_PM_1H, skiprows=0)
#24 hour Fuel Removal breakdown
data_file_path_24_fuel_1H = "C:/Users/rossgra/Box/OSU, CSC, CQC Project files/1H/1H_24_hour_Fuel_removal.csv"
Fuel_remove_1H_24hr = pd.read_csv(data_file_path_24_fuel_1H, skiprows=0)
#2H
datafile_path_day_2H = "C:/Users/rossgra/Box/OSU, CSC, CQC Project files/2H/2H_Summary_Day_1_exact.csv"
Day_2H = pd.read_csv(datafile_path_day_2H, skiprows=2)
datafile_path_event_2H_1 ="C:/Users/rossgra/Box/OSU, CSC, CQC Project files/2H/2H_Summary_Event_1_exact.csv"
Event_2H_1 = pd.read_csv(datafile_path_event_2H_1, skiprows=2)
#2H second Exact
datafile_path_event_2H_2 ="C:/Users/rossgra/Box/OSU, CSC, CQC Project files/2H/2H_Summary_Event_2_exact.csv"
Event_2H_2 = pd.read_csv(datafile_path_event_2H_2, skiprows=2)
#2H survey
datafile_path_survey_2H = "C:/Users/rossgra/Box/OSU, CSC, CQC Project files/2H/2H_Survey_summary_.csv"
Survey_2H = pd.read_csv(datafile_path_survey_2H, skiprows=0)
#24 hour Kitchen pm breakdown
data_file_path_24_PM_2H = "C:/Users/rossgra/Box/OSU, CSC, CQC Project files/2H/2H_24_hour_Kitchen_PM.csv"
Kit_PM_2H_24hr = pd.read_csv(data_file_path_24_PM_2H, skiprows=0)
#24 hour Fuel Removal breakdown
data_file_path_24_fuel_2H = "C:/Users/rossgra/Box/OSU, CSC, CQC Project files/2H/2H_24_hour_Fuel_removal.csv"
Fuel_remove_2H_24hr = pd.read_csv(data_file_path_24_fuel_2H, skiprows=0)
#3H
datafile_path_day_3H = "C:/Users/rossgra/Box/OSU, CSC, CQC Project files/3H/3H_Summary_Day_1_exact.csv"
Day_3H = pd.read_csv(datafile_path_day_3H, skiprows=2)
    datafile_path_event_3H_1 = "C:/Users/rossgra/Box/OSU, CSC, CQC Project files/3H/3H_Summary_Event_1_exact.csv"
    Event_3H_1 = pd.read_csv(datafile_path_event_3H_1, skiprows=2)
#3H second Exact
datafile_path_event_3H_2 ="C:/Users/rossgra/Box/OSU, CSC, CQC Project files/3H/3H_Summary_Event_2_exact.csv"
Event_3H_2 = pd.read_csv(datafile_path_event_3H_2, skiprows=2)
#3H survey
datafile_path_survey_3H = "C:/Users/rossgra/Box/OSU, CSC, CQC Project files/3H/3H_Survey_summary_.csv"
Survey_3H = pd.read_csv(datafile_path_survey_3H, skiprows=0)
#24 hour Kitchen pm breakdown
data_file_path_24_PM_3H = "C:/Users/rossgra/Box/OSU, CSC, CQC Project files/3H/3H_24_hour_Kitchen_PM.csv"
Kit_PM_3H_24hr = pd.read_csv(data_file_path_24_PM_3H, skiprows=0)
#24 hour Fuel Removal breakdown
data_file_path_24_fuel_3H = "C:/Users/rossgra/Box/OSU, CSC, CQC Project files/3H/3H_24_hour_Fuel_removal.csv"
Fuel_remove_3H_24hr = pd.read_csv(data_file_path_24_fuel_3H, skiprows=0)
#time to start plotting fun things
#1st starting with the fuel per day per adult histogram and box plot
NO_hood_counter = np.arange(0,39)
hood_counter = np.arange(0,14)
#what household do you want to remove from the graphs (1046 is a dummy spacer)
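# The phase plots below repeatedly flag outliers with Tukey's fences
# (Q3 + 1.5*IQR and Q1 - 1.5*IQR).  A hypothetical helper like this one
# captures that rule; it is a sketch only and is not wired into the
# inline computations that follow.
def iqr_outlier_ids(values, ids):
    """Return the ids whose values fall outside the 1.5*IQR Tukey fences."""
    q1, q3 = np.percentile(values, [25, 75])
    top = q3 + 1.5 * (q3 - q1)
    low = q1 - 1.5 * (q3 - q1)
    return [ids[v] for v, a in enumerate(values) if a > top or a < low]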
print('---------------Fuel per Day per Adult No-Hood Phase---------------------')
if Hood_or_no == 'no_hood':
Fuel_per_day_per_adult_1N = []
f_d_a_1N = []
Fuel_per_day_per_adult_2N = []
f_d_a_2N = []
Fuel_per_day_per_adult_3N = []
f_d_a_3N = []
Fuel_per_day_per_adult_4N = []
f_d_a_4N =[]
count_t = 0
count_f = 0
for c in NO_hood_counter:
if c == (Household_removal[count_t] - C_Place_holder):
count_t = count_t + 1
if count_t == len(Household_removal):
count_t = 0
continue
if c == (Household_removal_NO_Hood_fuel_day_adult[count_f] - C_Place_holder):
count_f = count_f + 1
if count_f == len(Household_removal_NO_Hood_fuel_day_adult):
count_f = 0
continue
if Fuel_remove_1N_24hr.iloc[c,6]!= -1.00:
Fuel_per_day_per_adult_1N.append(Fuel_remove_1N_24hr.iloc[c,6]/Survey_1N.iloc[c,7])
f_d_a_1N.append(Day_1N.iloc[c,0])
if Fuel_remove_2N_24hr.iloc[c,6] != -1.00:
Fuel_per_day_per_adult_2N.append(Fuel_remove_2N_24hr.iloc[c,6] / Survey_2N.iloc[c, 7])
f_d_a_2N.append(Day_2N.iloc[c,0])
if Fuel_remove_3N_24hr.iloc[c,6] != -1.00:
Fuel_per_day_per_adult_3N.append(Fuel_remove_3N_24hr.iloc[c,6]/ Survey_3N.iloc[c, 7])
f_d_a_3N.append(Day_3N.iloc[c, 0])
if Fuel_remove_4N_24hr.iloc[c,6] != -1.00:
Fuel_per_day_per_adult_4N.append(Fuel_remove_4N_24hr.iloc[c,6] / Survey_4N.iloc[c, 7])
            f_d_a_4N.append(Day_4N.iloc[c, 0])
# percentage Change of Fuel per day between the phases
Fuel_per_day_per_adult_2N_1N = []
f_d_a_2N_1N = []
Fuel_per_day_per_adult_3N_1N = []
f_d_a_3N_1N = []
Fuel_per_day_per_adult_4N_1N = []
f_d_a_4N_1N = []
Fuel_per_day_per_adult_3N_2N = []
f_d_a_3N_2N = []
Fuel_per_day_per_adult_4N_3N = []
f_d_a_4N_3N = []
Fuel_per_day_per_adult_4N_2N = []
f_d_a_4N_2N = []
count_t = 0
count_f = 0
for c in NO_hood_counter:
if c == (Household_removal[count_t] - C_Place_holder):
count_t = count_t + 1
if count_t == len(Household_removal):
count_t = 0
continue
if c == (Household_removal_NO_Hood_fuel_day_adult[count_f] - C_Place_holder):
count_f = count_f + 1
if count_f == len(Household_removal_NO_Hood_fuel_day_adult):
count_f = 0
continue
if (len(Fuel_per_day_per_adult_2N)-1) >= c and (len(Fuel_per_day_per_adult_1N)-1) >= c:
if Day_1N.iloc[c,13] > 0 and Day_2N.iloc[c,13] > 0 and Day_1N.iloc[c,0] == Day_2N.iloc[c,0]:
Fuel_per_day_per_adult_2N_1N.append(Fuel_per_day_per_adult_2N[c]/Fuel_per_day_per_adult_1N[c])
f_d_a_2N_1N.append(Day_1N.iloc[c,0])
if (len(Fuel_per_day_per_adult_3N)-1) >= c and (len(Fuel_per_day_per_adult_1N)-1) >= c:
if Day_3N.iloc[c,13] > 0 and Day_1N.iloc[c,13] > 0 and Day_3N.iloc[c,0] == Day_1N.iloc[c,0]:
Fuel_per_day_per_adult_3N_1N.append(Fuel_per_day_per_adult_3N[c]/Fuel_per_day_per_adult_1N[c])
f_d_a_3N_1N.append(Day_1N.iloc[c,0])
if (len(Fuel_per_day_per_adult_4N)-1) >= c and (len(Fuel_per_day_per_adult_1N)-1) >= c:
if Day_4N.iloc[c,13] > 0 and Day_1N.iloc[c,13] > 0 and Day_4N.iloc[c,0] == Day_1N.iloc[c,0]:
Fuel_per_day_per_adult_4N_1N.append(Fuel_per_day_per_adult_4N[c]/Fuel_per_day_per_adult_1N[c])
f_d_a_4N_1N.append(Day_1N.iloc[c,0])
if (len(Fuel_per_day_per_adult_3N)-1) >= c and (len(Fuel_per_day_per_adult_2N)-1) >= c:
if Day_3N.iloc[c,13] > 0 and Day_2N.iloc[c,13] > 0 and Day_3N.iloc[c,0] == Day_2N.iloc[c,0]:
Fuel_per_day_per_adult_3N_2N.append(Fuel_per_day_per_adult_3N[c]/Fuel_per_day_per_adult_2N[c])
f_d_a_3N_2N.append(Day_2N.iloc[c,0])
if (len(Fuel_per_day_per_adult_4N)-1) >= c and (len(Fuel_per_day_per_adult_3N)-1) >= c:
if Day_4N.iloc[c,13] > 0 and Day_3N.iloc[c,13] > 0 and Day_4N.iloc[c,0] == Day_3N.iloc[c,0]:
Fuel_per_day_per_adult_4N_3N.append(Fuel_per_day_per_adult_4N[c]/Fuel_per_day_per_adult_3N[c])
f_d_a_4N_3N.append(Day_3N.iloc[c,0])
if (len(Fuel_per_day_per_adult_4N)-1) >= c and (len(Fuel_per_day_per_adult_2N)-1) >= c:
if Day_4N.iloc[c,13] > 0 and Day_2N.iloc[c,13] > 0 and Day_4N.iloc[c,0] == Day_2N.iloc[c,0]:
Fuel_per_day_per_adult_4N_2N.append(Fuel_per_day_per_adult_4N[c]/Fuel_per_day_per_adult_2N[c])
f_d_a_4N_2N.append(Day_4N.iloc[c,0])
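    # The index-based pairing above assumes households stay aligned row-for-row
    # across phases.  An order-independent alternative would match on household
    # id instead; this is a sketch only (hypothetical helper, not wired in):
    def phase_ratio(values_a, ids_a, values_b, ids_b):
        """Ratio of phase A over phase B for households present in both."""
        a = pd.Series(values_a, index=ids_a)
        b = pd.Series(values_b, index=ids_b)
        ratio = (a / b).dropna()
        return list(ratio.values), list(ratio.index)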
    # now for box plotting of Fuel per Day between Phases
#1N
sns.set(style="ticks")
f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={"height_ratios": (0.15, 0.85)})
sns.boxplot(Fuel_per_day_per_adult_1N, ax=ax_box, color='b')
sns.distplot(Fuel_per_day_per_adult_1N, ax=ax_hist, color='b')
ax_box.set(yticks=[])
sns.despine(ax=ax_hist)
sns.despine(ax=ax_box, left=True)
plt.title('1N Fuel per Day per Adult')
plt.ylim(top=2)
plt.ylim(bottom = 0)
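    # Each per-phase panel below repeats the same seaborn layout; a small
    # helper along these lines could replace the duplication (sketch only,
    # the explicit blocks are kept as written):
    def box_hist_panel(values, title, color):
        """Draw a boxplot above a histogram/KDE of the same values."""
        sns.set(style="ticks")
        fig, (ax_box, ax_hist) = plt.subplots(
            2, sharex=True, gridspec_kw={"height_ratios": (0.15, 0.85)})
        sns.boxplot(values, ax=ax_box, color=color)
        sns.distplot(values, ax=ax_hist, color=color)
        ax_box.set(yticks=[])
        sns.despine(ax=ax_hist)
        sns.despine(ax=ax_box, left=True)
        ax_hist.set_title(title)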
#2N
sns.set(style="ticks")
f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={"height_ratios": (0.15, 0.85)})
sns.boxplot(Fuel_per_day_per_adult_2N, ax=ax_box, color='g')
sns.distplot(Fuel_per_day_per_adult_2N, ax=ax_hist, color='g')
ax_box.set(yticks=[])
sns.despine(ax=ax_hist)
sns.despine(ax=ax_box, left=True)
plt.title('2N Fuel per Day per Adult')
plt.ylim(top=2)
plt.ylim(bottom = 0)
#3N
sns.set(style="ticks")
f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={"height_ratios": (0.15, 0.85)})
sns.boxplot(Fuel_per_day_per_adult_3N, ax=ax_box, color='r')
sns.distplot(Fuel_per_day_per_adult_3N, ax=ax_hist, color='r')
ax_box.set(yticks=[])
sns.despine(ax=ax_hist)
sns.despine(ax=ax_box, left=True)
plt.title('3N Fuel per Day per Adult')
plt.ylim(top=2)
plt.ylim(bottom = 0)
#4N
sns.set(style="ticks")
f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={"height_ratios": (0.15, 0.85)})
sns.boxplot(Fuel_per_day_per_adult_4N, ax=ax_box, color='y')
sns.distplot(Fuel_per_day_per_adult_4N, ax=ax_hist, color='y')
ax_box.set(yticks=[])
sns.despine(ax=ax_hist)
sns.despine(ax=ax_box, left=True)
plt.title('4N Fuel per Day per Adult')
plt.ylim(top=2)
plt.ylim(bottom = 0)
#Plotting on the same graph
fig, ax = plt.subplots()
plt.title('No-Hood Fuel per Day per Adult')
#plt.hold(True)
#1N
quant_1_1N = np.percentile(Fuel_per_day_per_adult_1N, [25,50,75])
Top_lim_1_1N = quant_1_1N[2] + 1.5*(quant_1_1N[2] - quant_1_1N[0])
Low_lim_1_1N = quant_1_1N[0] - 1.5*(quant_1_1N[2] - quant_1_1N[0])
bp_1 = plt.boxplot(Fuel_per_day_per_adult_1N, positions = [1], widths = 0.6)
Fuel_D_A_1N_outlier = []
for v,a in enumerate(Fuel_per_day_per_adult_1N):
if a > Top_lim_1_1N or a < Low_lim_1_1N:
Fuel_D_A_1N_outlier.append(f_d_a_1N[v])
plt.text(1,a,f_d_a_1N[v])
plt.text(1,0.1,'1N',color='b')
#2N
quant_1_2N = np.percentile(Fuel_per_day_per_adult_2N, [25,50,75])
Top_lim_1_2N = quant_1_2N[2] + 1.5*(quant_1_2N[2] - quant_1_2N[0])
Low_lim_1_2N = quant_1_2N[0] - 1.5*(quant_1_2N[2] - quant_1_2N[0])
bp_1 = plt.boxplot(Fuel_per_day_per_adult_2N,positions = [2], widths = 0.6)
Fuel_D_A_2N_outlier = []
for v,a in enumerate(Fuel_per_day_per_adult_2N):
if a > Top_lim_1_2N or a < Low_lim_1_2N:
Fuel_D_A_2N_outlier.append(f_d_a_2N[v])
plt.text(2,a,f_d_a_2N[v])
plt.text(2,0.1,'2N', color= 'g')
#3N
quant_1_3N = np.percentile(Fuel_per_day_per_adult_3N, [25,50,75])
Top_lim_1_3N = quant_1_3N[2] + 1.5*(quant_1_3N[2] - quant_1_3N[0])
Low_lim_1_3N = quant_1_3N[0] - 1.5*(quant_1_3N[2] - quant_1_3N[0])
bp_1 = plt.boxplot(Fuel_per_day_per_adult_3N,positions = [3], widths = 0.6)
count = 0
Fuel_D_A_3N_outlier = []
for v,a in enumerate(Fuel_per_day_per_adult_3N):
if a > Top_lim_1_3N or a < Low_lim_1_3N:
Fuel_D_A_3N_outlier.append(f_d_a_3N[v])
count = count + 1
if count == 2:
plt.text(3,a,f_d_a_3N[v],ha='left',va='bottom')
            else:
                plt.text(3, a, f_d_a_3N[v], ha='right', va='bottom')
plt.text(3,0.1,'3N', color='r')
#4N
quant_1_4N = np.percentile(Fuel_per_day_per_adult_4N, [25,50,75])
Top_lim_1_4N = quant_1_4N[2] + 1.5*(quant_1_4N[2] - quant_1_4N[0])
Low_lim_1_4N = quant_1_4N[0] - 1.5*(quant_1_4N[2] - quant_1_4N[0])
bp_1 = plt.boxplot(Fuel_per_day_per_adult_4N,positions = [4], widths = 0.6)
Fuel_D_A_4N_outlier = []
for v,a in enumerate(Fuel_per_day_per_adult_4N):
if a > Top_lim_1_4N or a < Low_lim_1_4N:
Fuel_D_A_4N_outlier.append(f_d_a_4N[v])
plt.text(4,a,f_d_a_4N[v])
plt.text(4,0.1,'4N', color='y')
plt.xlim(0,5)
plt.ylim(0,2.3)
print('Fuel/Day/Adult 1N had these values as outliers ', Fuel_D_A_1N_outlier)
print('Fuel/Day/Adult 2N had these values as outliers ', Fuel_D_A_2N_outlier)
print('Fuel/Day/Adult 3N had these values as outliers ', Fuel_D_A_3N_outlier)
print('Fuel/Day/Adult 4N had these values as outliers ', Fuel_D_A_4N_outlier)
plt.show()
# % change of fuel per day per adult between each phase
fig_2, ax2 = plt.subplots()
plt.title('% No_hood Change from Fuel per Day per Adult' )
#plt.hold(True)
#2N to 1N
quant_1_2N_1N = np.percentile(Fuel_per_day_per_adult_2N_1N, [25,50,75])
Top_lim_1_2N_1N = quant_1_2N_1N[2] + 1.5*(quant_1_2N_1N[2]-quant_1_2N_1N[0])
Low_lim_1_2N_1N = quant_1_2N_1N[0] - 1.5*(quant_1_2N_1N[2]-quant_1_2N_1N[0])
bp_1_1 = plt.boxplot(Fuel_per_day_per_adult_2N_1N, positions=[1], widths= 0.6)
Fuel_D_A_2N_1N_outlier = []
for v,a in enumerate(Fuel_per_day_per_adult_2N_1N):
if a > Top_lim_1_2N_1N or a < Low_lim_1_2N_1N:
Fuel_D_A_2N_1N_outlier.append(f_d_a_2N_1N[v])
plt.text(1, a, f_d_a_2N_1N[v])
plt.text(0.5, 0, '2N / 1N', color= 'g')
#3N to 1N
quant_1_3N_1N = np.percentile(Fuel_per_day_per_adult_3N_1N, [25,50,75])
Top_lim_1_3N_1N = quant_1_3N_1N[2] + 1.5*(quant_1_3N_1N[2]-quant_1_3N_1N[0])
Low_lim_1_3N_1N = quant_1_3N_1N[0] - 1.5*(quant_1_3N_1N[2]-quant_1_3N_1N[0])
bp_1_1 = plt.boxplot(Fuel_per_day_per_adult_3N_1N, positions=[2], widths= 0.6)
Fuel_D_A_3N_1N_outlier = []
for v,a in enumerate(Fuel_per_day_per_adult_3N_1N):
if a > Top_lim_1_3N_1N or a < Low_lim_1_3N_1N:
Fuel_D_A_3N_1N_outlier.append(f_d_a_3N_1N[v])
plt.text(2, a, f_d_a_3N_1N[v])
plt.text(1.5, 0, '3N / 1N', color= 'r')
#4N to 1N
quant_1_4N_1N = np.percentile(Fuel_per_day_per_adult_4N_1N, [25,50,75])
Top_lim_1_4N_1N = quant_1_4N_1N[2] + 1.5*(quant_1_4N_1N[2]-quant_1_4N_1N[0])
Low_lim_1_4N_1N = quant_1_4N_1N[0] - 1.5*(quant_1_4N_1N[2]-quant_1_4N_1N[0])
bp_1_1 = plt.boxplot(Fuel_per_day_per_adult_4N_1N, positions=[3], widths= 0.6)
Fuel_D_A_4N_1N_outlier = []
for v,a in enumerate(Fuel_per_day_per_adult_4N_1N):
if a > Top_lim_1_4N_1N or a < Low_lim_1_4N_1N:
Fuel_D_A_4N_1N_outlier.append(f_d_a_4N_1N[v])
plt.text(3, a, f_d_a_4N_1N[v])
plt.text(2.5, 0, '4N / 1N', color= 'y')
#3N to 2N
quant_1_3N_2N = np.percentile(Fuel_per_day_per_adult_3N_2N, [25,50,75])
Top_lim_1_3N_2N = quant_1_3N_2N[2] + 1.5*(quant_1_3N_2N[2]-quant_1_3N_2N[0])
Low_lim_1_3N_2N = quant_1_3N_2N[0] - 1.5*(quant_1_3N_2N[2]-quant_1_3N_2N[0])
bp_1_1 = plt.boxplot(Fuel_per_day_per_adult_3N_2N, positions=[4], widths= 0.6)
Fuel_D_A_3N_2N_outlier = []
for v,a in enumerate(Fuel_per_day_per_adult_3N_2N):
if a > Top_lim_1_3N_2N or a < Low_lim_1_3N_2N:
Fuel_D_A_3N_2N_outlier.append(f_d_a_3N_2N[v])
plt.text(4, a, f_d_a_3N_2N[v])
plt.text(3.5, 0, '3N / 2N', color= 'm')
#4N to 3N
quant_1_4N_3N = np.percentile(Fuel_per_day_per_adult_4N_3N, [25,50,75])
Top_lim_1_4N_3N = quant_1_4N_3N[2] + 1.5*(quant_1_4N_3N[2]-quant_1_4N_3N[0])
Low_lim_1_4N_3N = quant_1_4N_3N[0] - 1.5*(quant_1_4N_3N[2]-quant_1_4N_3N[0])
bp_1_1 = plt.boxplot(Fuel_per_day_per_adult_4N_3N, positions=[5], widths= 0.6)
Fuel_D_A_4N_3N_outlier = []
for v,a in enumerate(Fuel_per_day_per_adult_4N_3N):
if a > Top_lim_1_4N_3N or a < Low_lim_1_4N_3N:
Fuel_D_A_4N_3N_outlier.append(f_d_a_4N_3N[v])
plt.text(5, a, f_d_a_4N_3N[v])
plt.text(4.5, 0, '4N / 3N', color= 'k')
#4N to 2N
quant_1_4N_2N = np.percentile(Fuel_per_day_per_adult_4N_2N, [25,50,75])
Top_lim_1_4N_2N = quant_1_4N_2N[2] + 1.5*(quant_1_4N_2N[2]-quant_1_4N_2N[0])
Low_lim_1_4N_2N = quant_1_4N_2N[0] - 1.5*(quant_1_4N_2N[2]-quant_1_4N_2N[0])
bp_1_1 = plt.boxplot(Fuel_per_day_per_adult_4N_2N, positions=[6], widths= 0.6)
Fuel_D_A_4N_2N_outlier = []
for v,a in enumerate(Fuel_per_day_per_adult_4N_2N):
if a > Top_lim_1_4N_2N or a < Low_lim_1_4N_2N:
Fuel_D_A_4N_2N_outlier.append(f_d_a_4N_2N[v])
plt.text(6, a, f_d_a_4N_2N[v])
plt.text(5.5, 0, '4N / 2N', color= 'tab:orange')
plt.xlim(0,7)
plt.ylim(-0.5,4)
print('Fuel/Day/Adult 2N/1N had these values as outliers ', Fuel_D_A_2N_1N_outlier)
print('Fuel/Day/Adult 3N/1N had these values as outliers ', Fuel_D_A_3N_1N_outlier)
print('Fuel/Day/Adult 4N/1N had these values as outliers ', Fuel_D_A_4N_1N_outlier)
print('Fuel/Day/Adult 3N/2N had these values as outliers ', Fuel_D_A_3N_2N_outlier)
print('Fuel/Day/Adult 4N/3N had these values as outliers ', Fuel_D_A_4N_3N_outlier)
print('Fuel/Day/Adult 4N/2N had these values as outliers ', Fuel_D_A_4N_2N_outlier)
plt.show()
#adding averages to the tables
quant_1_1N = np.append(quant_1_1N, np.average(Fuel_per_day_per_adult_1N))
quant_1_2N = np.append(quant_1_2N, np.average(Fuel_per_day_per_adult_2N))
quant_1_3N = np.append(quant_1_3N, np.average(Fuel_per_day_per_adult_3N))
quant_1_4N = np.append(quant_1_4N, np.average(Fuel_per_day_per_adult_4N))
D_50_quant_phase_f_d_a = {'Percentile %': ['25','50','75', 'Avg'], '1N': quant_1_1N, '2N': quant_1_2N,'3N' : quant_1_3N,'4N': quant_1_4N}
F_D_A_50_phase_no_hood = pd.DataFrame(data=D_50_quant_phase_f_d_a, columns=['Percentile %','1N', '2N', '3N','4N'])
quant_1_2N_1N = np.append(quant_1_2N_1N , np.average(Fuel_per_day_per_adult_2N_1N))
quant_1_3N_1N = np.append(quant_1_3N_1N , np.average(Fuel_per_day_per_adult_3N_1N))
quant_1_4N_1N = np.append(quant_1_4N_1N , np.average(Fuel_per_day_per_adult_4N_1N))
quant_1_3N_2N = np.append(quant_1_3N_2N , np.average(Fuel_per_day_per_adult_3N_2N))
quant_1_4N_3N = np.append(quant_1_4N_3N , np.average(Fuel_per_day_per_adult_4N_3N))
quant_1_4N_2N = np.append(quant_1_4N_2N , np.average(Fuel_per_day_per_adult_4N_2N))
D_50_quant_percent_f_d_a ={'Percentile %': ['25','50','75', 'Avg'],'2N / 1N': quant_1_2N_1N,'3N / 1N': quant_1_3N_1N,'4N / 1N': quant_1_4N_1N,
'3N / 2N': quant_1_3N_2N,'4N / 3N': quant_1_4N_3N,'4N / 2N': quant_1_4N_2N}
F_D_A_50_percent_change_no_hood = pd.DataFrame(data=D_50_quant_percent_f_d_a, columns=['Percentile %','2N / 1N','3N / 1N', '4N / 1N'
,'3N / 2N','4N / 3N','4N / 2N'])
print(F_D_A_50_phase_no_hood)
print(F_D_A_50_percent_change_no_hood)
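    # The 25/50/75/Avg tables above (and their counterparts printed later) all
    # share one shape; a hypothetical builder could remove the repetition:
    def percentile_table(columns):
        """Build a 25/50/75/Avg summary from {label: list-of-values} pairs."""
        rows = {'Percentile %': ['25', '50', '75', 'Avg']}
        for label, data in columns.items():
            rows[label] = np.append(np.percentile(data, [25, 50, 75]),
                                    np.average(data))
        return pd.DataFrame(data=rows, columns=list(rows.keys()))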
# add more
print('-------------------Fuel per Day per Adult Hood Phase -------------------')
if Hood_or_no == 'hood':
Fuel_per_day_per_adult_1H = []
f_d_a_1H = []
Fuel_per_day_per_adult_2H = []
f_d_a_2H = []
Fuel_per_day_per_adult_3H = []
f_d_a_3H = []
count_t = 0
count_f = 0
for c in hood_counter:
if c == (Household_removal[count_t] - C_Place_holder):
count_t = count_t + 1
if count_t == len(Household_removal):
count_t = 0
continue
if c == (Household_removal_Hood_fuel_day_adult[count_f] - C_Place_holder):
count_f = count_f + 1
if count_f == len(Household_removal_Hood_fuel_day_adult):
count_f = 0
continue
if Fuel_remove_1H_24hr.iloc[c,6] != -1.00:
Fuel_per_day_per_adult_1H.append(Fuel_remove_1H_24hr.iloc[c,6]/Survey_1H.iloc[c,7])
f_d_a_1H.append(Day_1H.iloc[c,0])
if Fuel_remove_2H_24hr.iloc[c,6] != -1.00:
Fuel_per_day_per_adult_2H.append(Fuel_remove_2H_24hr.iloc[c,6] / Survey_2H.iloc[c, 7])
f_d_a_2H.append(Day_2H.iloc[c,0])
if Fuel_remove_3H_24hr.iloc[c,6] != -1.00:
Fuel_per_day_per_adult_3H.append(Fuel_remove_3H_24hr.iloc[c,6]/ Survey_3H.iloc[c, 7])
f_d_a_3H.append(Day_3H.iloc[c, 0])
# percentage Change of Fuel per day between the phases
Fuel_per_day_per_adult_2H_1H = []
f_d_a_2H_1H = []
Fuel_per_day_per_adult_3H_1H = []
f_d_a_3H_1H = []
Fuel_per_day_per_adult_3H_2H = []
f_d_a_3H_2H = []
count_t = 0
count_f = 0
for c in hood_counter:
if c == (Household_removal[count_t] - C_Place_holder):
count_t = count_t + 1
if count_t == len(Household_removal):
count_t = 0
continue
if c == (Household_removal_Hood_fuel_day_adult[count_f] - C_Place_holder):
count_f = count_f + 1
if count_f == len(Household_removal_Hood_fuel_day_adult):
count_f = 0
continue
if (len(Fuel_per_day_per_adult_2H)-1) >= c and (len(Fuel_per_day_per_adult_1H)-1) >= c:
if Day_1H.iloc[c,13] > 0 and Day_2H.iloc[c,13] > 0 and Day_1H.iloc[c,0] == Day_2H.iloc[c,0]:
Fuel_per_day_per_adult_2H_1H.append(Fuel_per_day_per_adult_2H[c]/Fuel_per_day_per_adult_1H[c])
f_d_a_2H_1H.append(Day_1H.iloc[c,0])
if (len(Fuel_per_day_per_adult_3H)-1) >= c and (len(Fuel_per_day_per_adult_1H)-1) >= c:
if Day_3H.iloc[c,13] > 0 and Day_1H.iloc[c,13] > 0 and Day_3H.iloc[c,0] == Day_1H.iloc[c,0]:
Fuel_per_day_per_adult_3H_1H.append(Fuel_per_day_per_adult_3H[c]/Fuel_per_day_per_adult_1H[c])
f_d_a_3H_1H.append(Day_1H.iloc[c,0])
if (len(Fuel_per_day_per_adult_3H)-1) >= c and (len(Fuel_per_day_per_adult_2H)-1) >= c:
if Day_3H.iloc[c,13] > 0 and Day_2H.iloc[c,13] > 0 and Day_3H.iloc[c,0] == Day_2H.iloc[c,0]:
Fuel_per_day_per_adult_3H_2H.append(Fuel_per_day_per_adult_3H[c]/Fuel_per_day_per_adult_2H[c])
                f_d_a_3H_2H.append(Day_2H.iloc[c,0])
# now for plotting
#1H
sns.set(style="ticks")
f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={"height_ratios": (0.15, 0.85)})
sns.boxplot(Fuel_per_day_per_adult_1H, ax=ax_box, color='b')
sns.distplot(Fuel_per_day_per_adult_1H, ax=ax_hist, color='b')
ax_box.set(yticks=[])
sns.despine(ax=ax_hist)
sns.despine(ax=ax_box, left=True)
plt.title('1H Fuel per Day per Adult')
plt.ylim(top=2)
plt.ylim(bottom = 0)
#2H
sns.set(style="ticks")
f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={"height_ratios": (0.15, 0.85)})
sns.boxplot(Fuel_per_day_per_adult_2H, ax=ax_box, color='g')
sns.distplot(Fuel_per_day_per_adult_2H, ax=ax_hist, color='g')
ax_box.set(yticks=[])
sns.despine(ax=ax_hist)
sns.despine(ax=ax_box, left=True)
plt.title('2H Fuel per Day per Adult')
plt.ylim(top=2)
plt.ylim(bottom = 0)
#3H
sns.set(style="ticks")
f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={"height_ratios": (0.15, 0.85)})
sns.boxplot(Fuel_per_day_per_adult_3H, ax=ax_box, color='r')
sns.distplot(Fuel_per_day_per_adult_3H, ax=ax_hist, color='r')
ax_box.set(yticks=[])
sns.despine(ax=ax_hist)
sns.despine(ax=ax_box, left=True)
plt.title('3H Fuel per Day per Adult')
plt.ylim(top=2)
plt.ylim(bottom = 0)
fig_2, ax_2 = plt.subplots()
plt.title('Hood Fuel per Day per Adult')
#plt.hold(True)
quant_1_1H = np.percentile(Fuel_per_day_per_adult_1H, [25,50,75])
Top_lim_1_1H = quant_1_1H[2] + 1.5*(quant_1_1H[2] - quant_1_1H[0])
Low_lim_1_1H = quant_1_1H[0] - 1.5*(quant_1_1H[2] - quant_1_1H[0])
bp_1 = plt.boxplot(Fuel_per_day_per_adult_1H, positions = [1], widths = 0.6)
Fuel_D_A_1H_outlier = []
for v,a in enumerate(Fuel_per_day_per_adult_1H):
if a > Top_lim_1_1H or a < Low_lim_1_1H:
Fuel_D_A_1H_outlier.append(f_d_a_1H[v])
plt.text(1,a,f_d_a_1H[v])
plt.text(1,0,'1H',color='b')
quant_1_2H = np.percentile(Fuel_per_day_per_adult_2H, [25,50,75])
Top_lim_1_2H = quant_1_2H[2] + 1.5*(quant_1_2H[2] - quant_1_2H[0])
Low_lim_1_2H = quant_1_2H[0] - 1.5*(quant_1_2H[2] - quant_1_2H[0])
bp_1 = plt.boxplot(Fuel_per_day_per_adult_2H,positions = [2], widths = 0.6)
count = 0
Fuel_D_A_2H_outlier = []
for v,a in enumerate(Fuel_per_day_per_adult_2H):
if a > Top_lim_1_2H or a < Low_lim_1_2H:
Fuel_D_A_2H_outlier.append(f_d_a_2H[v])
count = count + 1
            if count == 1:
                plt.text(2, a, f_d_a_2H[v], ha='left', va='bottom')
            else:
                plt.text(2, a, f_d_a_2H[v], ha='right', va='bottom')
plt.text(2,0,'2H', color= 'g')
quant_1_3H = np.percentile(Fuel_per_day_per_adult_3H, [25,50,75])
Top_lim_1_3H = quant_1_3H[2] + 1.5*(quant_1_3H[2] - quant_1_3H[0])
Low_lim_1_3H = quant_1_3H[0] - 1.5*(quant_1_3H[2] - quant_1_3H[0])
bp_1 = plt.boxplot(Fuel_per_day_per_adult_3H,positions = [3], widths = 0.6)
count = 0
Fuel_D_A_3H_outlier = []
for v,a in enumerate(Fuel_per_day_per_adult_3H):
if a > Top_lim_1_3H or a < Low_lim_1_3H:
Fuel_D_A_3H_outlier.append(f_d_a_3H[v])
count = count + 1
if count == 3:
plt.text(3,a,f_d_a_3H[v],ha='left',va='bottom')
elif count != 1:
plt.text(3,a,f_d_a_3H[v],ha='right',va='bottom')
plt.text(3,0,'3H', color='r')
    plt.xlim(0, 4)
plt.ylim(-0.25,2.5)
print('Fuel/Day/Adult 1H had these values as outliers ', Fuel_D_A_1H_outlier)
print('Fuel/Day/Adult 2H had these values as outliers ', Fuel_D_A_2H_outlier)
print('Fuel/Day/Adult 3H had these values as outliers ', Fuel_D_A_3H_outlier)
plt.show()
    #% change of fuel per day per adult between each phase
fig_2, ax2 = plt.subplots()
    plt.title('% Hood Change from Fuel per Day per Adult')
#plt.hold(True)
#2H to 1H
quant_1_2H_1H = np.percentile(Fuel_per_day_per_adult_2H_1H, [25,50,75])
Top_lim_1_2H_1H = quant_1_2H_1H[2] + 1.5*(quant_1_2H_1H[2]-quant_1_2H_1H[0])
Low_lim_1_2H_1H = quant_1_2H_1H[0] - 1.5*(quant_1_2H_1H[2]-quant_1_2H_1H[0])
bp_1_1 = plt.boxplot(Fuel_per_day_per_adult_2H_1H, positions=[1], widths= 0.6)
Fuel_D_A_2H_1H_outlier = []
for v,a in enumerate(Fuel_per_day_per_adult_2H_1H):
if a > Top_lim_1_2H_1H or a < Low_lim_1_2H_1H:
Fuel_D_A_2H_1H_outlier.append(f_d_a_2H_1H[v])
plt.text(1, a, f_d_a_2H_1H[v])
plt.text(0.75, -0.25, '2H / 1H', color= 'g')
#3H to 1H
quant_1_3H_1H = np.percentile(Fuel_per_day_per_adult_3H_1H, [25,50,75])
Top_lim_1_3H_1H = quant_1_3H_1H[2] + 1.5*(quant_1_3H_1H[2]-quant_1_3H_1H[0])
Low_lim_1_3H_1H = quant_1_3H_1H[0] - 1.5*(quant_1_3H_1H[2]-quant_1_3H_1H[0])
bp_1_1 = plt.boxplot(Fuel_per_day_per_adult_3H_1H, positions=[2], widths= 0.6)
Fuel_D_A_3H_1H_outlier = []
for v,a in enumerate(Fuel_per_day_per_adult_3H_1H):
if a > Top_lim_1_3H_1H or a < Low_lim_1_3H_1H:
Fuel_D_A_3H_1H_outlier.append(f_d_a_3H_1H[v])
plt.text(2, a, f_d_a_3H_1H[v])
plt.text(1.75, -0.25, '3H / 1H', color= 'r')
#3H to 2H
quant_1_3H_2H = np.percentile(Fuel_per_day_per_adult_3H_2H, [25,50,75])
Top_lim_1_3H_2H = quant_1_3H_2H[2] + 1.5*(quant_1_3H_2H[2]-quant_1_3H_2H[0])
Low_lim_1_3H_2H = quant_1_3H_2H[0] - 1.5*(quant_1_3H_2H[2]-quant_1_3H_2H[0])
bp_1_1 = plt.boxplot(Fuel_per_day_per_adult_3H_2H, positions=[3], widths= 0.6)
Fuel_D_A_3H_2H_outlier = []
for v,a in enumerate(Fuel_per_day_per_adult_3H_2H):
if a > Top_lim_1_3H_2H or a < Low_lim_1_3H_2H:
Fuel_D_A_3H_2H_outlier.append(f_d_a_3H_2H[v])
plt.text(3, a, f_d_a_3H_2H[v])
    plt.text(2.75, -0.25, '3H / 2H', color='m')
    plt.xlim(0, 4)
plt.ylim(-0.25,6)
print('Fuel/Day/Adult 2H/1H had these values as outliers ', Fuel_D_A_2H_1H_outlier)
print('Fuel/Day/Adult 3H/1H had these values as outliers ', Fuel_D_A_3H_1H_outlier)
print('Fuel/Day/Adult 3H/2H had these values as outliers ', Fuel_D_A_3H_2H_outlier)
plt.show()
quant_1_1H = np.append(quant_1_1H, np.average(Fuel_per_day_per_adult_1H))
quant_1_2H = np.append(quant_1_2H, np.average(Fuel_per_day_per_adult_2H))
quant_1_3H = np.append(quant_1_3H, np.average(Fuel_per_day_per_adult_3H))
D_50_quant_phase_f_d_a_hood = {'Percentile %': ['25','50','75', 'Avg'], '1H': quant_1_1H, '2H': quant_1_2H,'3H' : quant_1_3H}
F_D_A_50_phase_hood = pd.DataFrame(data=D_50_quant_phase_f_d_a_hood, columns=['Percentile %','1H', '2H','3H'] )
quant_1_2H_1H = np.append(quant_1_2H_1H , np.average(Fuel_per_day_per_adult_2H_1H))
quant_1_3H_1H = np.append(quant_1_3H_1H , np.average(Fuel_per_day_per_adult_3H_1H))
quant_1_3H_2H = np.append(quant_1_3H_2H , np.average(Fuel_per_day_per_adult_3H_2H))
D_50_quant_percent_f_d_a_hood ={'Percentile %': ['25','50','75', 'Avg'],'2H / 1H': quant_1_2H_1H,'3H / 1H': quant_1_3H_1H,'3H / 2H': quant_1_3H_2H}
F_D_A_50_percent_change_hood = pd.DataFrame(data=D_50_quant_percent_f_d_a_hood, columns=['Percentile %','2H / 1H','3H / 1H','3H / 2H'])
print(F_D_A_50_phase_hood)
print(F_D_A_50_percent_change_hood)
print('----------------------- Kitchen PM per Day -----------------------------')
if Hood_or_no == 'no_hood':
Kit_PM_per_day_1N = []
K_PM_D_1N = []
Kit_PM_per_day_2N = []
K_PM_D_2N = []
Kit_PM_per_day_3N = []
K_PM_D_3N = []
Kit_PM_per_day_4N = []
K_PM_D_4N = []
count_t = 0
count_pm = 0
for c in NO_hood_counter:
if c == (Household_removal[count_t] - C_Place_holder):
count_t = count_t + 1
if count_t == len(Household_removal):
count_t = 0
continue
if c == (Household_removal_NO_Hood_PM[count_pm] - C_Place_holder):
count_pm = count_pm + 1
if count_pm == len(Household_removal_NO_Hood_PM):
count_pm = 0
continue
# if Day_1N.iloc[c,7] != -1.00:
# Kit_PM_per_day_1N.append(Day_1N.iloc[c,7]/Day_1N.iloc[c,1])
# K_PM_D_1N.append(Day_1N.iloc[c,0])
if Kit_PM_1N_24hr.iloc[c,6] != -1.00:
Kit_PM_per_day_1N.append(Kit_PM_1N_24hr.iloc[c,6])
K_PM_D_1N.append(Kit_PM_1N_24hr.iloc[c, 0])
#if Day_2N.iloc[c, 7] != -1.00:
# Kit_PM_per_day_2N.append(Day_2N.iloc[c,7]/Day_2N.iloc[c,1])
# K_PM_D_2N.append(Day_2N.iloc[c,0])
if Kit_PM_2N_24hr.iloc[c, 6] != -1.00:
Kit_PM_per_day_2N.append(Kit_PM_2N_24hr.iloc[c, 6])
K_PM_D_2N.append(Kit_PM_2N_24hr.iloc[c, 0])
# if Day_3N.iloc[c, 7] != -1.00:
# Kit_PM_per_day_3N.append(Day_3N.iloc[c,7]/Day_3N.iloc[c,1])
# K_PM_D_3N.append(Day_3N.iloc[c, 0])
if Kit_PM_3N_24hr.iloc[c, 6] != -1.00:
Kit_PM_per_day_3N.append(Kit_PM_3N_24hr.iloc[c, 6])
K_PM_D_3N.append(Kit_PM_3N_24hr.iloc[c, 0])
# if Day_4N.iloc[c, 7] != -1.00:
# Kit_PM_per_day_4N.append(Day_4N.iloc[c,7]/Day_4N.iloc[c,1])
# K_PM_D_4N.append(Day_4N.iloc[c, 0])
if Kit_PM_4N_24hr.iloc[c, 6] != -1.00:
Kit_PM_per_day_4N.append(Kit_PM_4N_24hr.iloc[c, 6])
K_PM_D_4N.append(Kit_PM_4N_24hr.iloc[c, 0])
# percentages Between Phases of kitchen PM per day
Kit_per_day_2N_1N = []
K_PM_D_2N_1N = []
Kit_per_day_3N_1N = []
K_PM_D_3N_1N = []
Kit_per_day_4N_1N = []
K_PM_D_4N_1N = []
Kit_per_day_3N_2N = []
K_PM_D_3N_2N = []
Kit_per_day_4N_3N = []
K_PM_D_4N_3N = []
Kit_per_day_4N_2N = []
K_PM_D_4N_2N = []
count_t = 0
count_pm = 0
for c in NO_hood_counter:
if c == (Household_removal[count_t] - C_Place_holder):
count_t = count_t + 1
if count_t == len(Household_removal):
count_t = 0
continue
if c == (Household_removal_NO_Hood_PM[count_pm] - C_Place_holder):
count_pm = count_pm + 1
if count_pm == len(Household_removal_NO_Hood_PM):
count_pm = 0
continue
if (len(Kit_PM_per_day_2N)-1) >= c and (len(Kit_PM_per_day_1N)-1) >= c:
#if Day_1N.iloc[c,7] > 0 and Day_2N.iloc[c,7] > 0 and Day_1N.iloc[c,0] == Day_2N.iloc[c,0]:
if Kit_PM_1N_24hr.iloc[c,6] > 0 and Kit_PM_2N_24hr.iloc[c,6] > 0 and Kit_PM_1N_24hr.iloc[c,0] == Kit_PM_2N_24hr.iloc[c,0]:
Kit_per_day_2N_1N.append(Kit_PM_per_day_2N[c]/Kit_PM_per_day_1N[c])
K_PM_D_2N_1N.append(Day_1N.iloc[c,0])
if (len(Kit_PM_per_day_3N)-1) >= c and (len(Kit_PM_per_day_1N)-1) >= c:
#if Day_3N.iloc[c,7] > 0 and Day_1N.iloc[c,7] > 0 and Day_3N.iloc[c,0] == Day_1N.iloc[c,0]:
if Kit_PM_3N_24hr.iloc[c, 6] > 0 and Kit_PM_1N_24hr.iloc[c, 6] > 0 and Kit_PM_3N_24hr.iloc[c, 0] == \
Kit_PM_1N_24hr.iloc[c, 0]:
Kit_per_day_3N_1N.append(Kit_PM_per_day_3N[c]/Kit_PM_per_day_1N[c])
K_PM_D_3N_1N.append(Day_1N.iloc[c,0])
if (len(Kit_PM_per_day_4N)-1) >= c and (len(Kit_PM_per_day_1N)-1) >= c:
#if Day_4N.iloc[c,7] > 0 and Day_1N.iloc[c,7] > 0 and Day_4N.iloc[c,0] == Day_1N.iloc[c,0]:
if Kit_PM_4N_24hr.iloc[c, 6] > 0 and Kit_PM_1N_24hr.iloc[c, 6] > 0 and Kit_PM_4N_24hr.iloc[c, 0] == \
Kit_PM_1N_24hr.iloc[c, 0]:
Kit_per_day_4N_1N.append(Kit_PM_per_day_4N[c]/Kit_PM_per_day_1N[c])
K_PM_D_4N_1N.append(Day_1N.iloc[c,0])
if (len(Kit_PM_per_day_3N)-1) >= c and (len(Kit_PM_per_day_2N)-1) >= c:
#if Day_3N.iloc[c,7] > 0 and Day_2N.iloc[c,7] > 0 and Day_3N.iloc[c,0] == Day_2N.iloc[c,0]:
if Kit_PM_3N_24hr.iloc[c, 6] > 0 and Kit_PM_2N_24hr.iloc[c, 6] > 0 and Kit_PM_3N_24hr.iloc[c, 0] == \
Kit_PM_2N_24hr.iloc[c, 0]:
Kit_per_day_3N_2N.append(Kit_PM_per_day_3N[c]/Kit_PM_per_day_2N[c])
K_PM_D_3N_2N.append(Day_2N.iloc[c,0])
if (len(Kit_PM_per_day_4N)-1) >= c and (len(Kit_PM_per_day_3N)-1) >= c:
#if Day_4N.iloc[c,7] > 0 and Day_3N.iloc[c,7] > 0 and Day_4N.iloc[c,0] == Day_3N.iloc[c,0]:
if Kit_PM_4N_24hr.iloc[c, 6] > 0 and Kit_PM_3N_24hr.iloc[c, 6] > 0 and Kit_PM_3N_24hr.iloc[c, 0] == \
Kit_PM_4N_24hr.iloc[c, 0]:
Kit_per_day_4N_3N.append(Kit_PM_per_day_4N[c]/Kit_PM_per_day_3N[c])
K_PM_D_4N_3N.append(Day_3N.iloc[c,0])
if (len(Kit_PM_per_day_4N)-1) >= c and (len(Kit_PM_per_day_2N)-1) >= c:
#if Day_4N.iloc[c,7] > 0 and Day_2N.iloc[c,7] > 0 and Day_4N.iloc[c,0] == Day_2N.iloc[c,0]:
            if Kit_PM_4N_24hr.iloc[c, 6] > 0 and Kit_PM_2N_24hr.iloc[c, 6] > 0 and Kit_PM_4N_24hr.iloc[c, 0] == \
                    Kit_PM_2N_24hr.iloc[c, 0]:
Kit_per_day_4N_2N.append(Kit_PM_per_day_4N[c]/Kit_PM_per_day_2N[c])
K_PM_D_4N_2N.append(Day_4N.iloc[c,0])
# now for box plotting for Kitchen PM per day percent changes
#2N to 1N
sns.set(style="ticks")
f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={"height_ratios": (0.15, 0.85)})
sns.boxplot(Kit_per_day_2N_1N, ax=ax_box, color='g')
sns.distplot(Kit_per_day_2N_1N, ax=ax_hist, color='g')
ax_box.set(yticks=[])
sns.despine(ax=ax_hist)
sns.despine(ax=ax_box, left=True)
plt.title('% 2N/1N (Kitchen PM per Day)')
plt.ylim(top=2)
plt.ylim(bottom = 0)
#3N to 1N
sns.set(style="ticks")
f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={"height_ratios": (0.15, 0.85)})
sns.boxplot(Kit_per_day_3N_1N, ax=ax_box, color='r')
sns.distplot(Kit_per_day_3N_1N, ax=ax_hist, color='r')
ax_box.set(yticks=[])
sns.despine(ax=ax_hist)
sns.despine(ax=ax_box, left=True)
plt.title('% 3N/1N (Kitchen PM per Day)')
plt.ylim(top=2)
plt.ylim(bottom = 0)
#4N to 1N
sns.set(style="ticks")
f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={"height_ratios": (0.15, 0.85)})
sns.boxplot(Kit_per_day_4N_1N, ax=ax_box, color='y')
sns.distplot(Kit_per_day_4N_1N, ax=ax_hist, color='y')
ax_box.set(yticks=[])
sns.despine(ax=ax_hist)
sns.despine(ax=ax_box, left=True)
plt.title('% 4N/1N (Kitchen PM per Day)')
plt.ylim(top=2)
plt.ylim(bottom = 0)
#3N to 2N
sns.set(style="ticks")
f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={"height_ratios": (0.15, 0.85)})
sns.boxplot(Kit_per_day_3N_2N, ax=ax_box, color='m')
sns.distplot(Kit_per_day_3N_2N, ax=ax_hist, color='m')
ax_box.set(yticks=[])
sns.despine(ax=ax_hist)
sns.despine(ax=ax_box, left=True)
plt.title('% 3N/2N (Kitchen PM per Day)')
plt.ylim(top=2)
plt.ylim(bottom = 0)
#4N to 3N
sns.set(style="ticks")
f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={"height_ratios": (0.15, 0.85)})
sns.boxplot(Kit_per_day_4N_3N, ax=ax_box, color='k')
sns.distplot(Kit_per_day_4N_3N, ax=ax_hist, color='k')
ax_box.set(yticks=[])
sns.despine(ax=ax_hist)
sns.despine(ax=ax_box, left=True)
plt.title('% 4N/3N (Kitchen PM per Day)')
plt.ylim(top=2)
plt.ylim(bottom = 0)
#4N to 2N
sns.set(style="ticks")
f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={"height_ratios": (0.15, 0.85)})
sns.boxplot(Kit_per_day_4N_2N, ax=ax_box, color='tab:orange')
sns.distplot(Kit_per_day_4N_2N, ax=ax_hist, color='tab:orange')
ax_box.set(yticks=[])
sns.despine(ax=ax_hist)
sns.despine(ax=ax_box, left=True)
plt.title('% 4N/2N (Kitchen PM per Day)')
plt.ylim(top=2)
plt.ylim(bottom = 0)
#Plotting on the same graph
fig, ax = plt.subplots()
plt.title('No-Hood Kitchen PM per day')
#plt.hold()
#1N
quant_1_1N = np.percentile(Kit_PM_per_day_1N, [25,50,75])
Top_lim_1_1N = quant_1_1N[2] + 1.5*(quant_1_1N[2] - quant_1_1N[0])
Low_lim_1_1N = quant_1_1N[0] - 1.5*(quant_1_1N[2] - quant_1_1N[0])
bp_1 = plt.boxplot(Kit_PM_per_day_1N, positions = [1], widths = 0.6)
kitchen_pm_1N_outlier = []
for v,a in enumerate(Kit_PM_per_day_1N):
if a > Top_lim_1_1N or a < Low_lim_1_1N:
kitchen_pm_1N_outlier.append(K_PM_D_1N[v])
plt.text(1,a,K_PM_D_1N[v])
plt.text(1,0.1,'1N',color='b')
#2N
quant_1_2N = np.percentile(Kit_PM_per_day_2N, [25,50,75])
Top_lim_1_2N = quant_1_2N[2] + 1.5*(quant_1_2N[2] - quant_1_2N[0])
Low_lim_1_2N = quant_1_2N[0] - 1.5*(quant_1_2N[2] - quant_1_2N[0])
bp_1 = plt.boxplot(Kit_PM_per_day_2N,positions = [2], widths = 0.6)
kitchen_pm_2N_outlier = []
for v,a in enumerate(Kit_PM_per_day_2N):
if a > Top_lim_1_2N or a < Low_lim_1_2N:
kitchen_pm_2N_outlier.append(K_PM_D_2N[v])
plt.text(2,a,K_PM_D_2N[v])
plt.text(2,0.1,'2N', color= 'g')
#3N
quant_1_3N = np.percentile(Kit_PM_per_day_3N, [25,50,75])
Top_lim_1_3N = quant_1_3N[2] + 1.5*(quant_1_3N[2] - quant_1_3N[0])
Low_lim_1_3N = quant_1_3N[0] - 1.5*(quant_1_3N[2] - quant_1_3N[0])
kitchen_pm_3N_outlier = []
bp_1 = plt.boxplot(Kit_PM_per_day_3N,positions = [3], widths = 0.6)
count = 0
for v,a in enumerate(Kit_PM_per_day_3N):
if a > Top_lim_1_3N or a < Low_lim_1_3N:
kitchen_pm_3N_outlier.append(K_PM_D_3N[v])
count = count + 1
            if count == 3:
                plt.text(3, a, K_PM_D_3N[v], ha='left', va='bottom')
            elif count == 1:
                plt.text(3, a, K_PM_D_3N[v], ha='left', va='top')
            else:
                plt.text(3, a, K_PM_D_3N[v], ha='right', va='bottom')
plt.text(3,0.1,'3N', color='r')
#4N
quant_1_4N = np.percentile(Kit_PM_per_day_4N, [25,50,75])
Top_lim_1_4N = quant_1_4N[2] + 1.5*(quant_1_4N[2] - quant_1_4N[0])
Low_lim_1_4N = quant_1_4N[0] - 1.5*(quant_1_4N[2] - quant_1_4N[0])
bp_1 = plt.boxplot(Kit_PM_per_day_4N,positions = [4], widths = 0.6)
kitchen_pm_4N_outlier = []
for v,a in enumerate(Kit_PM_per_day_4N):
if a > Top_lim_1_4N or a < Low_lim_1_4N:
kitchen_pm_4N_outlier.append(K_PM_D_4N[v])
plt.text(4,a,K_PM_D_4N[v])
plt.text(4,0.1,'4N', color='y')
plt.xlim(0,5)
plt.ylim(0,1200)
print('Kitchen PM 1N had these values as outliers ', kitchen_pm_1N_outlier)
print('Kitchen PM 2N had these values as outliers ', kitchen_pm_2N_outlier)
print('Kitchen PM 3N had these values as outliers ', kitchen_pm_3N_outlier)
print('Kitchen PM 4N had these values as outliers ', kitchen_pm_4N_outlier)
plt.show()
# % change of PM per day
fig_2, ax2 = plt.subplots()
plt.title('% No_hood PM per Day Change' )
#plt.hold(True)
#2N to 1N
quant_1_2N_1N = np.percentile(Kit_per_day_2N_1N, [25,50,75])
Top_lim_1_2N_1N = quant_1_2N_1N[2] + 1.5*(quant_1_2N_1N[2]-quant_1_2N_1N[0])
Low_lim_1_2N_1N = quant_1_2N_1N[0] - 1.5*(quant_1_2N_1N[2]-quant_1_2N_1N[0])
bp_1_1 = plt.boxplot(Kit_per_day_2N_1N, positions=[1], widths= 0.6)
kitchen_pm_2N_1N_outlier = []
for v,a in enumerate(Kit_per_day_2N_1N):
if a > Top_lim_1_2N_1N or a < Low_lim_1_2N_1N:
kitchen_pm_2N_1N_outlier.append(K_PM_D_2N_1N[v])
plt.text(1, a, K_PM_D_2N_1N[v])
plt.text(0.5, -0.25, '2N / 1N', color= 'g')
#3N to 1N
quant_1_3N_1N = np.percentile(Kit_per_day_3N_1N, [25,50,75])
Top_lim_1_3N_1N = quant_1_3N_1N[2] + 1.5*(quant_1_3N_1N[2]-quant_1_3N_1N[0])
Low_lim_1_3N_1N = quant_1_3N_1N[0] - 1.5*(quant_1_3N_1N[2]-quant_1_3N_1N[0])
bp_1_1 = plt.boxplot(Kit_per_day_3N_1N, positions=[2], widths= 0.6)
kitchen_pm_3N_1N_outlier = []
for v,a in enumerate(Kit_per_day_3N_1N):
if a > Top_lim_1_3N_1N or a < Low_lim_1_3N_1N:
kitchen_pm_3N_1N_outlier.append(K_PM_D_3N_1N[v])
plt.text(2, a, K_PM_D_3N_1N[v])
plt.text(1.5, -0.25, '3N / 1N', color= 'r')
#4N to 1N
quant_1_4N_1N = np.percentile(Kit_per_day_4N_1N, [25,50,75])
Top_lim_1_4N_1N = quant_1_4N_1N[2] + 1.5*(quant_1_4N_1N[2]-quant_1_4N_1N[0])
Low_lim_1_4N_1N = quant_1_4N_1N[0] - 1.5*(quant_1_4N_1N[2]-quant_1_4N_1N[0])
bp_1_1 = plt.boxplot(Kit_per_day_4N_1N, positions=[3], widths= 0.6)
kitchen_pm_4N_1N_outlier = []
for v,a in enumerate(Kit_per_day_4N_1N):
if a > Top_lim_1_4N_1N or a < Low_lim_1_4N_1N:
kitchen_pm_4N_1N_outlier.append(K_PM_D_4N_1N[v])
plt.text(3, a, K_PM_D_4N_1N[v])
plt.text(2.5, -0.25, '4N / 1N', color= 'y')
#3N to 2N
quant_1_3N_2N = np.percentile(Kit_per_day_3N_2N, [25,50,75])
Top_lim_1_3N_2N = quant_1_3N_2N[2] + 1.5*(quant_1_3N_2N[2]-quant_1_3N_2N[0])
Low_lim_1_3N_2N = quant_1_3N_2N[0] - 1.5*(quant_1_3N_2N[2]-quant_1_3N_2N[0])
bp_1_1 = plt.boxplot(Kit_per_day_3N_2N, positions=[4], widths= 0.6)
kitchen_pm_3N_2N_outlier = []
for v,a in enumerate(Kit_per_day_3N_2N):
if a > Top_lim_1_3N_2N or a < Low_lim_1_3N_2N:
kitchen_pm_3N_2N_outlier.append(K_PM_D_3N_2N[v])
plt.text(4, a, K_PM_D_3N_2N[v])
plt.text(3.5, -0.25, '3N / 2N', color= 'm')
#4N to 3N
quant_1_4N_3N = np.percentile(Kit_per_day_4N_3N, [25,50,75])
Top_lim_1_4N_3N = quant_1_4N_3N[2] + 1.5*(quant_1_4N_3N[2]-quant_1_4N_3N[0])
Low_lim_1_4N_3N = quant_1_4N_3N[0] - 1.5*(quant_1_4N_3N[2]-quant_1_4N_3N[0])
bp_1_1 = plt.boxplot(Kit_per_day_4N_3N, positions=[5], widths= 0.6)
kitchen_pm_4N_3N_outlier = []
for v,a in enumerate(Kit_per_day_4N_3N):
if a > Top_lim_1_4N_3N or a < Low_lim_1_4N_3N:
kitchen_pm_4N_3N_outlier.append(K_PM_D_4N_3N[v])
plt.text(5, a, K_PM_D_4N_3N[v])
plt.text(4.5, -0.25, '4N / 3N', color= 'k')
#4N to 2N
quant_1_4N_2N = np.percentile(Kit_per_day_4N_2N, [25,50,75])
Top_lim_1_4N_2N = quant_1_4N_2N[2] + 1.5*(quant_1_4N_2N[2]-quant_1_4N_2N[0])
Low_lim_1_4N_2N = quant_1_4N_2N[0] - 1.5*(quant_1_4N_2N[2]-quant_1_4N_2N[0])
bp_1_1 = plt.boxplot(Kit_per_day_4N_2N, positions=[6], widths= 0.6)
kitchen_pm_4N_2N_outlier = []
for v,a in enumerate(Kit_per_day_4N_2N):
if a > Top_lim_1_4N_2N or a < Low_lim_1_4N_2N:
kitchen_pm_4N_2N_outlier.append(K_PM_D_4N_2N[v])
plt.text(6, a, K_PM_D_4N_2N[v])
plt.text(5.5, -0.25, '4N / 2N', color= 'tab:orange')
plt.xlim(0,7)
plt.ylim(-0.5,5)
print('Kitchen PM 2N/1N had these values as outliers ', kitchen_pm_2N_1N_outlier)
print('Kitchen PM 3N/1N had these values as outliers ', kitchen_pm_3N_1N_outlier)
print('Kitchen PM 4N/1N had these values as outliers ', kitchen_pm_4N_1N_outlier)
print('Kitchen PM 3N/2N had these values as outliers ', kitchen_pm_3N_2N_outlier)
print('Kitchen PM 4N/3N had these values as outliers ', kitchen_pm_4N_3N_outlier)
print('Kitchen PM 4N/2N had these values as outliers ', kitchen_pm_4N_2N_outlier)
plt.show()
#adding averages to the tables
quant_1_1N = np.append(quant_1_1N, np.average(Kit_PM_per_day_1N))
quant_1_2N = np.append(quant_1_2N, np.average(Kit_PM_per_day_2N))
quant_1_3N = np.append(quant_1_3N, np.average(Kit_PM_per_day_3N))
quant_1_4N = np.append(quant_1_4N, np.average(Kit_PM_per_day_4N))
D_50_quant_phase_PM_d = {'Percentile %': ['25','50','75', 'Avg'], '1N': quant_1_1N, '2N': quant_1_2N,'3N' : quant_1_3N,'4N': quant_1_4N}
PM_D_50_phase_no_hood = pd.DataFrame(data=D_50_quant_phase_PM_d,columns=['Percentile %','1N', '2N', '3N','4N'])
quant_1_2N_1N = np.append(quant_1_2N_1N , np.average(Kit_per_day_2N_1N))
quant_1_3N_1N = np.append(quant_1_3N_1N , np.average(Kit_per_day_3N_1N))
quant_1_4N_1N = np.append(quant_1_4N_1N , np.average(Kit_per_day_4N_1N))
quant_1_3N_2N = np.append(quant_1_3N_2N , np.average(Kit_per_day_3N_2N))
quant_1_4N_3N = np.append(quant_1_4N_3N , np.average(Kit_per_day_4N_3N))
quant_1_4N_2N = np.append(quant_1_4N_2N , np.average(Kit_per_day_4N_2N))
D_50_quant_percent_PM_d ={'Percentile %': ['25','50','75', 'Avg'],'2N / 1N': quant_1_2N_1N,'3N / 1N': quant_1_3N_1N,'4N / 1N': quant_1_4N_1N,
'3N / 2N': quant_1_3N_2N,'4N / 3N': quant_1_4N_3N,'4N / 2N': quant_1_4N_2N}
PM_D_50_percent_change_no_hood = pd.DataFrame(data=D_50_quant_percent_PM_d, columns=['Percentile %','2N / 1N','3N / 1N', '4N / 1N'
,'3N / 2N','4N / 3N','4N / 2N'])
print(PM_D_50_phase_no_hood)
print(PM_D_50_percent_change_no_hood)
# hood Pm per day
if Hood_or_no == 'hood':
Kit_PM_per_day_1H = []
K_PM_D_1H = []
Kit_PM_per_day_2H = []
K_PM_D_2H = []
Kit_PM_per_day_3H = []
K_PM_D_3H = []
count_t = 0
count_pm = 0
for c in hood_counter:
if c == (Household_removal[count_t] - C_Place_holder):
count_t = count_t + 1
if count_t == len(Household_removal):
count_t = 0
continue
if c == (Household_removal_Hood_PM[count_pm] - C_Place_holder):
count_pm = count_pm + 1
if count_pm == len(Household_removal_Hood_PM):
count_pm = 0
continue
# if Day_1H.iloc[c,7] != -1.00:
# Kit_PM_per_day_1H.append(Day_1H.iloc[c,7]/Day_1H.iloc[c,1])
# K_PM_D_1H.append(Day_1H.iloc[c,0])
if Kit_PM_1H_24hr.iloc[c, 6] != -1.00:
Kit_PM_per_day_1H.append(Kit_PM_1H_24hr.iloc[c,6])
K_PM_D_1H.append(Kit_PM_1H_24hr.iloc[c,0])
# if Day_2H.iloc[c, 7] != -1.00:
# Kit_PM_per_day_2H.append(Day_2H.iloc[c,7]/Day_2H.iloc[c,1])
# K_PM_D_2H.append(Day_2H.iloc[c,0])
if Kit_PM_2H_24hr.iloc[c, 6] != -1.00:
Kit_PM_per_day_2H.append(Kit_PM_2H_24hr.iloc[c,6])
K_PM_D_2H.append(Kit_PM_2H_24hr.iloc[c,0])
# if Day_3H.iloc[c, 7] != -1.00:
# Kit_PM_per_day_3H.append(Day_3H.iloc[c,7]/Day_3H.iloc[c,1])
# K_PM_D_3H.append(Day_3H.iloc[c, 0])
if Kit_PM_3H_24hr.iloc[c, 6] != -1.00:
Kit_PM_per_day_3H.append(Kit_PM_3H_24hr.iloc[c,6])
K_PM_D_3H.append(Kit_PM_3H_24hr.iloc[c,0])
# percentages Between Phases of kitchen PM per day
Kit_per_day_2H_1H = []
K_PM_D_2H_1H = []
Kit_per_day_3H_1H = []
K_PM_D_3H_1H = []
Kit_per_day_3H_2H = []
K_PM_D_3H_2H = []
count_t = 0
count_pm = 0
    for c in hood_counter:
if c == (Household_removal[count_t] - C_Place_holder):
count_t = count_t + 1
if count_t == len(Household_removal):
count_t = 0
continue
if c == (Household_removal_Hood_PM[count_pm] - C_Place_holder):
count_pm = count_pm + 1
if count_pm == len(Household_removal_Hood_PM):
count_pm = 0
continue
if (len(Kit_PM_per_day_2H)-1) >= c and (len(Kit_PM_per_day_1H)-1) >= c:
#if Day_1H.iloc[c,7] > 0 and Day_2H.iloc[c,7] > 0 and Day_1H.iloc[c,0] == Day_2H.iloc[c,0]:
if Kit_PM_1H_24hr.iloc[c, 6] > 0 and Kit_PM_2H_24hr.iloc[c, 6] > 0 and Kit_PM_1H_24hr.iloc[c, 0] == Kit_PM_2H_24hr.iloc[c, 0]:
Kit_per_day_2H_1H.append(Kit_PM_per_day_2H[c]/Kit_PM_per_day_1H[c])
K_PM_D_2H_1H.append(Day_1H.iloc[c,0])
if (len(Kit_PM_per_day_3H)-1) >= c and (len(Kit_PM_per_day_1H)-1) >= c:
#if Day_3H.iloc[c,7] > 0 and Day_1H.iloc[c,7] > 0 and Day_3H.iloc[c,0] == Day_1H.iloc[c,0]:
if Kit_PM_3H_24hr.iloc[c, 6] > 0 and Kit_PM_1H_24hr.iloc[c, 6] > 0 and Kit_PM_1H_24hr.iloc[c, 0] == \
Kit_PM_3H_24hr.iloc[c, 0]:
Kit_per_day_3H_1H.append(Kit_PM_per_day_3H[c]/Kit_PM_per_day_1H[c])
K_PM_D_3H_1H.append(Day_1H.iloc[c,0])
if (len(Kit_PM_per_day_3H)-1) >= c and (len(Kit_PM_per_day_2H)-1) >= c:
#if Day_3H.iloc[c,7] > 0 and Day_2H.iloc[c,7] > 0 and Day_3H.iloc[c,0] == Day_2H.iloc[c,0]:
if Kit_PM_3H_24hr.iloc[c, 6] > 0 and Kit_PM_2H_24hr.iloc[c, 6] > 0 and Kit_PM_3H_24hr.iloc[c, 0] == \
Kit_PM_2H_24hr.iloc[c, 0]:
Kit_per_day_3H_2H.append(Kit_PM_per_day_3H[c]/Kit_PM_per_day_2H[c])
K_PM_D_3H_2H.append(Day_2H.iloc[c,0])
# now for box plotting for Kitchen PM per day percent changes
#2H to 1H
sns.set(style="ticks")
f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={"height_ratios": (0.15, 0.85)})
sns.boxplot(Kit_per_day_2H_1H, ax=ax_box, color='g')
sns.distplot(Kit_per_day_2H_1H, ax=ax_hist, color='g')
ax_box.set(yticks=[])
sns.despine(ax=ax_hist)
sns.despine(ax=ax_box, left=True)
plt.title('% 2H/1H (Kitchen PM per Day)')
plt.ylim(top=1.5)
plt.ylim(bottom = 0)
#3H to 1H
sns.set(style="ticks")
f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={"height_ratios": (0.15, 0.85)})
sns.boxplot(Kit_per_day_3H_1H, ax=ax_box, color='r')
sns.distplot(Kit_per_day_3H_1H, ax=ax_hist, color='r')
ax_box.set(yticks=[])
sns.despine(ax=ax_hist)
sns.despine(ax=ax_box, left=True)
plt.title('% 3H/1H (Kitchen PM per Day)')
plt.ylim(top=2)
plt.ylim(bottom = 0)
#3H to 2H
sns.set(style="ticks")
f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={"height_ratios": (0.15, 0.85)})
sns.boxplot(Kit_per_day_3H_2H, ax=ax_box, color='m')
sns.distplot(Kit_per_day_3H_2H, ax=ax_hist, color='m')
ax_box.set(yticks=[])
sns.despine(ax=ax_hist)
sns.despine(ax=ax_box, left=True)
plt.title('% 3H/2H (Kitchen PM per Day)')
plt.ylim(top=2)
plt.ylim(bottom = 0)
#Plotting on the same graph
fig, ax = plt.subplots()
plt.title('Hood Kitchen PM per day')
#1H
quant_1_1H = np.percentile(Kit_PM_per_day_1H, [25,50,75])
Top_lim_1_1H = quant_1_1H[2] + 1.5*(quant_1_1H[2] - quant_1_1H[0])
Low_lim_1_1H = quant_1_1H[0] - 1.5*(quant_1_1H[2] - quant_1_1H[0])
bp_1 = plt.boxplot(Kit_PM_per_day_1H, positions = [1], widths = 0.6)
kitchen_pm_1H_outlier = []
for v,a in enumerate(Kit_PM_per_day_1H):
if a > Top_lim_1_1H or a < Low_lim_1_1H:
kitchen_pm_1H_outlier.append(K_PM_D_1H[v])
plt.text(1,a,K_PM_D_1H[v])
plt.text(0.5,0.1,'1H',color='b')
    #2H
    quant_1_2H = np.percentile(Kit_PM_per_day_2H, [25,50,75])
    Top_lim_1_2H = quant_1_2H[2] + 1.5*(quant_1_2H[2] - quant_1_2H[0])
    Low_lim_1_2H = quant_1_2H[0] - 1.5*(quant_1_2H[2] - quant_1_2H[0])
    bp_1 = plt.boxplot(Kit_PM_per_day_2H, positions=[2], widths=0.6)
    kitchen_pm_2H_outlier = []
    for v, a in enumerate(Kit_PM_per_day_2H):
        if a > Top_lim_1_2H or a < Low_lim_1_2H:
            kitchen_pm_2H_outlier.append(K_PM_D_2H[v])
            plt.text(2, a, K_PM_D_2H[v])
    plt.text(1.5, 0.1, '2H', color='g')
    #3H
    quant_1_3H = np.percentile(Kit_PM_per_day_3H, [25,50,75])
    Top_lim_1_3H = quant_1_3H[2] + 1.5*(quant_1_3H[2] - quant_1_3H[0])
    Low_lim_1_3H = quant_1_3H[0] - 1.5*(quant_1_3H[2] - quant_1_3H[0])
    bp_1 = plt.boxplot(Kit_PM_per_day_3H, positions=[3], widths=0.6)
    kitchen_pm_3H_outlier = []
    for v, a in enumerate(Kit_PM_per_day_3H):
        if a > Top_lim_1_3H or a < Low_lim_1_3H:
            kitchen_pm_3H_outlier.append(K_PM_D_3H[v])
            plt.text(3, a, K_PM_D_3H[v])
plt.text(2.5,0.1,'3H', color='r')
plt.xlim(0,4)
plt.ylim(0,1200)
print('Kitchen PM 1H had these values as outliers ', kitchen_pm_1H_outlier)
print('Kitchen PM 2H had these values as outliers ', kitchen_pm_2H_outlier)
print('Kitchen PM 3H had these values as outliers ', kitchen_pm_3H_outlier)
plt.show()
# % change of PM per day
fig_2, ax2 = plt.subplots()
    plt.title('% Hood PM per Day Change')
#plt.hold(True)
#2H to 1H
    quant_1_2H_1H = np.percentile(Kit_per_day_2H_1H, [25,50,75])
    Top_lim_1_2H_1H = quant_1_2H_1H[2] + 1.5*(quant_1_2H_1H[2]-quant_1_2H_1H[0])
    Low_lim_1_2H_1H = quant_1_2H_1H[0] - 1.5*(quant_1_2H_1H[2]-quant_1_2H_1H[0])
    bp_1_1 = plt.boxplot(Kit_per_day_2H_1H, positions=[1], widths=0.6)
    kitchen_pm_2H_1H_outlier = []
    for v, a in enumerate(Kit_per_day_2H_1H):
        if a > Top_lim_1_2H_1H or a < Low_lim_1_2H_1H:
            kitchen_pm_2H_1H_outlier.append(K_PM_D_2H_1H[v])
            plt.text(1, a, K_PM_D_2H_1H[v])
plt.text(0.75, -0.25, '2H / 1H', color= 'g')
#3H to 1H
    quant_1_3H_1H = np.percentile(Kit_per_day_3H_1H, [25,50,75])
    Top_lim_1_3H_1H = quant_1_3H_1H[2] + 1.5*(quant_1_3H_1H[2]-quant_1_3H_1H[0])
    Low_lim_1_3H_1H = quant_1_3H_1H[0] - 1.5*(quant_1_3H_1H[2]-quant_1_3H_1H[0])
    bp_1_1 = plt.boxplot(Kit_per_day_3H_1H, positions=[2], widths=0.6)
    kitchen_pm_3H_1H_outlier = []
    for v, a in enumerate(Kit_per_day_3H_1H):
        if a > Top_lim_1_3H_1H or a < Low_lim_1_3H_1H:
            kitchen_pm_3H_1H_outlier.append(K_PM_D_3H_1H[v])
            plt.text(2, a, K_PM_D_3H_1H[v])
plt.text(1.75, -0.25, '3H / 1H', color= 'r')
#3H to 2H
    quant_1_3H_2H = np.percentile(Kit_per_day_3H_2H, [25,50,75])
    Top_lim_1_3H_2H = quant_1_3H_2H[2] + 1.5*(quant_1_3H_2H[2]-quant_1_3H_2H[0])
    Low_lim_1_3H_2H = quant_1_3H_2H[0] - 1.5*(quant_1_3H_2H[2]-quant_1_3H_2H[0])
    bp_1_1 = plt.boxplot(Kit_per_day_3H_2H, positions=[3], widths=0.6)
    kitchen_pm_3H_2H_outlier = []
    for v, a in enumerate(Kit_per_day_3H_2H):
        if a > Top_lim_1_3H_2H or a < Low_lim_1_3H_2H:
            kitchen_pm_3H_2H_outlier.append(K_PM_D_3H_2H[v])
            plt.text(3, a, K_PM_D_3H_2H[v])
plt.text(2.75, -0.25, '3H / 2H', color= 'm')
plt.xlim(0,4)
plt.ylim(-0.5,5)
print('Kitchen PM 2H/1H had these values as outliers ', kitchen_pm_2H_1H_outlier)
print('Kitchen PM 3H/1H had these values as outliers ', kitchen_pm_3H_1H_outlier)
print('Kitchen PM 3H/2H had these values as outliers ', kitchen_pm_3H_2H_outlier)
plt.show()
quant_1_1H = np.append(quant_1_1H, np.average(Kit_PM_per_day_1H))
quant_1_2H = np.append(quant_1_2H, np.average(Kit_PM_per_day_2H))
quant_1_3H = np.append(quant_1_3H, np.average(Kit_PM_per_day_3H))
D_50_quant_phase_PM_D_hood = {'Percentile %': ['25','50','75', 'Avg'], '1H': quant_1_1H, '2H': quant_1_2H,'3H' : quant_1_3H}
PM_D_50_phase_hood = pd.DataFrame(data=D_50_quant_phase_PM_D_hood, columns= ['Percentile %','1H','2H','3H' ])
quant_1_2H_1H = np.append(quant_1_2H_1H , np.average(Kit_per_day_2H_1H))
quant_1_3H_1H = np.append(quant_1_3H_1H , np.average(Kit_per_day_3H_1H))
quant_1_3H_2H = np.append(quant_1_3H_2H , np.average(Kit_per_day_3H_2H))
D_50_quant_percent_PM_D_hood ={'Percentile %': ['25','50','75', 'Avg'],'2H / 1H': quant_1_2H_1H,'3H / 1H': quant_1_3H_1H,'3H / 2H': quant_1_3H_2H}
PM_D_50_percent_change_hood = pd.DataFrame(data=D_50_quant_percent_PM_D_hood, columns=['Percentile %','2H / 1H','3H / 1H','3H / 2H'])
print(PM_D_50_phase_hood)
print(PM_D_50_percent_change_hood)
# when I am ready to transfer to a data frame and get the differences
#histograms for the comparison
if Hood_or_no == 'no_hood':
plt.title('Histogram of Fuel per 24 Hours per Person - No Hood' )
plt.hist([Fuel_per_day_per_adult_1N],
color=['b'], alpha=0.5, label='1N')
plt.hist([Fuel_per_day_per_adult_2N],
color=['g'], alpha=0.5, label='2N')
plt.hist([Fuel_per_day_per_adult_3N],
color=['r'], alpha=0.5, label='3N')
plt.hist([Fuel_per_day_per_adult_4N],
color=['y'], alpha=0.5, label='4N')
plt.legend(loc='upper right')
plt.show()
plt.title('Histogram of Kitchen PM 24 Hours - No Hood' )
plt.hist([Kit_PM_per_day_1N],
color=['b'], alpha=0.5, label='1N')
plt.hist([Kit_PM_per_day_2N],
color=['g'], alpha=0.5, label='2N')
plt.hist([Kit_PM_per_day_3N],
color=['r'], alpha=0.5, label='3N')
plt.hist([Kit_PM_per_day_4N],
color=['y'], alpha=0.5, label='4N')
plt.legend(loc='upper right')
plt.show()
if Hood_or_no == 'hood':
plt.title('Histogram of Fuel per 24 Hours per Person - Hood' )
plt.hist([Fuel_per_day_per_adult_1H],
color=['b'], alpha=0.5, label='1H')
plt.hist([Fuel_per_day_per_adult_2H],
color=['g'], alpha=0.5, label='2H')
plt.hist([Fuel_per_day_per_adult_3H],
color=['r'], alpha=0.5, label='3H')
plt.legend(loc='upper right')
plt.show()
plt.title('Histogram of Kitchen PM 24 Hours - Hood' )
plt.hist([Kit_PM_per_day_1H],
color=['b'], alpha=0.5, label='1H')
plt.hist([Kit_PM_per_day_2H],
color=['g'], alpha=0.5, label='2H')
plt.hist([Kit_PM_per_day_3H],
color=['r'], alpha=0.5, label='3H')
plt.legend(loc='upper right')
plt.show()
|
"""Test torch algo utility functions."""
import numpy as np
import pytest
import tensorflow as tf
import torch
import torch.nn.functional as F
import metarl.tf.misc.tensor_utils as tf_utils
import metarl.torch.algos._utils as torch_algo_utils
from tests.fixtures import TfGraphTestCase
def stack(d, arr):
"""Stack 'arr' 'd' times."""
return np.repeat(np.expand_dims(arr, axis=0), repeats=d, axis=0)
ONES = np.ones((4, 6))
ZEROS = np.zeros((4, 6))
ARRANGE = stack(4, np.arange(6))
PI_DIGITS = stack(4, [3, 1, 4, 1, 5, 9])
E_DIGITS = stack(4, [2, 7, 1, 8, 2, 8])
FIBS = stack(4, [1, 1, 2, 3, 5, 8])
nums_1d = np.arange(0, 4).astype(float)
nums_2d = np.arange(0, 4).astype(float).reshape(2, 2)
nums_3d = np.arange(0, 8).astype(float).reshape(2, 2, 2)
class TestTorchAlgoUtils(TfGraphTestCase):
"""Test class for torch algo utility functions."""
# yapf: disable
@pytest.mark.parametrize('gae_lambda, rewards_val, baselines_val', [
(0.4, ONES, ZEROS),
(0.8, PI_DIGITS, ARRANGE),
(1.2, ONES, FIBS),
(1.7, E_DIGITS, PI_DIGITS),
])
# yapf: enable
    def test_compute_advantages(self, gae_lambda, rewards_val, baselines_val):
        """Test compute_advantages function."""
discount = 0.99
max_len = rewards_val.shape[-1]
torch_advs = torch_algo_utils.compute_advantages(
discount, gae_lambda, max_len, torch.Tensor(baselines_val),
torch.Tensor(rewards_val))
rewards = tf.compat.v1.placeholder(dtype=tf.float32,
name='reward',
shape=[None, None])
baselines = tf.compat.v1.placeholder(dtype=tf.float32,
name='baseline',
shape=[None, None])
adv = tf_utils.compute_advantages(discount, gae_lambda, max_len,
baselines, rewards)
tf_advs = self.sess.run(adv,
feed_dict={
rewards: rewards_val,
baselines: baselines_val,
})
assert np.allclose(torch_advs.numpy(),
tf_advs.reshape(torch_advs.shape),
atol=1e-5)
def test_add_padding_last_1d(self):
"""Test pad_to_last function for 1d."""
max_length = 10
expected = F.pad(torch.Tensor(nums_1d),
(0, max_length - nums_1d.shape[-1]))
tensor_padding = torch_algo_utils.pad_to_last(nums_1d,
total_length=max_length)
assert expected.eq(tensor_padding).all()
tensor_padding = torch_algo_utils.pad_to_last(nums_1d,
total_length=10,
axis=0)
assert expected.eq(tensor_padding).all()
def test_add_padding_last_2d(self):
"""Test pad_to_last function for 2d."""
max_length = 10
tensor_padding = torch_algo_utils.pad_to_last(nums_2d, total_length=10)
expected = F.pad(torch.Tensor(nums_2d),
(0, max_length - nums_2d.shape[-1]))
assert expected.eq(tensor_padding).all()
tensor_padding = torch_algo_utils.pad_to_last(nums_2d,
total_length=10,
axis=0)
expected = F.pad(torch.Tensor(nums_2d),
(0, 0, 0, max_length - nums_2d.shape[0]))
assert expected.eq(tensor_padding).all()
tensor_padding = torch_algo_utils.pad_to_last(nums_2d,
total_length=10,
axis=1)
expected = F.pad(torch.Tensor(nums_2d),
(0, max_length - nums_2d.shape[-1], 0, 0))
assert expected.eq(tensor_padding).all()
def test_add_padding_last_3d(self):
"""Test pad_to_last function for 3d."""
max_length = 10
tensor_padding = torch_algo_utils.pad_to_last(nums_3d, total_length=10)
expected = F.pad(torch.Tensor(nums_3d),
(0, max_length - nums_3d.shape[-1], 0, 0, 0, 0))
assert expected.eq(tensor_padding).all()
tensor_padding = torch_algo_utils.pad_to_last(nums_3d,
total_length=10,
axis=0)
expected = F.pad(torch.Tensor(nums_3d),
(0, 0, 0, 0, 0, max_length - nums_3d.shape[0]))
assert expected.eq(tensor_padding).all()
tensor_padding = torch_algo_utils.pad_to_last(nums_3d,
total_length=10,
axis=1)
expected = F.pad(torch.Tensor(nums_3d),
(0, 0, 0, max_length - nums_3d.shape[-1], 0, 0))
assert expected.eq(tensor_padding).all()
tensor_padding = torch_algo_utils.pad_to_last(nums_3d,
total_length=10,
axis=2)
expected = F.pad(torch.Tensor(nums_3d),
(0, max_length - nums_3d.shape[-1], 0, 0, 0, 0))
assert expected.eq(tensor_padding).all()
@pytest.mark.parametrize('nums', [nums_1d, nums_2d, nums_3d])
def test_out_of_index_error(self, nums):
"""Test pad_to_last raises IndexError."""
with pytest.raises(IndexError):
torch_algo_utils.pad_to_last(nums,
total_length=10,
axis=len(nums.shape))
    def test_make_optimizer_with_type(self):
        """Test make_optimizer function with type as first argument."""
optimizer_type = torch.optim.Adam
module = torch.nn.Linear(2, 1)
lr = 0.123
optimizer = torch_algo_utils.make_optimizer(optimizer_type,
module,
lr=lr)
assert isinstance(optimizer, optimizer_type)
assert optimizer.defaults['lr'] == lr
    def test_make_optimizer_with_tuple(self):
        """Test make_optimizer function with tuple as first argument."""
        optimizer_type = (torch.optim.Adam, {'lr': 0.1})
        module = torch.nn.Linear(2, 1)
        optimizer = torch_algo_utils.make_optimizer(optimizer_type, module)
        # the tuple's first element is the optimizer class itself
        assert isinstance(optimizer, optimizer_type[0])
        assert optimizer.defaults['lr'] == optimizer_type[1]['lr']
    def test_make_optimizer_raise_value_error(self):
"""Test make_optimizer raises value error."""
optimizer_type = (torch.optim.Adam, {'lr': 0.1})
module = torch.nn.Linear(2, 1)
with pytest.raises(ValueError):
_ = torch_algo_utils.make_optimizer(optimizer_type,
module,
lr=0.123)
|
"""
Projective plane curves over a general ring
AUTHORS:
- William Stein (2005-11-13)
- David Joyner (2005-11-13)
- David Kohel (2006-01)
- Moritz Minzlaff (2010-11)
"""
#*****************************************************************************
# Copyright (C) 2005 William Stein <wstein@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.interfaces.all import singular
from sage.misc.all import add, sage_eval
from sage.rings.all import degree_lowest_rational_function, PolynomialRing
from sage.schemes.projective.projective_space import is_ProjectiveSpace
from curve import Curve_generic_projective
class ProjectiveSpaceCurve_generic(Curve_generic_projective):
def _repr_type(self):
return "Projective Space"
def __init__(self, A, X):
if not is_ProjectiveSpace(A):
raise TypeError("A (=%s) must be a projective space"%A)
Curve_generic_projective.__init__(self, A, X)
d = self.dimension()
if d != 1:
raise ValueError("defining equations (=%s) define a scheme of dimension %s != 1"%(X,d))
class ProjectiveCurve_generic(Curve_generic_projective):
def __init__(self, A, f):
        if not (is_ProjectiveSpace(A) and A.dimension() == 2):
            raise TypeError("Argument A (= %s) must be a projective plane."%A)
Curve_generic_projective.__init__(self, A, [f])
def _repr_type(self):
return "Projective"
def arithmetic_genus(self):
r"""
Return the arithmetic genus of this curve.
This is the arithmetic genus `g_a(C)` as defined in
Hartshorne. If the curve has degree `d` then this is simply
`(d-1)(d-2)/2`. It need *not* equal the geometric genus
(the genus of the normalization of the curve).
EXAMPLE::
sage: x,y,z = PolynomialRing(GF(5), 3, 'xyz').gens()
sage: C = Curve(y^2*z^7 - x^9 - x*z^8); C
Projective Curve over Finite Field of size 5 defined by -x^9 + y^2*z^7 - x*z^8
sage: C.arithmetic_genus()
28
sage: C.genus()
4
"""
d = self.defining_polynomial().total_degree()
return int((d-1)*(d-2)/2)
def divisor_of_function(self, r):
"""
Return the divisor of a function on a curve.
INPUT: r is a rational function on X
OUTPUT:
- ``list`` - The divisor of r represented as a list of
coefficients and points. (TODO: This will change to a more
structural output in the future.)
EXAMPLES::
sage: FF = FiniteField(5)
sage: P2 = ProjectiveSpace(2, FF, names = ['x','y','z'])
sage: R = P2.coordinate_ring()
sage: x, y, z = R.gens()
sage: f = y^2*z^7 - x^9 - x*z^8
sage: C = Curve(f)
sage: K = FractionField(R)
sage: r = 1/x
sage: C.divisor_of_function(r) # todo: not implemented !!!!
[[-1, (0, 0, 1)]]
sage: r = 1/x^3
sage: C.divisor_of_function(r) # todo: not implemented !!!!
[[-3, (0, 0, 1)]]
"""
F = self.base_ring()
f = self.defining_polynomial()
x, y, z = f.parent().gens()
pnts = self.rational_points()
divf = []
for P in pnts:
if P[2] != F(0):
                # expand to precision 5 around P; r is then evaluated on the local coordinates
lcs = self.local_coordinates(P,5)
ldg = degree_lowest_rational_function(r(lcs[0],lcs[1]),z)
if ldg[0] != 0:
divf.append([ldg[0],P])
return divf
def local_coordinates(self, pt, n):
r"""
Return local coordinates to precision n at the given point.
        Behaviour is flaky - some choices of `n` are worse than
        others.
INPUT:
- ``pt`` - an F-rational point on X which is not a
point of ramification for the projection (x,y) - x.
- ``n`` - the number of terms desired
OUTPUT: x = x0 + t y = y0 + power series in t
EXAMPLES::
sage: FF = FiniteField(5)
sage: P2 = ProjectiveSpace(2, FF, names = ['x','y','z'])
sage: x, y, z = P2.coordinate_ring().gens()
sage: C = Curve(y^2*z^7-x^9-x*z^8)
sage: pt = C([2,3,1])
sage: C.local_coordinates(pt,9) # todo: not implemented !!!!
[2 + t, 3 + 3*t^2 + t^3 + 3*t^4 + 3*t^6 + 3*t^7 + t^8 + 2*t^9 + 3*t^11 + 3*t^12]
"""
f = self.defining_polynomial()
R = f.parent()
F = self.base_ring()
p = F.characteristic()
x0 = F(pt[0])
y0 = F(pt[1])
astr = ["a"+str(i) for i in range(1,2*n)]
x,y = R.gens()
R0 = PolynomialRing(F,2*n+2,names = [str(x),str(y),"t"]+astr)
vars0 = R0.gens()
t = vars0[2]
yt = y0*t**0 + add([vars0[i]*t**(i-2) for i in range(3,2*n+2)])
xt = x0+t
ft = f(xt,yt)
S = singular
S.eval('ring s = '+str(p)+','+str(R0.gens())+',lp;')
S.eval('poly f = '+str(ft))
cmd = 'matrix c = coeffs ('+str(ft)+',t)'
S.eval(cmd)
N = int(S.eval('size(c)'))
b = ["c["+str(i)+",1]," for i in range(2,N/2-4)]
b = ''.join(b)
b = b[:len(b)-1] #to cut off the trailing comma
cmd = 'ideal I = '+b
S.eval(cmd)
c = S.eval('slimgb(I)')
d = c.split("=")
d = d[1:]
d[len(d)-1] += "\n"
e = [x[:x.index("\n")] for x in d]
vals = []
for x in e:
for y in vars0:
if str(y) in x:
if len(x.replace(str(y),"")) != 0:
i = x.find("-")
if i>0:
vals.append([eval(x[1:i]),x[:i],F(eval(x[i+1:]))])
i = x.find("+")
if i>0:
vals.append([eval(x[1:i]),x[:i],-F(eval(x[i+1:]))])
else:
vals.append([eval(str(y)[1:]),str(y),F(0)])
vals.sort()
k = len(vals)
v = [x0+t,y0+add([vals[i][2]*t**(i+1) for i in range(k)])]
return v
def plot(self, *args, **kwds):
"""
Plot the real points of an affine patch of this projective
plane curve.
INPUT:
- ``self`` - an affine plane curve
- ``patch`` - (optional) the affine patch to be plotted; if not
specified, the patch corresponding to the last projective
coordinate being nonzero
- ``*args`` - optional tuples (variable, minimum, maximum) for
plotting dimensions
- ``**kwds`` - optional keyword arguments passed on to
``implicit_plot``
EXAMPLES:
A cuspidal curve::
sage: R.<x, y, z> = QQ[]
sage: C = Curve(x^3 - y^2*z)
sage: C.plot()
Graphics object consisting of 1 graphics primitive
The other affine patches of the same curve::
sage: C.plot(patch=0)
Graphics object consisting of 1 graphics primitive
sage: C.plot(patch=1)
Graphics object consisting of 1 graphics primitive
An elliptic curve::
sage: E = EllipticCurve('101a')
sage: C = Curve(E)
sage: C.plot()
Graphics object consisting of 1 graphics primitive
sage: C.plot(patch=0)
Graphics object consisting of 1 graphics primitive
sage: C.plot(patch=1)
Graphics object consisting of 1 graphics primitive
A hyperelliptic curve::
sage: P.<x> = QQ[]
sage: f = 4*x^5 - 30*x^3 + 45*x - 22
sage: C = HyperellipticCurve(f)
sage: C.plot()
Graphics object consisting of 1 graphics primitive
sage: C.plot(patch=0)
Graphics object consisting of 1 graphics primitive
sage: C.plot(patch=1)
Graphics object consisting of 1 graphics primitive
"""
# if user hasn't specified a favourite affine patch, take the
# one avoiding "infinity", i.e. the one corresponding to the
# last projective coordinate being nonzero
patch = kwds.pop('patch', self.ngens() - 1)
from constructor import Curve
C = Curve(self.affine_patch(patch))
return C.plot(*args, **kwds)
def is_singular(C):
r"""
Returns whether the curve is singular or not.
EXAMPLES:
Over `\QQ`::
sage: F = QQ
sage: P2.<X,Y,Z> = ProjectiveSpace(F,2)
sage: C = Curve(X^3-Y^2*Z)
sage: C.is_singular()
True
Over a finite field::
sage: F = GF(19)
sage: P2.<X,Y,Z> = ProjectiveSpace(F,2)
sage: C = Curve(X^3+Y^3+Z^3)
sage: C.is_singular()
False
sage: D = Curve(X^4-X*Z^3)
sage: D.is_singular()
True
sage: E = Curve(X^5+19*Y^5+Z^5)
sage: E.is_singular()
True
sage: E = Curve(X^5+9*Y^5+Z^5)
sage: E.is_singular()
False
Over `\CC`::
sage: F = CC
sage: P2.<X,Y,Z> = ProjectiveSpace(F,2)
sage: C = Curve(X)
sage: C.is_singular()
False
sage: D = Curve(Y^2*Z-X^3)
sage: D.is_singular()
True
sage: E = Curve(Y^2*Z-X^3+Z^3)
sage: E.is_singular()
False
Showing that ticket #12187 is fixed::
sage: F.<X,Y,Z> = GF(2)[]
sage: G = Curve(X^2+Y*Z)
sage: G.is_singular()
False
"""
        poly = C.defining_polynomial()
        # the curve is singular iff the ideal generated by the partial
        # derivatives together with the polynomial itself has positive
        # dimension, i.e. the singular locus is nonempty
        return poly.parent().ideal(poly.gradient() + [poly]).dimension() > 0
class ProjectiveCurve_finite_field(ProjectiveCurve_generic):
def rational_points_iterator(self):
r"""
Return a generator object for the rational points on this curve.
INPUT:
- ``self`` -- a projective curve
OUTPUT:
A generator of all the rational points on the curve defined over its base field.
EXAMPLE::
sage: F = GF(37)
sage: P2.<X,Y,Z> = ProjectiveSpace(F,2)
sage: C = Curve(X^7+Y*X*Z^5*55+Y^7*12)
sage: len(list(C.rational_points_iterator()))
37
::
sage: F = GF(2)
sage: P2.<X,Y,Z> = ProjectiveSpace(F,2)
sage: C = Curve(X*Y*Z)
sage: a = C.rational_points_iterator()
sage: next(a)
(1 : 0 : 0)
sage: next(a)
(0 : 1 : 0)
sage: next(a)
(1 : 1 : 0)
sage: next(a)
(0 : 0 : 1)
sage: next(a)
(1 : 0 : 1)
sage: next(a)
(0 : 1 : 1)
sage: next(a)
Traceback (most recent call last):
...
StopIteration
::
sage: F = GF(3^2,'a')
sage: P2.<X,Y,Z> = ProjectiveSpace(F,2)
sage: C = Curve(X^3+5*Y^2*Z-33*X*Y*X)
sage: b = C.rational_points_iterator()
sage: next(b)
(0 : 1 : 0)
sage: next(b)
(0 : 0 : 1)
sage: next(b)
(2*a + 2 : a : 1)
sage: next(b)
(2 : a + 1 : 1)
sage: next(b)
(a + 1 : 2*a + 1 : 1)
sage: next(b)
(1 : 2 : 1)
sage: next(b)
(2*a + 2 : 2*a : 1)
sage: next(b)
(2 : 2*a + 2 : 1)
sage: next(b)
(a + 1 : a + 2 : 1)
sage: next(b)
(1 : 1 : 1)
sage: next(b)
Traceback (most recent call last):
...
StopIteration
"""
g = self.defining_polynomial()
K = g.parent().base_ring()
from sage.rings.polynomial.all import PolynomialRing
R = PolynomialRing(K,'X')
X = R.gen()
one = K.one()
zero = K.zero()
# the point with Z = 0 = Y
try:
t = self.point([one,zero,zero])
yield(t)
except TypeError:
pass
# points with Z = 0, Y = 1
g10 = R(g(X,one,zero))
if g10.is_zero():
for x in K:
yield(self.point([x,one,zero]))
else:
for x in g10.roots(multiplicities=False):
yield(self.point([x,one,zero]))
# points with Z = 1
for y in K:
gy1 = R(g(X,y,one))
if gy1.is_zero():
for x in K:
yield(self.point([x,y,one]))
else:
for x in gy1.roots(multiplicities=False):
yield(self.point([x,y,one]))
def rational_points(self, algorithm="enum", sort=True):
r"""
Return the rational points on this curve computed via enumeration.
INPUT:
- ``algorithm`` (string, default: 'enum') -- the algorithm to
use. Currently this is ignored.
- ``sort`` (boolean, default ``True``) -- whether the output
points should be sorted. If False, the order of the output
is non-deterministic.
OUTPUT:
A list of all the rational points on the curve defined over
its base field, possibly sorted.
.. note::
This is a slow Python-level implementation.
EXAMPLES::
sage: F = GF(7)
sage: P2.<X,Y,Z> = ProjectiveSpace(F,2)
sage: C = Curve(X^3+Y^3-Z^3)
sage: C.rational_points()
[(0 : 1 : 1), (0 : 2 : 1), (0 : 4 : 1), (1 : 0 : 1), (2 : 0 : 1), (3 : 1 : 0), (4 : 0 : 1), (5 : 1 : 0), (6 : 1 : 0)]
::
sage: F = GF(1237)
sage: P2.<X,Y,Z> = ProjectiveSpace(F,2)
sage: C = Curve(X^7+7*Y^6*Z+Z^4*X^2*Y*89)
sage: len(C.rational_points())
1237
::
sage: F = GF(2^6,'a')
sage: P2.<X,Y,Z> = ProjectiveSpace(F,2)
sage: C = Curve(X^5+11*X*Y*Z^3 + X^2*Y^3 - 13*Y^2*Z^3)
sage: len(C.rational_points())
104
::
sage: R.<x,y,z> = GF(2)[]
sage: f = x^3*y + y^3*z + x*z^3
sage: C = Curve(f); pts = C.rational_points()
sage: pts
[(0 : 0 : 1), (0 : 1 : 0), (1 : 0 : 0)]
"""
points = list(self.rational_points_iterator())
if sort:
points.sort()
return points
class ProjectiveCurve_prime_finite_field(ProjectiveCurve_finite_field):
def _points_via_singular(self, sort=True):
r"""
Return all rational points on this curve, computed using Singular's
Brill-Noether implementation.
INPUT:
- ``sort`` - bool (default: True), if True return the
point list sorted. If False, returns the points in the order
computed by Singular.
EXAMPLE::
sage: x, y, z = PolynomialRing(GF(5), 3, 'xyz').gens()
sage: f = y^2*z^7 - x^9 - x*z^8
sage: C = Curve(f); C
Projective Curve over Finite Field of size 5 defined by
-x^9 + y^2*z^7 - x*z^8
sage: C._points_via_singular()
[(0 : 0 : 1), (0 : 1 : 0), (2 : 2 : 1), (2 : 3 : 1),
(3 : 1 : 1), (3 : 4 : 1)]
sage: C._points_via_singular(sort=False) #random
[(0 : 1 : 0), (3 : 1 : 1), (3 : 4 : 1), (2 : 2 : 1),
(0 : 0 : 1), (2 : 3 : 1)]
        .. note::
            The Brill-Noether package does not always work (i.e., the
            'bn' algorithm). When it fails, a RuntimeError exception is
            raised.
"""
f = self.defining_polynomial()._singular_()
singular = f.parent()
singular.lib('brnoeth')
try:
X1 = f.Adj_div()
except (TypeError, RuntimeError) as s:
raise RuntimeError(str(s) + "\n\n ** Unable to use the\
Brill-Noether Singular package to\
compute all points (see above).")
X2 = singular.NSplaces(1, X1)
R = X2[5][1][1]
singular.set_ring(R)
# We use sage_flattened_str_list since iterating through
# the entire list through the sage/singular interface directly
# would involve hundreds of calls to singular, and timing issues with
# the expect interface could crop up. Also, this is vastly
# faster (and more robust).
v = singular('POINTS').sage_flattened_str_list()
pnts = [self(int(v[3*i]), int(v[3*i+1]), int(v[3*i+2]))
for i in range(len(v)//3)]
# singular always dehomogenizes with respect to the last variable
# so if this variable divides the curve equation, we need to add
# points at infinity
F = self.defining_polynomial()
z = F.parent().gens()[-1]
if z.divides(F):
pnts += [self(1,a,0) for a in self.base_ring()]
pnts += [self(0,1,0)]
# remove multiple points
pnts = list(set(pnts))
if sort:
pnts.sort()
return pnts
def riemann_roch_basis(self, D):
r"""
Return a basis for the Riemann-Roch space corresponding to
`D`.
This uses Singular's Brill-Noether implementation.
INPUT:
- ``D`` - a divisor
OUTPUT:
A list of function field elements that form a basis of the Riemann-Roch space
EXAMPLE::
sage: R.<x,y,z> = GF(2)[]
sage: f = x^3*y + y^3*z + x*z^3
sage: C = Curve(f); pts = C.rational_points()
sage: D = C.divisor([ (4, pts[0]), (4, pts[2]) ])
sage: C.riemann_roch_basis(D)
[x/y, 1, z/y, z^2/y^2, z/x, z^2/(x*y)]
::
sage: R.<x,y,z> = GF(5)[]
sage: f = x^7 + y^7 + z^7
sage: C = Curve(f); pts = C.rational_points()
sage: D = C.divisor([ (3, pts[0]), (-1,pts[1]), (10, pts[5]) ])
sage: C.riemann_roch_basis(D)
[(-2*x + y)/(x + y), (-x + z)/(x + y)]
.. NOTE::
Currently this only works over prime field and divisors supported on rational points.
"""
f = self.defining_polynomial()._singular_()
singular = f.parent()
singular.lib('brnoeth')
try:
X1 = f.Adj_div()
except (TypeError, RuntimeError) as s:
raise RuntimeError(str(s) + "\n\n ** Unable to use the Brill-Noether Singular package to compute all points (see above).")
X2 = singular.NSplaces(1, X1)
# retrieve list of all computed closed points (possibly of degree >1)
v = X2[3].sage_flattened_str_list() # We use sage_flattened_str_list since iterating through
# the entire list through the sage/singular interface directly
# would involve hundreds of calls to singular, and timing issues with
# the expect interface could crop up. Also, this is vastly
# faster (and more robust).
v = [ v[i].partition(',') for i in range(len(v)) ]
pnts = [ ( int(v[i][0]), int(v[i][2])-1 ) for i in range(len(v))]
# retrieve coordinates of rational points
R = X2[5][1][1]
singular.set_ring(R)
v = singular('POINTS').sage_flattened_str_list()
coords = [self(int(v[3*i]), int(v[3*i+1]), int(v[3*i+2])) for i in range(len(v)//3)]
# build correct representation of D for singular
Dsupport = D.support()
Dcoeffs = []
for x in pnts:
if x[0] == 1:
Dcoeffs.append(D.coefficient(coords[x[1]]))
else:
Dcoeffs.append(0)
Dstr = str(tuple(Dcoeffs))
G = singular(','.join([str(x) for x in Dcoeffs]), type='intvec')
# call singular's brill noether routine and return
T = X2[1][2]
T.set_ring()
LG = G.BrillNoether(X2)
LG = [X.split(',\n') for X in LG.sage_structured_str_list()]
x,y,z = self.ambient_space().coordinate_ring().gens()
vars = {'x':x, 'y':y, 'z':z}
V = [(sage_eval(a, vars)/sage_eval(b, vars)) for a, b in LG]
return V
def rational_points(self, algorithm="enum", sort=True):
r"""
INPUT:
- ``algorithm`` - string:
- ``'enum'`` - straightforward enumeration
- ``'bn'`` - via Singular's brnoeth package.
EXAMPLE::
sage: x, y, z = PolynomialRing(GF(5), 3, 'xyz').gens()
sage: f = y^2*z^7 - x^9 - x*z^8
sage: C = Curve(f); C
Projective Curve over Finite Field of size 5 defined by
-x^9 + y^2*z^7 - x*z^8
sage: C.rational_points()
[(0 : 0 : 1), (0 : 1 : 0), (2 : 2 : 1), (2 : 3 : 1),
(3 : 1 : 1), (3 : 4 : 1)]
sage: C = Curve(x - y + z)
sage: C.rational_points()
[(0 : 1 : 1), (1 : 1 : 0), (1 : 2 : 1), (2 : 3 : 1),
(3 : 4 : 1), (4 : 0 : 1)]
sage: C = Curve(x*z+z^2)
sage: C.rational_points('all')
[(0 : 1 : 0), (1 : 0 : 0), (1 : 1 : 0), (2 : 1 : 0),
(3 : 1 : 0), (4 : 0 : 1), (4 : 1 : 0), (4 : 1 : 1),
(4 : 2 : 1), (4 : 3 : 1), (4 : 4 : 1)]
        .. note::
           The Brill-Noether package does not always work (i.e., the
           'bn' algorithm). When it fails, a RuntimeError exception is
           raised.
"""
if algorithm == "enum":
return ProjectiveCurve_finite_field.rational_points(self,
algorithm="enum",
sort=sort)
elif algorithm == "bn":
return self._points_via_singular(sort=sort)
elif algorithm == "all":
S_enum = self.rational_points(algorithm = "enum")
S_bn = self.rational_points(algorithm = "bn")
if S_enum != S_bn:
raise RuntimeError("Bug in rational_points -- different\
algorithms give different answers for\
curve %s!"%self)
return S_enum
else:
raise ValueError("No algorithm '%s' known"%algorithm)
def Hasse_bounds(q, genus=1):
r"""
Return the Hasse-Weil bounds for the cardinality of a nonsingular
curve defined over `\GF{q}` of given ``genus``.
INPUT:
- ``q`` (int) -- a prime power
- ``genus`` (int, default 1) -- a non-negative integer,
OUTPUT:
(tuple) The Hasse bounds (lb,ub) for the cardinality of a curve of
genus ``genus`` defined over `\GF{q}`.
EXAMPLES::
sage: Hasse_bounds(2)
(1, 5)
sage: Hasse_bounds(next_prime(10^30))
(999999999999998000000000000058, 1000000000000002000000000000058)
"""
if genus==1:
rq = (4*q).isqrt()
else:
rq = (4*(genus**2)*q).isqrt()
return (q+1-rq,q+1+rq)
|
# Demo with a few examples of using OpenCV functions and UI
# packages: opencv-python
# uses lena: https://upload.wikimedia.org/wikipedia/en/7/7d/Lenna_%28test_image%29.png
import numpy as np
import cv2
print("Hello World OpenCV")
print("OpenCV Version:", cv2.__version__)
image = np.ones((256, 256), dtype="uint8")
image = image * 127
image[0:128, 0:128] = 0
image[128:, 128:] = 255
cv2.imshow("Image", image)
cv2.waitKey(0)
# Opening and Viewing an Image
import os.path
if os.path.isfile('lena.png'):
print("Test Image File exist")
else:
print("Test Image File does not exist; downloading...")
import urllib.request as urllib_request
urllib_request.urlretrieve("https://upload.wikimedia.org/wikipedia/en/7/7d/Lenna_%28test_image%29.png", "lena.png")
image = cv2.imread("./lena.png")
cv2.imshow("Image", image)
cv2.waitKey(0)
cv2.destroyAllWindows()
rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
cv2.imshow("Image RGB", rgb_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
def viewImage(image, name_of_window):
cv2.namedWindow(name_of_window, cv2.WINDOW_AUTOSIZE)
cv2.imshow(name_of_window, image)
cv2.waitKey(0)
cv2.destroyAllWindows()
viewImage(image, "Lena")
# Edit pixels
edited = image.copy()
edited[200:390, 200:360, 0] = 255
viewImage(edited, "Lena edited")
# Cropping
cropped = image[200:390, 200:360]
viewImage(cropped, "Lena cropped")
# Resizing
scale_percent = 10 # percent of original size
width = int(image.shape[1] * scale_percent / 100)
height = int(image.shape[0] * scale_percent / 100)
dim = (width, height)
resized = cv2.resize(image, dim, interpolation=cv2.INTER_AREA)
viewImage(resized, "Lena resized to {}%".format(scale_percent))
# Drawing a Rectangle
output = image.copy()
cv2.rectangle(output, (200, 200), (360, 390), (255, 0, 0), 10)
viewImage(output, "Lena with a rectangle")
# Drawing a line
cv2.line(output, (256, 390), (256, 512), (0, 0, 255), 5)
viewImage(output, "Lena with a line")
# Writing on an image
cv2.putText(output, "Lena", (360, 390), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2)
viewImage(output, "Lena with text")
# Saving an image
cv2.imwrite("./output.jpg", output)
# Blurring/Smoothing
blurred = cv2.GaussianBlur(image, (15, 15), 0)
viewImage(blurred, "Lena blurred")
# Rotating
(h, w, d) = image.shape
center = (w // 2, h // 2)
rot = 45
M = cv2.getRotationMatrix2D(center, rot, 1.0)
rotated = cv2.warpAffine(image, M, (w, h))
viewImage(rotated, "Lena rotated by {} degrees".format(rot))
# Blend
alpha_slider_max = 100
def on_trackbar_weight(val):
alpha = val / alpha_slider_max
beta = (1.0 - alpha)
blend = cv2.addWeighted(image, alpha, rotated, beta, 0.0)
cv2.imshow('Lena blended', blend)
cv2.namedWindow('Lena blended')
trackbar_name = 'Alpha 0 - {}'.format(alpha_slider_max)
cv2.createTrackbar(trackbar_name, 'Lena blended', 50, alpha_slider_max, on_trackbar_weight)
on_trackbar_weight(50)
cv2.waitKey()
cv2.destroyWindow('Lena blended')
# Grayscaling
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
viewImage(gray_image, "Lena gray-scale")
# Thresholding
threshold_slider_max = 255
threshold = 200
ret, threshold_image = cv2.threshold(gray_image, threshold, 255, 0)
def on_trackbar_threshold(val):
threshold = val
ret, threshold_image = cv2.threshold(gray_image, threshold, 255, 0)
cv2.imshow("Lena thresholded", threshold_image)
cv2.namedWindow("Lena thresholded")
trackbar_name = "Threshold 0 - {}".format(threshold_slider_max)
cv2.createTrackbar(trackbar_name, "Lena thresholded", threshold, threshold_slider_max, on_trackbar_threshold)
on_trackbar_threshold(threshold)
cv2.waitKey()
cv2.destroyWindow("Lena thresholded")
# Contours
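# Note: in OpenCV 4.x cv2.findContours returns (contours, hierarchy);
# OpenCV 3.x returned (image, contours, hierarchy), so unpacking differs.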
contours, hierarchy = cv2.findContours(threshold_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
image_with_contours = image.copy()
cv2.drawContours(image_with_contours, contours, -1, (255, 0, 0), 1)
viewImage(image_with_contours, "Lena contours")
# Face Detection
# use the cascade file bundled with opencv-python instead of a hard-coded venv path
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
faces = face_cascade.detectMultiScale(gray_image)
print("Lena with {} faces detected".format(len(faces)))
image_faces = image.copy()
for (x, y, w, h) in faces:
cv2.rectangle(image_faces, (x, y), (x + w, y + h), (0, 255, 0), 2)
viewImage(image_faces, "Lena with {} faces detected".format(len(faces)))
def display_box(im, bbox):
n_boxes = len(bbox)
for j_box in range(n_boxes):
for j in range(4):
cv2.line(im,
(int(bbox[j_box][j][0]), int(bbox[j_box][j][1])),
(int(bbox[j_box][(j + 1) % 4][0]), int(bbox[j_box][(j + 1) % 4][1])),
(255, 0, 0), 3)
# Display results
cv2.imshow("Results", im)
inputImage = cv2.imread("qrcode.jpg")
qrDecoder = cv2.QRCodeDetector()
data, bbox, rectifiedImage = qrDecoder.detectAndDecode(inputImage)
if len(data) > 0:
print("Decoded Data : {}".format(data))
display_box(inputImage, bbox)
rectifiedImage = np.uint8(rectifiedImage)
cv2.imshow("Rectified QRCode", rectifiedImage)
else:
print("QR Code not detected")
cv2.imshow("Results", inputImage)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
# Copyright 2016 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import namedtuple
from oslo_config import cfg
from oslo_log import log
from oslo_utils import importutils as utils
from vitrage.common.constants import DatasourceAction
from vitrage.common.constants import DatasourceOpts as DSOpts
from vitrage.common.constants import DatasourceProperties as DSProps
from vitrage.datasources.alarm_driver_base import AlarmDriverBase
from vitrage.datasources.zabbix.properties import ZabbixProperties as ZProps
from vitrage.datasources.zabbix.properties import ZabbixTriggerStatus \
as TriggerStatus
from vitrage.datasources.zabbix.properties import ZabbixTriggerValue \
as TriggerValue
from vitrage.datasources.zabbix import ZABBIX_DATASOURCE
from vitrage.utils import file as file_utils
CONF = cfg.CONF
LOG = log.getLogger(__name__)
class ZabbixDriver(AlarmDriverBase):
ServiceKey = namedtuple('ServiceKey', ['hostname', 'triggerid'])
conf_map = None
def __init__(self):
super(ZabbixDriver, self).__init__()
if not ZabbixDriver.conf_map:
ZabbixDriver.conf_map =\
ZabbixDriver._configuration_mapping()
self._client = None
def zabbix_client_login(self):
if not CONF.zabbix.user:
LOG.warning('Zabbix user is not defined')
if not CONF.zabbix.password:
LOG.warning('Zabbix password is not defined')
if not CONF.zabbix.url:
LOG.warning('Zabbix url is not defined')
try:
if not self._client:
self._client = utils.import_object(
'pyzabbix.ZabbixAPI',
CONF.zabbix.url)
self._client.login(
CONF.zabbix.user,
CONF.zabbix.password)
except Exception:
LOG.exception('pyzabbix.ZabbixAPI error occurred.')
self._client = None
def _vitrage_type(self):
return ZABBIX_DATASOURCE
def _alarm_key(self, alarm):
return self.ServiceKey(hostname=alarm[ZProps.RESOURCE_NAME],
triggerid=alarm[ZProps.TRIGGER_ID])
def _get_alarms(self):
self.zabbix_client_login()
if not self._client:
return []
alarms = []
valid_hosts = (host for host in
self._client.host.get(output=[ZProps.HOST])
if host[ZProps.HOST] in ZabbixDriver.conf_map)
for host in valid_hosts:
self._get_triggers_per_host(host, alarms)
return alarms
def _get_triggers_per_host(self, host, alarms):
host_id = host[ZProps.HOST_ID]
triggers = self._client.trigger.get(hostids=host_id,
expandDescription=True)
triggers_rawtexts = self._get_triggers_rawtexts(host_id)
for trigger in triggers:
trigger[ZProps.ZABBIX_RESOURCE_NAME] = host[ZProps.HOST]
trigger_id = trigger[ZProps.TRIGGER_ID]
trigger[ZProps.RAWTEXT] = triggers_rawtexts[trigger_id]
alarms.append(trigger)
def _get_triggers_rawtexts(self, host_id):
output = [ZProps.TRIGGER_ID, ZProps.DESCRIPTION]
triggers = self._client.trigger.get(hostids=host_id, output=output)
return {trigger[ZProps.TRIGGER_ID]: trigger[ZProps.DESCRIPTION]
for trigger in triggers}
def _enrich_alarms(self, alarms):
"""Enrich zabbix alarm using zabbix configuration file
converting Zabbix host name to Vitrage resource type and name
:param alarms: Zabbix alarm
:return: enriched alarm
"""
for alarm in alarms:
alarm[ZProps.VALUE] = self._get_value(alarm)
zabbix_host = alarm[ZProps.ZABBIX_RESOURCE_NAME]
vitrage_host = ZabbixDriver.conf_map[zabbix_host]
alarm[ZProps.RESOURCE_TYPE] = vitrage_host[ZProps.RESOURCE_TYPE]
alarm[ZProps.RESOURCE_NAME] = vitrage_host[ZProps.RESOURCE_NAME]
def _is_erroneous(self, alarm):
return alarm and \
alarm[ZProps.VALUE] == TriggerValue.PROBLEM
def _status_changed(self, new_alarm, old_alarm):
if not (new_alarm and old_alarm):
return False
if new_alarm[ZProps.VALUE] != old_alarm[ZProps.VALUE]:
return True
if new_alarm[ZProps.VALUE] == TriggerValue.PROBLEM:
priority_changed = \
new_alarm[ZProps.PRIORITY] != old_alarm[ZProps.PRIORITY]
description_changed = \
new_alarm[ZProps.DESCRIPTION] != old_alarm[ZProps.DESCRIPTION]
return priority_changed or description_changed
def _is_valid(self, alarm):
return alarm[ZProps.RESOURCE_TYPE] is not None and \
alarm[ZProps.RESOURCE_NAME] is not None
@staticmethod
def _get_value(alarm):
if alarm[ZProps.STATUS] == TriggerStatus.DISABLED:
return TriggerValue.OK
return alarm[ZProps.VALUE]
@staticmethod
def _configuration_mapping():
try:
zabbix_config_file = CONF.zabbix[DSOpts.CONFIG_FILE]
zabbix_config = file_utils.load_yaml_file(zabbix_config_file)
zabbix_config_elements = zabbix_config[ZABBIX_DATASOURCE]
mappings = {}
for element_config in zabbix_config_elements:
mappings[element_config['zabbix_host']] = {
ZProps.RESOURCE_TYPE: element_config['type'],
ZProps.RESOURCE_NAME: element_config['name']
}
return mappings
except Exception:
LOG.exception('Failed in init.')
return {}
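    # A minimal sketch of the mapping file this method expects (assuming the
    # top-level key equals ZABBIX_DATASOURCE; the host/type/name values below
    # are hypothetical):
    #
    #   zabbix:
    #     - zabbix_host: compute-1
    #       type: nova.host
    #       name: compute-1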
def enrich_event(self, event, event_type):
event[DSProps.EVENT_TYPE] = event_type
if ZabbixDriver.conf_map:
zabbix_host = event[ZProps.HOST]
event[ZProps.ZABBIX_RESOURCE_NAME] = zabbix_host
v_resource = ZabbixDriver.conf_map[zabbix_host]
event[ZProps.RESOURCE_NAME] = v_resource[ZProps.RESOURCE_NAME]
event[ZProps.RESOURCE_TYPE] = v_resource[ZProps.RESOURCE_TYPE]
return ZabbixDriver.make_pickleable([event], ZABBIX_DATASOURCE,
DatasourceAction.UPDATE)[0]
@staticmethod
def get_event_types():
return ['zabbix.alarm.ok', 'zabbix.alarm.problem']
@staticmethod
def should_delete_outdated_entities():
return True
|
"""posts table
Revision ID: 5c80010c853a
Revises: 6ca7139bbbf2
Create Date: 2018-06-25 17:18:29.165993
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '5c80010c853a'
down_revision = '6ca7139bbbf2'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('post',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('body', sa.String(length=140), nullable=True),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_post_timestamp'), 'post', ['timestamp'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_post_timestamp'), table_name='post')
op.drop_table('post')
# ### end Alembic commands ###
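# To apply this migration run `alembic upgrade head`; to revert it run
# `alembic downgrade 6ca7139bbbf2` (the down_revision above).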
|
def problem255():
pass
|
"""API router
"""
from django.conf.urls import url
from django.urls import path
from rest_framework.routers import DefaultRouter
from vision_on_edge.azure_app_insight.api import views as app_insight_views
from vision_on_edge.azure_parts.api import views as azure_part_views
from vision_on_edge.azure_settings.api import views as azure_setting_views
from vision_on_edge.azure_training.api import views as azure_training_views
from vision_on_edge.azure_training_status.api import \
views as azure_training_status_views
from vision_on_edge.cameras.api import util_views as camera_util_views
from vision_on_edge.cameras.api import views
from vision_on_edge.feedback.api import views as feedback_views
from vision_on_edge.image_predictions.api import \
views as image_prediction_views
from vision_on_edge.images.api import views as image_views
from vision_on_edge.locations.api import views as location_views
from vision_on_edge.notifications.api import views as notifications_views
from vision_on_edge.relabeling.api import views as relabel_views
from vision_on_edge.streams.api import views as stream_views
router = DefaultRouter()
router.trailing_slash = '/?'
router.register('settings', azure_setting_views.SettingViewSet)
router.register('cameras', views.CameraViewSet)
router.register('parts', azure_part_views.PartViewSet)
router.register('locations', location_views.LocationViewSet)
router.register('image_predictions',
image_prediction_views.ImagePredictionViewSet)
router.register('projects', azure_training_views.ProjectViewSet)
router.register('training_status',
azure_training_status_views.TrainingStatusViewSet)
router.register('tasks', azure_training_views.TaskViewSet)
router.register('images', image_views.ImageViewSet)
router.register('feedback', feedback_views.FeedbackViewSet)
router.register('notifications', notifications_views.NotificationViewSet)
urlpatterns = router.urls
urlpatterns += [
url('streams/connect', stream_views.connect_stream),
path('streams/<int:stream_id>/disconnect', stream_views.disconnect_stream),
path('streams/<int:stream_id>/video_feed', stream_views.video_feed),
path('streams/<int:stream_id>/capture', stream_views.capture),
path('streams/<int:stream_id>/keep_alive', stream_views.keep_alive),
path('projects/<int:project_id>/train', azure_training_views.train),
path('projects/<int:project_id>/export', azure_training_views.export),
path('projects/<int:project_id>/train_performance',
azure_training_views.train_performance),
path('projects/<int:project_id>/inference_video_feed',
stream_views.inference_video_feed),
path('projects/<int:project_id>/pull_cv_project',
azure_training_views.pull_cv_project),
path('projects/<int:project_id>/update_prob_threshold',
azure_training_views.update_prob_threshold),
path('projects/<int:project_id>/reset_project',
azure_training_views.reset_project),
path('projects/<int:project_id>/reset_camera',
azure_training_views.project_reset_camera),
path('projects/null/export', azure_training_views.export_null),
path('relabel', relabel_views.upload_relabel_image),
path('relabel/update', relabel_views.relabel_update),
path('appinsight/key', app_insight_views.instrumentation_key),
path('camera_utils/verify_rtsp', camera_util_views.verify_rtsp)
]
app_name = "api"
|
# orm/exc.py
# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
"""SQLAlchemy ORM exceptions."""
from __future__ import annotations
from .. import exc as sa_exc
from .. import util
from ..exc import MultipleResultsFound # noqa
from ..exc import NoResultFound # noqa
NO_STATE = (AttributeError, KeyError)
"""Exception types that may be raised by instrumentation implementations."""
class StaleDataError(sa_exc.SQLAlchemyError):
"""An operation encountered database state that is unaccounted for.
Conditions which cause this to happen include:
* A flush may have attempted to update or delete rows
and an unexpected number of rows were matched during
the UPDATE or DELETE statement. Note that when
version_id_col is used, rows in UPDATE or DELETE statements
are also matched against the current known version
identifier.
* A mapped object with version_id_col was refreshed,
and the version number coming back from the database does
not match that of the object itself.
    * An object is detached from its parent object, however
the object was previously attached to a different parent
identity which was garbage collected, and a decision
cannot be made if the new parent was really the most
recent "parent".
"""
ConcurrentModificationError = StaleDataError
class FlushError(sa_exc.SQLAlchemyError):
"""A invalid condition was detected during flush()."""
class UnmappedError(sa_exc.InvalidRequestError):
"""Base for exceptions that involve expected mappings not present."""
class ObjectDereferencedError(sa_exc.SQLAlchemyError):
"""An operation cannot complete due to an object being garbage
collected.
"""
class DetachedInstanceError(sa_exc.SQLAlchemyError):
"""An attempt to access unloaded attributes on a
mapped instance that is detached."""
code = "bhk3"
class UnmappedInstanceError(UnmappedError):
"""An mapping operation was requested for an unknown instance."""
@util.preload_module("sqlalchemy.orm.base")
def __init__(self, obj, msg=None):
base = util.preloaded.orm_base
if not msg:
try:
base.class_mapper(type(obj))
name = _safe_cls_name(type(obj))
msg = (
"Class %r is mapped, but this instance lacks "
"instrumentation. This occurs when the instance "
"is created before sqlalchemy.orm.mapper(%s) "
"was called." % (name, name)
)
except UnmappedClassError:
msg = _default_unmapped(type(obj))
if isinstance(obj, type):
msg += (
"; was a class (%s) supplied where an instance was "
"required?" % _safe_cls_name(obj)
)
UnmappedError.__init__(self, msg)
def __reduce__(self):
return self.__class__, (None, self.args[0])
class UnmappedClassError(UnmappedError):
"""An mapping operation was requested for an unknown class."""
def __init__(self, cls, msg=None):
if not msg:
msg = _default_unmapped(cls)
UnmappedError.__init__(self, msg)
def __reduce__(self):
return self.__class__, (None, self.args[0])
class ObjectDeletedError(sa_exc.InvalidRequestError):
"""A refresh operation failed to retrieve the database
row corresponding to an object's known primary key identity.
A refresh operation proceeds when an expired attribute is
accessed on an object, or when :meth:`_query.Query.get` is
used to retrieve an object which is, upon retrieval, detected
as expired. A SELECT is emitted for the target row
based on primary key; if no row is returned, this
exception is raised.
The true meaning of this exception is simply that
no row exists for the primary key identifier associated
with a persistent object. The row may have been
deleted, or in some cases the primary key updated
to a new value, outside of the ORM's management of the target
object.
"""
@util.preload_module("sqlalchemy.orm.base")
def __init__(self, state, msg=None):
base = util.preloaded.orm_base
if not msg:
msg = (
"Instance '%s' has been deleted, or its "
"row is otherwise not present." % base.state_str(state)
)
sa_exc.InvalidRequestError.__init__(self, msg)
def __reduce__(self):
return self.__class__, (None, self.args[0])
class UnmappedColumnError(sa_exc.InvalidRequestError):
"""Mapping operation was requested on an unknown column."""
class LoaderStrategyException(sa_exc.InvalidRequestError):
"""A loader strategy for an attribute does not exist."""
def __init__(
self,
applied_to_property_type,
requesting_property,
applies_to,
actual_strategy_type,
strategy_key,
):
if actual_strategy_type is None:
sa_exc.InvalidRequestError.__init__(
self,
"Can't find strategy %s for %s"
% (strategy_key, requesting_property),
)
else:
sa_exc.InvalidRequestError.__init__(
self,
'Can\'t apply "%s" strategy to property "%s", '
'which is a "%s"; this loader strategy is intended '
'to be used with a "%s".'
% (
util.clsname_as_plain_name(actual_strategy_type),
requesting_property,
util.clsname_as_plain_name(applied_to_property_type),
util.clsname_as_plain_name(applies_to),
),
)
def _safe_cls_name(cls):
try:
cls_name = ".".join((cls.__module__, cls.__name__))
except AttributeError:
cls_name = getattr(cls, "__name__", None)
if cls_name is None:
cls_name = repr(cls)
return cls_name
@util.preload_module("sqlalchemy.orm.base")
def _default_unmapped(cls):
base = util.preloaded.orm_base
try:
mappers = base.manager_of_class(cls).mappers
except (TypeError,) + NO_STATE:
mappers = {}
name = _safe_cls_name(cls)
if not mappers:
return "Class '%s' is not mapped" % name
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# soco documentation build configuration file, created by
# sphinx-quickstart on Mon Sep 14 08:03:37 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
sys.path.insert(0, os.path.abspath('..'))
import soco
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.3'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.extlinks',
'sphinx.ext.inheritance_diagram',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'SoCo'
copyright = '2015, The SoCo Team'
author = "`The SoCo Team"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = soco.__version__
# The full version, including alpha/beta/rc tags.
release = soco.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'any'
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['soco.', 'soco.music_services.']
# If true, keep warnings as "system message" paragraphs in the built documents.
keep_warnings = True
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# Allow auto links into the Python and Requests docs
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'requests': ('http://www.python-requests.org/en/latest/', None)
}
# Shortcuts to Github Issues etc. Use them like this:
# :issue:`123` (which will generate a link to issue 123)
extlinks = {
'issue': ('https://github.com/SoCo/SoCo/issues/%s', '#'),
'PR': ('https://github.com/SoCo/SoCo/pull/%s', '#')
}
# Document members by default, and in source order. This allows the stub files
# in the api directory to be much shorter.
autodoc_default_flags = ['members']
autodoc_member_order = 'bysource'
# Concatenate the class and __init__ docstrings
autoclass_content = 'both'
# Nicer inheritance graphs for RTD theme. NB the image map does not rescale
# properly, so we have had to add some javascript to handle it. See
# _templates and _static
inheritance_node_attrs = dict(
fontsize=14, height=0.75, color='dodgerblue', style='rounded',
)
inheritance_graph_attrs = dict(
rankdir="LR", size='""',
)
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'socodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'soco.tex', 'soco Documentation',
'Author', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'soco', 'soco Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'soco', 'soco Documentation',
author, 'soco', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
# epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to
# save visual space.
# epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
# epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
# Allow duplicate toc entries.
# epub_tocdup = True
# Choose between 'default' and 'includehidden'.
# epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
# epub_fix_images = False
# Scale large images.
# epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# epub_show_urls = 'inline'
# If false, no index is generated.
# epub_use_index = True
|
#!/usr/bin/env python3
"""
Main module for the deployable project.
"""
# Bootstrap to be able to perform absolute imports as standalone code
if __name__ == "__main__":
from absolute_import import absolute_import
absolute_import(file=__file__, name=__name__, path=__path__)
# Normal imports
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from deployable.defaults.args import description, epilog
from typing import Any
def get_args() -> Any:
    """
    Retrieves arguments from command line.
    """
    # Create parser and groups
    parser = ArgumentParser(description=description, epilog=epilog, formatter_class=RawDescriptionHelpFormatter)
    # Parse and return the command-line arguments (no options are defined yet)
    return parser.parse_args()
def main() -> None:
    """
    Entrypoint.
    """
    # Parse command-line arguments; nothing else is wired up yet
    get_args()
# Call main method
if __name__ == "__main__":
    main()
|
import numpy as np
from collections import deque
import pickle
import torch
from utils import collect_trajectories, random_sample
from PPO import PPO
import matplotlib.pyplot as plt
from parallelEnv import *
import gym
env = gym.make("CartPole-v0")
env.reset()
env.seed(2)
obs_dim = env.observation_space.shape[0]
n_actions = env.action_space.n
act_dist = [0 for i in range(n_actions)]
def train(episode, env_name):
    gamma = .99
    gae_lambda = 0.95
    use_gae = True
    beta = .01
    cliprange = 0.1
    best_score = -np.inf
    goal_score = 195.0
    ep_length = []
    nenvs = 1
    rollout_length = 200
    minibatches = 10 * 8
    nbatch = nenvs * rollout_length
    optimization_epochs = 4
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    envs = parallelEnv(env_name, nenvs, seed=1234)
    agent = PPO(state_size=obs_dim,
                action_size=n_actions,
                seed=0,
                hidden_layers=[64, 64],
                lr_policy=1e-4,
                use_reset=True,
                device=device)
    print(agent.policy)
    # keep track of progress
    mean_rewards = []
    scores_window = deque(maxlen=100)
    loss_storage = []
    for i_episode in range(episode + 1):
        log_probs_old, states, actions, rewards, values, dones, vals_last, infos, ep_length = collect_trajectories(envs, act_dist, ep_length, agent.policy, rollout_length)
        returns = np.zeros_like(rewards)
        advantages = np.zeros_like(rewards)
        if not use_gae:
            for t in reversed(range(rollout_length)):
                if t == rollout_length - 1:
                    returns[t] = rewards[t] + gamma * (1 - dones[t]) * vals_last
                else:
                    returns[t] = rewards[t] + gamma * (1 - dones[t]) * returns[t + 1]
                advantages[t] = returns[t] - values[t]
        else:
            for t in reversed(range(rollout_length)):
                if t == rollout_length - 1:
                    returns[t] = rewards[t] + gamma * (1 - dones[t]) * vals_last
                    td_error = returns[t] - values[t]
                    # no successor step: the advantage is just the TD error
                    advantages[t] = td_error
                else:
                    returns[t] = rewards[t] + gamma * (1 - dones[t]) * returns[t + 1]
                    td_error = rewards[t] + gamma * (1 - dones[t]) * values[t + 1] - values[t]
                    # GAE recursion: discount the *next* step's advantage,
                    # not the zero-initialised current one
                    advantages[t] = td_error + gae_lambda * gamma * (1 - dones[t]) * advantages[t + 1]
        # convert to pytorch tensors and move to gpu if available
        returns = torch.from_numpy(returns).float().to(device).view(-1,)
        advantages = torch.from_numpy(advantages).float().to(device).view(-1,)
        advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-10)
        for _ in range(optimization_epochs):
            sampler = random_sample(nbatch, minibatches)
            for inds in sampler:
                mb_log_probs_old = log_probs_old[inds]
                mb_states = states[inds]
                mb_actions = actions[inds]
                mb_returns = returns[inds]
                mb_advantages = advantages[inds]
                loss_p, loss_v, loss_ent = agent.update(mb_log_probs_old, mb_states, mb_actions, mb_returns, mb_advantages, cliprange=cliprange, beta=beta)
                loss_storage.append([loss_p, loss_v, loss_ent])
        total_rewards = np.sum(rewards, axis=0)
        scores_window.append(np.mean(total_rewards))  # last 100 scores
        mean_rewards.append(np.mean(total_rewards))  # average reward of the parallel environments
        cliprange *= 0.999  # anneal the clipping parameter over time
        beta *= 0.999  # anneal the entropy regularisation term
        if i_episode % 100 == 0:
            print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))
            print(total_rewards)
        if np.mean(scores_window) >= goal_score and np.mean(scores_window) >= best_score:
            torch.save(agent.policy.state_dict(), "policy_cartpole.pth")
            best_score = np.mean(scores_window)
    return mean_rewards, loss_storage, act_dist, ep_length
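# A minimal, self-contained sketch of the GAE recursion used inside train(),
# written against plain numpy so it can be checked in isolation. Variable
# names mirror the training loop; nothing here comes from utils or PPO:
#
#   delta_t = r_t + gamma * (1 - done_t) * V(s_{t+1}) - V(s_t)
#   A_t     = delta_t + gamma * lambda * (1 - done_t) * A_{t+1}
def gae_advantages(rewards, values, dones, last_value, gamma=0.99, lam=0.95):
    T = len(rewards)
    advantages = np.zeros(T)
    next_value, next_advantage = last_value, 0.0
    for t in reversed(range(T)):
        nonterminal = 1.0 - dones[t]
        # one-step TD error at time t
        delta = rewards[t] + gamma * nonterminal * next_value - values[t]
        # exponentially weighted sum of future TD errors
        advantages[t] = delta + gamma * lam * nonterminal * next_advantage
        next_value, next_advantage = values[t], advantages[t]
    return advantages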
mean_rewards, loss, new_act_dist, ep_length = train(10000, 'CartPole-v0')
print(new_act_dist)  # per-action visit counts accumulated during training
print(ep_length)
plt.rcParams['xtick.direction'] = 'in'
plt.rcParams['ytick.direction'] = 'in'
plt.rcParams['font.size'] = 10
plt.suptitle("PPO + MLP + GAE for 10000 episodes")
plt.subplot(131)
plt.plot(mean_rewards)
plt.ylabel('Average score')
plt.xlabel('Episode')
plt.subplot(132)
plt.plot(list(range(len(ep_length))), ep_length, color="red")
plt.ylabel('Episode Length')
plt.xlabel('Episode')
plt.subplot(133)
plt.ylabel('Frequency')
plt.xlabel('Actions')
plt.bar(['Action {}'.format(i) for i in range(len(new_act_dist))], new_act_dist)
plt.show()
|
import numpy as np
from sklearn.preprocessing import LabelEncoder
from lightgbm import LGBMClassifier, LGBMRegressor


def infer_model(df, features, y, n_jobs):
    """Pick a LightGBM model for y and label-encode the categorical features."""
    # Default to regression; switch to classification for a binary target
    model_class = LGBMRegressor
    if len(np.unique(y)) == 2:
        y = LabelEncoder().fit_transform(y)
        model_class = LGBMClassifier
    # Encode non-numeric feature columns in place
    categoricals = df[features].select_dtypes(exclude=[np.number]).columns.tolist()
    for f in categoricals:
        df[f] = LabelEncoder().fit_transform(df[f].apply(str))
    # Require each leaf to cover at least 1% of the rows (and at least one row)
    min_child_samples = max(1, int(0.01 * df.shape[0]))
    model = model_class(min_child_samples=min_child_samples, n_jobs=n_jobs)
    return model, df, categoricals, y
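# Usage sketch (the toy frame below is a made-up assumption for illustration):
import pandas as pd

toy = pd.DataFrame({
    "age": [23, 31, 45, 52],
    "city": ["a", "b", "a", "c"],
    "label": [0, 1, 1, 0],
})
model, encoded, cats, target = infer_model(toy, ["age", "city"], toy["label"], n_jobs=1)
print(type(model).__name__, cats)  # LGBMClassifier ['city']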
|
Copy path
</clipboard-copy>
</div>
<div id="blob-path" class="breadcrumb">
<span class="repo-root js-repo-root"><span class="js-path-segment"><a data-pjax="true" href="/tensorflow/tensorflow/tree/r1.6"><span>tensorflow</span></a></span></span><span class="separator">/</span><span class="js-path-segment"><a data-pjax="true" href="/tensorflow/tensorflow/tree/r1.6/tensorflow"><span>tensorflow</span></a></span><span class="separator">/</span><span class="js-path-segment"><a data-pjax="true" href="/tensorflow/tensorflow/tree/r1.6/tensorflow/python"><span>python</span></a></span><span class="separator">/</span><span class="js-path-segment"><a data-pjax="true" href="/tensorflow/tensorflow/tree/r1.6/tensorflow/python/ops"><span>ops</span></a></span><span class="separator">/</span><strong class="final-path">rnn_cell_impl.py</strong>
</div>
</div>
<div class="commit-tease">
<span class="float-right">
<a class="commit-tease-sha" href="/tensorflow/tensorflow/commit/dff64beac8570d910f83774087642bb6a3fda96a" data-pjax>
dff64be
</a>
<relative-time datetime="2018-01-23T21:46:25Z">Jan 23, 2018</relative-time>
</span>
<div>
<img alt="@michaelkhan3" class="avatar" height="20" src="https://avatars3.githubusercontent.com/u/4420471?s=40&v=4" width="20" />
<a href="/michaelkhan3" class="user-mention" rel="contributor">michaelkhan3</a>
<a href="/tensorflow/tensorflow/commit/dff64beac8570d910f83774087642bb6a3fda96a" class="message" data-pjax="true" title="Add property to get cell wrapped by DropoutWrapper (#16006)
* add property to get cell wrapped by dropout
* updating golden files">Add property to get cell wrapped by DropoutWrapper (</a><a href="https://github.com/tensorflow/tensorflow/pull/16006" class="issue-link js-issue-link" data-error-text="Failed to load issue title" data-id="287414630" data-permission-text="Issue title is private" data-url="https://github.com/tensorflow/tensorflow/issues/16006">#16006</a><a href="/tensorflow/tensorflow/commit/dff64beac8570d910f83774087642bb6a3fda96a" class="message" data-pjax="true" title="Add property to get cell wrapped by DropoutWrapper (#16006)
* add property to get cell wrapped by dropout
* updating golden files">)</a>
</div>
<div class="commit-tease-contributors">
<button type="button" class="btn-link muted-link contributors-toggle" data-facebox="#blob_contributors_box">
<strong>13</strong>
contributors
</button>
<a class="avatar-link tooltipped tooltipped-s" aria-label="tensorflower-gardener" href="/tensorflow/tensorflow/commits/r1.6/tensorflow/python/ops/rnn_cell_impl.py?author=tensorflower-gardener"><img alt="@tensorflower-gardener" class="avatar" height="20" src="https://avatars1.githubusercontent.com/u/17151892?s=40&v=4" width="20" /> </a>
<a class="avatar-link tooltipped tooltipped-s" aria-label="ebrevdo" href="/tensorflow/tensorflow/commits/r1.6/tensorflow/python/ops/rnn_cell_impl.py?author=ebrevdo"><img alt="@ebrevdo" class="avatar" height="20" src="https://avatars2.githubusercontent.com/u/1794715?s=40&v=4" width="20" /> </a>
<a class="avatar-link tooltipped tooltipped-s" aria-label="jhseu" href="/tensorflow/tensorflow/commits/r1.6/tensorflow/python/ops/rnn_cell_impl.py?author=jhseu"><img alt="@jhseu" class="avatar" height="20" src="https://avatars1.githubusercontent.com/u/170179?s=40&v=4" width="20" /> </a>
<a class="avatar-link tooltipped tooltipped-s" aria-label="protoget" href="/tensorflow/tensorflow/commits/r1.6/tensorflow/python/ops/rnn_cell_impl.py?author=protoget"><img alt="@protoget" class="avatar" height="20" src="https://avatars2.githubusercontent.com/u/5117188?s=40&v=4" width="20" /> </a>
<a class="avatar-link tooltipped tooltipped-s" aria-label="xiejw" href="/tensorflow/tensorflow/commits/r1.6/tensorflow/python/ops/rnn_cell_impl.py?author=xiejw"><img alt="@xiejw" class="avatar" height="20" src="https://avatars0.githubusercontent.com/u/1184671?s=40&v=4" width="20" /> </a>
<a class="avatar-link tooltipped tooltipped-s" aria-label="martinwicke" href="/tensorflow/tensorflow/commits/r1.6/tensorflow/python/ops/rnn_cell_impl.py?author=martinwicke"><img alt="@martinwicke" class="avatar" height="20" src="https://avatars1.githubusercontent.com/u/577277?s=40&v=4" width="20" /> </a>
<a class="avatar-link tooltipped tooltipped-s" aria-label="michaelkhan3" href="/tensorflow/tensorflow/commits/r1.6/tensorflow/python/ops/rnn_cell_impl.py?author=michaelkhan3"><img alt="@michaelkhan3" class="avatar" height="20" src="https://avatars3.githubusercontent.com/u/4420471?s=40&v=4" width="20" /> </a>
<a class="avatar-link tooltipped tooltipped-s" aria-label="fchollet" href="/tensorflow/tensorflow/commits/r1.6/tensorflow/python/ops/rnn_cell_impl.py?author=fchollet"><img alt="@fchollet" class="avatar" height="20" src="https://avatars2.githubusercontent.com/u/710255?s=40&v=4" width="20" /> </a>
<a class="avatar-link tooltipped tooltipped-s" aria-label="drpngx" href="/tensorflow/tensorflow/commits/r1.6/tensorflow/python/ops/rnn_cell_impl.py?author=drpngx"><img alt="@drpngx" class="avatar" height="20" src="https://avatars3.githubusercontent.com/u/20959853?s=40&v=4" width="20" /> </a>
<a class="avatar-link tooltipped tooltipped-s" aria-label="caisq" href="/tensorflow/tensorflow/commits/r1.6/tensorflow/python/ops/rnn_cell_impl.py?author=caisq"><img alt="@caisq" class="avatar" height="20" src="https://avatars0.githubusercontent.com/u/16824702?s=40&v=4" width="20" /> </a>
<a class="avatar-link tooltipped tooltipped-s" aria-label="alextp" href="/tensorflow/tensorflow/commits/r1.6/tensorflow/python/ops/rnn_cell_impl.py?author=alextp"><img alt="@alextp" class="avatar" height="20" src="https://avatars2.githubusercontent.com/u/5061?s=40&v=4" width="20" /> </a>
<a class="avatar-link tooltipped tooltipped-s" aria-label="annarev" href="/tensorflow/tensorflow/commits/r1.6/tensorflow/python/ops/rnn_cell_impl.py?author=annarev"><img alt="@annarev" class="avatar" height="20" src="https://avatars0.githubusercontent.com/u/22060313?s=40&v=4" width="20" /> </a>
<a class="avatar-link tooltipped tooltipped-s" aria-label="kosklain" href="/tensorflow/tensorflow/commits/r1.6/tensorflow/python/ops/rnn_cell_impl.py?author=kosklain"><img alt="@kosklain" class="avatar" height="20" src="https://avatars2.githubusercontent.com/u/1104579?s=40&v=4" width="20" /> </a>
</div>
<div id="blob_contributors_box" style="display:none">
<h2 class="facebox-header" data-facebox-id="facebox-header">Users who have contributed to this file</h2>
<ul class="facebox-user-list" data-facebox-id="facebox-description">
<li class="facebox-user-list-item">
<img alt="@tensorflower-gardener" height="24" src="https://avatars0.githubusercontent.com/u/17151892?s=48&v=4" width="24" />
<a href="/tensorflower-gardener">tensorflower-gardener</a>
</li>
<li class="facebox-user-list-item">
<img alt="@ebrevdo" height="24" src="https://avatars3.githubusercontent.com/u/1794715?s=48&v=4" width="24" />
<a href="/ebrevdo">ebrevdo</a>
</li>
<li class="facebox-user-list-item">
<img alt="@jhseu" height="24" src="https://avatars0.githubusercontent.com/u/170179?s=48&v=4" width="24" />
<a href="/jhseu">jhseu</a>
</li>
<li class="facebox-user-list-item">
<img alt="@protoget" height="24" src="https://avatars3.githubusercontent.com/u/5117188?s=48&v=4" width="24" />
<a href="/protoget">protoget</a>
</li>
<li class="facebox-user-list-item">
<img alt="@xiejw" height="24" src="https://avatars1.githubusercontent.com/u/1184671?s=48&v=4" width="24" />
<a href="/xiejw">xiejw</a>
</li>
<li class="facebox-user-list-item">
<img alt="@martinwicke" height="24" src="https://avatars0.githubusercontent.com/u/577277?s=48&v=4" width="24" />
<a href="/martinwicke">martinwicke</a>
</li>
<li class="facebox-user-list-item">
<img alt="@michaelkhan3" height="24" src="https://avatars2.githubusercontent.com/u/4420471?s=48&v=4" width="24" />
<a href="/michaelkhan3">michaelkhan3</a>
</li>
<li class="facebox-user-list-item">
<img alt="@fchollet" height="24" src="https://avatars3.githubusercontent.com/u/710255?s=48&v=4" width="24" />
<a href="/fchollet">fchollet</a>
</li>
<li class="facebox-user-list-item">
<img alt="@drpngx" height="24" src="https://avatars2.githubusercontent.com/u/20959853?s=48&v=4" width="24" />
<a href="/drpngx">drpngx</a>
</li>
<li class="facebox-user-list-item">
<img alt="@caisq" height="24" src="https://avatars1.githubusercontent.com/u/16824702?s=48&v=4" width="24" />
<a href="/caisq">caisq</a>
</li>
<li class="facebox-user-list-item">
<img alt="@alextp" height="24" src="https://avatars3.githubusercontent.com/u/5061?s=48&v=4" width="24" />
<a href="/alextp">alextp</a>
</li>
<li class="facebox-user-list-item">
<img alt="@annarev" height="24" src="https://avatars1.githubusercontent.com/u/22060313?s=48&v=4" width="24" />
<a href="/annarev">annarev</a>
</li>
<li class="facebox-user-list-item">
<img alt="@kosklain" height="24" src="https://avatars3.githubusercontent.com/u/1104579?s=48&v=4" width="24" />
<a href="/kosklain">kosklain</a>
</li>
</ul>
</div>
</div>
<div class="file">
<div class="file-header">
<div class="file-actions">
<div class="BtnGroup">
<a id="raw-url" class="btn btn-sm BtnGroup-item" href="/tensorflow/tensorflow/raw/r1.6/tensorflow/python/ops/rnn_cell_impl.py">Raw</a>
<a class="btn btn-sm js-update-url-with-hash BtnGroup-item" data-hotkey="b" href="/tensorflow/tensorflow/blame/r1.6/tensorflow/python/ops/rnn_cell_impl.py">Blame</a>
<a rel="nofollow" class="btn btn-sm BtnGroup-item" href="/tensorflow/tensorflow/commits/r1.6/tensorflow/python/ops/rnn_cell_impl.py">History</a>
</div>
<button type="button" class="btn-octicon disabled tooltipped tooltipped-nw"
aria-label="You must be signed in to make or propose changes">
<svg class="octicon octicon-pencil" viewBox="0 0 14 16" version="1.1" width="14" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M0 12v3h3l8-8-3-3-8 8zm3 2H1v-2h1v1h1v1zm10.3-9.3L12 6 9 3l1.3-1.3a.996.996 0 0 1 1.41 0l1.59 1.59c.39.39.39 1.02 0 1.41z"/></svg>
</button>
<button type="button" class="btn-octicon btn-octicon-danger disabled tooltipped tooltipped-nw"
aria-label="You must be signed in to make or propose changes">
<svg class="octicon octicon-trashcan" viewBox="0 0 12 16" version="1.1" width="12" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M11 2H9c0-.55-.45-1-1-1H5c-.55 0-1 .45-1 1H2c-.55 0-1 .45-1 1v1c0 .55.45 1 1 1v9c0 .55.45 1 1 1h7c.55 0 1-.45 1-1V5c.55 0 1-.45 1-1V3c0-.55-.45-1-1-1zm-1 12H3V5h1v8h1V5h1v8h1V5h1v8h1V5h1v9zm1-10H2V3h9v1z"/></svg>
</button>
</div>
<div class="file-info">
1286 lines (1078 sloc)
<span class="file-info-divider"></span>
49.7 KB
</div>
</div>
<div itemprop="text" class="blob-wrapper data type-python">
<table class="highlight tab-size js-file-line-container" data-tab-size="8">
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module implementing RNN Cells.

This module provides a number of basic commonly used RNN cells, such as LSTM
(Long Short Term Memory) or GRU (Gated Recurrent Unit), and a number of
operators that allow adding dropouts, projections, or embeddings for inputs.
Constructing multi-layer cells is supported by the class `MultiRNNCell`, or by
calling the `rnn` ops several times.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections
import hashlib
import numbers

from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.layers import base as base_layer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export


_BIAS_VARIABLE_NAME = "bias"
_WEIGHTS_VARIABLE_NAME = "kernel"


def _like_rnncell(cell):
  """Checks that a given object is an RNNCell by using duck typing."""
  conditions = [hasattr(cell, "output_size"), hasattr(cell, "state_size"),
                hasattr(cell, "zero_state"), callable(cell)]
  return all(conditions)
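
# Illustrative sketch (not part of the original file): any object exposing the
# four duck-typed members above passes the check, e.g. a hypothetical stub:
#
#   class _StubCell(object):
#     output_size = 4
#     state_size = 4
#     def zero_state(self, batch_size, dtype):
#       return array_ops.zeros([batch_size, self.state_size], dtype=dtype)
#     def __call__(self, inputs, state):
#       return inputs, state
#
#   assert _like_rnncell(_StubCell())  # all four conditions hold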


def _concat(prefix, suffix, static=False):
  """Concat that enables int, Tensor, or TensorShape values.

  This function takes a size specification, which can be an integer, a
  TensorShape, or a Tensor, and converts it into a concatenated Tensor
  (if static = False) or a list of integers (if static = True).

  Args:
    prefix: The prefix; usually the batch size (and/or time step size).
      (TensorShape, int, or Tensor.)
    suffix: TensorShape, int, or Tensor.
    static: If `True`, return a python list with possibly unknown dimensions.
      Otherwise return a `Tensor`.

  Returns:
    shape: the concatenation of prefix and suffix.

  Raises:
    ValueError: if `suffix` is not a scalar or vector (or TensorShape).
    ValueError: if `prefix` or `suffix` was `None` and a dynamic `Tensor` was
      requested.
  """
  if isinstance(prefix, ops.Tensor):
    p = prefix
    p_static = tensor_util.constant_value(prefix)
    if p.shape.ndims == 0:
      p = array_ops.expand_dims(p, 0)
    elif p.shape.ndims != 1:
      raise ValueError("prefix tensor must be either a scalar or vector, "
                       "but saw tensor: %s" % p)
  else:
    p = tensor_shape.as_shape(prefix)
    p_static = p.as_list() if p.ndims is not None else None
    p = (constant_op.constant(p.as_list(), dtype=dtypes.int32)
         if p.is_fully_defined() else None)
  if isinstance(suffix, ops.Tensor):
    s = suffix
    s_static = tensor_util.constant_value(suffix)
    if s.shape.ndims == 0:
      s = array_ops.expand_dims(s, 0)
    elif s.shape.ndims != 1:
      raise ValueError("suffix tensor must be either a scalar or vector, "
                       "but saw tensor: %s" % s)
  else:
    s = tensor_shape.as_shape(suffix)
    s_static = s.as_list() if s.ndims is not None else None
    s = (constant_op.constant(s.as_list(), dtype=dtypes.int32)
         if s.is_fully_defined() else None)

  if static:
    shape = tensor_shape.as_shape(p_static).concatenate(s_static)
    shape = shape.as_list() if shape.ndims is not None else None
  else:
    if p is None or s is None:
      raise ValueError("Provided a prefix or suffix of None: %s and %s"
                       % (prefix, suffix))
    shape = array_ops.concat((p, s), 0)
  return shape
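
# Illustrative sketch (not part of the original file): with plain ints and
# TensorShapes, `static=True` yields a Python list while `static=False`
# yields an int32 constant `Tensor`:
#
#   _concat(32, tensor_shape.TensorShape([4, 4]), static=True)
#   # -> [32, 4, 4]
#   _concat(32, 4)
#   # -> int32 Tensor holding [32, 4]
#   _concat(tensor_shape.TensorShape([None]), 4, static=True)
#   # -> [None, 4]  (unknown dimensions survive in the static list)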


def _zero_state_tensors(state_size, batch_size, dtype):
  """Create tensors of zeros based on state_size, batch_size, and dtype."""
  def get_state_shape(s):
    """Combine s with batch_size to get a proper tensor shape."""
    c = _concat(batch_size, s)
    size = array_ops.zeros(c, dtype=dtype)
    if context.in_graph_mode():
      c_static = _concat(batch_size, s, static=True)
      size.set_shape(c_static)
    return size
  return nest.map_structure(get_state_shape, state_size)
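
# Illustrative sketch (not part of the original file): `state_size` may be a
# nested structure; `nest.map_structure` mirrors it with zero tensors, e.g.
#
#   _zero_state_tensors((128, 64), batch_size=32, dtype=dtypes.float32)
#
# would return a pair of zero tensors of shapes [32, 128] and [32, 64].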


@tf_export("nn.rnn_cell.RNNCell")
class RNNCell(base_layer.Layer):
  """Abstract object representing an RNN cell.

  Every `RNNCell` must have the properties below and implement `call` with
  the signature `(output, next_state) = call(input, state)`. The optional
  third input argument, `scope`, is allowed for backwards compatibility
  purposes, but should be left off for new subclasses.

  This definition of cell differs from the definition used in the literature.
  In the literature, 'cell' refers to an object with a single scalar output.
  This definition refers to a horizontal array of such units.

  An RNN cell, in the most abstract setting, is anything that has
  a state and performs some operation that takes a matrix of inputs.
  This operation results in an output matrix with `self.output_size` columns.
  If `self.state_size` is an integer, this operation also results in a new
  state matrix with `self.state_size` columns. If `self.state_size` is a
  (possibly nested tuple of) TensorShape object(s), then it should return a
  matching structure of Tensors having shape `[batch_size].concatenate(s)`
  for each `s` in `self.state_size`.
  """

  def __call__(self, inputs, state, scope=None):
    """Run this RNN cell on inputs, starting from the given state.

    Args:
      inputs: `2-D` tensor with shape `[batch_size, input_size]`.
      state: if `self.state_size` is an integer, this should be a `2-D Tensor`
        with shape `[batch_size, self.state_size]`. Otherwise, if
        `self.state_size` is a tuple of integers, this should be a tuple
        with shapes `[batch_size, s] for s in self.state_size`.
      scope: VariableScope for the created subgraph; defaults to class name.

    Returns:
      A pair containing:

      - Output: A `2-D` tensor with shape `[batch_size, self.output_size]`.
      - New state: Either a single `2-D` tensor, or a tuple of tensors matching
        the arity and shapes of `state`.
    """
    if scope is not None:
      with vs.variable_scope(scope,
                             custom_getter=self._rnn_get_variable) as scope:
        return super(RNNCell, self).__call__(inputs, state, scope=scope)
    else:
      scope_attrname = "rnncell_scope"
      scope = getattr(self, scope_attrname, None)
      if scope is None:
        scope = vs.variable_scope(vs.get_variable_scope(),
                                  custom_getter=self._rnn_get_variable)
        setattr(self, scope_attrname, scope)
      with scope:
        return super(RNNCell, self).__call__(inputs, state)
<tr>
<td id="L191" class="blob-num js-line-number" data-line-number="191"></td>
<td id="LC191" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
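
  # Editorial note, not part of the original TensorFlow source: because the
  # else-branch above caches a single VariableScope per cell instance, calling
  # the same cell object on every time step reuses one set of weights. A
  # hypothetical usage sketch:
  #
  #   cell = BasicRNNCell(num_units=4)
  #   output1, state1 = cell(inputs, initial_state)  # creates the variables
  #   output2, state2 = cell(inputs, state1)         # reuses those variables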

  def _rnn_get_variable(self, getter, *args, **kwargs):
    variable = getter(*args, **kwargs)
    if context.in_graph_mode():
      trainable = (variable in tf_variables.trainable_variables() or
                   (isinstance(variable, tf_variables.PartitionedVariable) and
                    list(variable)[0] in tf_variables.trainable_variables()))
    else:
      trainable = variable._trainable  # pylint: disable=protected-access
    if trainable and variable not in self._trainable_weights:
      self._trainable_weights.append(variable)
    elif not trainable and variable not in self._non_trainable_weights:
      self._non_trainable_weights.append(variable)
    return variable

  @property
  def state_size(self):
    """size(s) of state(s) used by this cell.

    It can be represented by an Integer, a TensorShape or a tuple of Integers
    or TensorShapes.
    """
    raise NotImplementedError("Abstract method")

  @property
  def output_size(self):
    """Integer or TensorShape: size of outputs produced by this cell."""
    raise NotImplementedError("Abstract method")

  def build(self, _):
    # This tells the parent Layer object that it's OK to call
    # self.add_variable() inside the call() method.
    pass

  def zero_state(self, batch_size, dtype):
    """Return zero-filled state tensor(s).

    Args:
      batch_size: int, float, or unit Tensor representing the batch size.
      dtype: the data type to use for the state.

    Returns:
      If `state_size` is an int or TensorShape, then the return value is an
      `N-D` tensor of shape `[batch_size, state_size]` filled with zeros.

      If `state_size` is a nested list or tuple, then the return value is
      a nested list or tuple (of the same structure) of `2-D` tensors with
      the shapes `[batch_size, s]` for each s in `state_size`.
    """
    # Try to use the last cached zero_state. This is done to avoid recreating
    # zeros, especially when eager execution is enabled.
    state_size = self.state_size
    is_eager = context.in_eager_mode()
    if is_eager and hasattr(self, "_last_zero_state"):
      (last_state_size, last_batch_size, last_dtype,
       last_output) = getattr(self, "_last_zero_state")
      if (last_batch_size == batch_size and
          last_dtype == dtype and
          last_state_size == state_size):
        return last_output
    with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
      output = _zero_state_tensors(state_size, batch_size, dtype)
    if is_eager:
      self._last_zero_state = (state_size, batch_size, dtype, output)
    return output
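
# Editorial usage sketch, not part of the original TensorFlow source; the
# names below are assumptions made for illustration. `zero_state` returns
# zeros shaped like `state_size` and, in eager mode, caches the result:
#
#   cell = BasicRNNCell(num_units=8)
#   state = cell.zero_state(batch_size=32, dtype=dtypes.float32)
#   # state has shape [32, 8]; a second zero_state call with identical
#   # arguments in eager mode returns the cached tensor.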


class _LayerRNNCell(RNNCell):
  """Subclass of RNNCells that act like proper `tf.Layer` objects.

  For backwards compatibility purposes, most `RNNCell` instances allow their
  `call` methods to instantiate variables via `tf.get_variable`. The underlying
  variable scope thus keeps track of any variables and returns cached
  versions. This is atypical of `tf.layer` objects, which separate this
  part of layer building into a `build` method that is only called once.

  Here we provide a subclass for `RNNCell` objects that act exactly as
  `Layer` objects do. They must provide a `build` method, and their
  `call` methods do not access Variables via `tf.get_variable`.
  """

  def __call__(self, inputs, state, scope=None, *args, **kwargs):
    """Run this RNN cell on inputs, starting from the given state.

    Args:
      inputs: `2-D` tensor with shape `[batch_size, input_size]`.
      state: if `self.state_size` is an integer, this should be a `2-D Tensor`
        with shape `[batch_size, self.state_size]`. Otherwise, if
        `self.state_size` is a tuple of integers, this should be a tuple
        with shapes `[batch_size, s] for s in self.state_size`.
      scope: optional cell scope.
      *args: Additional positional arguments.
      **kwargs: Additional keyword arguments.

    Returns:
      A pair containing:

      - Output: A `2-D` tensor with shape `[batch_size, self.output_size]`.
      - New state: Either a single `2-D` tensor, or a tuple of tensors matching
        the arity and shapes of `state`.
    """
    # Bypass RNNCell's variable capturing semantics for LayerRNNCell.
    # Instead, it is up to subclasses to provide a proper build
    # method. See the class docstring for more details.
    return base_layer.Layer.__call__(self, inputs, state, scope=scope,
                                     *args, **kwargs)
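
# Editorial sketch, not part of the original TensorFlow source: the pattern a
# `_LayerRNNCell` subclass is expected to follow. `MyCell` and its attributes
# are hypothetical.
#
#   class MyCell(_LayerRNNCell):
#
#     def build(self, inputs_shape):
#       # Create every variable exactly once, via self.add_variable().
#       self._kernel = self.add_variable("kernel", shape=[...])
#       self.built = True
#
#     def call(self, inputs, state):
#       # Use self._kernel directly; no tf.get_variable() calls here.
#       ...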


@tf_export("nn.rnn_cell.BasicRNNCell")
class BasicRNNCell(_LayerRNNCell):
  """The most basic RNN cell.

  Args:
    num_units: int, The number of units in the RNN cell.
    activation: Nonlinearity to use. Default: `tanh`.
    reuse: (optional) Python boolean describing whether to reuse variables
      in an existing scope. If not `True`, and the existing scope already has
      the given variables, an error is raised.
    name: String, the name of the layer. Layers with the same name will
      share weights, but to avoid mistakes we require reuse=True in such
      cases.
  """

  def __init__(self, num_units, activation=None, reuse=None, name=None):
    super(BasicRNNCell, self).__init__(_reuse=reuse, name=name)

    # Inputs must be 2-dimensional.
    self.input_spec = base_layer.InputSpec(ndim=2)

    self._num_units = num_units
    self._activation = activation or math_ops.tanh

  @property
  def state_size(self):
    return self._num_units

  @property
  def output_size(self):
    return self._num_units

  def build(self, inputs_shape):
    if inputs_shape[1].value is None:
      raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s"
                       % inputs_shape)

    input_depth = inputs_shape[1].value
    self._kernel = self.add_variable(
        _WEIGHTS_VARIABLE_NAME,
        shape=[input_depth + self._num_units, self._num_units])
    self._bias = self.add_variable(
        _BIAS_VARIABLE_NAME,
        shape=[self._num_units],
        initializer=init_ops.zeros_initializer(dtype=self.dtype))

    self.built = True

  def call(self, inputs, state):
    """Most basic RNN: output = new_state = act(W * input + U * state + B)."""

    gate_inputs = math_ops.matmul(
        array_ops.concat([inputs, state], 1), self._kernel)
    gate_inputs = nn_ops.bias_add(gate_inputs, self._bias)
    output = self._activation(gate_inputs)
    return output, output
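
# Editorial reference, not part of the original TensorFlow source: a minimal
# NumPy sketch of the recurrence BasicRNNCell.call computes, i.e.
# output = new_state = tanh([inputs, state] @ kernel + bias). The function
# name and the use of NumPy are assumptions made for illustration only.
def _np_basic_rnn_step(inputs, state, kernel, bias):
  """One BasicRNNCell step on NumPy arrays (illustrative only).

  inputs: [batch, input_depth], state: [batch, num_units],
  kernel: [input_depth + num_units, num_units], bias: [num_units].
  """
  import numpy as np  # local import keeps this sketch self-contained
  gate_inputs = np.concatenate([inputs, state], axis=1).dot(kernel) + bias
  output = np.tanh(gate_inputs)
  return output, output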


@tf_export("nn.rnn_cell.GRUCell")
class GRUCell(_LayerRNNCell):
  """Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078).

  Args:
    num_units: int, The number of units in the GRU cell.
    activation: Nonlinearity to use. Default: `tanh`.
    reuse: (optional) Python boolean describing whether to reuse variables
      in an existing scope. If not `True`, and the existing scope already has
      the given variables, an error is raised.
    kernel_initializer: (optional) The initializer to use for the weight and
      projection matrices.
    bias_initializer: (optional) The initializer to use for the bias.
    name: String, the name of the layer. Layers with the same name will
      share weights, but to avoid mistakes we require reuse=True in such
      cases.
  """
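
  # Editorial note, not part of the original TensorFlow source: the gate and
  # candidate parameters created in build() below parameterize the standard
  # GRU update from the paper cited above (evaluated in the cell's call
  # method), roughly:
  #
  #   r, u = split(sigmoid([inputs, state] @ gate_kernel + gate_bias))
  #   c = activation([inputs, r * state] @ candidate_kernel + candidate_bias)
  #   new_state = u * state + (1 - u) * c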

  def __init__(self,
               num_units,
               activation=None,
               reuse=None,
               kernel_initializer=None,
               bias_initializer=None,
               name=None):
    super(GRUCell, self).__init__(_reuse=reuse, name=name)

    # Inputs must be 2-dimensional.
    self.input_spec = base_layer.InputSpec(ndim=2)

    self._num_units = num_units
    self._activation = activation or math_ops.tanh
    self._kernel_initializer = kernel_initializer
    self._bias_initializer = bias_initializer

  @property
  def state_size(self):
    return self._num_units

  @property
  def output_size(self):
    return self._num_units

  def build(self, inputs_shape):
    if inputs_shape[1].value is None:
      raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s"
                       % inputs_shape)

    input_depth = inputs_shape[1].value
    self._gate_kernel = self.add_variable(
        "gates/%s" % _WEIGHTS_VARIABLE_NAME,
        shape=[input_depth + self._num_units, 2 * self._num_units],
        initializer=self._kernel_initializer)
    self._gate_bias = self.add_variable(
        "gates/%s" % _BIAS_VARIABLE_NAME,
        shape=[2 * self._num_units],
        initializer=(
            self._bias_initializer
            if self._bias_initializer is not None
            else init_ops.constant_initializer(1.0, dtype=self.dtype)))
    self._candidate_kernel = self.add_variable(
        "candidate/%s" % _WEIGHTS_VARIABLE_NAME,
        shape=[input_depth + self._num_units, self._num_units],
        initializer=self._kernel_initializer)
    self._candidate_bias = self.add_variable(
        "candidate/%s" % _BIAS_VARIABLE_NAME,
</tr>
<tr>
<td id="L423" class="blob-num js-line-number" data-line-number="423"></td>
<td id="LC423" class="blob-code blob-code-inner js-file-line"> <span class="pl-v">shape</span><span class="pl-k">=</span>[<span class="pl-c1">self</span>._num_units],</td>
</tr>
<tr>
<td id="L424" class="blob-num js-line-number" data-line-number="424"></td>
<td id="LC424" class="blob-code blob-code-inner js-file-line"> <span class="pl-v">initializer</span><span class="pl-k">=</span>(</td>
</tr>
<tr>
<td id="L425" class="blob-num js-line-number" data-line-number="425"></td>
<td id="LC425" class="blob-code blob-code-inner js-file-line"> <span class="pl-c1">self</span>._bias_initializer</td>
</tr>
<tr>
<td id="L426" class="blob-num js-line-number" data-line-number="426"></td>
<td id="LC426" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">if</span> <span class="pl-c1">self</span>._bias_initializer <span class="pl-k">is</span> <span class="pl-k">not</span> <span class="pl-c1">None</span></td>
</tr>
<tr>
<td id="L427" class="blob-num js-line-number" data-line-number="427"></td>
<td id="LC427" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">else</span> init_ops.zeros_initializer(<span class="pl-v">dtype</span><span class="pl-k">=</span><span class="pl-c1">self</span>.dtype)))</td>
</tr>
<tr>
<td id="L428" class="blob-num js-line-number" data-line-number="428"></td>
<td id="LC428" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L429" class="blob-num js-line-number" data-line-number="429"></td>
<td id="LC429" class="blob-code blob-code-inner js-file-line"> <span class="pl-c1">self</span>.built <span class="pl-k">=</span> <span class="pl-c1">True</span></td>
</tr>
<tr>
<td id="L430" class="blob-num js-line-number" data-line-number="430"></td>
<td id="LC430" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
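  # Editorial note (not part of the original source): with input depth D and
  # num_units N, `build` above creates a gate kernel [D + N, 2N], a gate bias
  # [2N], a candidate kernel [D + N, N], and a candidate bias [N], i.e.
  # 3N(D + N) + 3N trainable parameters in total.
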
  def call(self, inputs, state):
    """Gated recurrent unit (GRU) with num_units cells."""

    gate_inputs = math_ops.matmul(
        array_ops.concat([inputs, state], 1), self._gate_kernel)
    gate_inputs = nn_ops.bias_add(gate_inputs, self._gate_bias)

    value = math_ops.sigmoid(gate_inputs)
    r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)

    r_state = r * state

    candidate = math_ops.matmul(
        array_ops.concat([inputs, r_state], 1), self._candidate_kernel)
    candidate = nn_ops.bias_add(candidate, self._candidate_bias)

    c = self._activation(candidate)
    new_h = u * state + (1 - u) * c
    return new_h, new_h


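# Editorial illustration (not part of the original source): the GRU update
# computed by `call` above, as a minimal plain-NumPy sketch. The function and
# argument names here are assumptions made only for this reference.
import numpy as np


def _gru_step_reference(x, h, w_gates, b_gates, w_cand, b_cand):
  """One GRU step: gates from [x, h], candidate from [x, r * h]."""
  sigmoid = lambda z: 1.0 / (1.0 + np.exp(-z))
  # r (reset) and u (update) gates come from a single fused matmul.
  r, u = np.split(sigmoid(np.concatenate([x, h], 1) @ w_gates + b_gates), 2, 1)
  # The candidate state uses the reset-scaled previous state.
  c = np.tanh(np.concatenate([x, r * h], 1) @ w_cand + b_cand)
  return u * h + (1.0 - u) * c  # new hidden state, also the cell's output

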
_LSTMStateTuple = collections.namedtuple("LSTMStateTuple", ("c", "h"))


@tf_export("nn.rnn_cell.LSTMStateTuple")
class LSTMStateTuple(_LSTMStateTuple):
  """Tuple used by LSTM Cells for `state_size`, `zero_state`, and output state.

  Stores two elements: `(c, h)`, in that order, where `c` is the cell state
  and `h` is the output.

  Only used when `state_is_tuple=True`.
  """
  __slots__ = ()

  @property
  def dtype(self):
    (c, h) = self
    if c.dtype != h.dtype:
      raise TypeError("Inconsistent internal state: %s vs %s" %
                      (str(c.dtype), str(h.dtype)))
    return c.dtype


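# Editorial note (not part of the original source): LSTMStateTuple behaves as
# an ordinary namedtuple, so for example:
#   state = LSTMStateTuple(c=c_tensor, h=h_tensor)
#   c, h = state          # unpacks in ("c", "h") order
#   state.dtype           # c_tensor.dtype, or TypeError on dtype mismatch

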
@tf_export("nn.rnn_cell.BasicLSTMCell")
class BasicLSTMCell(_LayerRNNCell):
  """Basic LSTM recurrent network cell.

  The implementation is based on: http://arxiv.org/abs/1409.2329.

  We add forget_bias (default: 1) to the biases of the forget gate in order to
  reduce the scale of forgetting at the beginning of training.

  It does not allow cell clipping or a projection layer, and it does not use
  peephole connections: it is the basic baseline.

  For advanced models, please use the full @{tf.nn.rnn_cell.LSTMCell}
  that follows.
  """

  def __init__(self, num_units, forget_bias=1.0,
               state_is_tuple=True, activation=None, reuse=None, name=None):
    """Initialize the basic LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell.
      forget_bias: float, The bias added to forget gates (see above).
        Must be set to `0.0` manually when restoring from CudnnLSTM-trained
        checkpoints.
      state_is_tuple: If True, accepted and returned states are 2-tuples of
        the `c_state` and `m_state`. If False, they are concatenated
        along the column axis. The latter behavior will soon be deprecated.
      activation: Activation function of the inner states. Default: `tanh`.
      reuse: (optional) Python boolean describing whether to reuse variables
        in an existing scope. If not `True`, and the existing scope already
        has the given variables, an error is raised.
      name: String, the name of the layer. Layers with the same name will
        share weights, but to avoid mistakes we require reuse=True in such
        cases.

    When restoring from CudnnLSTM-trained checkpoints,
    `CudnnCompatibleLSTMCell` must be used instead.
    """
    super(BasicLSTMCell, self).__init__(_reuse=reuse, name=name)
    if not state_is_tuple:
      logging.warn("%s: Using a concatenated state is slower and will soon be "
                   "deprecated. Use state_is_tuple=True.", self)

    # Inputs must be 2-dimensional.
    self.input_spec = base_layer.InputSpec(ndim=2)

    self._num_units = num_units
    self._forget_bias = forget_bias
    self._state_is_tuple = state_is_tuple
    self._activation = activation or math_ops.tanh

  @property
  def state_size(self):
    return (LSTMStateTuple(self._num_units, self._num_units)
            if self._state_is_tuple else 2 * self._num_units)

  @property
  def output_size(self):
    return self._num_units

  def build(self, inputs_shape):
    if inputs_shape[1].value is None:
      raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s"
                       % inputs_shape)

    input_depth = inputs_shape[1].value
    h_depth = self._num_units
    self._kernel = self.add_variable(
        _WEIGHTS_VARIABLE_NAME,
        shape=[input_depth + h_depth, 4 * self._num_units])
    self._bias = self.add_variable(
        _BIAS_VARIABLE_NAME,
        shape=[4 * self._num_units],
        initializer=init_ops.zeros_initializer(dtype=self.dtype))

    self.built = True

  def call(self, inputs, state):
    """Long short-term memory cell (LSTM).

    Args:
      inputs: `2-D` tensor with shape `[batch_size, input_size]`.
      state: An `LSTMStateTuple` of state tensors, each shaped
        `[batch_size, self.state_size]`, if `state_is_tuple` has been set to
        `True`. Otherwise, a `Tensor` shaped
        `[batch_size, 2 * self.state_size]`.

    Returns:
      A pair containing the new hidden state, and the new state (either an
      `LSTMStateTuple` or a concatenated state, depending on
      `state_is_tuple`).
    """
    sigmoid = math_ops.sigmoid
    one = constant_op.constant(1, dtype=dtypes.int32)
    # Parameters of gates are concatenated into one multiply for efficiency.
    if self._state_is_tuple:
      c, h = state
    else:
      c, h = array_ops.split(value=state, num_or_size_splits=2, axis=one)

    gate_inputs = math_ops.matmul(
        array_ops.concat([inputs, h], 1), self._kernel)
    gate_inputs = nn_ops.bias_add(gate_inputs, self._bias)

    # i = input_gate, j = new_input, f = forget_gate, o = output_gate
    i, j, f, o = array_ops.split(
        value=gate_inputs, num_or_size_splits=4, axis=one)

    forget_bias_tensor = constant_op.constant(self._forget_bias, dtype=f.dtype)
    # Using `add` and `multiply` instead of `+` and `*` gives a performance
    # improvement, so we use them here at the cost of readability.
    add = math_ops.add
    multiply = math_ops.multiply
    new_c = add(multiply(c, sigmoid(add(f, forget_bias_tensor))),
                multiply(sigmoid(i), self._activation(j)))
    new_h = multiply(self._activation(new_c), sigmoid(o))

    if self._state_is_tuple:
      new_state = LSTMStateTuple(new_c, new_h)
    else:
      new_state = array_ops.concat([new_c, new_h], 1)
    return new_h, new_state


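# Editorial illustration (not part of the original source): the fused LSTM
# step from `BasicLSTMCell.call` above as a minimal plain-NumPy sketch, with
# the same [i, j, f, o] gate layout. Names are assumptions for this reference.
import numpy as np  # re-import keeps this sketch self-contained


def _lstm_step_reference(x, c, h, kernel, bias, forget_bias=1.0):
  """One LSTM step using a single matmul for all four gates."""
  sigmoid = lambda z: 1.0 / (1.0 + np.exp(-z))
  i, j, f, o = np.split(np.concatenate([x, h], 1) @ kernel + bias, 4, 1)
  new_c = c * sigmoid(f + forget_bias) + sigmoid(i) * np.tanh(j)
  new_h = np.tanh(new_c) * sigmoid(o)
  return new_h, (new_c, new_h)  # output, then (cell state, hidden state)

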
@tf_export("nn.rnn_cell.LSTMCell")
class LSTMCell(_LayerRNNCell):
  """Long short-term memory unit (LSTM) recurrent network cell.

  The default non-peephole implementation is based on:

    http://www.bioinf.jku.at/publications/older/2604.pdf

  S. Hochreiter and J. Schmidhuber.
  "Long Short-Term Memory". Neural Computation, 9(8):1735-1780, 1997.

  The peephole implementation is based on:

    https://research.google.com/pubs/archive/43905.pdf

  Hasim Sak, Andrew Senior, and Francoise Beaufays.
  "Long short-term memory recurrent neural network architectures for
  large scale acoustic modeling." INTERSPEECH, 2014.

  The class uses optional peephole connections, optional cell clipping, and
  an optional projection layer.
  """

  def __init__(self, num_units,
               use_peepholes=False, cell_clip=None,
               initializer=None, num_proj=None, proj_clip=None,
               num_unit_shards=None, num_proj_shards=None,
               forget_bias=1.0, state_is_tuple=True,
               activation=None, reuse=None, name=None):
    """Initialize the parameters for an LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell.
      use_peepholes: bool, set True to enable diagonal/peephole connections.
      cell_clip: (optional) A float value; if provided, the cell state is
        clipped by this value prior to the cell output activation.
      initializer: (optional) The initializer to use for the weight and
        projection matrices.
      num_proj: (optional) int, The output dimensionality for the projection
        matrices. If None, no projection is performed.
      proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is
        provided, then the projected values are clipped elementwise to within
        `[-proj_clip, proj_clip]`.
      num_unit_shards: Deprecated, will be removed by Jan. 2017.
        Use a variable_scope partitioner instead.
      num_proj_shards: Deprecated, will be removed by Jan. 2017.
        Use a variable_scope partitioner instead.
      forget_bias: Biases of the forget gate are initialized by default to 1
        in order to reduce the scale of forgetting at the beginning of
        training. Must be set manually to `0.0` when restoring from
        CudnnLSTM-trained checkpoints.
      state_is_tuple: If True, accepted and returned states are 2-tuples of
        the `c_state` and `m_state`. If False, they are concatenated
        along the column axis. This latter behavior will soon be deprecated.
      activation: Activation function of the inner states. Default: `tanh`.
      reuse: (optional) Python boolean describing whether to reuse variables
        in an existing scope. If not `True`, and the existing scope already
        has the given variables, an error is raised.
      name: String, the name of the layer. Layers with the same name will
        share weights, but to avoid mistakes we require reuse=True in such
        cases.
<td id="LC660" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> cases.</span></td>
</tr>
<tr>
<td id="L661" class="blob-num js-line-number" data-line-number="661"></td>
<td id="LC661" class="blob-code blob-code-inner js-file-line"><span class="pl-s"></span></td>
</tr>
<tr>
<td id="L662" class="blob-num js-line-number" data-line-number="662"></td>
<td id="LC662" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> When restoring from CudnnLSTM-trained checkpoints, use</span></td>
</tr>
<tr>
<td id="L663" class="blob-num js-line-number" data-line-number="663"></td>
<td id="LC663" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> `CudnnCompatibleLSTMCell` instead.</span></td>
</tr>
<tr>
<td id="L664" class="blob-num js-line-number" data-line-number="664"></td>
<td id="LC664" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> <span class="pl-pds">"""</span></span></td>
</tr>
<tr>
<td id="L665" class="blob-num js-line-number" data-line-number="665"></td>
<td id="LC665" class="blob-code blob-code-inner js-file-line"> <span class="pl-c1">super</span>(LSTMCell, <span class="pl-c1">self</span>).<span class="pl-c1">__init__</span>(<span class="pl-v">_reuse</span><span class="pl-k">=</span>reuse, <span class="pl-v">name</span><span class="pl-k">=</span>name)</td>
</tr>
<tr>
<td id="L666" class="blob-num js-line-number" data-line-number="666"></td>
<td id="LC666" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">if</span> <span class="pl-k">not</span> state_is_tuple:</td>
</tr>
<tr>
<td id="L667" class="blob-num js-line-number" data-line-number="667"></td>
<td id="LC667" class="blob-code blob-code-inner js-file-line"> logging.warn(<span class="pl-s"><span class="pl-pds">"</span><span class="pl-c1">%s</span>: Using a concatenated state is slower and will soon be <span class="pl-pds">"</span></span></td>
</tr>
<tr>
<td id="L668" class="blob-num js-line-number" data-line-number="668"></td>
<td id="LC668" class="blob-code blob-code-inner js-file-line"> <span class="pl-s"><span class="pl-pds">"</span>deprecated. Use state_is_tuple=True.<span class="pl-pds">"</span></span>, <span class="pl-c1">self</span>)</td>
</tr>
<tr>
<td id="L669" class="blob-num js-line-number" data-line-number="669"></td>
<td id="LC669" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">if</span> num_unit_shards <span class="pl-k">is</span> <span class="pl-k">not</span> <span class="pl-c1">None</span> <span class="pl-k">or</span> num_proj_shards <span class="pl-k">is</span> <span class="pl-k">not</span> <span class="pl-c1">None</span>:</td>
</tr>
<tr>
<td id="L670" class="blob-num js-line-number" data-line-number="670"></td>
<td id="LC670" class="blob-code blob-code-inner js-file-line"> logging.warn(</td>
</tr>
<tr>
<td id="L671" class="blob-num js-line-number" data-line-number="671"></td>
<td id="LC671" class="blob-code blob-code-inner js-file-line"> <span class="pl-s"><span class="pl-pds">"</span><span class="pl-c1">%s</span>: The num_unit_shards and proj_unit_shards parameters are <span class="pl-pds">"</span></span></td>
</tr>
<tr>
<td id="L672" class="blob-num js-line-number" data-line-number="672"></td>
<td id="LC672" class="blob-code blob-code-inner js-file-line"> <span class="pl-s"><span class="pl-pds">"</span>deprecated and will be removed in Jan 2017. <span class="pl-pds">"</span></span></td>
</tr>
<tr>
<td id="L673" class="blob-num js-line-number" data-line-number="673"></td>
<td id="LC673" class="blob-code blob-code-inner js-file-line"> <span class="pl-s"><span class="pl-pds">"</span>Use a variable scope with a partitioner instead.<span class="pl-pds">"</span></span>, <span class="pl-c1">self</span>)</td>
</tr>
<tr>
<td id="L674" class="blob-num js-line-number" data-line-number="674"></td>
<td id="LC674" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L675" class="blob-num js-line-number" data-line-number="675"></td>
<td id="LC675" class="blob-code blob-code-inner js-file-line"> <span class="pl-c"><span class="pl-c">#</span> Inputs must be 2-dimensional.</span></td>
</tr>
<tr>
<td id="L676" class="blob-num js-line-number" data-line-number="676"></td>
<td id="LC676" class="blob-code blob-code-inner js-file-line"> <span class="pl-c1">self</span>.input_spec <span class="pl-k">=</span> base_layer.InputSpec(<span class="pl-v">ndim</span><span class="pl-k">=</span><span class="pl-c1">2</span>)</td>
</tr>
<tr>
<td id="L677" class="blob-num js-line-number" data-line-number="677"></td>
<td id="LC677" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L678" class="blob-num js-line-number" data-line-number="678"></td>
<td id="LC678" class="blob-code blob-code-inner js-file-line"> <span class="pl-c1">self</span>._num_units <span class="pl-k">=</span> num_units</td>
</tr>
<tr>
<td id="L679" class="blob-num js-line-number" data-line-number="679"></td>
<td id="LC679" class="blob-code blob-code-inner js-file-line"> <span class="pl-c1">self</span>._use_peepholes <span class="pl-k">=</span> use_peepholes</td>
</tr>
<tr>
<td id="L680" class="blob-num js-line-number" data-line-number="680"></td>
<td id="LC680" class="blob-code blob-code-inner js-file-line"> <span class="pl-c1">self</span>._cell_clip <span class="pl-k">=</span> cell_clip</td>
</tr>
<tr>
<td id="L681" class="blob-num js-line-number" data-line-number="681"></td>
<td id="LC681" class="blob-code blob-code-inner js-file-line"> <span class="pl-c1">self</span>._initializer <span class="pl-k">=</span> initializer</td>
</tr>
<tr>
<td id="L682" class="blob-num js-line-number" data-line-number="682"></td>
<td id="LC682" class="blob-code blob-code-inner js-file-line"> <span class="pl-c1">self</span>._num_proj <span class="pl-k">=</span> num_proj</td>
</tr>
<tr>
<td id="L683" class="blob-num js-line-number" data-line-number="683"></td>
<td id="LC683" class="blob-code blob-code-inner js-file-line"> <span class="pl-c1">self</span>._proj_clip <span class="pl-k">=</span> proj_clip</td>
</tr>
<tr>
<td id="L684" class="blob-num js-line-number" data-line-number="684"></td>
<td id="LC684" class="blob-code blob-code-inner js-file-line"> <span class="pl-c1">self</span>._num_unit_shards <span class="pl-k">=</span> num_unit_shards</td>
</tr>
<tr>
<td id="L685" class="blob-num js-line-number" data-line-number="685"></td>
<td id="LC685" class="blob-code blob-code-inner js-file-line"> <span class="pl-c1">self</span>._num_proj_shards <span class="pl-k">=</span> num_proj_shards</td>
</tr>
<tr>
<td id="L686" class="blob-num js-line-number" data-line-number="686"></td>
<td id="LC686" class="blob-code blob-code-inner js-file-line"> <span class="pl-c1">self</span>._forget_bias <span class="pl-k">=</span> forget_bias</td>
</tr>
<tr>
<td id="L687" class="blob-num js-line-number" data-line-number="687"></td>
<td id="LC687" class="blob-code blob-code-inner js-file-line"> <span class="pl-c1">self</span>._state_is_tuple <span class="pl-k">=</span> state_is_tuple</td>
</tr>
<tr>
<td id="L688" class="blob-num js-line-number" data-line-number="688"></td>
<td id="LC688" class="blob-code blob-code-inner js-file-line"> <span class="pl-c1">self</span>._activation <span class="pl-k">=</span> activation <span class="pl-k">or</span> math_ops.tanh</td>
</tr>
<tr>
<td id="L689" class="blob-num js-line-number" data-line-number="689"></td>
<td id="LC689" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L690" class="blob-num js-line-number" data-line-number="690"></td>
<td id="LC690" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">if</span> num_proj:</td>
</tr>
<tr>
<td id="L691" class="blob-num js-line-number" data-line-number="691"></td>
<td id="LC691" class="blob-code blob-code-inner js-file-line"> <span class="pl-c1">self</span>._state_size <span class="pl-k">=</span> (</td>
</tr>
<tr>
<td id="L692" class="blob-num js-line-number" data-line-number="692"></td>
<td id="LC692" class="blob-code blob-code-inner js-file-line"> LSTMStateTuple(num_units, num_proj)</td>
</tr>
<tr>
<td id="L693" class="blob-num js-line-number" data-line-number="693"></td>
<td id="LC693" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">if</span> state_is_tuple <span class="pl-k">else</span> num_units <span class="pl-k">+</span> num_proj)</td>
</tr>
<tr>
<td id="L694" class="blob-num js-line-number" data-line-number="694"></td>
<td id="LC694" class="blob-code blob-code-inner js-file-line"> <span class="pl-c1">self</span>._output_size <span class="pl-k">=</span> num_proj</td>
</tr>
<tr>
<td id="L695" class="blob-num js-line-number" data-line-number="695"></td>
<td id="LC695" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">else</span>:</td>
</tr>
<tr>
<td id="L696" class="blob-num js-line-number" data-line-number="696"></td>
<td id="LC696" class="blob-code blob-code-inner js-file-line"> <span class="pl-c1">self</span>._state_size <span class="pl-k">=</span> (</td>
</tr>
<tr>
<td id="L697" class="blob-num js-line-number" data-line-number="697"></td>
<td id="LC697" class="blob-code blob-code-inner js-file-line"> LSTMStateTuple(num_units, num_units)</td>
</tr>
<tr>
<td id="L698" class="blob-num js-line-number" data-line-number="698"></td>
<td id="LC698" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">if</span> state_is_tuple <span class="pl-k">else</span> <span class="pl-c1">2</span> <span class="pl-k">*</span> num_units)</td>
</tr>
<tr>
<td id="L699" class="blob-num js-line-number" data-line-number="699"></td>
<td id="LC699" class="blob-code blob-code-inner js-file-line"> <span class="pl-c1">self</span>._output_size <span class="pl-k">=</span> num_units</td>
</tr>
<tr>
<td id="L700" class="blob-num js-line-number" data-line-number="700"></td>
<td id="LC700" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
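  # Editor's note (not in the original source): a worked example of the sizing
  # logic above. With num_units=128, num_proj=64 and state_is_tuple=True,
  # state_size is LSTMStateTuple(128, 64) and output_size is 64; with
  # state_is_tuple=False the state is one concatenated tensor of width
  # 128 + 64 = 192.
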
  @property
  def state_size(self):
    return self._state_size

  @property
  def output_size(self):
    return self._output_size

  def build(self, inputs_shape):
    if inputs_shape[1].value is None:
      raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s"
                       % inputs_shape)

    input_depth = inputs_shape[1].value
    h_depth = self._num_units if self._num_proj is None else self._num_proj
    maybe_partitioner = (
        partitioned_variables.fixed_size_partitioner(self._num_unit_shards)
        if self._num_unit_shards is not None
        else None)
    self._kernel = self.add_variable(
        _WEIGHTS_VARIABLE_NAME,
        shape=[input_depth + h_depth, 4 * self._num_units],
        initializer=self._initializer,
        partitioner=maybe_partitioner)
    self._bias = self.add_variable(
        _BIAS_VARIABLE_NAME,
        shape=[4 * self._num_units],
        initializer=init_ops.zeros_initializer(dtype=self.dtype))
    if self._use_peepholes:
      self._w_f_diag = self.add_variable("w_f_diag", shape=[self._num_units],
                                         initializer=self._initializer)
      self._w_i_diag = self.add_variable("w_i_diag", shape=[self._num_units],
                                         initializer=self._initializer)
      self._w_o_diag = self.add_variable("w_o_diag", shape=[self._num_units],
                                         initializer=self._initializer)

    if self._num_proj is not None:
      maybe_proj_partitioner = (
          partitioned_variables.fixed_size_partitioner(self._num_proj_shards)
          if self._num_proj_shards is not None
          else None)
      self._proj_kernel = self.add_variable(
          "projection/%s" % _WEIGHTS_VARIABLE_NAME,
          shape=[self._num_units, self._num_proj],
          initializer=self._initializer,
          partitioner=maybe_proj_partitioner)

    self.built = True
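  # Editor's note (not in the original source): the fused kernel built above
  # stacks the weights of all four gates. With input_depth=32, num_units=128
  # and no projection (so h_depth=128), its shape is
  # [32 + 128, 4 * 128] = [160, 512], and the bias has shape [512].
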
  def call(self, inputs, state):
    """Run one step of LSTM.

    Args:
      inputs: input Tensor, 2D, `[batch, input_size]`.
      state: if `state_is_tuple` is False, this must be a state Tensor,
        `2-D, [batch, state_size]`. If `state_is_tuple` is True, this must be a
        tuple of state Tensors, both `2-D`, with column sizes `c_state` and
        `m_state`.

    Returns:
      A tuple containing:

      - A `2-D, [batch, output_dim]`, Tensor representing the output of the
        LSTM after reading `inputs` when previous state was `state`.
        Here output_dim is:
           num_proj if num_proj was set,
           num_units otherwise.
      - Tensor(s) representing the new state of LSTM after reading `inputs` when
        the previous state was `state`. Same type and shape(s) as `state`.

    Raises:
      ValueError: If input size cannot be inferred from inputs via
        static shape inference.
    """
    num_proj = self._num_units if self._num_proj is None else self._num_proj
    sigmoid = math_ops.sigmoid

    if self._state_is_tuple:
      (c_prev, m_prev) = state
    else:
      c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units])
      m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj])

    input_size = inputs.get_shape().with_rank(2)[1]
    if input_size.value is None:
      raise ValueError("Could not infer input size from inputs.get_shape()[-1]")

    # i = input_gate, j = new_input, f = forget_gate, o = output_gate
    lstm_matrix = math_ops.matmul(
        array_ops.concat([inputs, m_prev], 1), self._kernel)
    lstm_matrix = nn_ops.bias_add(lstm_matrix, self._bias)

    i, j, f, o = array_ops.split(
        value=lstm_matrix, num_or_size_splits=4, axis=1)
    # Diagonal connections
    if self._use_peepholes:
      c = (sigmoid(f + self._forget_bias + self._w_f_diag * c_prev) * c_prev +
           sigmoid(i + self._w_i_diag * c_prev) * self._activation(j))
    else:
      c = (sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) *
           self._activation(j))

    if self._cell_clip is not None:
      # pylint: disable=invalid-unary-operand-type
      c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)
      # pylint: enable=invalid-unary-operand-type
    if self._use_peepholes:
      m = sigmoid(o + self._w_o_diag * c) * self._activation(c)
    else:
      m = sigmoid(o) * self._activation(c)

    if self._num_proj is not None:
      m = math_ops.matmul(m, self._proj_kernel)

      if self._proj_clip is not None:
        # pylint: disable=invalid-unary-operand-type
        m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip)
        # pylint: enable=invalid-unary-operand-type

    new_state = (LSTMStateTuple(c, m) if self._state_is_tuple else
                 array_ops.concat([c, m], 1))
    return m, new_state

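# Editor's sketch (not part of the original file): a minimal NumPy
# re-implementation of the single step computed by LSTMCell.call above,
# assuming no peepholes, no clipping and no projection. The function and
# argument names are hypothetical; they mirror the TensorFlow code purely
# for illustration.
def _numpy_lstm_step_sketch(inputs, c_prev, m_prev, kernel, bias,
                            forget_bias=1.0):
  import numpy as np
  sigmoid = lambda x: 1.0 / (1.0 + np.exp(-x))
  # [batch, input_depth + num_units] @ [input_depth + num_units, 4*num_units]
  lstm_matrix = np.concatenate([inputs, m_prev], axis=1).dot(kernel) + bias
  i, j, f, o = np.split(lstm_matrix, 4, axis=1)
  c = sigmoid(f + forget_bias) * c_prev + sigmoid(i) * np.tanh(j)
  m = sigmoid(o) * np.tanh(c)
  return m, (c, m)
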
def _enumerated_map_structure_up_to(shallow_structure, map_fn, *args, **kwargs):
  ix = [0]
  def enumerated_fn(*inner_args, **inner_kwargs):
    r = map_fn(ix[0], *inner_args, **inner_kwargs)
    ix[0] += 1
    return r
  return nest.map_structure_up_to(shallow_structure,
                                  enumerated_fn, *args, **kwargs)

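# Editor's note (not in the original file): the helper above visits each leaf
# of a nested structure in order and passes its running index to map_fn, e.g.
#
#   _enumerated_map_structure_up_to([None, None],
#                                   lambda ix, x: (ix, x * 10),
#                                   [1, 2])
#   # -> [(0, 10), (1, 20)]
#
# The one-element list `ix` is the usual Python 2 workaround for the missing
# `nonlocal` keyword.
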
def _default_dropout_state_filter_visitor(substate):
  if isinstance(substate, LSTMStateTuple):
    # Do not perform dropout on the memory state.
    return LSTMStateTuple(c=False, h=True)
  elif isinstance(substate, tensor_array_ops.TensorArray):
    return False
  return True

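# Editor's sketch (not part of the original file): the visitor above returns a
# structure of booleans marking which substates may receive dropout. A caller
# could consume its result roughly as in this hypothetical helper:
def _apply_state_dropout_sketch(dropout_fn, substate):
  keep = _default_dropout_state_filter_visitor(substate)
  if isinstance(substate, LSTMStateTuple):
    # Only the h component is eligible; c always passes through untouched.
    return LSTMStateTuple(c=substate.c,
                          h=dropout_fn(substate.h) if keep.h else substate.h)
  return dropout_fn(substate) if keep else substate
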
@tf_export("nn.rnn_cell.DropoutWrapper")
class DropoutWrapper(RNNCell):
  """Operator adding dropout to inputs and outputs of the given cell."""

  def __init__(self, cell, input_keep_prob=1.0, output_keep_prob=1.0,
               state_keep_prob=1.0, variational_recurrent=False,
               input_size=None, dtype=None, seed=None,
               dropout_state_filter_visitor=None):
    """Create a cell with added input, state, and/or output dropout.

    If `variational_recurrent` is set to `True` (**NOT** the default behavior),
    then the same dropout mask is applied at every step, as described in:

    Y. Gal, Z. Ghahramani. "A Theoretically Grounded Application of Dropout in
    Recurrent Neural Networks". https://arxiv.org/abs/1512.05287

    Otherwise a different dropout mask is applied at every time step.

    Note, by default (unless a custom `dropout_state_filter` is provided),
    the memory state (`c` component of any `LSTMStateTuple`) passing through
    a `DropoutWrapper` is never modified. This behavior is described in the
    above article.

    Args:
      cell: an RNNCell, the cell whose inputs, states, and/or outputs will
        have dropout applied.
      input_keep_prob: unit Tensor or float between 0 and 1, input keep
        probability; if it is constant and 1, no input dropout will be added.
      output_keep_prob: unit Tensor or float between 0 and 1, output keep
        probability; if it is constant and 1, no output dropout will be added.
      state_keep_prob: unit Tensor or float between 0 and 1, state keep
        probability; if it is constant and 1, no state dropout will be added.
        State dropout is performed on the outgoing states of the cell.
        **Note** the state components to which dropout is applied when
        `state_keep_prob` is in `(0, 1)` are also determined by
        the argument `dropout_state_filter_visitor` (e.g. by default dropout
        is never applied to the `c` component of an `LSTMStateTuple`).
      variational_recurrent: Python bool. If `True`, then the same
        dropout pattern is applied across all time steps per run call.
        If this parameter is set, `input_size` **must** be provided.
      input_size: (optional) (possibly nested tuple of) `TensorShape` objects
        containing the depth(s) of the input tensors expected to be passed in to
        the `DropoutWrapper`. Required and used **iff**
        `variational_recurrent = True` and `input_keep_prob < 1`.
      dtype: (optional) The `dtype` of the input, state, and output tensors.
</tr>
<tr>
<td id="L888" class="blob-num js-line-number" data-line-number="888"></td>
<td id="LC888" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> Required and used **iff** `variational_recurrent = True`.</span></td>
</tr>
<tr>
<td id="L889" class="blob-num js-line-number" data-line-number="889"></td>
<td id="LC889" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> seed: (optional) integer, the randomness seed.</span></td>
</tr>
<tr>
<td id="L890" class="blob-num js-line-number" data-line-number="890"></td>
<td id="LC890" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> dropout_state_filter_visitor: (optional), default: (see below). Function</span></td>
</tr>
<tr>
<td id="L891" class="blob-num js-line-number" data-line-number="891"></td>
<td id="LC891" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> that takes any hierarchical level of the state and returns</span></td>
</tr>
<tr>
<td id="L892" class="blob-num js-line-number" data-line-number="892"></td>
<td id="LC892" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> a scalar or depth=1 structure of Python booleans describing</span></td>
</tr>
<tr>
<td id="L893" class="blob-num js-line-number" data-line-number="893"></td>
<td id="LC893" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> which terms in the state should be dropped out. In addition, if the</span></td>
</tr>
<tr>
<td id="L894" class="blob-num js-line-number" data-line-number="894"></td>
<td id="LC894" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> function returns `True`, dropout is applied across this sublevel. If</span></td>
</tr>
<tr>
<td id="L895" class="blob-num js-line-number" data-line-number="895"></td>
<td id="LC895" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> the function returns `False`, dropout is not applied across this entire</span></td>
</tr>
<tr>
<td id="L896" class="blob-num js-line-number" data-line-number="896"></td>
<td id="LC896" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> sublevel.</span></td>
</tr>
<tr>
<td id="L897" class="blob-num js-line-number" data-line-number="897"></td>
<td id="LC897" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> Default behavior: perform dropout on all terms except the memory (`c`)</span></td>
</tr>
<tr>
<td id="L898" class="blob-num js-line-number" data-line-number="898"></td>
<td id="LC898" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> state of `LSTMCellState` objects, and don't try to apply dropout to</span></td>
</tr>
<tr>
<td id="L899" class="blob-num js-line-number" data-line-number="899"></td>
<td id="LC899" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> `TensorArray` objects:</span></td>
</tr>
<tr>
<td id="L900" class="blob-num js-line-number" data-line-number="900"></td>
<td id="LC900" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> ```</span></td>
</tr>
<tr>
<td id="L901" class="blob-num js-line-number" data-line-number="901"></td>
<td id="LC901" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> def dropout_state_filter_visitor(s):</span></td>
</tr>
<tr>
<td id="L902" class="blob-num js-line-number" data-line-number="902"></td>
<td id="LC902" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> if isinstance(s, LSTMCellState):</span></td>
</tr>
<tr>
<td id="L903" class="blob-num js-line-number" data-line-number="903"></td>
<td id="LC903" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> # Never perform dropout on the c state.</span></td>
</tr>
<tr>
<td id="L904" class="blob-num js-line-number" data-line-number="904"></td>
<td id="LC904" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> return LSTMCellState(c=False, h=True)</span></td>
</tr>
<tr>
<td id="L905" class="blob-num js-line-number" data-line-number="905"></td>
<td id="LC905" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> elif isinstance(s, TensorArray):</span></td>
</tr>
<tr>
<td id="L906" class="blob-num js-line-number" data-line-number="906"></td>
<td id="LC906" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> return False</span></td>
</tr>
<tr>
<td id="L907" class="blob-num js-line-number" data-line-number="907"></td>
<td id="LC907" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> return True</span></td>
</tr>
<tr>
<td id="L908" class="blob-num js-line-number" data-line-number="908"></td>
<td id="LC908" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> ```</span></td>
</tr>
<tr>
<td id="L909" class="blob-num js-line-number" data-line-number="909"></td>
<td id="LC909" class="blob-code blob-code-inner js-file-line"><span class="pl-s"></span></td>
</tr>
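    For example, a minimal construction sketch (illustrative only; it assumes
    `cell` is an existing `RNNCell` such as an `LSTMCell`, and applies dropout
    to the outputs and to the filtered state components):
    ```
    cell = DropoutWrapper(cell,
                          output_keep_prob=0.5,
                          state_keep_prob=0.5)
    ```
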
    Raises:
      TypeError: if `cell` is not an `RNNCell`, or if
        `dropout_state_filter_visitor` is provided but not `callable`.
      ValueError: if any of the keep_probs are not between 0 and 1.
    """
    if not _like_rnncell(cell):
      raise TypeError("The parameter cell is not an RNNCell.")
    if (dropout_state_filter_visitor is not None
        and not callable(dropout_state_filter_visitor)):
      raise TypeError("dropout_state_filter_visitor must be callable")
    self._dropout_state_filter = (
        dropout_state_filter_visitor or _default_dropout_state_filter_visitor)
    with ops.name_scope("DropoutWrapperInit"):
      def tensor_and_const_value(v):
        tensor_value = ops.convert_to_tensor(v)
        const_value = tensor_util.constant_value(tensor_value)
        return (tensor_value, const_value)
      for prob, attr in [(input_keep_prob, "input_keep_prob"),
                         (state_keep_prob, "state_keep_prob"),
                         (output_keep_prob, "output_keep_prob")]:
        tensor_prob, const_prob = tensor_and_const_value(prob)
        if const_prob is not None:
          if const_prob < 0 or const_prob > 1:
            raise ValueError("Parameter %s must be between 0 and 1: %g"
                             % (attr, const_prob))
          setattr(self, "_%s" % attr, float(const_prob))
        else:
          setattr(self, "_%s" % attr, tensor_prob)

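    # When a keep probability is a graph-time constant, it is stored above as
    # a plain Python float; __call__ uses this to skip building dropout ops
    # entirely when the constant equals 1.0 (see _should_dropout below).
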
    # Set cell, variational_recurrent, seed before running the code below
    self._cell = cell
    self._variational_recurrent = variational_recurrent
    self._seed = seed

    self._recurrent_input_noise = None
    self._recurrent_state_noise = None
    self._recurrent_output_noise = None

    if variational_recurrent:
      if dtype is None:
        raise ValueError(
            "When variational_recurrent=True, dtype must be provided")

      def convert_to_batch_shape(s):
        # Prepend a 1 for the batch dimension; for recurrent
        # variational dropout we use the same dropout mask for all
        # batch elements.
        return array_ops.concat(
            ([1], tensor_shape.TensorShape(s).as_list()), 0)

      def batch_noise(s, inner_seed):
        shape = convert_to_batch_shape(s)
        return random_ops.random_uniform(shape, seed=inner_seed, dtype=dtype)

      if (not isinstance(self._input_keep_prob, numbers.Real) or
          self._input_keep_prob < 1.0):
        if input_size is None:
          raise ValueError(
              "When variational_recurrent=True and input_keep_prob < 1.0 or "
              "is unknown, input_size must be provided")
        self._recurrent_input_noise = _enumerated_map_structure_up_to(
            input_size,
            lambda i, s: batch_noise(s, inner_seed=self._gen_seed("input", i)),
            input_size)
      self._recurrent_state_noise = _enumerated_map_structure_up_to(
          cell.state_size,
          lambda i, s: batch_noise(s, inner_seed=self._gen_seed("state", i)),
          cell.state_size)
      self._recurrent_output_noise = _enumerated_map_structure_up_to(
          cell.output_size,
          lambda i, s: batch_noise(s, inner_seed=self._gen_seed("output", i)),
          cell.output_size)

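      # Illustrative shapes (assuming, for concreteness, an LSTM cell with
      # state_size = LSTMStateTuple(c=128, h=128)): the state noise is a pair
      # of uniform tensors of shape [1, 128].  The leading 1 reuses one mask
      # across the whole batch, and the same tensors are reused at every time
      # step, which is what makes the dropout "variational".
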
  def _gen_seed(self, salt_prefix, index):
    if self._seed is None:
      return None
    salt = "%s_%d" % (salt_prefix, index)
    string = (str(self._seed) + salt).encode("utf-8")
    return int(hashlib.md5(string).hexdigest()[:8], 16) & 0x7FFFFFFF

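  # _gen_seed is deterministic: for a fixed wrapper seed, a salt such as
  # "input_0" always hashes to the same 31-bit integer (the & 0x7FFFFFFF mask
  # keeps it non-negative), so rebuilding the graph reproduces identical
  # per-tensor dropout seeds.
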
  @property
  def wrapped_cell(self):
    return self._cell

  @property
  def state_size(self):
    return self._cell.state_size

  @property
  def output_size(self):
    return self._cell.output_size

  def zero_state(self, batch_size, dtype):
    with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
      return self._cell.zero_state(batch_size, dtype)

  def _variational_recurrent_dropout_value(
      self, index, value, noise, keep_prob):
    """Performs dropout given the pre-calculated noise tensor."""
    # uniform [keep_prob, 1.0 + keep_prob)
    random_tensor = keep_prob + noise

    # 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
    binary_tensor = math_ops.floor(random_tensor)
    ret = math_ops.div(value, keep_prob) * binary_tensor
    ret.set_shape(value.get_shape())
    return ret

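  # A worked instance of the floor trick above (illustrative numbers): with
  # keep_prob = 0.75 and noise u ~ U[0, 1), random_tensor = 0.75 + u lies in
  # [0.75, 1.75), so floor() yields 1.0 with probability 0.75 and 0.0
  # otherwise; dividing the kept values by keep_prob preserves the expected
  # magnitude of the activations.
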
  def _dropout(self, values, salt_prefix, recurrent_noise, keep_prob,
               shallow_filtered_substructure=None):
    """Decides whether to perform standard dropout or recurrent dropout."""

    if shallow_filtered_substructure is None:
      # Put something so we traverse the entire structure; inside the
      # dropout function we check to see if leafs of this are bool or not.
      shallow_filtered_substructure = values

    if not self._variational_recurrent:
      def dropout(i, do_dropout, v):
        if not isinstance(do_dropout, bool) or do_dropout:
          return nn_ops.dropout(
              v, keep_prob=keep_prob, seed=self._gen_seed(salt_prefix, i))
        else:
          return v
      return _enumerated_map_structure_up_to(
          shallow_filtered_substructure, dropout,
          *[shallow_filtered_substructure, values])
    else:
      def dropout(i, do_dropout, v, n):
        if not isinstance(do_dropout, bool) or do_dropout:
          return self._variational_recurrent_dropout_value(i, v, n, keep_prob)
        else:
          return v
      return _enumerated_map_structure_up_to(
          shallow_filtered_substructure, dropout,
          *[shallow_filtered_substructure, values, recurrent_noise])

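  # _dropout maps over arbitrarily nested structures (tuples, namedtuples,
  # LSTMStateTuples); boolean leaves in the shallow filter structure
  # short-circuit whole sublevels, matching the dropout_state_filter_visitor
  # contract documented in __init__.
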
  def __call__(self, inputs, state, scope=None):
    """Run the cell with the declared dropouts."""
    def _should_dropout(p):
      return (not isinstance(p, float)) or p < 1

    if _should_dropout(self._input_keep_prob):
      inputs = self._dropout(inputs, "input",
                             self._recurrent_input_noise,
                             self._input_keep_prob)
    output, new_state = self._cell(inputs, state, scope=scope)
    if _should_dropout(self._state_keep_prob):
      # Identify which subsets of the state to perform dropout on and
      # which ones to keep.
      shallow_filtered_substructure = nest.get_traverse_shallow_structure(
          self._dropout_state_filter, new_state)
      new_state = self._dropout(new_state, "state",
                                self._recurrent_state_noise,
                                self._state_keep_prob,
                                shallow_filtered_substructure)
    if _should_dropout(self._output_keep_prob):
      output = self._dropout(output, "output",
                             self._recurrent_output_noise,
                             self._output_keep_prob)
    return output, new_state


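# A minimal usage sketch for DropoutWrapper (illustrative, assuming TF 1.x
# graph mode, a 3-D `inputs` tensor of shape [batch, time, depth], and the
# exported tf.nn.rnn_cell aliases):
#
#   cell = tf.nn.rnn_cell.LSTMCell(256)
#   cell = tf.nn.rnn_cell.DropoutWrapper(
#       cell,
#       input_keep_prob=0.8,
#       output_keep_prob=0.8,
#       state_keep_prob=0.8,
#       variational_recurrent=True,  # one mask per run call (Gal & Ghahramani)
#       input_size=inputs.shape[2:],  # required: variational + input dropout
#       dtype=inputs.dtype,           # required when variational_recurrent=True
#       seed=42)
#   outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=inputs.dtype)

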
@tf_export("nn.rnn_cell.ResidualWrapper")
class ResidualWrapper(RNNCell):
  """RNNCell wrapper that ensures cell inputs are added to the outputs."""

  def __init__(self, cell, residual_fn=None):
    """Constructs a `ResidualWrapper` for `cell`.

    Args:
      cell: An instance of `RNNCell`.
      residual_fn: (Optional) The function to map raw cell inputs and raw
        cell outputs to the actual cell outputs of the residual network.
        Defaults to calling nest.map_structure on (lambda i, o: i + o) for
        inputs and outputs.
    """
    self._cell = cell
    self._residual_fn = residual_fn

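  # With residual_fn=None, __call__ adds the raw inputs to the raw outputs
  # elementwise via nest.map_structure, so the wrapped cell's outputs must
  # match the structure and shape of its inputs.
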
  @property
  def state_size(self):
    return self._cell.state_size

  @property
  def output_size(self):
    return self._cell.output_size

  def zero_state(self, batch_size, dtype):
    with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
      return self._cell.zero_state(batch_size, dtype)

  def __call__(self, inputs, state, scope=None):
    """Run the cell and then apply the residual_fn on its inputs to its outputs.

    Args:
      inputs: cell inputs.
      state: cell state.
      scope: optional cell scope.

    Returns:
      Tuple of cell outputs and new state.

    Raises:
      TypeError: If cell inputs and outputs have different structure (type).
      ValueError: If cell inputs and outputs have different structure (value).
</tr>
<tr>
<td id="L1116" class="blob-num js-line-number" data-line-number="1116"></td>
<td id="LC1116" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> <span class="pl-pds">"""</span></span></td>
</tr>
<tr>
<td id="L1117" class="blob-num js-line-number" data-line-number="1117"></td>
<td id="LC1117" class="blob-code blob-code-inner js-file-line"> outputs, new_state <span class="pl-k">=</span> <span class="pl-c1">self</span>._cell(inputs, state, <span class="pl-v">scope</span><span class="pl-k">=</span>scope)</td>
</tr>
<tr>
<td id="L1118" class="blob-num js-line-number" data-line-number="1118"></td>
<td id="LC1118" class="blob-code blob-code-inner js-file-line"> <span class="pl-c"><span class="pl-c">#</span> Ensure shapes match</span></td>
</tr>
<tr>
<td id="L1119" class="blob-num js-line-number" data-line-number="1119"></td>
<td id="LC1119" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">def</span> <span class="pl-en">assert_shape_match</span>(<span class="pl-smi">inp</span>, <span class="pl-smi">out</span>):</td>
</tr>
<tr>
<td id="L1120" class="blob-num js-line-number" data-line-number="1120"></td>
<td id="LC1120" class="blob-code blob-code-inner js-file-line"> inp.get_shape().assert_is_compatible_with(out.get_shape())</td>
</tr>
<tr>
<td id="L1121" class="blob-num js-line-number" data-line-number="1121"></td>
<td id="LC1121" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">def</span> <span class="pl-en">default_residual_fn</span>(<span class="pl-smi">inputs</span>, <span class="pl-smi">outputs</span>):</td>
</tr>
<tr>
<td id="L1122" class="blob-num js-line-number" data-line-number="1122"></td>
<td id="LC1122" class="blob-code blob-code-inner js-file-line"> nest.assert_same_structure(inputs, outputs)</td>
</tr>
<tr>
<td id="L1123" class="blob-num js-line-number" data-line-number="1123"></td>
<td id="LC1123" class="blob-code blob-code-inner js-file-line"> nest.map_structure(assert_shape_match, inputs, outputs)</td>
</tr>
<tr>
<td id="L1124" class="blob-num js-line-number" data-line-number="1124"></td>
<td id="LC1124" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">return</span> nest.map_structure(<span class="pl-k">lambda</span> <span class="pl-smi">inp</span>, <span class="pl-smi">out</span>: inp <span class="pl-k">+</span> out, inputs, outputs)</td>
</tr>
<tr>
<td id="L1125" class="blob-num js-line-number" data-line-number="1125"></td>
<td id="LC1125" class="blob-code blob-code-inner js-file-line"> res_outputs <span class="pl-k">=</span> (<span class="pl-c1">self</span>._residual_fn <span class="pl-k">or</span> default_residual_fn)(inputs, outputs)</td>
</tr>
<tr>
<td id="L1126" class="blob-num js-line-number" data-line-number="1126"></td>
<td id="LC1126" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">return</span> (res_outputs, new_state)</td>
</tr>
<tr>
<td id="L1127" class="blob-num js-line-number" data-line-number="1127"></td>
<td id="LC1127" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L1128" class="blob-num js-line-number" data-line-number="1128"></td>
<td id="LC1128" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L1129" class="blob-num js-line-number" data-line-number="1129"></td>
<td id="LC1129" class="blob-code blob-code-inner js-file-line"><span class="pl-en">@tf_export</span>(<span class="pl-s"><span class="pl-pds">"</span>nn.rnn_cell.DeviceWrapper<span class="pl-pds">"</span></span>)</td>
</tr>
<tr>
<td id="L1130" class="blob-num js-line-number" data-line-number="1130"></td>
<td id="LC1130" class="blob-code blob-code-inner js-file-line"><span class="pl-k">class</span> <span class="pl-en">DeviceWrapper</span>(<span class="pl-e">RNNCell</span>):</td>
</tr>
<tr>
<td id="L1131" class="blob-num js-line-number" data-line-number="1131"></td>
<td id="LC1131" class="blob-code blob-code-inner js-file-line"> <span class="pl-s"><span class="pl-pds">"""</span>Operator that ensures an RNNCell runs on a particular device.<span class="pl-pds">"""</span></span></td>
</tr>
<tr>
<td id="L1132" class="blob-num js-line-number" data-line-number="1132"></td>
<td id="LC1132" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L1133" class="blob-num js-line-number" data-line-number="1133"></td>
<td id="LC1133" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">def</span> <span class="pl-c1">__init__</span>(<span class="pl-smi"><span class="pl-smi">self</span></span>, <span class="pl-smi">cell</span>, <span class="pl-smi">device</span>):</td>
</tr>
<tr>
<td id="L1134" class="blob-num js-line-number" data-line-number="1134"></td>
<td id="LC1134" class="blob-code blob-code-inner js-file-line"> <span class="pl-s"><span class="pl-pds">"""</span>Construct a `DeviceWrapper` for `cell` with device `device`.</span></td>
</tr>
<tr>
<td id="L1135" class="blob-num js-line-number" data-line-number="1135"></td>
<td id="LC1135" class="blob-code blob-code-inner js-file-line"><span class="pl-s"></span></td>
</tr>
<tr>
<td id="L1136" class="blob-num js-line-number" data-line-number="1136"></td>
<td id="LC1136" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> Ensures the wrapped `cell` is called with `tf.device(device)`.</span></td>
</tr>
<tr>
<td id="L1137" class="blob-num js-line-number" data-line-number="1137"></td>
<td id="LC1137" class="blob-code blob-code-inner js-file-line"><span class="pl-s"></span></td>
</tr>
<tr>
<td id="L1138" class="blob-num js-line-number" data-line-number="1138"></td>
<td id="LC1138" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> Args:</span></td>
</tr>
<tr>
<td id="L1139" class="blob-num js-line-number" data-line-number="1139"></td>
<td id="LC1139" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> cell: An instance of `RNNCell`.</span></td>
</tr>
<tr>
<td id="L1140" class="blob-num js-line-number" data-line-number="1140"></td>
<td id="LC1140" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> device: A device string or function, for passing to `tf.device`.</span></td>
</tr>
<tr>
<td id="L1141" class="blob-num js-line-number" data-line-number="1141"></td>
<td id="LC1141" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> <span class="pl-pds">"""</span></span></td>
</tr>
<tr>
<td id="L1142" class="blob-num js-line-number" data-line-number="1142"></td>
<td id="LC1142" class="blob-code blob-code-inner js-file-line"> <span class="pl-c1">self</span>._cell <span class="pl-k">=</span> cell</td>
</tr>
<tr>
<td id="L1143" class="blob-num js-line-number" data-line-number="1143"></td>
<td id="LC1143" class="blob-code blob-code-inner js-file-line"> <span class="pl-c1">self</span>._device <span class="pl-k">=</span> device</td>
</tr>
<tr>
<td id="L1144" class="blob-num js-line-number" data-line-number="1144"></td>
<td id="LC1144" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L1145" class="blob-num js-line-number" data-line-number="1145"></td>
<td id="LC1145" class="blob-code blob-code-inner js-file-line"> <span class="pl-en">@</span><span class="pl-c1">property</span></td>
</tr>
<tr>
<td id="L1146" class="blob-num js-line-number" data-line-number="1146"></td>
<td id="LC1146" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">def</span> <span class="pl-en">state_size</span>(<span class="pl-smi"><span class="pl-smi">self</span></span>):</td>
</tr>
<tr>
<td id="L1147" class="blob-num js-line-number" data-line-number="1147"></td>
<td id="LC1147" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">return</span> <span class="pl-c1">self</span>._cell.state_size</td>
</tr>
<tr>
<td id="L1148" class="blob-num js-line-number" data-line-number="1148"></td>
<td id="LC1148" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L1149" class="blob-num js-line-number" data-line-number="1149"></td>
<td id="LC1149" class="blob-code blob-code-inner js-file-line"> <span class="pl-en">@</span><span class="pl-c1">property</span></td>
</tr>
<tr>
<td id="L1150" class="blob-num js-line-number" data-line-number="1150"></td>
<td id="LC1150" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">def</span> <span class="pl-en">output_size</span>(<span class="pl-smi"><span class="pl-smi">self</span></span>):</td>
</tr>
<tr>
<td id="L1151" class="blob-num js-line-number" data-line-number="1151"></td>
<td id="LC1151" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">return</span> <span class="pl-c1">self</span>._cell.output_size</td>
</tr>
<tr>
<td id="L1152" class="blob-num js-line-number" data-line-number="1152"></td>
<td id="LC1152" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L1153" class="blob-num js-line-number" data-line-number="1153"></td>
<td id="LC1153" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">def</span> <span class="pl-en">zero_state</span>(<span class="pl-smi"><span class="pl-smi">self</span></span>, <span class="pl-smi">batch_size</span>, <span class="pl-smi">dtype</span>):</td>
</tr>
<tr>
<td id="L1154" class="blob-num js-line-number" data-line-number="1154"></td>
<td id="LC1154" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">with</span> ops.name_scope(<span class="pl-c1">type</span>(<span class="pl-c1">self</span>).<span class="pl-c1">__name__</span> <span class="pl-k">+</span> <span class="pl-s"><span class="pl-pds">"</span>ZeroState<span class="pl-pds">"</span></span>, <span class="pl-v">values</span><span class="pl-k">=</span>[batch_size]):</td>
</tr>
<tr>
<td id="L1155" class="blob-num js-line-number" data-line-number="1155"></td>
<td id="LC1155" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">with</span> ops.device(<span class="pl-c1">self</span>._device):</td>
</tr>
<tr>
<td id="L1156" class="blob-num js-line-number" data-line-number="1156"></td>
<td id="LC1156" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">return</span> <span class="pl-c1">self</span>._cell.zero_state(batch_size, dtype)</td>
</tr>
<tr>
<td id="L1157" class="blob-num js-line-number" data-line-number="1157"></td>
<td id="LC1157" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L1158" class="blob-num js-line-number" data-line-number="1158"></td>
<td id="LC1158" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">def</span> <span class="pl-c1">__call__</span>(<span class="pl-smi"><span class="pl-smi">self</span></span>, <span class="pl-smi">inputs</span>, <span class="pl-smi">state</span>, <span class="pl-smi">scope</span><span class="pl-k">=</span><span class="pl-c1">None</span>):</td>
</tr>
<tr>
<td id="L1159" class="blob-num js-line-number" data-line-number="1159"></td>
<td id="LC1159" class="blob-code blob-code-inner js-file-line"> <span class="pl-s"><span class="pl-pds">"""</span>Run the cell on specified device.<span class="pl-pds">"""</span></span></td>
</tr>
<tr>
<td id="L1160" class="blob-num js-line-number" data-line-number="1160"></td>
<td id="LC1160" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">with</span> ops.device(<span class="pl-c1">self</span>._device):</td>
</tr>
<tr>
<td id="L1161" class="blob-num js-line-number" data-line-number="1161"></td>
<td id="LC1161" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">return</span> <span class="pl-c1">self</span>._cell(inputs, state, <span class="pl-v">scope</span><span class="pl-k">=</span>scope)</td>
</tr>
<tr>
<td id="L1162" class="blob-num js-line-number" data-line-number="1162"></td>
<td id="LC1162" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L1163" class="blob-num js-line-number" data-line-number="1163"></td>
<td id="LC1163" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L1164" class="blob-num js-line-number" data-line-number="1164"></td>
<td id="LC1164" class="blob-code blob-code-inner js-file-line"><span class="pl-en">@tf_export</span>(<span class="pl-s"><span class="pl-pds">"</span>nn.rnn_cell.MultiRNNCell<span class="pl-pds">"</span></span>)</td>
</tr>
<tr>
<td id="L1165" class="blob-num js-line-number" data-line-number="1165"></td>
<td id="LC1165" class="blob-code blob-code-inner js-file-line"><span class="pl-k">class</span> <span class="pl-en">MultiRNNCell</span>(<span class="pl-e">RNNCell</span>):</td>
</tr>
<tr>
<td id="L1166" class="blob-num js-line-number" data-line-number="1166"></td>
<td id="LC1166" class="blob-code blob-code-inner js-file-line"> <span class="pl-s"><span class="pl-pds">"""</span>RNN cell composed sequentially of multiple simple cells.<span class="pl-pds">"""</span></span></td>
</tr>
<tr>
<td id="L1167" class="blob-num js-line-number" data-line-number="1167"></td>
<td id="LC1167" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L1168" class="blob-num js-line-number" data-line-number="1168"></td>
<td id="LC1168" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">def</span> <span class="pl-c1">__init__</span>(<span class="pl-smi"><span class="pl-smi">self</span></span>, <span class="pl-smi">cells</span>, <span class="pl-smi">state_is_tuple</span><span class="pl-k">=</span><span class="pl-c1">True</span>):</td>
</tr>
<tr>
<td id="L1169" class="blob-num js-line-number" data-line-number="1169"></td>
<td id="LC1169" class="blob-code blob-code-inner js-file-line"> <span class="pl-s"><span class="pl-pds">"""</span>Create a RNN cell composed sequentially of a number of RNNCells.</span></td>
</tr>
<tr>
<td id="L1170" class="blob-num js-line-number" data-line-number="1170"></td>
<td id="LC1170" class="blob-code blob-code-inner js-file-line"><span class="pl-s"></span></td>
</tr>
<tr>
<td id="L1171" class="blob-num js-line-number" data-line-number="1171"></td>
<td id="LC1171" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> Args:</span></td>
</tr>
<tr>
<td id="L1172" class="blob-num js-line-number" data-line-number="1172"></td>
<td id="LC1172" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> cells: list of RNNCells that will be composed in this order.</span></td>
</tr>
<tr>
<td id="L1173" class="blob-num js-line-number" data-line-number="1173"></td>
<td id="LC1173" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> state_is_tuple: If True, accepted and returned states are n-tuples, where</span></td>
</tr>
<tr>
<td id="L1174" class="blob-num js-line-number" data-line-number="1174"></td>
<td id="LC1174" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> `n = len(cells)`. If False, the states are all</span></td>
</tr>
<tr>
<td id="L1175" class="blob-num js-line-number" data-line-number="1175"></td>
<td id="LC1175" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> concatenated along the column axis. This latter behavior will soon be</span></td>
</tr>
<tr>
<td id="L1176" class="blob-num js-line-number" data-line-number="1176"></td>
<td id="LC1176" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> deprecated.</span></td>
</tr>
<tr>
<td id="L1177" class="blob-num js-line-number" data-line-number="1177"></td>
<td id="LC1177" class="blob-code blob-code-inner js-file-line"><span class="pl-s"></span></td>
</tr>
<tr>
<td id="L1178" class="blob-num js-line-number" data-line-number="1178"></td>
<td id="LC1178" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> Raises:</span></td>
</tr>
<tr>
<td id="L1179" class="blob-num js-line-number" data-line-number="1179"></td>
<td id="LC1179" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> ValueError: if cells is empty (not allowed), or at least one of the cells</span></td>
</tr>
<tr>
<td id="L1180" class="blob-num js-line-number" data-line-number="1180"></td>
<td id="LC1180" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> returns a state tuple but the flag `state_is_tuple` is `False`.</span></td>
</tr>
<tr>
<td id="L1181" class="blob-num js-line-number" data-line-number="1181"></td>
<td id="LC1181" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> <span class="pl-pds">"""</span></span></td>
</tr>
<tr>
<td id="L1182" class="blob-num js-line-number" data-line-number="1182"></td>
<td id="LC1182" class="blob-code blob-code-inner js-file-line"> <span class="pl-c1">super</span>(MultiRNNCell, <span class="pl-c1">self</span>).<span class="pl-c1">__init__</span>()</td>
</tr>
<tr>
<td id="L1183" class="blob-num js-line-number" data-line-number="1183"></td>
<td id="LC1183" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">if</span> <span class="pl-k">not</span> cells:</td>
</tr>
<tr>
<td id="L1184" class="blob-num js-line-number" data-line-number="1184"></td>
<td id="LC1184" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">raise</span> <span class="pl-c1">ValueError</span>(<span class="pl-s"><span class="pl-pds">"</span>Must specify at least one cell for MultiRNNCell.<span class="pl-pds">"</span></span>)</td>
</tr>
<tr>
<td id="L1185" class="blob-num js-line-number" data-line-number="1185"></td>
<td id="LC1185" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">if</span> <span class="pl-k">not</span> nest.is_sequence(cells):</td>
</tr>
<tr>
<td id="L1186" class="blob-num js-line-number" data-line-number="1186"></td>
<td id="LC1186" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">raise</span> <span class="pl-c1">TypeError</span>(</td>
</tr>
<tr>
<td id="L1187" class="blob-num js-line-number" data-line-number="1187"></td>
<td id="LC1187" class="blob-code blob-code-inner js-file-line"> <span class="pl-s"><span class="pl-pds">"</span>cells must be a list or tuple, but saw: <span class="pl-c1">%s</span>.<span class="pl-pds">"</span></span> <span class="pl-k">%</span> cells)</td>
</tr>
<tr>
<td id="L1188" class="blob-num js-line-number" data-line-number="1188"></td>
<td id="LC1188" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L1189" class="blob-num js-line-number" data-line-number="1189"></td>
<td id="LC1189" class="blob-code blob-code-inner js-file-line"> <span class="pl-c1">self</span>._cells <span class="pl-k">=</span> cells</td>
</tr>
<tr>
<td id="L1190" class="blob-num js-line-number" data-line-number="1190"></td>
<td id="LC1190" class="blob-code blob-code-inner js-file-line"> <span class="pl-c1">self</span>._state_is_tuple <span class="pl-k">=</span> state_is_tuple</td>
</tr>
<tr>
<td id="L1191" class="blob-num js-line-number" data-line-number="1191"></td>
<td id="LC1191" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">if</span> <span class="pl-k">not</span> state_is_tuple:</td>
</tr>
<tr>
<td id="L1192" class="blob-num js-line-number" data-line-number="1192"></td>
<td id="LC1192" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">if</span> <span class="pl-c1">any</span>(nest.is_sequence(c.state_size) <span class="pl-k">for</span> c <span class="pl-k">in</span> <span class="pl-c1">self</span>._cells):</td>
</tr>
<tr>
<td id="L1193" class="blob-num js-line-number" data-line-number="1193"></td>
<td id="LC1193" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">raise</span> <span class="pl-c1">ValueError</span>(<span class="pl-s"><span class="pl-pds">"</span>Some cells return tuples of states, but the flag <span class="pl-pds">"</span></span></td>
</tr>
<tr>
<td id="L1194" class="blob-num js-line-number" data-line-number="1194"></td>
<td id="LC1194" class="blob-code blob-code-inner js-file-line"> <span class="pl-s"><span class="pl-pds">"</span>state_is_tuple is not set. State sizes are: <span class="pl-c1">%s</span><span class="pl-pds">"</span></span></td>
</tr>
<tr>
<td id="L1195" class="blob-num js-line-number" data-line-number="1195"></td>
<td id="LC1195" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">%</span> <span class="pl-c1">str</span>([c.state_size <span class="pl-k">for</span> c <span class="pl-k">in</span> <span class="pl-c1">self</span>._cells]))</td>
</tr>
<tr>
<td id="L1196" class="blob-num js-line-number" data-line-number="1196"></td>
<td id="LC1196" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L1197" class="blob-num js-line-number" data-line-number="1197"></td>
<td id="LC1197" class="blob-code blob-code-inner js-file-line"> <span class="pl-en">@</span><span class="pl-c1">property</span></td>
</tr>
<tr>
<td id="L1198" class="blob-num js-line-number" data-line-number="1198"></td>
<td id="LC1198" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">def</span> <span class="pl-en">state_size</span>(<span class="pl-smi"><span class="pl-smi">self</span></span>):</td>
</tr>
<tr>
<td id="L1199" class="blob-num js-line-number" data-line-number="1199"></td>
<td id="LC1199" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">if</span> <span class="pl-c1">self</span>._state_is_tuple:</td>
</tr>
<tr>
<td id="L1200" class="blob-num js-line-number" data-line-number="1200"></td>
<td id="LC1200" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">return</span> <span class="pl-c1">tuple</span>(cell.state_size <span class="pl-k">for</span> cell <span class="pl-k">in</span> <span class="pl-c1">self</span>._cells)</td>
</tr>
<tr>
<td id="L1201" class="blob-num js-line-number" data-line-number="1201"></td>
<td id="LC1201" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">else</span>:</td>
</tr>
<tr>
<td id="L1202" class="blob-num js-line-number" data-line-number="1202"></td>
<td id="LC1202" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">return</span> <span class="pl-c1">sum</span>([cell.state_size <span class="pl-k">for</span> cell <span class="pl-k">in</span> <span class="pl-c1">self</span>._cells])</td>
</tr>
<tr>
<td id="L1203" class="blob-num js-line-number" data-line-number="1203"></td>
<td id="LC1203" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L1204" class="blob-num js-line-number" data-line-number="1204"></td>
<td id="LC1204" class="blob-code blob-code-inner js-file-line"> <span class="pl-en">@</span><span class="pl-c1">property</span></td>
</tr>
<tr>
<td id="L1205" class="blob-num js-line-number" data-line-number="1205"></td>
<td id="LC1205" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">def</span> <span class="pl-en">output_size</span>(<span class="pl-smi"><span class="pl-smi">self</span></span>):</td>
</tr>
<tr>
<td id="L1206" class="blob-num js-line-number" data-line-number="1206"></td>
<td id="LC1206" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">return</span> <span class="pl-c1">self</span>._cells[<span class="pl-k">-</span><span class="pl-c1">1</span>].output_size</td>
</tr>
<tr>
<td id="L1207" class="blob-num js-line-number" data-line-number="1207"></td>
<td id="LC1207" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L1208" class="blob-num js-line-number" data-line-number="1208"></td>
<td id="LC1208" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">def</span> <span class="pl-en">zero_state</span>(<span class="pl-smi"><span class="pl-smi">self</span></span>, <span class="pl-smi">batch_size</span>, <span class="pl-smi">dtype</span>):</td>
</tr>
<tr>
<td id="L1209" class="blob-num js-line-number" data-line-number="1209"></td>
<td id="LC1209" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">with</span> ops.name_scope(<span class="pl-c1">type</span>(<span class="pl-c1">self</span>).<span class="pl-c1">__name__</span> <span class="pl-k">+</span> <span class="pl-s"><span class="pl-pds">"</span>ZeroState<span class="pl-pds">"</span></span>, <span class="pl-v">values</span><span class="pl-k">=</span>[batch_size]):</td>
</tr>
<tr>
<td id="L1210" class="blob-num js-line-number" data-line-number="1210"></td>
<td id="LC1210" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">if</span> <span class="pl-c1">self</span>._state_is_tuple:</td>
</tr>
<tr>
<td id="L1211" class="blob-num js-line-number" data-line-number="1211"></td>
<td id="LC1211" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">return</span> <span class="pl-c1">tuple</span>(cell.zero_state(batch_size, dtype) <span class="pl-k">for</span> cell <span class="pl-k">in</span> <span class="pl-c1">self</span>._cells)</td>
</tr>
<tr>
<td id="L1212" class="blob-num js-line-number" data-line-number="1212"></td>
<td id="LC1212" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">else</span>:</td>
</tr>
<tr>
<td id="L1213" class="blob-num js-line-number" data-line-number="1213"></td>
<td id="LC1213" class="blob-code blob-code-inner js-file-line"> <span class="pl-c"><span class="pl-c">#</span> We know here that state_size of each cell is not a tuple and</span></td>
</tr>
<tr>
<td id="L1214" class="blob-num js-line-number" data-line-number="1214"></td>
<td id="LC1214" class="blob-code blob-code-inner js-file-line"> <span class="pl-c"><span class="pl-c">#</span> presumably does not contain TensorArrays or anything else fancy</span></td>
</tr>
<tr>
<td id="L1215" class="blob-num js-line-number" data-line-number="1215"></td>
<td id="LC1215" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">return</span> <span class="pl-c1">super</span>(MultiRNNCell, <span class="pl-c1">self</span>).zero_state(batch_size, dtype)</td>
</tr>
<tr>
<td id="L1216" class="blob-num js-line-number" data-line-number="1216"></td>
<td id="LC1216" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L1217" class="blob-num js-line-number" data-line-number="1217"></td>
<td id="LC1217" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">def</span> <span class="pl-en">call</span>(<span class="pl-smi"><span class="pl-smi">self</span></span>, <span class="pl-smi">inputs</span>, <span class="pl-smi">state</span>):</td>
</tr>
<tr>
<td id="L1218" class="blob-num js-line-number" data-line-number="1218"></td>
<td id="LC1218" class="blob-code blob-code-inner js-file-line"> <span class="pl-s"><span class="pl-pds">"""</span>Run this multi-layer cell on inputs, starting from state.<span class="pl-pds">"""</span></span></td>
</tr>
<tr>
<td id="L1219" class="blob-num js-line-number" data-line-number="1219"></td>
<td id="LC1219" class="blob-code blob-code-inner js-file-line"> cur_state_pos <span class="pl-k">=</span> <span class="pl-c1">0</span></td>
</tr>
<tr>
<td id="L1220" class="blob-num js-line-number" data-line-number="1220"></td>
<td id="LC1220" class="blob-code blob-code-inner js-file-line"> cur_inp <span class="pl-k">=</span> inputs</td>
</tr>
<tr>
<td id="L1221" class="blob-num js-line-number" data-line-number="1221"></td>
<td id="LC1221" class="blob-code blob-code-inner js-file-line"> new_states <span class="pl-k">=</span> []</td>
</tr>
<tr>
<td id="L1222" class="blob-num js-line-number" data-line-number="1222"></td>
<td id="LC1222" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">for</span> i, cell <span class="pl-k">in</span> <span class="pl-c1">enumerate</span>(<span class="pl-c1">self</span>._cells):</td>
</tr>
<tr>
<td id="L1223" class="blob-num js-line-number" data-line-number="1223"></td>
<td id="LC1223" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">with</span> vs.variable_scope(<span class="pl-s"><span class="pl-pds">"</span>cell_<span class="pl-c1">%d</span><span class="pl-pds">"</span></span> <span class="pl-k">%</span> i):</td>
</tr>
<tr>
<td id="L1224" class="blob-num js-line-number" data-line-number="1224"></td>
<td id="LC1224" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">if</span> <span class="pl-c1">self</span>._state_is_tuple:</td>
</tr>
<tr>
<td id="L1225" class="blob-num js-line-number" data-line-number="1225"></td>
<td id="LC1225" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">if</span> <span class="pl-k">not</span> nest.is_sequence(state):</td>
</tr>
<tr>
<td id="L1226" class="blob-num js-line-number" data-line-number="1226"></td>
<td id="LC1226" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">raise</span> <span class="pl-c1">ValueError</span>(</td>
</tr>
<tr>
<td id="L1227" class="blob-num js-line-number" data-line-number="1227"></td>
<td id="LC1227" class="blob-code blob-code-inner js-file-line"> <span class="pl-s"><span class="pl-pds">"</span>Expected state to be a tuple of length <span class="pl-c1">%d</span>, but received: <span class="pl-c1">%s</span><span class="pl-pds">"</span></span> <span class="pl-k">%</span></td>
</tr>
<tr>
<td id="L1228" class="blob-num js-line-number" data-line-number="1228"></td>
<td id="LC1228" class="blob-code blob-code-inner js-file-line"> (<span class="pl-c1">len</span>(<span class="pl-c1">self</span>.state_size), state))</td>
</tr>
<tr>
<td id="L1229" class="blob-num js-line-number" data-line-number="1229"></td>
<td id="LC1229" class="blob-code blob-code-inner js-file-line"> cur_state <span class="pl-k">=</span> state[i]</td>
</tr>
<tr>
<td id="L1230" class="blob-num js-line-number" data-line-number="1230"></td>
<td id="LC1230" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">else</span>:</td>
</tr>
<tr>
<td id="L1231" class="blob-num js-line-number" data-line-number="1231"></td>
<td id="LC1231" class="blob-code blob-code-inner js-file-line"> cur_state <span class="pl-k">=</span> array_ops.slice(state, [<span class="pl-c1">0</span>, cur_state_pos],</td>
</tr>
<tr>
<td id="L1232" class="blob-num js-line-number" data-line-number="1232"></td>
<td id="LC1232" class="blob-code blob-code-inner js-file-line"> [<span class="pl-k">-</span><span class="pl-c1">1</span>, cell.state_size])</td>
</tr>
<tr>
<td id="L1233" class="blob-num js-line-number" data-line-number="1233"></td>
<td id="LC1233" class="blob-code blob-code-inner js-file-line"> cur_state_pos <span class="pl-k">+=</span> cell.state_size</td>
</tr>
<tr>
<td id="L1234" class="blob-num js-line-number" data-line-number="1234"></td>
<td id="LC1234" class="blob-code blob-code-inner js-file-line"> cur_inp, new_state <span class="pl-k">=</span> cell(cur_inp, cur_state)</td>
</tr>
<tr>
<td id="L1235" class="blob-num js-line-number" data-line-number="1235"></td>
<td id="LC1235" class="blob-code blob-code-inner js-file-line"> new_states.append(new_state)</td>
</tr>
<tr>
<td id="L1236" class="blob-num js-line-number" data-line-number="1236"></td>
<td id="LC1236" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L1237" class="blob-num js-line-number" data-line-number="1237"></td>
<td id="LC1237" class="blob-code blob-code-inner js-file-line"> new_states <span class="pl-k">=</span> (<span class="pl-c1">tuple</span>(new_states) <span class="pl-k">if</span> <span class="pl-c1">self</span>._state_is_tuple <span class="pl-k">else</span></td>
</tr>
<tr>
<td id="L1238" class="blob-num js-line-number" data-line-number="1238"></td>
<td id="LC1238" class="blob-code blob-code-inner js-file-line"> array_ops.concat(new_states, <span class="pl-c1">1</span>))</td>
</tr>
<tr>
<td id="L1239" class="blob-num js-line-number" data-line-number="1239"></td>
<td id="LC1239" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L1240" class="blob-num js-line-number" data-line-number="1240"></td>
<td id="LC1240" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">return</span> cur_inp, new_states</td>
</tr>
<tr>
<td id="L1241" class="blob-num js-line-number" data-line-number="1241"></td>
<td id="LC1241" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L1242" class="blob-num js-line-number" data-line-number="1242"></td>
<td id="LC1242" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L1243" class="blob-num js-line-number" data-line-number="1243"></td>
<td id="LC1243" class="blob-code blob-code-inner js-file-line"><span class="pl-k">class</span> <span class="pl-en">_SlimRNNCell</span>(<span class="pl-e">RNNCell</span>):</td>
</tr>
<tr>
<td id="L1244" class="blob-num js-line-number" data-line-number="1244"></td>
<td id="LC1244" class="blob-code blob-code-inner js-file-line"> <span class="pl-s"><span class="pl-pds">"""</span>A simple wrapper for slim.rnn_cells.<span class="pl-pds">"""</span></span></td>
</tr>
<tr>
<td id="L1245" class="blob-num js-line-number" data-line-number="1245"></td>
<td id="LC1245" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L1246" class="blob-num js-line-number" data-line-number="1246"></td>
<td id="LC1246" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">def</span> <span class="pl-c1">__init__</span>(<span class="pl-smi"><span class="pl-smi">self</span></span>, <span class="pl-smi">cell_fn</span>):</td>
</tr>
<tr>
<td id="L1247" class="blob-num js-line-number" data-line-number="1247"></td>
<td id="LC1247" class="blob-code blob-code-inner js-file-line"> <span class="pl-s"><span class="pl-pds">"""</span>Create a SlimRNNCell from a cell_fn.</span></td>
</tr>
<tr>
<td id="L1248" class="blob-num js-line-number" data-line-number="1248"></td>
<td id="LC1248" class="blob-code blob-code-inner js-file-line"><span class="pl-s"></span></td>
</tr>
<tr>
<td id="L1249" class="blob-num js-line-number" data-line-number="1249"></td>
<td id="LC1249" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> Args:</span></td>
</tr>
<tr>
<td id="L1250" class="blob-num js-line-number" data-line-number="1250"></td>
<td id="LC1250" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> cell_fn: a function which takes (inputs, state, scope) and produces the</span></td>
</tr>
<tr>
<td id="L1251" class="blob-num js-line-number" data-line-number="1251"></td>
<td id="LC1251" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> outputs and the new_state. Additionally when called with inputs=None and</span></td>
</tr>
<tr>
<td id="L1252" class="blob-num js-line-number" data-line-number="1252"></td>
<td id="LC1252" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> state=None it should return (initial_outputs, initial_state).</span></td>
</tr>
<tr>
<td id="L1253" class="blob-num js-line-number" data-line-number="1253"></td>
<td id="LC1253" class="blob-code blob-code-inner js-file-line"><span class="pl-s"></span></td>
</tr>
<tr>
<td id="L1254" class="blob-num js-line-number" data-line-number="1254"></td>
<td id="LC1254" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> Raises:</span></td>
</tr>
<tr>
<td id="L1255" class="blob-num js-line-number" data-line-number="1255"></td>
<td id="LC1255" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> TypeError: if cell_fn is not callable</span></td>
</tr>
<tr>
<td id="L1256" class="blob-num js-line-number" data-line-number="1256"></td>
<td id="LC1256" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> ValueError: if cell_fn cannot produce a valid initial state.</span></td>
</tr>
<tr>
<td id="L1257" class="blob-num js-line-number" data-line-number="1257"></td>
<td id="LC1257" class="blob-code blob-code-inner js-file-line"><span class="pl-s"> <span class="pl-pds">"""</span></span></td>
</tr>
<tr>
<td id="L1258" class="blob-num js-line-number" data-line-number="1258"></td>
<td id="LC1258" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">if</span> <span class="pl-k">not</span> <span class="pl-c1">callable</span>(cell_fn):</td>
</tr>
<tr>
<td id="L1259" class="blob-num js-line-number" data-line-number="1259"></td>
<td id="LC1259" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">raise</span> <span class="pl-c1">TypeError</span>(<span class="pl-s"><span class="pl-pds">"</span>cell_fn <span class="pl-c1">%s</span> needs to be callable<span class="pl-pds">"</span></span>, cell_fn)</td>
</tr>
<tr>
<td id="L1260" class="blob-num js-line-number" data-line-number="1260"></td>
<td id="LC1260" class="blob-code blob-code-inner js-file-line"> <span class="pl-c1">self</span>._cell_fn <span class="pl-k">=</span> cell_fn</td>
</tr>
<tr>
<td id="L1261" class="blob-num js-line-number" data-line-number="1261"></td>
<td id="LC1261" class="blob-code blob-code-inner js-file-line"> <span class="pl-c1">self</span>._cell_name <span class="pl-k">=</span> cell_fn.func.<span class="pl-c1">__name__</span></td>
</tr>
<tr>
<td id="L1262" class="blob-num js-line-number" data-line-number="1262"></td>
<td id="LC1262" class="blob-code blob-code-inner js-file-line"> init_output, init_state <span class="pl-k">=</span> <span class="pl-c1">self</span>._cell_fn(<span class="pl-c1">None</span>, <span class="pl-c1">None</span>)</td>
</tr>
<tr>
<td id="L1263" class="blob-num js-line-number" data-line-number="1263"></td>
<td id="LC1263" class="blob-code blob-code-inner js-file-line"> output_shape <span class="pl-k">=</span> init_output.get_shape()</td>
</tr>
<tr>
<td id="L1264" class="blob-num js-line-number" data-line-number="1264"></td>
<td id="LC1264" class="blob-code blob-code-inner js-file-line"> state_shape <span class="pl-k">=</span> init_state.get_shape()</td>
</tr>
<tr>
<td id="L1265" class="blob-num js-line-number" data-line-number="1265"></td>
<td id="LC1265" class="blob-code blob-code-inner js-file-line"> <span class="pl-c1">self</span>._output_size <span class="pl-k">=</span> output_shape.with_rank(<span class="pl-c1">2</span>)[<span class="pl-c1">1</span>].value</td>
</tr>
<tr>
<td id="L1266" class="blob-num js-line-number" data-line-number="1266"></td>
<td id="LC1266" class="blob-code blob-code-inner js-file-line"> <span class="pl-c1">self</span>._state_size <span class="pl-k">=</span> state_shape.with_rank(<span class="pl-c1">2</span>)[<span class="pl-c1">1</span>].value</td>
</tr>
<tr>
<td id="L1267" class="blob-num js-line-number" data-line-number="1267"></td>
<td id="LC1267" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">if</span> <span class="pl-c1">self</span>._output_size <span class="pl-k">is</span> <span class="pl-c1">None</span>:</td>
</tr>
<tr>
<td id="L1268" class="blob-num js-line-number" data-line-number="1268"></td>
<td id="LC1268" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">raise</span> <span class="pl-c1">ValueError</span>(<span class="pl-s"><span class="pl-pds">"</span>Initial output created by <span class="pl-c1">%s</span> has invalid shape <span class="pl-c1">%s</span><span class="pl-pds">"</span></span> <span class="pl-k">%</span></td>
</tr>
<tr>
<td id="L1269" class="blob-num js-line-number" data-line-number="1269"></td>
<td id="LC1269" class="blob-code blob-code-inner js-file-line"> (<span class="pl-c1">self</span>._cell_name, output_shape))</td>
</tr>
<tr>
<td id="L1270" class="blob-num js-line-number" data-line-number="1270"></td>
<td id="LC1270" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">if</span> <span class="pl-c1">self</span>._state_size <span class="pl-k">is</span> <span class="pl-c1">None</span>:</td>
</tr>
<tr>
<td id="L1271" class="blob-num js-line-number" data-line-number="1271"></td>
<td id="LC1271" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">raise</span> <span class="pl-c1">ValueError</span>(<span class="pl-s"><span class="pl-pds">"</span>Initial state created by <span class="pl-c1">%s</span> has invalid shape <span class="pl-c1">%s</span><span class="pl-pds">"</span></span> <span class="pl-k">%</span></td>
</tr>
<tr>
<td id="L1272" class="blob-num js-line-number" data-line-number="1272"></td>
<td id="LC1272" class="blob-code blob-code-inner js-file-line"> (<span class="pl-c1">self</span>._cell_name, state_shape))</td>
</tr>
<tr>
<td id="L1273" class="blob-num js-line-number" data-line-number="1273"></td>
<td id="LC1273" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L1274" class="blob-num js-line-number" data-line-number="1274"></td>
<td id="LC1274" class="blob-code blob-code-inner js-file-line"> <span class="pl-en">@</span><span class="pl-c1">property</span></td>
</tr>
<tr>
<td id="L1275" class="blob-num js-line-number" data-line-number="1275"></td>
<td id="LC1275" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">def</span> <span class="pl-en">state_size</span>(<span class="pl-smi"><span class="pl-smi">self</span></span>):</td>
</tr>
<tr>
<td id="L1276" class="blob-num js-line-number" data-line-number="1276"></td>
<td id="LC1276" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">return</span> <span class="pl-c1">self</span>._state_size</td>
</tr>
<tr>
<td id="L1277" class="blob-num js-line-number" data-line-number="1277"></td>
<td id="LC1277" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L1278" class="blob-num js-line-number" data-line-number="1278"></td>
<td id="LC1278" class="blob-code blob-code-inner js-file-line"> <span class="pl-en">@</span><span class="pl-c1">property</span></td>
</tr>
<tr>
<td id="L1279" class="blob-num js-line-number" data-line-number="1279"></td>
<td id="LC1279" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">def</span> <span class="pl-en">output_size</span>(<span class="pl-smi"><span class="pl-smi">self</span></span>):</td>
</tr>
<tr>
<td id="L1280" class="blob-num js-line-number" data-line-number="1280"></td>
<td id="LC1280" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">return</span> <span class="pl-c1">self</span>._output_size</td>
</tr>
<tr>
<td id="L1281" class="blob-num js-line-number" data-line-number="1281"></td>
<td id="LC1281" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L1282" class="blob-num js-line-number" data-line-number="1282"></td>
<td id="LC1282" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">def</span> <span class="pl-c1">__call__</span>(<span class="pl-smi"><span class="pl-smi">self</span></span>, <span class="pl-smi">inputs</span>, <span class="pl-smi">state</span>, <span class="pl-smi">scope</span><span class="pl-k">=</span><span class="pl-c1">None</span>):</td>
</tr>
<tr>
<td id="L1283" class="blob-num js-line-number" data-line-number="1283"></td>
<td id="LC1283" class="blob-code blob-code-inner js-file-line"> scope <span class="pl-k">=</span> scope <span class="pl-k">or</span> <span class="pl-c1">self</span>._cell_name</td>
</tr>
<tr>
<td id="L1284" class="blob-num js-line-number" data-line-number="1284"></td>
<td id="LC1284" class="blob-code blob-code-inner js-file-line"> output, state <span class="pl-k">=</span> <span class="pl-c1">self</span>._cell_fn(inputs, state, <span class="pl-v">scope</span><span class="pl-k">=</span>scope)</td>
</tr>
<tr>
<td id="L1285" class="blob-num js-line-number" data-line-number="1285"></td>
<td id="LC1285" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">return</span> output, state</td>
</tr>
</table>
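# Usage sketch (illustrative, not part of the original file): how these
# wrappers compose under the TF 1.x `tf.nn.rnn_cell` API. The layer size,
# sequence length, and device string below are arbitrary assumptions.
#
#     import tensorflow as tf
#
#     cells = [
#         tf.nn.rnn_cell.BasicLSTMCell(128),
#         tf.nn.rnn_cell.DeviceWrapper(
#             tf.nn.rnn_cell.ResidualWrapper(tf.nn.rnn_cell.BasicLSTMCell(128)),
#             "/gpu:0"),
#     ]
#     stacked = tf.nn.rnn_cell.MultiRNNCell(cells, state_is_tuple=True)
#
#     # Input depth must equal the cell size (128) for the residual add.
#     inputs = tf.placeholder(tf.float32, [None, 20, 128])
#     outputs, final_state = tf.nn.dynamic_rnn(stacked, inputs, dtype=tf.float32)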
<div class="BlobToolbar position-absolute js-file-line-actions dropdown js-menu-container js-select-menu d-none" aria-hidden="true">
<button class="btn-octicon ml-0 px-2 p-0 bg-white border border-gray-dark rounded-1 dropdown-toggle js-menu-target" id="js-file-line-action-button" type="button" aria-expanded="false" aria-haspopup="true" aria-label="Inline file action toolbar" aria-controls="inline-file-actions">
<svg class="octicon octicon-kebab-horizontal" viewBox="0 0 13 16" version="1.1" width="13" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M1.5 9a1.5 1.5 0 1 1 0-3 1.5 1.5 0 0 1 0 3zm5 0a1.5 1.5 0 1 1 0-3 1.5 1.5 0 0 1 0 3zm5 0a1.5 1.5 0 1 1 0-3 1.5 1.5 0 0 1 0 3z"/></svg>
</button>
<div class="dropdown-menu-content js-menu-content" id="inline-file-actions">
<ul class="BlobToolbar-dropdown dropdown-menu dropdown-menu-se mt-2">
<li><clipboard-copy class="dropdown-item" style="cursor:pointer;" id="js-copy-lines" data-original-text="Copy lines">Copy lines</clipboard-copy></li>
<li><clipboard-copy class="dropdown-item" id="js-copy-permalink" style="cursor:pointer;" data-original-text="Copy permalink">Copy permalink</clipboard-copy></li>
<li><a class="dropdown-item js-update-url-with-hash" id="js-view-git-blame" href="/tensorflow/tensorflow/blame/cbc658095ae228f2f557af47e4901d552573aa15/tensorflow/python/ops/rnn_cell_impl.py">View git blame</a></li>
<li><a class="dropdown-item" id="js-new-issue" href="/tensorflow/tensorflow/issues/new">Open new issue</a></li>
</ul>
</div>
</div>
</div>
</div>
<button type="button" data-facebox="#jump-to-line" data-facebox-class="linejump" data-hotkey="l" class="d-none">Jump to Line</button>
<div id="jump-to-line" style="display:none">
<!-- '"` --><!-- </textarea></xmp> --></option></form><form class="js-jump-to-line-form" action="" accept-charset="UTF-8" method="get"><input name="utf8" type="hidden" value="✓" />
<input class="form-control linejump-input js-jump-to-line-field" type="text" placeholder="Jump to line…" aria-label="Jump to line" autofocus>
<button type="submit" class="btn">Go</button>
</form> </div>
</div>
<div class="modal-backdrop js-touch-events"></div>
</div>
</div>
</div>
</div>
<div class="footer container-lg px-3" role="contentinfo">
<div class="position-relative d-flex flex-justify-between py-6 mt-6 f6 text-gray border-top border-gray-light ">
<ul class="list-style-none d-flex flex-wrap ">
<li class="mr-3">© 2018 <span title="0.23060s from unicorn-1000010005-crk39">GitHub</span>, Inc.</li>
<li class="mr-3"><a href="https://help.github.com/articles/github-terms-of-service/" data-ga-click="Footer, go to terms, text:terms">Terms</a></li>
<li class="mr-3"><a href="https://github.com/site/privacy" data-ga-click="Footer, go to privacy, text:privacy">Privacy</a></li>
<li class="mr-3"><a href="https://help.github.com/articles/github-security/" data-ga-click="Footer, go to security, text:security">Security</a></li>
<li class="mr-3"><a href="https://status.github.com/" data-ga-click="Footer, go to status, text:status">Status</a></li>
<li><a data-ga-click="Footer, go to help, text:help" href="https://help.github.com">Help</a></li>
</ul>
<a aria-label="Homepage" title="GitHub" class="footer-octicon" href="https://github.com">
<svg height="24" class="octicon octicon-mark-github" viewBox="0 0 16 16" version="1.1" width="24" aria-hidden="true"><path fill-rule="evenodd" d="M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.013 8.013 0 0 0 16 8c0-4.42-3.58-8-8-8z"/></svg>
</a>
<ul class="list-style-none d-flex flex-wrap ">
<li class="mr-3"><a data-ga-click="Footer, go to contact, text:contact" href="https://github.com/contact">Contact GitHub</a></li>
<li class="mr-3"><a href="https://developer.github.com" data-ga-click="Footer, go to api, text:api">API</a></li>
<li class="mr-3"><a href="https://training.github.com" data-ga-click="Footer, go to training, text:training">Training</a></li>
<li class="mr-3"><a href="https://shop.github.com" data-ga-click="Footer, go to shop, text:shop">Shop</a></li>
<li class="mr-3"><a data-ga-click="Footer, go to blog, text:blog" href="https://github.com/blog">Blog</a></li>
<li><a data-ga-click="Footer, go to about, text:about" href="https://github.com/about">About</a></li>
</ul>
</div>
</div>
<div id="ajax-error-message" class="ajax-error-message flash flash-error">
<svg class="octicon octicon-alert" viewBox="0 0 16 16" version="1.1" width="16" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M8.865 1.52c-.18-.31-.51-.5-.87-.5s-.69.19-.87.5L.275 13.5c-.18.31-.18.69 0 1 .19.31.52.5.87.5h13.7c.36 0 .69-.19.86-.5.17-.31.18-.69.01-1L8.865 1.52zM8.995 13h-2v-2h2v2zm0-3h-2V6h2v4z"/></svg>
<button type="button" class="flash-close js-ajax-error-dismiss" aria-label="Dismiss error">
<svg class="octicon octicon-x" viewBox="0 0 12 16" version="1.1" width="12" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M7.48 8l3.75 3.75-1.48 1.48L6 9.48l-3.75 3.75-1.48-1.48L4.52 8 .77 4.25l1.48-1.48L6 6.52l3.75-3.75 1.48 1.48z"/></svg>
</button>
You can't perform that action at this time.
</div>
<script crossorigin="anonymous" type="application/javascript" src="https://assets-cdn.github.com/assets/compat-af4f19572e1d43f69746f70aa5f01dc9.js"></script>
<script crossorigin="anonymous" type="application/javascript" src="https://assets-cdn.github.com/assets/frameworks-dd22b45b9495a43602787a69962f696c.js"></script>
<script crossorigin="anonymous" async="async" type="application/javascript" src="https://assets-cdn.github.com/assets/github-48130667aaece5c4ce8ee28ecdb29539.js"></script>
<div class="js-stale-session-flash stale-session-flash flash flash-warn flash-banner d-none">
<svg class="octicon octicon-alert" viewBox="0 0 16 16" version="1.1" width="16" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M8.865 1.52c-.18-.31-.51-.5-.87-.5s-.69.19-.87.5L.275 13.5c-.18.31-.18.69 0 1 .19.31.52.5.87.5h13.7c.36 0 .69-.19.86-.5.17-.31.18-.69.01-1L8.865 1.52zM8.995 13h-2v-2h2v2zm0-3h-2V6h2v4z"/></svg>
<span class="signed-in-tab-flash">You signed in with another tab or window. <a href="">Reload</a> to refresh your session.</span>
<span class="signed-out-tab-flash">You signed out in another tab or window. <a href="">Reload</a> to refresh your session.</span>
</div>
<div class="facebox" id="facebox" style="display:none;">
<div class="facebox-popup">
<div class="facebox-content" role="dialog" aria-labelledby="facebox-header" aria-describedby="facebox-description">
</div>
<button type="button" class="facebox-close js-facebox-close" aria-label="Close modal">
<svg class="octicon octicon-x" viewBox="0 0 12 16" version="1.1" width="12" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M7.48 8l3.75 3.75-1.48 1.48L6 9.48l-3.75 3.75-1.48-1.48L4.52 8 .77 4.25l1.48-1.48L6 6.52l3.75-3.75 1.48 1.48z"/></svg>
</button>
</div>
</div>
</body>
</html>
|
import os
import re
import codecs
from utils import create_dico, create_mapping, zero_digits
from utils import iob2, iob_iobes
def load_sentences(path, lower, zeros):
"""
Load sentences. A line must contain at least a word and its tag.
Sentences are separated by empty lines.
"""
sentences = []
sentence = []
for line in codecs.open(path, 'r', 'utf8'):
line = zero_digits(line.rstrip()) if zeros else line.rstrip()
if not line:
if len(sentence) > 0:
if 'DOCSTART' not in sentence[0][0]:
sentences.append(sentence)
sentence = []
else:
word = line.split()
assert len(word) >= 2
sentence.append(word)
if len(sentence) > 0:
if 'DOCSTART' not in sentence[0][0]:
sentences.append(sentence)
return sentences
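# Example (illustrative, not from the original repo): `load_sentences`
# expects CoNLL-style input, one token and its tag per whitespace-separated
# line, with blank lines separating sentences. Given a hypothetical file
# "train.txt":
#
#     John  B-PER
#     lives O
#     in    O
#     Paris B-LOC
#
#     EU    B-ORG
#
# the call `load_sentences('train.txt', lower=False, zeros=True)` returns
#     [[['John', 'B-PER'], ['lives', 'O'], ['in', 'O'], ['Paris', 'B-LOC']],
#      [['EU', 'B-ORG']]]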
def update_tag_scheme(sentences, tag_scheme):
"""
Check and update sentences tagging scheme to IOB2.
Only IOB1 and IOB2 schemes are accepted.
"""
for i, s in enumerate(sentences):
tags = [w[-1] for w in s]
# Check that tags are given in the IOB format
if not iob2(tags):
s_str = '\n'.join(' '.join(w) for w in s)
raise Exception('Sentences should be given in IOB format! ' +
'Please check sentence %i:\n%s' % (i, s_str))
if tag_scheme == 'iob':
# If format was IOB1, we convert to IOB2
for word, new_tag in zip(s, tags):
word[-1] = new_tag
elif tag_scheme == 'iobes':
new_tags = iob_iobes(tags)
for word, new_tag in zip(s, new_tags):
word[-1] = new_tag
else:
raise Exception('Unknown tagging scheme!')
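# For intuition (assuming `iob_iobes` implements the standard mapping):
# converting to IOBES relabels a single-token entity S- and the last token
# of a multi-token entity E-, e.g.
#
#     ['B-PER', 'I-PER', 'O', 'B-LOC']  ->  ['B-PER', 'E-PER', 'O', 'S-LOC']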
def word_mapping(sentences, lower):
"""
Create a dictionary and a mapping of words, sorted by frequency.
"""
words = [[x[0].lower() if lower else x[0] for x in s] for s in sentences]
dico = create_dico(words)
dico['<UNK>'] = 10000000
word_to_id, id_to_word = create_mapping(dico)
print "Found %i unique words (%i in total)" % (
len(dico), sum(len(x) for x in words)
)
return dico, word_to_id, id_to_word
def char_mapping(sentences):
"""
Create a dictionary and mapping of characters, sorted by frequency.
"""
chars = ["".join([w[0] for w in s]) for s in sentences]
dico = create_dico(chars)
char_to_id, id_to_char = create_mapping(dico)
print "Found %i unique characters" % len(dico)
return dico, char_to_id, id_to_char
def tag_mapping(sentences):
"""
Create a dictionary and a mapping of tags, sorted by frequency.
"""
tags = [[word[-1] for word in s] for s in sentences]
dico = create_dico(tags)
tag_to_id, id_to_tag = create_mapping(dico)
print "Found %i unique named entity tags" % len(dico)
return dico, tag_to_id, id_to_tag
def cap_feature(s):
"""
Capitalization feature:
0 = low caps
1 = all caps
2 = first letter caps
3 = one capital (not first letter)
"""
if s.lower() == s:
return 0
elif s.upper() == s:
return 1
elif s[0].upper() == s[0]:
return 2
else:
return 3
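# Examples of the four capitalization classes:
#
#     >>> cap_feature('paris'), cap_feature('NASA'), cap_feature('Paris'), cap_feature('iPhone')
#     (0, 1, 2, 3)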
def prepare_dataset(sentences, word_to_id, char_to_id, tag_to_id, lower=False):
"""
Prepare the dataset. Return a list of lists of dictionaries containing:
- word indexes
- word char indexes
- tag indexes
"""
def f(x): return x.lower() if lower else x
data = []
for s in sentences:
str_words = [w[0] for w in s]
words = [word_to_id[f(w) if f(w) in word_to_id else '<UNK>']
for w in str_words]
# Skip characters that are not in the training set
chars = [[char_to_id[c] for c in w if c in char_to_id]
for w in str_words]
caps = [cap_feature(w) for w in str_words]
tags = [tag_to_id[w[-1]] for w in s]
data.append({
'str_words': str_words,
'words': words,
'chars': chars,
'caps': caps,
'tags': tags,
})
return data
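# Illustrative output for a single two-word sentence (all indexes here
# are hypothetical, since they depend on the fitted mappings):
# {'str_words': ['EU', 'rejects'], 'words': [5, 12],
#  'chars': [[3, 9], [7, 2, 6, 2, 8, 4, 1]], 'caps': [1, 0], 'tags': [2, 0]}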
def augment_with_pretrained(dictionary, ext_emb_path, words):
"""
Augment the dictionary with words that have a pretrained embedding.
If `words` is None, we add every word that has a pretrained embedding
to the dictionary, otherwise, we only add the words that are given by
`words` (typically the words in the development and test sets.)
"""
    print('Loading pretrained embeddings from %s...' % ext_emb_path)
assert os.path.isfile(ext_emb_path)
# Load pretrained embeddings from file
    pretrained = set([
        line.rstrip().split()[0].strip()
        for line in codecs.open(ext_emb_path, 'r', 'utf-8')
        if len(line.rstrip()) > 0
    ])
# We either add every word in the pretrained file,
# or only words given in the `words` list to which
# we can assign a pretrained embedding
if words is None:
for word in pretrained:
if word not in dictionary:
dictionary[word] = 0
else:
for word in words:
if any(x in pretrained for x in [
word,
word.lower(),
                re.sub(r'\d', '0', word.lower())
]) and word not in dictionary:
dictionary[word] = 0
word_to_id, id_to_word = create_mapping(dictionary)
return dictionary, word_to_id, id_to_word
|
import numpy as np
from vispy.scene.visuals import Compound, Line, Mesh, Text
from vispy.visuals.transforms import STTransform
from ...layers.shapes._shapes_utils import triangulate_ellipse
from ...utils.colormaps.standardize_color import transform_color
from ...utils.theme import get_theme
from ...utils.translations import trans
def make_dashed_line(num_dashes, axis):
"""Make a dashed line.
Parameters
----------
num_dashes : int
Number of dashes in the line.
axis : int
Axis which is dashed.
Returns
-------
np.ndarray
Dashed line, of shape (num_dashes, 3) with zeros in
the non dashed axes and line segments in the dashed
axis.
"""
dashes = np.linspace(0, 1, num_dashes * 2)
dashed_line_ends = np.concatenate(
[[dashes[2 * i], dashes[2 * i + 1]] for i in range(num_dashes)], axis=0
)
dashed_line = np.zeros((2 * num_dashes, 3))
dashed_line[:, axis] = np.array(dashed_line_ends)
return dashed_line
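# Sketch of the output for make_dashed_line(2, axis=0): a (4, 3) array
# whose x column is [0, 1/3, 2/3, 1] and whose other columns are zero;
# rendered with connect='segments' (as done below), consecutive row
# pairs become the dashes 0->1/3 and 2/3->1.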
def make_arrow_head(num_segments, axis):
"""Make an arrowhead line.
Parameters
----------
num_segments : int
Number of segments in the arrowhead.
axis
Arrowhead direction.
Returns
-------
np.ndarray, np.ndarray
Vertices and faces of the arrowhead.
"""
corners = np.array([[-1, -1], [-1, 1], [1, 1], [1, -1]]) * 0.1
vertices, faces = triangulate_ellipse(corners, num_segments)
full_vertices = np.zeros((num_segments + 1, 3))
inds = list(range(3))
inds.pop(axis)
full_vertices[:, inds] = vertices
full_vertices[:, axis] = 0.9
full_vertices[0, axis] = 1.02
return full_vertices, faces
def color_lines(colors):
if len(colors) == 2:
return np.concatenate(
[[colors[0]] * 2, [colors[1]] * 2],
axis=0,
)
elif len(colors) == 3:
return np.concatenate(
[[colors[0]] * 2, [colors[1]] * 2, [colors[2]] * 2],
axis=0,
)
else:
        raise ValueError(
trans._(
'Either 2 or 3 colors must be provided, got {number}.',
deferred=True,
number=len(colors),
)
)
def color_dashed_lines(colors):
if len(colors) == 2:
return np.concatenate(
[[colors[0]] * 2, [colors[1]] * 4 * 2],
axis=0,
)
elif len(colors) == 3:
return np.concatenate(
[[colors[0]] * 2, [colors[1]] * 4 * 2, [colors[2]] * 8 * 2],
axis=0,
)
else:
        raise ValueError(
trans._(
'Either 2 or 3 colors must be provided, got {number}.',
deferred=True,
number=len(colors),
)
)
def color_arrowheads(colors, num_segments):
if len(colors) == 2:
return np.concatenate(
[[colors[0]] * num_segments, [colors[1]] * num_segments],
axis=0,
)
elif len(colors) == 3:
return np.concatenate(
[
[colors[0]] * num_segments,
[colors[1]] * num_segments,
[colors[2]] * num_segments,
],
axis=0,
)
else:
        raise ValueError(
trans._(
'Either 2 or 3 colors must be provided, got {number}.',
deferred=True,
number=len(colors),
)
)
class VispyAxesOverlay:
"""Axes indicating world coordinate origin and orientation."""
_NUM_SEGMENTS_ARROWHEAD = 100
def __init__(self, viewer, parent=None, order=0):
self._viewer = viewer
self._scale = 1
# Target axes length in canvas pixels
self._target_length = 80
# CMYRGB for 6 axes data in x, y, z, ... ordering
self._default_color = [
[0, 1, 1, 1],
[1, 0, 1, 1],
[1, 1, 0, 1],
[1, 0, 0, 1],
[0, 1, 0, 1],
[0, 0, 1, 1],
]
# Text offset from line end position
self._text_offsets = 0.1 * np.array([1, 1, 1])
# note order is x, y, z for VisPy
self._line_data2D = np.array(
[[0, 0, 0], [1, 0, 0], [0, 0, 0], [0, 1, 0]]
)
self._line_data3D = np.array(
[[0, 0, 0], [1, 0, 0], [0, 0, 0], [0, 1, 0], [0, 0, 0], [0, 0, 1]]
)
# note order is x, y, z for VisPy
self._dashed_line_data2D = np.concatenate(
[[[1, 0, 0], [0, 0, 0]], make_dashed_line(4, axis=1)],
axis=0,
)
self._dashed_line_data3D = np.concatenate(
[
[[1, 0, 0], [0, 0, 0]],
make_dashed_line(4, axis=1),
make_dashed_line(8, axis=2),
],
axis=0,
)
# note order is x, y, z for VisPy
vertices = np.empty((0, 3))
faces = np.empty((0, 3))
for axis in range(2):
v, f = make_arrow_head(self._NUM_SEGMENTS_ARROWHEAD, axis)
faces = np.concatenate([faces, f + len(vertices)], axis=0)
vertices = np.concatenate([vertices, v], axis=0)
self._default_arrow_vertices2D = vertices
self._default_arrow_faces2D = faces.astype(int)
vertices = np.empty((0, 3))
faces = np.empty((0, 3))
for axis in range(3):
v, f = make_arrow_head(self._NUM_SEGMENTS_ARROWHEAD, axis)
faces = np.concatenate([faces, f + len(vertices)], axis=0)
vertices = np.concatenate([vertices, v], axis=0)
self._default_arrow_vertices3D = vertices
self._default_arrow_faces3D = faces.astype(int)
self.node = Compound(
[Line(connect='segments', method='gl', width=3), Mesh(), Text()],
parent=parent,
)
self.node.transform = STTransform()
self.node.order = order
# Add a text node to display axes labels
self.text_node = self.node._subvisuals[2]
self.text_node.font_size = 10
self.text_node.anchors = ('center', 'center')
self.text_node.text = f'{1}'
self.node.canvas._backend.destroyed.connect(self._set_canvas_none)
self._viewer.events.theme.connect(self._on_data_change)
self._viewer.axes.events.visible.connect(self._on_visible_change)
self._viewer.axes.events.colored.connect(self._on_data_change)
self._viewer.axes.events.dashed.connect(self._on_data_change)
self._viewer.axes.events.labels.connect(self._on_data_change)
self._viewer.axes.events.arrows.connect(self._on_data_change)
self._viewer.dims.events.order.connect(self._on_data_change)
self._viewer.dims.events.range.connect(self._on_data_change)
self._viewer.dims.events.ndisplay.connect(self._on_data_change)
self._viewer.dims.events.axis_labels.connect(self._on_data_change)
self._viewer.camera.events.zoom.connect(self._on_zoom_change)
self._on_visible_change(None)
self._on_data_change(None)
def _set_canvas_none(self):
self.node._set_canvas(None)
self.text_node._set_canvas(None)
def _on_visible_change(self, event):
"""Change visibiliy of axes."""
self.node.visible = self._viewer.axes.visible
self._on_zoom_change(event)
self._on_data_change(event)
def _on_data_change(self, event):
"""Change style of axes."""
if not self._viewer.axes.visible:
return
# Determine which axes are displayed
axes = self._viewer.dims.displayed
# Actual number of displayed dims
ndisplay = len(self._viewer.dims.displayed)
# Determine the labels of those axes
axes_labels = [self._viewer.dims.axis_labels[a] for a in axes[::-1]]
# Counting backwards from total number of dimensions
# determine axes positions. This is done as by default
# the last NumPy axis corresponds to the first Vispy axis
reversed_axes = [self._viewer.dims.ndim - 1 - a for a in axes[::-1]]
# Determine colors of axes based on reverse position
if self._viewer.axes.colored:
axes_colors = [
self._default_color[ra % len(self._default_color)]
for ra in reversed_axes
]
else:
# the reason for using the `as_hex` here is to avoid
# `UserWarning` which is emitted when RGB values are above 1
background_color = get_theme(
self._viewer.theme, False
).canvas.as_hex()
background_color = transform_color(background_color)[0]
color = np.subtract(1, background_color)
color[-1] = background_color[-1]
axes_colors = [color] * ndisplay
# Determine data based on number of displayed dimensions and
# axes visualization parameters
if self._viewer.axes.dashed and ndisplay == 2:
data = self._dashed_line_data2D
color = color_dashed_lines(axes_colors)
text_data = self._line_data2D[1::2]
elif self._viewer.axes.dashed and ndisplay == 3:
data = self._dashed_line_data3D
color = color_dashed_lines(axes_colors)
text_data = self._line_data3D[1::2]
elif not self._viewer.axes.dashed and ndisplay == 2:
data = self._line_data2D
color = color_lines(axes_colors)
text_data = self._line_data2D[1::2]
elif not self._viewer.axes.dashed and ndisplay == 3:
data = self._line_data3D
color = color_lines(axes_colors)
text_data = self._line_data3D[1::2]
else:
raise ValueError(
trans._(
'Axes dash status and ndisplay combination not supported',
deferred=True,
)
)
if self._viewer.axes.arrows and ndisplay == 2:
arrow_vertices = self._default_arrow_vertices2D
arrow_faces = self._default_arrow_faces2D
arrow_color = color_arrowheads(
axes_colors, self._NUM_SEGMENTS_ARROWHEAD
)
elif self._viewer.axes.arrows and ndisplay == 3:
arrow_vertices = self._default_arrow_vertices3D
arrow_faces = self._default_arrow_faces3D
arrow_color = color_arrowheads(
axes_colors, self._NUM_SEGMENTS_ARROWHEAD
)
else:
arrow_vertices = np.zeros((3, 3))
arrow_faces = np.array([[0, 1, 2]])
arrow_color = [[0, 0, 0, 0]]
self.node._subvisuals[0].set_data(data, color)
self.node._subvisuals[1].set_data(
vertices=arrow_vertices,
faces=arrow_faces,
face_colors=arrow_color,
)
# Set visibility status of text
self.text_node.visible = (
self._viewer.axes.visible and self._viewer.axes.labels
)
self.text_node.text = axes_labels
self.text_node.color = axes_colors
self.text_node.pos = text_data + self._text_offsets
def _on_zoom_change(self, event):
"""Update axes length based on zoom scale."""
if not self._viewer.axes.visible:
return
scale = 1 / self._viewer.camera.zoom
# If scale has not changed, do not redraw
if abs(np.log10(self._scale) - np.log10(scale)) < 1e-4:
return
self._scale = scale
scale_canvas2world = self._scale
target_canvas_pixels = self._target_length
scale = target_canvas_pixels * scale_canvas2world
# Update axes scale
self.node.transform.scale = [scale, scale, scale, 1]
|
import os
import random
import numpy as np
import pandas as pd
import tensorflow as tf
from augment import Augment
AUTO = tf.data.experimental.AUTOTUNE
def set_dataset(task, data_path):
trainset = pd.read_csv(
os.path.join(
data_path, 'imagenet_trainset.csv'
)).values.tolist()
trainset = [[os.path.join(data_path, t[0]), t[1]] for t in trainset]
if task == 'lincls':
valset = pd.read_csv(
os.path.join(
data_path, 'imagenet_valset.csv'
)).values.tolist()
valset = [[os.path.join(data_path, t[0]), t[1]] for t in valset]
return np.array(trainset, dtype='object'), np.array(valset, dtype='object')
return np.array(trainset, dtype='object')
class DataLoader:
def __init__(self, args, mode, datalist, batch_size, num_workers=1, shuffle=True):
self.args = args
self.mode = mode
self.datalist = datalist
self.batch_size = batch_size
self.num_workers = num_workers
self.shuffle = shuffle
self.dataloader = self._dataloader()
def __len__(self):
return len(self.datalist)
def fetch_dataset(self, path, y=None):
x = tf.io.read_file(path)
if y is not None:
return tf.data.Dataset.from_tensors((x, y))
return tf.data.Dataset.from_tensors(x)
def augmentation(self, img, shape):
augset = Augment(self.args, self.mode)
if self.args.task in ['v1', 'v2']:
img_list = []
for _ in range(2): # query, key
aug_img = tf.identity(img)
if self.args.task == 'v1':
aug_img = augset._augmentv1(aug_img, shape) # moco v1
else:
radius = np.random.choice([3, 5])
aug_img = augset._augmentv2(aug_img, shape, (radius, radius)) # moco v2
img_list.append(aug_img)
return img_list
else:
return augset._augment_lincls(img, shape)
def dataset_parser(self, value, label=None):
shape = tf.image.extract_jpeg_shape(value)
img = tf.io.decode_jpeg(value, channels=3)
if label is None:
# moco
query, key = self.augmentation(img, shape)
inputs = {'query': query, 'key': key}
labels = tf.zeros([])
else:
# lincls
inputs = self.augmentation(img, shape)
labels = tf.one_hot(label, self.args.classes)
return (inputs, labels)
def shuffle_BN(self, value, labels):
if self.num_workers > 1:
pre_shuffle = [(i, value['key'][i]) for i in range(self.batch_size)]
random.shuffle(pre_shuffle)
shuffle_idx = []
value_temp = []
for vv in pre_shuffle:
shuffle_idx.append(vv[0])
value_temp.append(tf.expand_dims(vv[1], axis=0))
value['key'] = tf.concat(value_temp, axis=0)
unshuffle_idx = np.array(shuffle_idx).argsort().tolist()
value.update({'unshuffle': unshuffle_idx})
return (value, labels)
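    # Sketch of the bookkeeping above, assuming batch_size=4: if the
    # shuffle reorders the 'key' rows as [2, 0, 3, 1], then
    # np.array([2, 0, 3, 1]).argsort() == [1, 3, 0, 2], so gathering the
    # key features with 'unshuffle' downstream can restore batch order.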
def _dataloader(self):
self.imglist = self.datalist[:,0].tolist()
if self.args.task in ['v1', 'v2']:
dataset = tf.data.Dataset.from_tensor_slices(self.imglist)
else:
self.labellist = self.datalist[:,1].tolist()
dataset = tf.data.Dataset.from_tensor_slices((self.imglist, self.labellist))
dataset = dataset.repeat()
if self.shuffle:
dataset = dataset.shuffle(len(self.datalist))
dataset = dataset.interleave(self.fetch_dataset, num_parallel_calls=AUTO)
dataset = dataset.map(self.dataset_parser, num_parallel_calls=AUTO)
dataset = dataset.batch(self.batch_size)
dataset = dataset.prefetch(AUTO)
if self.args.shuffle_bn and self.args.task in ['v1', 'v2']:
# only moco
dataset = dataset.map(self.shuffle_BN, num_parallel_calls=AUTO)
return dataset
|
#!/usr/bin/python
#coding:utf-8
import numpy as np
import logging
import mylog
import mykmeans as ml
logger = logging.getLogger(__name__)
logger.setLevel(logging.ERROR)
def str2num(s):
a = ['very_low', 'Low', 'Middle', 'High']
for i in range(0, len(a)):
if a[i] == s:
return float(i)
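# For example, str2num('very_low') == 0.0 and str2num('High') == 3.0;
# an unlisted label falls through the loop and implicitly returns None.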
if __name__ == '__main__':
filename = './data/data_user_modeling.txt'
train_data = np.loadtxt(filename, delimiter = ',', converters = {5:str2num})
logger.debug(train_data)
logger.debug(train_data.shape)
train_x = train_data[:,0:-1]
train_y = train_data[:,-1]
logger.debug(train_x)
logger.debug(train_y)
param = {}
param['use_random_for_k'] = 1
param['k'] = [i for i in range(0, 258, 1)]
param['n_clusters'] = 4
param['max_iter'] = 100
kmeans = ml.Kmeans(param)
kmeans.Fit(train_x)
# logger.debug(kmeans)
pred = kmeans.Predict(train_x)
logger.info('train_y:%s', train_y)
logger.info(' pred:%s', pred)
    # logger.info('k-means accuracy:%f', 1.0*sum(pred == train_y)/len(train_y))
# ml.PickingRightK(train_x, param)
import myplot
myplot.Figure()
ml.FitMulti(train_x, param, 100)
ml.BisectingFitMulti(train_x, param, 100)
myplot.Legend(['k-means','bisecting'])
myplot.Title('user modeling')
myplot.Show()
|
"""
Generalized linear models optimized with online gradient descent from :mod:`creme.optim`.
"""
from .fm import FMRegressor
from .lin_reg import LinearRegression
from .log_reg import LogisticRegression
from .pa import PAClassifier
from .pa import PARegressor
from .softmax import SoftmaxRegression
__all__ = [
'FMRegressor',
'LinearRegression',
'LogisticRegression',
'PAClassifier',
'PARegressor',
'SoftmaxRegression'
]
|
import json
import os
from astropy.table import Table, Column
from ..config import exporters
from ..qt.widgets import ScatterWidget, HistogramWidget
from ..core import Subset
def save_page(page, page_number, label, subset):
""" Convert a tab of a glue session into a D3PO page
:param page: Tuple of data viewers to save
:param label: Tab label
"""
result = {}
# layout settings
result['grid'] = {'nRows': 1, 'nColumns': len(page)}
result['name'] = str(label)
result['caption'] = 'Generated by Glue'
# style settings
d = page[0]._data[0]
unselected = dict(opacity=d.style.alpha,
size=d.style.markersize / 2,
color=d.style.color)
result['markerStyle'] = dict(unselected=unselected)
if subset is not None:
s = subset.style
selected = dict(opacity=s.alpha, size=s.markersize / 2, color=s.color)
result['markerStyle']['selected'] = selected
result['selection'] = {'type': 'booleanColumn',
'columnName': 'selection_%i' % page_number}
result['histogramStyle'] = result['markerStyle']
# save each plot
    result['plots'] = list(map(save_plot, page, range(len(page))))
return result
def save_plot_base(plot, index):
result = {}
result['gridPosition'] = [0, index]
return result
def save_plot(plot, index):
dispatch = {ScatterWidget: save_scatter,
HistogramWidget: save_histogram}
typ = type(plot)
return dispatch[typ](plot, index)
def save_scatter(plot, index):
""" Convert a single glue scatter plot to a D3PO plot
:param plot: Glue scatter plot
    :type plot: :class:`~glue.qt.widgets.scatter_widget.ScatterWidget`
:param index: 1D index of plot on the page
:type index: int
:rtype: json-serializable dict
"""
result = save_plot_base(plot, index)
props = plot.properties
result['type'] = 'scatter'
result['xAxis'] = dict(columnName=props['xatt'].label,
range=[props['xmin'], props['xmax']])
result['yAxis'] = dict(columnName=props['yatt'].label,
range=[props['ymin'], props['ymax']])
# XXX log scales
return result
def save_histogram(plot, index):
""" Convert a single histogram to a D3PO plot
:param plot: Glue histogram
:type plot: :class:`~glue.qt.widgets.histogram_widget.HistogramWidget`
:param index: 1D index of plot on the page
:type index: int
:rtype: json-serializable dict
"""
result = save_plot_base(plot, index)
props = plot.properties
result['type'] = 'histogram'
result['xAxis'] = dict(columnName=props['component'].label,
bins=props['nbins'],
range=[props['xmin'], props['xmax']])
    # XXX normed, cumulative, log
return result
def stage_subsets(application):
"""
Return a tuple of the subset to use for each stage/tab,
    or None if the tab has no subset.
    If more than one subset is used per stage/tab, returns None.
"""
result = []
for page in application.viewers:
subset = None
for viewer in page:
for layer_artist in viewer.layers:
if not layer_artist.visible:
continue
s = layer_artist.layer
if not isinstance(s, Subset):
continue
if subset is not None and s is not subset:
return None
if subset is None:
subset = s
result.append(subset)
return tuple(result)
def can_save_d3po(application):
"""
Check whether an application can be exported to D3PO.
Raises an exception if not
"""
dc = application.session.data_collection
if len(dc) != 1:
raise ValueError("D3PO Export only supports a single dataset")
data = dc[0]
for tab in application.viewers:
for viewer in tab:
if not isinstance(viewer, (ScatterWidget, HistogramWidget)):
raise ValueError("D3PO Export only supports scatter "
"and histogram plots")
if sum(len(tab) for tab in application.viewers) == 0:
raise ValueError("D3PO Export requires at least one scatterplot "
"or histogram")
if stage_subsets(application) is None:
raise ValueError("D3PO Export restricted to 0 or 1 subsets visible "
"in each tab")
def make_data_file(data, subsets, path):
"""
Create the data.csv file, given Data and tuple of subsets
"""
data_path = os.path.join(path, 'data.csv')
t = Table([data[c] for c in data.components],
names=[c.label for c in data.components])
for i, subset in enumerate(subsets):
if subset is None:
continue
c = Column(data=subset.to_mask().astype('i'), name='selection_%i' % i)
t.add_column(c)
t.write(data_path, format='ascii', delimiter=',')
def save_d3po(application, path):
"""Save a Glue session to a D3PO bundle.
Currently, this has the following restrictions:
- The Glue session must have only one dataset open, and 0 or 1 subsets
- Only scatter plots or histograms are present
- At least one plot is present
    :param application: Glue application to save
:param path: Path to directory to save in. Will be created if needed
"""
if os.path.exists(path) and not os.path.isdir(path):
os.unlink(path)
if not os.path.exists(path):
os.mkdir(path)
data = application.session.data_collection[0]
subsets = stage_subsets(application)
viewers = application.viewers
# data.csv
make_data_file(data, subsets, path)
# states.json
result = {}
result['filename'] = 'data.csv' # XXX don't think this is needed?
result['title'] = "Glue export of %s" % data.label
    result['states'] = list(map(save_page, application.viewers,
                                range(len(viewers)),
                                application.tab_names,
                                subsets))
state_path = os.path.join(path, 'states.json')
with open(state_path, 'w') as outfile:
json.dump(result, outfile, indent=2)
# index.html
html_path = os.path.join(path, 'index.html')
with open(html_path, 'w') as outfile:
outfile.write(HTML)
# show the result
launch(path)
def launch(path):
"""Start a server to view an exported D3PO bundle, and open a browser.
:param path: The TLD of the bundle
"""
    from socketserver import TCPServer
    from http.server import SimpleHTTPRequestHandler
from random import randrange
from socket import error
import webbrowser
from threading import Thread
os.chdir(path)
while True:
try:
PORT = randrange(8000, 9000)
server = TCPServer(("", PORT), SimpleHTTPRequestHandler, False)
server.allow_reuse_address = True
server.server_bind()
break
except error: # port already taken
pass
    print('Serving D3PO at 0.0.0.0:%i' % PORT)
server.server_activate()
thread = Thread(target=server.serve_forever)
    thread.daemon = True  # do not prevent shutdown
thread.start()
webbrowser.open('http://0.0.0.0:%i' % PORT)
exporters.add('D3PO', save_d3po, can_save_d3po, outmode='directory')
HTML = """
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8" />
<link rel="stylesheet" type="text/css" href="http://d3po.org/static/css/style.css">
<link rel="stylesheet" type="text/css" href="http://d3po.org/static/css/d3po.css">
<link href='http://fonts.googleapis.com/css?family=Source+Sans+Pro:100,200,300,400,700' rel='stylesheet' type='text/css'>
<style>
#footer {
position: fixed;
bottom: 0;
right: 0;
}
</style>
<!-- not to be confused with Planet Telex -->
<!-- JavaScript dependencies -->
<script src="http://d3js.org/d3.v3.min.js" charset="utf-8"></script>
<script src="http://d3po.org/static/js/util.js"></script>
<script src="//ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js"></script>
<script src="http://d3po.org/static/js/d3po.js"></script>
<script src="http://d3po.org/static/js/d3po.init.js"></script>
</head>
<body>
<div id="svg"><svg></svg></div>
<div id="controls">
<ul class="navigation">
</ul>
</div>
<div id="caption"></div>
<div id="footer">
More information: <a href="http://d3po.org">d3po.org</a>
</div>
<script type="text/javascript">
$(document).ready(function() {
initialize('states.json', 'data.csv');
}
);
</script>
</body>
</html>
"""
|
# -*- coding: utf-8 -*-
#
# dedupe documentation build configuration file, created by
# sphinx-quickstart on Thu Apr 10 11:27:59 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'dedupe'
copyright = u'2018, Forest Gregg, Derek Eder, and contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.9.4'
# The full version, including alpha/beta/rc tags.
release = '1.9.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'common_*.rst']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Custom stylesheet
# html_style = 'css/custom.css'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {
# 'canonical_url': 'https://docs.dedupe.io/'
# }
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'dedupedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'dedupe.tex', u'dedupe Documentation',
u'Forest Gregg, Derek Eder, and contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'dedupe', u'dedupe Documentation',
[u'Forest Gregg, Derek Eder, and contributors'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'dedupe', u'dedupe Documentation',
u'Forest Gregg, Derek Eder, and contributors', 'dedupe', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This manages a User record in the Auth service.
A User stores basic information from a KeyCloak user (including the KeyCloak GUID).
"""
import datetime
from flask import current_app
from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer, String, and_, or_
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.orm import relationship
from auth_api.utils.enums import AccessType, LoginSource, Status, UserStatus
from auth_api.utils.roles import Role
from auth_api.utils.user_context import UserContext, user_context
from .base_model import BaseModel
from .db import db
from .membership import Membership as MembershipModel
from .org import Org as OrgModel
from .user_status_code import UserStatusCode
class User(BaseModel):
"""This is the model for a User."""
__tablename__ = 'users'
__versioned__ = {
'exclude': ['modified', 'modified_by_id', 'modified_by', 'created']
}
id = Column(Integer, primary_key=True)
username = Column('username', String(100), index=True)
firstname = Column('first_name', String(200), index=True)
lastname = Column('last_name', String(200), index=True)
email = Column('email', String(200), index=True)
keycloak_guid = Column(
'keycloak_guid', UUID(as_uuid=True), unique=True, nullable=True # bcros users comes with no guid
)
is_terms_of_use_accepted = Column(Boolean(), default=False, nullable=True)
terms_of_use_accepted_version = Column(
ForeignKey('documents.version_id'), nullable=True
)
    # A type identifying what kind of user this is, e.g. anonymous, bcsc, etc.; similar to login source
type = Column('type', String(200), nullable=True)
status = Column(ForeignKey('user_status_codes.id'))
idp_userid = Column('idp_userid', String(256), index=True)
login_source = Column('login_source', String(200), nullable=True)
login_time = Column(DateTime, default=None, nullable=True)
contacts = relationship('ContactLink', primaryjoin='User.id == ContactLink.user_id', lazy='select')
orgs = relationship('Membership',
primaryjoin='and_(User.id == Membership.user_id, or_(Membership.status == ' + str(
Status.ACTIVE.value) + ', Membership.status == ' + str(
Status.PENDING_APPROVAL.value) + '))', lazy='select') # noqa:E127
terms_of_use_version = relationship('Documents', foreign_keys=[terms_of_use_accepted_version], uselist=False,
lazy='select')
user_status = relationship('UserStatusCode', foreign_keys=[status], lazy='subquery')
@classmethod
def find_by_username(cls, username):
"""Return the first user with the provided username."""
return cls.query.filter_by(username=username).first()
@classmethod
@user_context
def find_by_jwt_token(cls, **kwargs):
"""Find an existing user by the keycloak GUID and (idpUserId is null or from token) in the provided token."""
user_from_context: UserContext = kwargs['user_context']
return db.session.query(User).filter(
and_(User.keycloak_guid == user_from_context.sub,
or_(User.idp_userid == user_from_context.token_info.get('idp_userid', None),
User.idp_userid.is_(None)))).one_or_none()
@classmethod
@user_context
def create_from_jwt_token(cls, first_name: str, last_name: str, **kwargs):
"""Create a User from the provided JWT."""
user_from_context: UserContext = kwargs['user_context']
token = user_from_context.token_info
if token:
user = User(
username=user_from_context.user_name,
firstname=first_name,
lastname=last_name,
email=token.get('email', None),
keycloak_guid=user_from_context.sub,
created=datetime.datetime.now(),
login_source=user_from_context.login_source,
status=UserStatusCode.get_default_type(),
idp_userid=token.get('idp_userid', None),
login_time=datetime.datetime.now(),
type=cls._get_type(user_from_context=user_from_context)
)
current_app.logger.debug(
'Creating user from JWT:{}; User:{}'.format(token, user)
)
user.save()
return user
return None
@classmethod
@user_context
def update_from_jwt_token(cls, user, # pylint:disable=too-many-arguments
first_name: str, last_name: str, is_login: bool = False, **kwargs):
"""Update a User from the provided JWT."""
user_from_context: UserContext = kwargs['user_context']
token = user_from_context.token_info
if not token or not user:
return None
# Do not save if nothing has been changed
# pylint: disable=too-many-boolean-expressions
if not is_login \
and (user.username == user_from_context.user_name or user.username) \
and user.firstname == first_name \
and user.lastname == last_name \
and user.email == token.get('email', user.email) \
and (str(user.keycloak_guid) == user_from_context.sub or user.keycloak_guid) \
and user.status == UserStatus.ACTIVE.value \
and (user.login_source == user_from_context.login_source or user.login_source) \
and user.idp_userid == token.get('idp_userid', None):
return user
current_app.logger.debug(
'Updating user from JWT:{}; User:{}'.format(token, user)
)
user.username = user_from_context.user_name or user.username
user.firstname = first_name
user.lastname = last_name
user.email = token.get('email', user.email)
user.modified = datetime.datetime.now()
if token.get('accessType', None) == AccessType.ANONYMOUS.value: # update kcguid for anonymous users
user.keycloak_guid = user_from_context.sub or user.keycloak_guid
# If this user is marked as Inactive, this login will re-activate them
user.status = UserStatus.ACTIVE.value
user.login_source = user_from_context.login_source or user.login_source
user.type = cls._get_type(user_from_context)
# If this is a request during login, update login_time
if is_login:
user.login_time = datetime.datetime.now()
user.idp_userid = token.get('idp_userid')
cls.commit()
return user
@classmethod
def find_users(cls, first_name, last_name, email):
"""Return a set of users with either the given username or the given email."""
# TODO: This needs to be improved for scalability. Paging large datasets etc.
if first_name == '' and last_name == '' and email == '':
return cls.query.all()
return cls.query.filter(or_(cls.firstname == first_name, cls.lastname == last_name, cls.email == email)).all()
@classmethod
@user_context
def update_terms_of_use(cls, is_terms_accepted, terms_of_use_version, **kwargs):
"""Update the terms of service for the user."""
user_from_context: UserContext = kwargs['user_context']
if user_from_context.token_info:
user = cls.find_by_jwt_token()
user.is_terms_of_use_accepted = is_terms_accepted
user.terms_of_use_accepted_version = terms_of_use_version
current_app.logger.debug(
'Updating users Terms of use is_terms_accepted:{}; terms_of_use_version:{}'.format(
is_terms_accepted, terms_of_use_version)
)
cls.save(user)
return user
return None
@classmethod
def find_users_by_org_id_by_status_by_roles(cls, org_id, roles, status=Status.ACTIVE.value):
"""Find all members of the org with a status."""
return db.session.query(User). \
join(MembershipModel,
(User.id == MembershipModel.user_id) & (MembershipModel.status == status) &
(MembershipModel.membership_type_code.in_(roles))). \
join(OrgModel).filter(OrgModel.id == org_id).all()
def delete(self):
"""Users cannot be deleted so intercept the ORM by just returning."""
return self
@classmethod
def _get_type(cls, user_from_context: UserContext) -> str:
"""Return type of the user from the token info."""
user_type: str = None
if user_from_context.roles:
if Role.ANONYMOUS_USER.value in user_from_context.roles \
or user_from_context.login_source == LoginSource.BCROS.value:
user_type = Role.ANONYMOUS_USER.name
elif Role.GOV_ACCOUNT_USER.value in user_from_context.roles:
user_type = Role.GOV_ACCOUNT_USER.name
elif Role.PUBLIC_USER.value in user_from_context.roles \
or user_from_context.login_source in [LoginSource.BCEID.value, LoginSource.BCSC.value]:
user_type = Role.PUBLIC_USER.name
elif user_from_context.is_staff():
user_type = Role.STAFF.name
elif user_from_context.is_system():
user_type = Role.SYSTEM.name
return user_type
|
from netcdf2geotiff import rgb_geotiff, singleband_geotiff
rgb_geotiff("test3.nc", "test3.tif", "RED", "GREEN", "BLUE", "lat", "lon")
singleband_geotiff("test3.nc", "tests3.tif", "IDEPIX_SNOW_ICE", "lat", "lon")
|
from collections import defaultdict
from aoc.util import load_input
def turn(d, fun, sxy, exy):
sx, sy = map(int, sxy.split(","))
ex, ey = map(int, exy.split(","))
for x in range(sx, ex + 1):
for y in range(sy, ey + 1):
d[(x, y)] = fun(d[(x, y)])
def run(data, toggle, turn_on, turn_off):
grid = defaultdict(lambda: 0)
for line in data:
token = line.split()
if line.startswith("toggle"):
turn(grid, toggle, token[1], token[3])
elif line.startswith("turn on"):
turn(grid, turn_on, token[2], token[4])
elif line.startswith("turn off"):
turn(grid, turn_off, token[2], token[4])
else:
            raise ValueError(f"Unrecognized instruction: {line}")
return sum(grid.values())
def part1(lines):
return run(lines, lambda v: not v, lambda _: True, lambda _: False)
def part2(lines):
return run(lines, lambda x: x + 2, lambda x: x + 1, lambda x: max(0, x - 1))
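# Quick sanity checks, assuming the puzzle's instruction format:
# part1(["turn on 0,0 through 1,1"]) == 4   # a 2x2 block of lit lights
# part2(["toggle 0,0 through 0,0"]) == 2    # one light at brightness 2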
if __name__ == "__main__":
data = load_input(__file__, 2015, "6")
print(part1(data))
print(part2(data))
|
from sly import Parser
from sly.yacc import _decorator as _
from .domas_lexer import DomasLexer
from .domas_quadruples import Quadruple
from .domas_errors import *
from . import domas_semantic_cube as sm
import json # to debug only
import os
import copy
os.system('color')
class DomasParser(Parser):
# Parser directives
tokens = DomasLexer.tokens
# debugfile = 'parser.out'
start = 'programa'
# Tables
function_table = {}
class_table = {}
constant_table = {'int': [], 'float': [], 'string': [], 'bool': []}
# Stacks
stack_of_stacks = [[], []] # operands, operators !important
stack_vars = []
last_arr_t = []
displacements = []
for_var_dir = []
break_stack = []
# Lists
quadruples = []
jumps = []
# Counters
quad_counter = 1
param_counter = 0
temp_counter = 0
attr_counter = 1
# Aux vars
current_class = None
last_arr_id = None
last_type = None
last_func_added = None
has_returned = False
found_errors = False
types = ['int', 'float', 'string', 'bool', 'void']
operators = ['+', '-', '*', '/', '<',
'>', '<=', '>=', '==', '<>', '&', '|']
# Add a function to the function table
def add_to_func_table(self, id, return_type):
self.function_table[id] = {
'return_type': return_type,
'vars': {},
'num_types': '0\u001f' * len(self.types),
'params': '',
'num_temps': '0\u001f' * len(self.types)
}
# Checks if a variable exists
def check_variable_exists(self, var):
if self.current_class != None:
return var in self.function_table[self.curr_scope]['vars'] or var in self.class_table[self.current_class]['vars']
return var in self.function_table[self.curr_scope]['vars'] or var in self.function_table[self.program_name]['vars']
# Returns the type of a variable if it exists
def get_var_type(self, var, tok):
if not self.check_variable_exists(var):
self.found_errors = True
print('ERROR: No variable\033[1m',
var, '\033[0mwas found.')
print(' Missing reference found on line',
tok.lineno)
return None
if self.current_class != None:
if var in self.function_table[self.curr_scope]['vars']:
return self.function_table[self.curr_scope]['vars'][var]['type']
return self.class_table[self.current_class]['vars'][var]['type']
if var in self.function_table[self.curr_scope]['vars']:
return self.function_table[self.curr_scope]['vars'][var]['type']
return self.function_table[self.program_name]['vars'][var]['type']
    # Updates the number of temporaries used in functions.
def update_num_temps(self, func_num_temps, type_idx, quantity=1):
lst = func_num_temps.split('\u001f')
lst[type_idx] = str(int(lst[type_idx]) + quantity)
return '\u001f'.join(lst)
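    # For example, with two counter slots and type_idx=1:
    # update_num_temps('1\u001f0\u001f', 1) == '1\u001f1\u001f'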
    # Checks whether a var is an array by looking for its first dimension
def check_var_is_array(self, var):
if not var:
return
if var['dir'] >= 4500 and var['dir'] < 6000:
return False
if not self.check_variable_exists(var['value']):
return False
if self.current_class != None:
if var['value'] in self.function_table[self.curr_scope]['vars']:
return 'd1' in self.function_table[self.curr_scope]['vars'][var['value']]
else:
return 'd1' in self.class_table[self.current_class]['vars'][var['value']]
elif var['value'] in self.function_table[self.curr_scope]['vars']:
return 'd1' in self.function_table[self.curr_scope]['vars'][var['value']]
else:
return 'd1' in self.function_table[self.program_name]['vars'][var['value']]
    # Makes quadruples for arithmetic operators and pushes them onto the quadruple stack
def make_and_push_quad(self):
ro = self.stack_of_stacks[-2].pop()
lo = self.stack_of_stacks[-2].pop()
op = self.stack_of_stacks[-1].pop()
if not ro or not lo:
raise SystemError("Reached unsolvable state")
r_type = sm.checkOperation(lo['type'], ro['type'], op)
self.last_type = r_type
idx = self.types.index(r_type)
num_temps = self.function_table[self.curr_scope]['num_temps']
self.function_table[self.curr_scope]['num_temps'] = self.update_num_temps(
num_temps, idx)
t_dir = idx * 300 + \
int(num_temps.split('\u001f')[idx]) + 3000
self.stack_of_stacks[-2].append(
{'value': 't' + str(self.temp_counter), 'type': r_type, 'dir': t_dir})
if self.check_var_is_array(lo):
lo_dir = '$' + str(self.last_arr_t.pop())
else:
lo_dir = lo['dir']
if self.check_var_is_array(ro):
ro_dir = '$' + str(self.last_arr_t.pop())
else:
ro_dir = ro['dir']
self.quadruples.append(
Quadruple(lo_dir, ro_dir, op, t_dir))
self.temp_counter += 1
self.quad_counter += 1
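    # Worked example of the temporary-address arithmetic above, assuming
    # the memory map implied by the offsets (temporaries start at 3000,
    # 300 slots per type): for r_type 'float' (idx 1) with two float
    # temporaries already counted, t_dir = 1 * 300 + 2 + 3000 = 3302.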
@_('PROGRAM ID pro1 SEMI pro0 declarations')
def programa(self, p):
if self.found_errors:
raise CompilationError()
# func_dir_out = open('debug/funcdir.out', 'w')
# class_dir_out = open('debug/classdir.out', 'w')
# func_dir_out.write(json.dumps(
# self.function_table, indent=2))
# class_dir_out.write(json.dumps(
# self.class_table, indent=2))
return (self.program_name, self.function_table, self.class_table, self.constant_table, self.quadruples)
# Creates goto main quadruple and appends it to the quadruple list
@_('')
def pro0(self, p):
self.quadruples.append(Quadruple(-1, -1, 'goto', -1))
self.quad_counter += 1
# creates function table
@_('')
def pro1(self, p):
self.program_name = p[-1]
self.curr_scope = p[-1]
self.function_table[p[-1]] = {
'return_type': None,
'vars': {}, 'num_types': '0\u001f0\u001f0\u001f0\u001f0\u001f'
}
@ _('class_declaration out_class var_declaration function_definition main')
def declarations(self, p):
return 'declarations'
@ _('''CLASS ID cd1 inherits LCURL ATTRIBUTES attribute_declaration METHODS
method_definition RCURL class_declaration''', 'empty')
def class_declaration(self, p):
return 'class_declaration'
# Adds class to class table
@ _('')
def cd1(self, p):
if p[-1] in self.class_table:
self.found_errors = True
print(
'ERROR: A class with the name\033[1m', p[-1], '\033[0mhas already been defined')
print(' Redefinition found on line',
self.symstack[-1].lineno)
else:
self.class_table[p[-1]] = {
'vars': {},
'num_types': '0\u001f0\u001f0\u001f0\u001f0\u001f'
}
self.current_class = p[-1]
@ _('INHERITS ID cd3', 'empty')
def inherits(self, p):
return 'inherits'
# Copies the information from parent class to its child.
@ _('')
def cd3(self, p):
if not p[-1] in self.class_table:
self.found_errors = True
print('ERROR: Id\033[1m', p[-1],
'\033[0mis not defined as a class')
print(' Missing reference found on line',
self.symstack[-1].lineno)
else:
self.class_table[self.current_class] = copy.deepcopy(
self.class_table[p[-1]])
@ _('VAR ID ad1 attr_vector', 'VAR ID ad1 attr_simple_var', 'empty')
def attribute_declaration(self, p):
return 'attribute_declaration'
# Appends declared variable to the stack of variables if it doesn't exist
@ _('')
def ad1(self, p):
if p[-1] in self.class_table[self.current_class]['vars']:
self.found_errors = True
print('ERROR: An attribute\033[1m',
p[-1], '\033[0mhas already been defined')
print(' Redefinition found on line',
self.symstack[-1].lineno)
else:
self.stack_vars.append(p[-1])
@ _('''LBRACKET CTE_I ad2 attr_multidim RBRACKET COLON simple_type ad4 SEMI
attribute_declaration''')
def attr_vector(self, p):
return 'vector'
# Adds the first dimension of an array to the information of the variable
@ _('')
def ad2(self, p):
self.latest_var = self.stack_vars.pop()
if self.class_table[self.current_class]['vars']:
self.class_table[self.current_class]['vars'][self.latest_var] = {
'd1': p[-1]}
else:
self.class_table[self.current_class]['vars'] = {
self.latest_var: {'d1': p[-1]}}
@ _('COMMA CTE_I ad3', 'empty')
def attr_multidim(self, p):
return 'attr_multidim'
# Adds the second dimension of an array to the information of the variable.
@ _('')
def ad3(self, p):
self.class_table[self.current_class]['vars'][self.latest_var]['d2'] = p[-1]
    # Adds the type and address to the declared variable's information and updates the count of types used in the class.
@ _('')
def ad4(self, p):
idx = self.types.index(p[-1])
num_types = self.class_table[self.current_class]['num_types']
if 'd1' in self.class_table[self.current_class]['vars'][self.latest_var]:
q = self.class_table[self.current_class]['vars'][self.latest_var]['d1']
if 'd2' in self.class_table[self.current_class]['vars'][self.latest_var]:
q *= self.class_table[self.current_class]['vars'][self.latest_var]['d2']
self.class_table[self.current_class]['vars'][self.latest_var]['dir'] = 6000 + idx * \
300 + int(num_types.split('\u001f')[idx])
self.class_table[self.current_class]['vars'][self.latest_var]['type'] = p[-1]
self.class_table[self.current_class]['num_types'] = self.update_num_temps(
num_types, idx, q)
@ _('attr_var_list COLON simple_type ad5 SEMI attribute_declaration')
def attr_simple_var(self, p):
return 'attr_simple_var'
@ _('COMMA ID ad1 attr_var_list', 'empty')
def attr_var_list(self, p):
return 'attr_var_list'
    # Pops the stack of variables, giving each its corresponding type.
@ _('')
def ad5(self, p):
while len(self.stack_vars) > 0:
curr_var = self.stack_vars.pop()
if curr_var in self.class_table[self.current_class]['vars']:
self.found_errors = True
print('ERROR: An attribute\033[1m',
curr_var, '\033[0mhas already been defined in class', self.current_class)
print(' Redefinition found on line',
self.symstack[-2].lineno)
idx = self.types.index(p[-1])
num_types = self.class_table[self.current_class]['num_types']
self.class_table[self.current_class]['num_types'] = self.update_num_temps(
num_types, idx)
self.class_table[self.current_class]['vars'][curr_var] = {
'type': p[-1], 'dir': 6000 + idx * 300 + int(num_types.split('\u001f')[idx])}
@ _('''def_type fd1 FUNCTION ID md3 LPAREN m_parameters
RPAREN LCURL fd4 var_declaration statements RCURL fd5 fd6
method_definition''', 'empty')
def method_definition(self, p):
return 'method_definition'
    # Adds the id, prefixed with the class name, to the function table.
@ _('')
def md3(self, p):
if p[-1] in self.class_table[self.current_class]['vars']:
self.found_errors = True
print('ERROR: An attribute or method\033[1m',
p[-1], '\033[0mhas already been defined in class', self.current_class)
print(' Redefinition found on line',
self.symstack[-1].lineno)
else:
self.add_to_func_table(
self.current_class + '.' + p[-1], self.curr_func_type)
self.last_func_added = self.current_class + '.' + p[-1]
self.curr_scope = self.last_func_added
idx = self.types.index(self.curr_func_type)
num_types = self.function_table[self.program_name]['num_types']
self.function_table[self.program_name]['num_types'] = self.update_num_temps(
num_types, idx)
self.function_table[self.program_name]['vars'][self.current_class + '.' + p[-1]] = {
'type': self.curr_func_type, 'dir': 0, 'real_dir': idx * 300 + int(num_types.split('\u001f')[idx])}
@_('ID p1 COLON simple_type m2 m_param_choose', 'empty')
def m_parameters(self, p):
return 'parameters'
# Adds the id and type of the parameter to the table of variables of the current method.
@ _('')
def m2(self, p):
if self.latest_var in self.function_table[self.curr_scope]['vars']:
self.found_errors = True
print('ERROR: A parameter\033[1m',
self.latest_var, '\033[0mhas already been declared for method', self.last_func_added.split('.')[1], 'in class', self.current_class)
print(' Redefinition found on line',
self.symstack[-2].lineno)
else:
idx = self.types.index(p[-1])
self.function_table[self.curr_scope]['params'] += str(
idx)
num_types = self.function_table[self.curr_scope]['num_types']
self.function_table[self.curr_scope]['num_types'] = self.update_num_temps(
num_types, idx)
self.function_table[self.curr_scope]['vars'][self.latest_var] = {
'type': p[-1],
'dir': 1500 + idx * 300 + int(num_types.split('\u001f')[idx])
}
@ _('COMMA m_parameters', 'empty')
def m_param_choose(self, p):
return 'm_param_choose'
@ _('')
def out_class(self, p):
self.current_class = None
self.curr_scope = self.program_name
@ _('VAR ID gvd1 vector', 'VAR ID gvd1 simple_var', 'empty')
def var_declaration(self, p):
return p[0]
    # Appends the current var to the stack of variables if it doesn't already exist.
@ _('')
def gvd1(self, p):
if p[-1] in self.function_table:
self.found_errors = True
print('ERROR: A function with ID\033[1m',
p[-1], '\033[0mhas already been declared. Variables may not share name with functions')
print(' Redefinition found on line',
self.symstack[-1].lineno)
# elif self.current_class != None and p[-1] in self.class_table[self.current_class]:
# raise RedefinitionError(p[-1])
else:
self.stack_vars.append(p[-1])
@ _('LBRACKET CTE_I gvd2 multidim RBRACKET COLON simple_type gvd4 SEMI var_declaration')
def vector(self, p):
return 'vector'
# Adds the variable and its first dimension to the variable table of the current scope.
@ _('')
def gvd2(self, p):
self.latest_var = self.stack_vars.pop()
if self.function_table[self.curr_scope]['vars']:
self.function_table[self.curr_scope]['vars'][self.latest_var] = {
'd1': p[-1]
}
else:
self.function_table[self.curr_scope]['vars'] = {
self.latest_var: {'d1': p[-1]}
}
@ _('COMMA CTE_I gvd3', 'empty')
def multidim(self, p):
return 'multidim'
@ _('var_list COLON composite_type gvd6 SEMI var_declaration', 'var_list COLON simple_type gvd5 SEMI var_declaration')
def simple_var(self, p):
return 'simple_var'
@ _('COMMA ID gvd1 var_list', 'empty')
def var_list(self, p):
return 'var_list'
# Adds the second dimension to the latest variable in the current scope
@ _('')
def gvd3(self, p):
self.function_table[self.curr_scope]['vars'][self.latest_var]['d2'] = p[-1]
    # Adds the type and address to the declared variable's information and updates the count of types used in the current scope.
@ _('')
def gvd4(self, p):
idx = self.types.index(p[-1])
num_types = self.function_table[self.curr_scope]['num_types']
offset = 1500 if self.curr_scope != self.program_name else 0
if 'd1' in self.function_table[self.curr_scope]['vars'][self.latest_var]:
q = self.function_table[self.curr_scope]['vars'][self.latest_var]['d1']
if 'd2' in self.function_table[self.curr_scope]['vars'][self.latest_var]:
q *= self.function_table[self.curr_scope]['vars'][self.latest_var]['d2']
self.function_table[self.curr_scope]['vars'][self.latest_var]['dir'] = idx * \
300 + int(num_types.split('\u001f')[idx]) + offset
self.function_table[self.curr_scope]['vars'][self.latest_var]['type'] = p[-1]
self.function_table[self.curr_scope]['num_types'] = self.update_num_temps(
num_types, idx, q)
# Pops the var stack, adding their ids and types to the variable directory of the current scope
@ _('')
def gvd5(self, p):
while len(self.stack_vars) > 0:
curr_var = self.stack_vars.pop()
if curr_var in self.function_table[self.curr_scope]['vars']:
self.found_errors = True
print('ERROR: A variable\033[1m',
curr_var, '\033[0mhas already been declared.')
print(' Redefinition found on line',
self.symstack[-5].lineno)
idx = self.types.index(p[-1])
num_types = self.function_table[self.curr_scope]['num_types']
offset = 1500 if self.curr_scope != self.program_name else 0
self.function_table[self.curr_scope]['vars'][curr_var] = {
'type': p[-1], 'dir': idx * 300 + int(num_types.split('\u001f')[idx]) + offset}
self.function_table[self.curr_scope]['num_types'] = self.update_num_temps(
num_types, idx)
# Same as gvd5 but it also adds the variables of the class in question to the table of the current scope
@ _('')
def gvd6(self, p):
while len(self.stack_vars) > 0:
var_id = self.stack_vars.pop()
if var_id in self.function_table[self.curr_scope]['vars']:
self.found_errors = True
print('ERROR: A variable\033[1m',
var_id, '\033[0mhas already been declared.')
print(' Redefinition found on line',
self.symstack[-5].lineno)
offset = 1500 if self.curr_scope != self.program_name else 0
num_types = self.function_table[self.curr_scope]['num_types']
base_addrs = [int(n) for n in num_types.split('\u001f')[:-1]]
if not p[-1] in self.class_table:
for i in range(1, len(self.symstack)):
if hasattr(self.symstack[i * -1], 'lineno'):
lineno = self.symstack[i * -1].lineno
break
print('ERROR: No class\033[1m',
p[-1], '\033[0mwas found.')
print(' Missing reference found on line',
lineno)
return
for attr in self.class_table[p[-1]]['vars']:
attr_type = self.class_table[p[-1]]['vars'][attr]['type']
idx = self.types.index(attr_type)
num_types = self.function_table[self.curr_scope]['num_types']
q = 1
self.function_table[self.curr_scope]['vars'][var_id + '.' + attr] = {
'type': attr_type,
'dir': base_addrs[idx] + self.class_table[p[-1]]['vars'][attr]['dir'] - 6000 + offset
}
if 'd1' in self.class_table[p[-1]]['vars'][attr]:
q = self.class_table[p[-1]]['vars'][attr]['d1']
self.function_table[self.curr_scope]['vars'][var_id +
'.' + attr]['d1'] = q
if 'd2' in self.class_table[p[-1]]['vars'][attr]:
d2 = self.class_table[p[-1]]['vars'][attr]['d2']
q *= d2
self.function_table[self.curr_scope]['vars'][var_id +
'.' + attr]['d2'] = d2
self.function_table[self.curr_scope]['num_types'] = self.update_num_temps(
num_types, idx, q)
self.function_table[self.curr_scope]['vars'][var_id] = {
'type': p[-1]}
@ _('def_type fd1 FUNCTION ID fd3 LPAREN parameters RPAREN LCURL fd4 var_declaration statements RCURL fd5 fd6 function_definition', 'empty')
def function_definition(self, p):
return 'function_definition'
    # Saves the current function type in a variable
@ _('')
def fd1(self, p):
self.curr_func_type = p[-1]
# Adds the id of the function to the function table
@ _('')
def fd3(self, p):
if p[-1] in self.function_table:
self.found_errors = True
print('ERROR: A function\033[1m',
p[-1], '\033[0mhas already been defined.')
print(' Redefinition found on line',
self.symstack[-1].lineno)
elif p[-1] in self.function_table[self.program_name]['vars']:
self.found_errors = True
print('ERROR: A global variable\033[1m',
p[-1], '\033[0mhas been declared. Functions may not share names with global variables')
print(' Redefinition found on line',
self.symstack[-1].lineno)
else:
self.add_to_func_table(p[-1], self.curr_func_type)
self.last_func_added = p[-1]
self.curr_scope = self.last_func_added
idx = self.types.index(self.curr_func_type)
num_types = self.function_table[self.program_name]['num_types']
self.function_table[self.program_name]['num_types'] = self.update_num_temps(
num_types, idx)
self.function_table[self.program_name]['vars'][p[-1]] = {
'type': self.curr_func_type, 'dir': 0, 'real_dir': idx * 300 + int(num_types.split('\u001f')[idx])}
    # Adds the start of the quadruples related to the current function to its information in the function table
@ _('')
def fd4(self, p):
if not self.last_func_added:
return
self.function_table[self.last_func_added]['start'] = self.quad_counter
# Deletes the variable table of the current scope
@ _('')
def fd5(self, p):
if not self.last_func_added:
return
del self.function_table[self.last_func_added]['vars']
# Creates and appends the end_func quadruple to the quadruple stack
@ _('')
def fd6(self, p):
if self.curr_func_type != 'void' and self.has_returned == False:
self.found_errors = True
print('ERROR: Function\033[1m',
self.curr_scope, '\033[0mis missing a return statement')
print(' Non-void functions must have a return statement.')
self.quadruples.append(Quadruple(-1, -1, 'end_func', -1))
self.quad_counter += 1
self.temp_counter = 1
self.has_returned = False
@ _('statement statements', 'empty')
def statements(self, p):
return 'statements'
@ _('simple_type', 'VOID')
def def_type(self, p):
return p[0]
@ _('INT', 'FLOAT', 'STRING', 'BOOL')
def simple_type(self, p):
return p[0]
@ _('ID')
def composite_type(self, p):
return p[0]
@ _('ID p1 COLON simple_type p2 param_choose', 'empty')
def parameters(self, p):
return 'parameters'
@ _('COMMA parameters', 'empty')
def param_choose(self, p):
return 'param_choose'
    # Saves the ID of the parameter in a variable
@ _('')
def p1(self, p):
self.latest_var = p[-1]
# Adds the type of the parameter to its information in the variable table of the current scope
@ _('')
def p2(self, p):
if self.latest_var in self.function_table[self.curr_scope]['vars']:
self.found_errors = True
print('ERROR: A parameter\033[1m',
self.latest_var, '\033[0mhas already been declared for function', self.last_func_added)
print(' Redefinition found on line',
self.symstack[-2].lineno)
idx = self.types.index(p[-1])
self.function_table[self.curr_scope]['params'] += str(idx)
num_types = self.function_table[self.curr_scope]['num_types']
offset = 1500 if self.curr_scope != self.program_name else 0
self.function_table[self.curr_scope]['num_types'] = self.update_num_temps(
num_types, idx)
self.function_table[self.curr_scope]['vars'][self.latest_var] = {
'type': p[-1], 'dir': idx * 300 + int(num_types.split('\u001f')[idx]) + offset}
@_('assignment', 'call_to_void_function', 'function_returns', 'read', 'print',
'decision_statement', 'repetition_statement', 'BREAK br0 SEMI')
def statement(self, p):
return 'statement'
# Adds the quadruple counter to the break stack
@_('')
def br0(self, p):
if len(self.jumps) == 0:
self.found_errors = True
print('ERROR: break statement on line',
self.symstack[-1].lineno, 'used outside a loop')
self.quadruples.append(Quadruple(-1, -1, 'goto', None))
self.break_stack.append(self.quad_counter)
self.quad_counter += 1
@_('variable ass1 EQUALS expression ass2 SEMI')
def assignment(self, p):
return 'assignment'
# Save the id in a variable
@_('')
def ass1(self, p):
self.latest_var = p[-1]
    # Generates the quadruple with '=' as its operator, resolving the addresses of the left operand and the result
@_('')
def ass2(self, p):
made_quad = False
while(len(self.stack_of_stacks[-1])):
self.make_and_push_quad()
made_quad = True
lo = self.stack_of_stacks[-2].pop()
if not lo:
return
v_type = self.get_var_type(self.latest_var, self.symstack[-2])
if not v_type:
return
self.last_type = sm.checkOperation(v_type, lo['type'], '=')
if not made_quad and self.check_var_is_array(lo):
lo_dir = '$' + str(self.last_arr_t.pop())
else:
lo_dir = lo['dir']
if self.current_class != None:
if self.latest_var in self.class_table[self.current_class]['vars']:
if 'd1' in self.class_table[self.current_class]['vars'][self.latest_var]:
if not self.last_arr_t:
return
var_dir = '$' + str(self.last_arr_t.pop())
else:
var_dir = self.class_table[self.current_class]['vars'][self.latest_var]['dir']
else:
if 'd1' in self.function_table[self.curr_scope]['vars'][self.latest_var]:
if not self.last_arr_t:
return
var_dir = '$' + str(self.last_arr_t.pop())
else:
var_dir = self.function_table[self.curr_scope]['vars'][self.latest_var]['dir']
elif self.latest_var in self.function_table[self.curr_scope]['vars']:
if 'd1' in self.function_table[self.curr_scope]['vars'][self.latest_var]:
if not self.last_arr_t:
return
var_dir = '$' + str(self.last_arr_t.pop())
else:
var_dir = self.function_table[self.curr_scope]['vars'][self.latest_var]['dir']
else:
if 'd1' in self.function_table[self.program_name]['vars'][self.latest_var]:
if not self.last_arr_t:
return
var_dir = '$' + str(self.last_arr_t.pop())
else:
var_dir = self.function_table[self.program_name]['vars'][self.latest_var]['dir']
q = Quadruple(lo_dir, -1, '=', var_dir)
self.quadruples.append(q)
self.quad_counter += 1
@_('id_or_attribute', 'id_or_attribute v0 LBRACKET expression v1 RBRACKET',
'id_or_attribute v0 LBRACKET expression v2 COMMA v4 expression v3 RBRACKET')
def variable(self, p):
return p[0]
# Checks that the variable exists and is an array
@_('')
def v0(self, p):
self.check_variable_exists(p[-1])
if self.current_class != None:
if not 'd1' in self.class_table[self.current_class]['vars'][p[-1]]:
self.found_errors = True
print('ERROR: Variable\033[1m',
p[-1], '\033[0mis not an array or matrix.')
self.stack_of_stacks.append([])
self.stack_of_stacks.append([])
return
elif p[-1] in self.function_table[self.curr_scope]['vars']:
if not 'd1' in self.function_table[self.curr_scope]['vars'][p[-1]]:
self.found_errors = True
print('ERROR: Variable\033[1m',
p[-1], '\033[0mis not an array or matrix.')
self.stack_of_stacks.append([])
self.stack_of_stacks.append([])
return
elif not 'd1' in self.function_table[self.program_name]['vars'][p[-1]]:
self.found_errors = True
print('ERROR: Variable\033[1m',
p[-1], '\033[0mis not an array or matrix.')
self.stack_of_stacks.append([])
self.stack_of_stacks.append([])
return
self.last_arr_id = p[-1]
self.stack_of_stacks.append([])
self.stack_of_stacks.append([])
# Checks that the variable is a matrix
@_('')
def v4(self, p):
self.check_variable_exists(self.last_arr_id)
if self.current_class != None:
if not 'd2' in self.class_table[self.current_class]['vars'][self.last_arr_id]:
self.found_errors = True
print('ERROR: Variable\033[1m',
self.last_arr_id, '\033[0mis not a matrix.')
elif self.last_arr_id in self.function_table[self.curr_scope]['vars']:
if not 'd2' in self.function_table[self.curr_scope]['vars'][self.last_arr_id]:
self.found_errors = True
print('ERROR: Variable\033[1m',
self.last_arr_id, '\033[0mis not a matrix.')
else:
if not 'd2' in self.function_table[self.program_name]['vars'][self.last_arr_id]:
self.found_errors = True
print('ERROR: Variable\033[1m',
self.last_arr_id, '\033[0mis not a matrix.')
self.stack_of_stacks.append([])
self.stack_of_stacks.append([])
# Calculates the address of the array index
@_('')
def v1(self, p):
made_quad = False
while(len(self.stack_of_stacks[-1])):
ro = self.stack_of_stacks[-2].pop()
lo = self.stack_of_stacks[-2].pop()
op = self.stack_of_stacks[-1].pop()
self.last_type = sm.checkOperation(lo['type'], ro['type'], op)
idx = self.types.index(self.last_type)
num_temps = self.function_table[self.curr_scope]['num_temps']
t_dir = idx * 300 + \
int(num_temps.split('\u001f')[idx]) + 3000
self.function_table[self.curr_scope]['num_temps'] = self.update_num_temps(
num_temps, idx)
self.quadruples.append(
Quadruple(lo['dir'], ro['dir'], op, t_dir))
self.temp_counter += 1
self.quad_counter += 1
made_quad = True
if self.current_class != None and self.last_arr_id in self.class_table[self.current_class]['vars']:
t_addr = self.quadruples[-1].res if made_quad else self.stack_of_stacks[-2].pop()[
'dir']
if (t_addr % 1500) // 300 != 0 and (t_addr % 1500) // 300 != 1:
raise TypeError('Type mismatch')
lms = self.class_table[self.current_class]['vars'][self.last_arr_id]['d1']
self.quadruples.append(Quadruple(0, lms, 'verify', t_addr))
self.quad_counter += 1
dir_b = self.class_table[self.current_class]['vars'][self.last_arr_id]['dir']
if not dir_b in self.constant_table['int']:
self.constant_table['int'].append(dir_b)
cons_dir = self.constant_table['int'].index(dir_b) + 4500
num_temps = self.function_table[self.curr_scope]['num_temps']
t_dir = int(num_temps.split('\u001f')[0]) + 3000
self.function_table[self.curr_scope]['num_temps'] = self.update_num_temps(
num_temps, 0)
self.quadruples.append(Quadruple(cons_dir, t_addr, '+', t_dir))
self.quad_counter += 1
self.last_arr_t.append(t_dir)
elif self.last_arr_id in self.function_table[self.curr_scope]['vars']:
t_addr = self.quadruples[-1].res if made_quad else self.stack_of_stacks[-2].pop()[
'dir']
if (t_addr % 1500) // 300 != 0 and (t_addr % 1500) // 300 != 1:
raise TypeError('Type mismatch')
lms = self.function_table[self.curr_scope]['vars'][self.last_arr_id]['d1']
self.quadruples.append(Quadruple(0, lms, 'verify', t_addr))
self.quad_counter += 1
dir_b = self.function_table[self.curr_scope]['vars'][self.last_arr_id]['dir']
if not dir_b in self.constant_table['int']:
self.constant_table['int'].append(dir_b)
cons_dir = self.constant_table['int'].index(dir_b) + 4500
num_temps = self.function_table[self.curr_scope]['num_temps']
t_dir = int(num_temps.split('\u001f')[0]) + 3000
self.function_table[self.curr_scope]['num_temps'] = self.update_num_temps(
num_temps, 0)
self.quadruples.append(Quadruple(cons_dir, t_addr, '+', t_dir))
self.quad_counter += 1
self.last_arr_t.append(t_dir)
else:
t_addr = self.quadruples[-1].res if made_quad else self.stack_of_stacks[-2].pop()[
'dir']
if (t_addr % 1500) // 300 != 0 and (t_addr % 1500) // 300 != 1:
raise TypeError('Type mismatch')
lms = self.function_table[self.program_name]['vars'][self.last_arr_id]['d1']
self.quadruples.append(Quadruple(0, lms, 'verify', t_addr))
self.quad_counter += 1
dir_b = self.function_table[self.program_name]['vars'][self.last_arr_id]['dir']
if not dir_b in self.constant_table['int']:
self.constant_table['int'].append(dir_b)
cons_dir = self.constant_table['int'].index(dir_b) + 4500
num_temps = self.function_table[self.curr_scope]['num_temps']
t_dir = int(num_temps.split('\u001f')[0]) + 3000
self.function_table[self.curr_scope]['num_temps'] = self.update_num_temps(
num_temps, 0)
self.quadruples.append(Quadruple(cons_dir, t_addr, '+', t_dir))
self.quad_counter += 1
self.last_arr_t.append(t_dir)
self.stack_of_stacks.pop()
self.stack_of_stacks.pop()
# Calculate the address of the matrix index
@_('')
def v2(self, p):
made_quad = False
while(len(self.stack_of_stacks[-1])):
ro = self.stack_of_stacks[-2].pop()
lo = self.stack_of_stacks[-2].pop()
op = self.stack_of_stacks[-1].pop()
self.last_type = sm.checkOperation(lo['type'], ro['type'], op)
idx = self.types.index(self.last_type)
num_temps = self.function_table[self.curr_scope]['num_temps']
t_dir = idx * 300 + \
int(num_temps.split('\u001f')[idx]) + 3000
self.function_table[self.curr_scope]['num_temps'] = self.update_num_temps(
num_temps, idx)
self.quadruples.append(
Quadruple(lo['dir'], ro['dir'], op, t_dir))
self.temp_counter += 1
self.quad_counter += 1
made_quad = True
if self.current_class != None:
pass
elif self.last_arr_id in self.function_table[self.curr_scope]['vars']:
t_addr = self.quadruples[-1].res if made_quad else self.stack_of_stacks[-2].pop()[
'dir']
if (t_addr % 1500) // 300 != 0 and (t_addr % 1500) // 300 != 1:
raise TypeError('Type mismatch')
lms = self.function_table[self.curr_scope]['vars'][self.last_arr_id]['d1']
self.quadruples.append(Quadruple(0, lms, 'verify', t_addr))
self.quad_counter += 1
d2 = self.function_table[self.curr_scope]['vars'][self.last_arr_id]['d2']
if not d2 in self.constant_table['int']:
self.constant_table['int'].append(d2)
cons_dir = self.constant_table['int'].index(d2) + 4500
num_temps = self.function_table[self.curr_scope]['num_temps']
t_dir = int(num_temps.split('\u001f')[0]) + 3000
self.function_table[self.curr_scope]['num_temps'] = self.update_num_temps(
num_temps, 0)
self.quadruples.append(Quadruple(cons_dir, t_addr, '*', t_dir))
self.quad_counter += 1
self.displacements.append(t_dir)
else:
t_addr = self.quadruples[-1].res if made_quad else self.stack_of_stacks[-2].pop()[
'dir']
if (t_addr % 1500) // 300 != 0 and (t_addr % 1500) // 300 != 1:
raise TypeError('Type mismatch')
lms = self.function_table[self.program_name]['vars'][self.last_arr_id]['d1']
self.quadruples.append(Quadruple(0, lms, 'verify', t_addr))
self.quad_counter += 1
d2 = self.function_table[self.program_name]['vars'][self.last_arr_id]['d2']
if not d2 in self.constant_table['int']:
self.constant_table['int'].append(d2)
cons_dir = self.constant_table['int'].index(d2) + 4500
num_temps = self.function_table[self.curr_scope]['num_temps']
t_dir = int(num_temps.split('\u001f')[0]) + 3000
self.function_table[self.curr_scope]['num_temps'] = self.update_num_temps(
num_temps, 0)
self.quadruples.append(Quadruple(cons_dir, t_addr, '*', t_dir))
self.quad_counter += 1
self.displacements.append(t_dir)
self.stack_of_stacks.pop()
self.stack_of_stacks.pop()
# Calculate s1*d2
@_('')
def v3(self, p):
made_quad = False
while(len(self.stack_of_stacks[-1])):
ro = self.stack_of_stacks[-2].pop()
lo = self.stack_of_stacks[-2].pop()
op = self.stack_of_stacks[-1].pop()
self.last_type = sm.checkOperation(lo['type'], ro['type'], op)
idx = self.types.index(self.last_type)
num_temps = self.function_table[self.curr_scope]['num_temps']
t_dir = idx * 300 + \
int(num_temps.split('\u001f')[idx]) + 3000
self.function_table[self.curr_scope]['num_temps'] = self.update_num_temps(
num_temps, idx)
self.quadruples.append(
Quadruple(lo['dir'], ro['dir'], op, t_dir))
self.temp_counter += 1
self.quad_counter += 1
made_quad = True
if self.current_class != None:
pass
elif self.last_arr_id in self.function_table[self.curr_scope]['vars']:
t_addr = self.quadruples[-1].res if made_quad else self.stack_of_stacks[-2].pop()[
'dir']
if (t_addr % 1500) // 300 != 0 and (t_addr % 1500) // 300 != 1:
raise TypeError('Type mismatch')
lms = self.function_table[self.curr_scope]['vars'][self.last_arr_id]['d2']
self.quadruples.append(Quadruple(0, lms, 'verify', t_addr))
self.quad_counter += 1
dir_b = self.function_table[self.curr_scope]['vars'][self.last_arr_id]['dir']
if not dir_b in self.constant_table['int']:
self.constant_table['int'].append(dir_b)
cons_dir = self.constant_table['int'].index(dir_b) + 4500
num_temps = self.function_table[self.curr_scope]['num_temps']
t_dir = int(num_temps.split('\u001f')[0]) + 3000
self.function_table[self.curr_scope]['num_temps'] = self.update_num_temps(
num_temps, 0, 2)
self.quadruples.append(
Quadruple(self.displacements.pop(), t_addr, '+', t_dir))
self.quadruples.append(Quadruple(cons_dir, t_dir, '+', t_dir + 1))
self.quad_counter += 2
self.last_arr_t.append(t_dir + 1)
else:
t_addr = self.quadruples[-1].res if made_quad else self.stack_of_stacks[-2].pop()[
'dir']
if (t_addr % 1500) // 300 != 0 and (t_addr % 1500) // 300 != 1:
raise TypeError('Type mismatch')
lms = self.function_table[self.program_name]['vars'][self.last_arr_id]['d2']
self.quadruples.append(Quadruple(0, lms, 'verify', t_addr))
self.quad_counter += 1
dir_b = self.function_table[self.program_name]['vars'][self.last_arr_id]['dir']
if not dir_b in self.constant_table['int']:
self.constant_table['int'].append(dir_b)
cons_dir = self.constant_table['int'].index(dir_b) + 4500
num_temps = self.function_table[self.curr_scope]['num_temps']
t_dir = int(num_temps.split('\u001f')[0]) + 3000
self.function_table[self.curr_scope]['num_temps'] = self.update_num_temps(
num_temps, 0, 2)
self.quadruples.append(
Quadruple(self.displacements.pop(), t_addr, '+', t_dir))
self.quadruples.append(Quadruple(cons_dir, t_dir, '+', t_dir + 1))
self.quad_counter += 2
self.last_arr_t.append(t_dir + 1)
self.stack_of_stacks.pop()
self.stack_of_stacks.pop()
@_('ID', 'ID DOT ID')
def id_or_attribute(self, p):
if len(p) > 1:
return p[0] + p[1] + p[2]
return p[0]
# Returns value, type and address of the constant
@_('variable', 'CTE_I', 'CTE_F', 'CTE_STRING', 'cte_bool', 'call_to_function')
def var_cte(self, p):
offset = 4500
if hasattr(p, 'CTE_I'):
cte_type = 'int'
if not p[0] in self.constant_table['int']:
self.constant_table['int'].append(p[0])
cons_dir = self.constant_table['int'].index(p[0]) + offset
elif hasattr(p, 'CTE_F'):
cte_type = 'float'
if not p[0] in self.constant_table['float']:
self.constant_table['float'].append(p[0])
cons_dir = self.constant_table['float'].index(p[0]) + offset + 300
elif hasattr(p, 'CTE_STRING'):
cte_type = 'string'
if not p[0] in self.constant_table['string']:
self.constant_table['string'].append(p[0])
cons_dir = self.constant_table['string'].index(p[0]) + offset + 600
elif hasattr(p, 'cte_bool'):
cte_type = 'bool'
if not p[0] in self.constant_table['bool']:
self.constant_table['bool'].append(p[0])
cons_dir = self.constant_table['bool'].index(p[0]) + offset + 900
elif hasattr(p, 'call_to_function'):
return p[0]
else:
if not self.check_variable_exists(p[0]):
for i in range(1, len(self.symstack)):
if hasattr(self.symstack[i * -1], 'lineno'):
lineno = self.symstack[i * -1].lineno
break
self.found_errors = True
print('ERROR: No variable\033[1m',
p[0], '\033[0mwas found.')
print(' Missing reference found on line',
lineno)
return
if self.current_class != None and p[0] in self.class_table[self.current_class]['vars']:
cte_type = self.class_table[self.current_class]['vars'][p[0]]['type']
cons_dir = self.class_table[self.current_class]['vars'][p[0]]['dir']
else:
cte_type = self.get_var_type(p[0], self.symstack[-2])
if p[0] in self.function_table[self.curr_scope]['vars']:
cons_dir = self.function_table[self.curr_scope]['vars'][p[0]]['dir']
else:
cons_dir = self.function_table[self.program_name]['vars'][p[0]]['dir']
return {'value': p[0], 'type': cte_type, 'dir': cons_dir}
@_('constant e2 operator e3 expression', 'constant e2', 'LPAREN e1 expression RPAREN e4', 'LPAREN e1 expression RPAREN e4 operator e3 expression')
def expression(self, p):
if hasattr(p, 'LPAREN'):
return p[2]
return p[0]
# Append the open parenthesis to the stack of operators in the stacks_of_stacks
@_('')
def e1(self, p):
self.stack_of_stacks[-1].append('(')
    # Appends the operand to the stack of operands in the stack_of_stacks
@_('')
def e2(self, p):
self.stack_of_stacks[-2].append(p[-1])
    # Makes the quadruple for the current operation
@_('')
def e3(self, p):
if len(self.stack_of_stacks[-1]) == 0 or self.stack_of_stacks[-1][-1] == '(':
self.stack_of_stacks[-1].append(p[-1])
elif self.stack_of_stacks[-1][-1] == '*' or self.stack_of_stacks[-1][-1] == '/':
self.make_and_push_quad()
if (self.stack_of_stacks[-1] and (self.stack_of_stacks[-1][-1] == '+' or self.stack_of_stacks[-1][-1] == '-')) and (p[-1] == '+' or p[-1] == '-'):
self.make_and_push_quad()
self.stack_of_stacks[-1].append(p[-1])
elif p[-1] == '*' or p[-1] == '/':
self.stack_of_stacks[-1].append(p[-1])
elif self.stack_of_stacks[-1][-1] == '+' or self.stack_of_stacks[-1][-1] == '-':
self.make_and_push_quad()
self.stack_of_stacks[-1].append(p[-1])
elif p[-1] == '+' or p[-1] == '-':
self.stack_of_stacks[-1].append(p[-1])
elif self.stack_of_stacks[-1][-1] in sm.comparison_ops or self.stack_of_stacks[-1][-1] in sm.equality_ops:
self.make_and_push_quad()
self.stack_of_stacks[-1].append(p[-1])
elif p[-1] in sm.comparison_ops or p[-1] in sm.equality_ops:
self.stack_of_stacks[-1].append(p[-1])
elif self.stack_of_stacks[-1][-1] in sm.logic_ops:
self.make_and_push_quad()
self.stack_of_stacks[-1].append(p[-1])
elif p[-1] in sm.logic_ops:
self.stack_of_stacks[-1].append(p[-1])
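    # A short trace of the precedence handling above: for `a + b * c`, e3
    # pushes '+' on an empty stack, then pushes '*' on top of '+' without
    # emitting a quadruple; when the expression is drained (e.g. by ass2 or
    # e4), `b * c -> t1` is emitted before `a + t1 -> t2`, preserving
    # arithmetic precedence.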
# Pops the operator stack and makes quads until an open parenthesis is found
@_('')
def e4(self, p):
while(self.stack_of_stacks[-1][-1] != '('):
self.make_and_push_quad()
self.stack_of_stacks[-1].pop()
@_('AND', 'OR')
def logical_operator(self, p):
return p[-1]
@_('LT', 'GT', 'SAME', 'GEQ', 'LEQ', 'NEQ')
def relational_operator(self, p):
return p[0]
@_('PLUS', 'MINUS', 'MULTIPLY', 'DIVIDE')
def arithmetic_operator(self, p):
return p[0]
@_('logical_operator', 'relational_operator', 'arithmetic_operator')
def operator(self, p):
return p[0]
@_('PLUS var_cte', 'MINUS var_cte', 'var_cte')
def constant(self, p):
        if len(p) > 1 and p[0] == '-':
return -p.var_cte
else:
return p.var_cte
@_('READ LPAREN read_h')
def read(self, p):
return 'read'
@_('variable r1 COMMA read_h', 'variable r1 RPAREN SEMI')
def read_h(self, p):
return 'read_h'
    # Makes the read quadruple, with res being the address (prefixed with a dollar sign ($) when it is a computed array address)
@_('')
def r1(self, p):
if self.current_class != None and p[-1] in self.class_table[self.current_class]['vars']:
if 'd1' in self.class_table[self.current_class]['vars'][p[-1]]:
var_addr = '$' + str(self.last_arr_t.pop())
else:
var_addr = self.class_table[self.current_class]['vars'][p[-1]]['dir']
elif p[-1] in self.function_table[self.curr_scope]['vars']:
if 'd1' in self.function_table[self.curr_scope]['vars'][p[-1]]:
var_addr = '$' + str(self.last_arr_t.pop())
else:
var_addr = self.function_table[self.curr_scope]['vars'][p[-1]]['dir']
elif p[-1] in self.function_table[self.program_name]['vars']:
if 'd1' in self.function_table[self.program_name]['vars'][p[-1]]:
var_addr = '$' + str(self.last_arr_t.pop())
else:
var_addr = self.function_table[self.program_name]['vars'][p[-1]]['dir']
else:
raise UndeclaredIdError(p[-1])
self.quadruples.append(Quadruple(-1, -1, 'read', var_addr))
self.quad_counter += 1
@_('function_or_method vf0 ctf2 LPAREN func_params RPAREN fp2 fp3 ctf0 ctf3')
def call_to_function(self, p):
if not self.check_variable_exists(self.called_func):
# self.found_errors = True
# print('ERROR: No function\033[1m',
# self.called_func, '\033[0mwas found.')
# print(' Missing reference found on line',
# self.symstack[-5].lineno)
return
func_dir = self.function_table[self.program_name]['vars'][self.called_func]['dir']
func_type = self.function_table[self.called_func]['return_type']
return {'value': 't' + str(self.temp_counter - 1), 'type': func_type, 'dir': func_dir}
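    # Note: the dict returned above mirrors the {'value', 'type', 'dir'} shape
    # produced by var_cte, so a function call can flow through expressions
    # like any other operand.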
# Append two empty stacks to the stack_of_stacks
@_('')
def ctf2(self, p):
self.stack_of_stacks.append([])
self.stack_of_stacks.append([])
    # Pops the stacks from the stack_of_stacks
@_('')
def ctf3(self, p):
self.stack_of_stacks.pop()
self.stack_of_stacks.pop()
    # Copies the return value from the function's address into a new temporary and records it as the function's current dir
@_('')
def ctf0(self, p):
if not self.check_variable_exists(self.called_func):
self.found_errors = True
print('ERROR: No function\033[1m',
self.called_func, '\033[0mwas found.')
print(' Missing reference found on line',
self.symstack[-3].lineno)
return
func_dir = self.function_table[self.program_name]['vars'][self.called_func]['real_dir']
func_type = self.function_table[self.program_name]['vars'][self.called_func]['type']
idx = self.types.index(func_type)
num_temps = self.function_table[self.curr_scope]['num_temps']
# self.function_table[self.curr_scope]['num_temps'] = self.update_num_temps(
# num_temps, idx)
t_dir = idx * 300 + \
int(num_temps.split('\u001f')[idx]) + 3000
self.function_table[self.curr_scope]['num_temps'] = self.update_num_temps(
num_temps, idx)
self.quadruples.append(
Quadruple(func_dir, -1, '=', t_dir))
self.function_table[self.program_name]['vars'][self.called_func]['dir'] = t_dir
self.quad_counter += 1
self.temp_counter += 1
    # Resolves the name of the function or method being called; for method
    # calls, also prepares quadruples that copy the instance's attributes
    # into the class's address space
@_('ID ctf1', 'ID DOT ID')
def function_or_method(self, p):
if(len(p) == 2):
return (p[0], None)
else:
var_type = self.get_var_type(p[0], self.symstack[-1])
quads = []
for attr in self.class_table[var_type]['vars']:
var_dir = self.function_table[self.curr_scope]['vars'][p[0]+'.'+attr]['dir']
attr_dir = self.class_table[var_type]['vars'][attr]['dir']
quads.append(Quadruple(var_dir, -2, '=', attr_dir))
# self.quad_counter += 1
return (var_type + p[1] + p[2], quads)
    # Checks that the name of the function being called exists in the function table
@_('')
def ctf1(self, p):
if self.current_class != None:
if not self.current_class + '.' + p[-1] in self.function_table:
self.found_errors = True
print('ERROR: No function\033[1m',
p[-1], '\033[0mwas found.')
print(' Missing reference found on line',
self.symstack[-1].lineno)
elif not p[-1] in self.function_table:
self.found_errors = True
print('ERROR: No function\033[1m',
p[-1], '\033[0mwas found.')
print(' Missing reference found on line',
self.symstack[-1].lineno)
@_('COMMA expression fp1 param_list', 'empty')
def param_list(self, p):
return 'param_list'
@_('PRINT LPAREN res_write RPAREN SEMI')
def print(self, p):
return 'print'
@_('expression pr1 comma_thing')
def res_write(self, p):
return 'res_write'
@_('COMMA res_write', 'empty')
def comma_thing(self, p):
return 'comma_thing'
    # Makes quadruples while the operator stack is not empty, then makes the print quadruple
@_('')
def pr1(self, p):
made_quad = False
while(len(self.stack_of_stacks[-1])):
ro = self.stack_of_stacks[-2].pop()
lo = self.stack_of_stacks[-2].pop()
op = self.stack_of_stacks[-1].pop()
self.last_type = sm.checkOperation(lo['type'], ro['type'], op)
idx = self.types.index(self.last_type)
num_temps = self.function_table[self.curr_scope]['num_temps']
t_dir = idx * 300 + \
int(num_temps.split('\u001f')[idx]) + 3000
self.function_table[self.curr_scope]['num_temps'] = self.update_num_temps(
num_temps, idx)
if not lo['dir'] in range(4500, 6000) and self.check_var_is_array(lo):
lo_dir = '$' + str(self.last_arr_t.pop())
else:
lo_dir = lo['dir']
if not ro['dir'] in range(4500, 6000) and self.check_var_is_array(ro):
ro_dir = '$' + str(self.last_arr_t.pop())
else:
ro_dir = ro['dir']
self.quadruples.append(
Quadruple(lo_dir, ro_dir, op, t_dir))
self.temp_counter += 1
self.quad_counter += 1
made_quad = True
if made_quad:
last_quad = self.quadruples[-1]
self.quadruples.append(
Quadruple(-1, -1, 'print', last_quad.res))
self.quad_counter += 1
else:
var = self.stack_of_stacks[-2].pop()
if not var:
return
if self.check_var_is_array(var):
var_dir = '$' + str(self.last_arr_t.pop())
else:
var_dir = var['dir']
self.quadruples.append(
Quadruple(-1, -1, 'print', var_dir))
self.quad_counter += 1
@_('TRUE', 'FALSE')
def cte_bool(self, p):
return p[0]
@_('IF LPAREN expression dec1 RPAREN THEN LCURL statements RCURL else_stm')
def decision_statement(self, p):
return 'decision_statement'
    # Makes quadruples while the operator stack is not empty, then makes the goto_f quadruple
@_('')
def dec1(self, p):
while len(self.stack_of_stacks[-1]):
ro = self.stack_of_stacks[-2].pop()
lo = self.stack_of_stacks[-2].pop()
op = self.stack_of_stacks[-1].pop()
self.last_type = sm.checkOperation(lo['type'], ro['type'], op)
idx = self.types.index(self.last_type)
num_temps = self.function_table[self.curr_scope]['num_temps']
t_dir = idx * 300 + 3000
r_type = sm.checkOperation(lo['type'], ro['type'], op)
self.stack_of_stacks[-2].append(
{'value': 't' + str(self.temp_counter), 'type': r_type, 'dir': t_dir})
self.function_table[self.curr_scope]['num_temps'] = self.update_num_temps(
num_temps, idx)
if self.check_var_is_array(lo):
lo_dir = '$' + str(self.last_arr_t.pop())
else:
lo_dir = lo['dir']
if self.check_var_is_array(ro):
ro_dir = '$' + str(self.last_arr_t.pop())
else:
ro_dir = ro['dir']
self.quadruples.append(
Quadruple(lo_dir, ro_dir, op, t_dir))
self.temp_counter += 1
self.quad_counter += 1
lo = self.stack_of_stacks[-2].pop()
if lo['type'] != 'bool':
raise SyntaxError(
'Expression to evaluate in if statement is not boolean')
else:
self.quadruples.append(Quadruple(-1, lo['dir'], 'goto_f', -1))
self.jumps.append(self.quad_counter)
self.quad_counter += 1
@_('dec2 ELSE LCURL statements RCURL dec3', 'empty dec4')
def else_stm(self, p):
return 'else_stm'
# Makes goto quadruples
@_('')
def dec2(self, p):
falso = self.jumps.pop()
self.quadruples.append(Quadruple(-1, -1, 'goto', -1))
self.jumps.append(self.quad_counter)
self.quad_counter += 1
self.quadruples[falso - 1].res = self.quad_counter
    # Updates the goto quadruple's jump target
@_('')
def dec3(self, p):
jump = self.jumps.pop()
self.quadruples[jump - 1].res = self.quad_counter
    # Updates the goto_f quadruple's jump target
@_('')
def dec4(self, p):
jump = self.jumps.pop()
self.quadruples[jump - 1].res = self.quad_counter
@_('conditional', 'non_conditional')
def repetition_statement(self, p):
return 'repetition_statement'
@_('WHILE LPAREN con0 expression con1 RPAREN DO LCURL statements RCURL con2')
def conditional(self, p):
return 'conditional'
# Add quadruple counter to jumps stack
@_('')
def con0(self, p):
self.jumps.append(self.quad_counter)
    # Makes quadruples while the operator stack is not empty, then makes the goto_f quadruple
@_('')
def con1(self, p):
while len(self.stack_of_stacks[-1]):
ro = self.stack_of_stacks[-2].pop()
lo = self.stack_of_stacks[-2].pop()
op = self.stack_of_stacks[-1].pop()
self.last_type = sm.checkOperation(lo['type'], ro['type'], op)
idx = self.types.index(self.last_type)
num_temps = self.function_table[self.curr_scope]['num_temps']
t_dir = idx * 300 + 3000
self.function_table[self.curr_scope]['num_temps'] = self.update_num_temps(
num_temps, idx)
r_type = sm.checkOperation(lo['type'], ro['type'], op)
self.stack_of_stacks[-2].append(
{'value': 't' + str(self.temp_counter), 'type': r_type, 'dir': t_dir})
if self.check_var_is_array(lo):
lo_dir = '$' + str(self.last_arr_t.pop())
else:
lo_dir = lo['dir']
if self.check_var_is_array(ro):
ro_dir = '$' + str(self.last_arr_t.pop())
else:
ro_dir = ro['dir']
self.quadruples.append(
Quadruple(lo_dir, ro_dir, op, t_dir))
self.temp_counter += 1
self.quad_counter += 1
if self.last_type != 'bool':
            raise SyntaxError(
                'Expression to evaluate in while statement is not boolean')
else:
last_quad = self.quadruples[-1].res
self.quadruples.append(Quadruple(-1, last_quad, 'goto_f', -1))
self.jumps.append(self.quad_counter)
self.quad_counter += 1
    # Makes the goto quadruple and updates the goto_f jump target
@_('')
def con2(self, p):
falso = self.jumps.pop()
ret = self.jumps.pop()
self.quadruples.append(Quadruple(-1, -1, 'goto', ret))
self.quadruples[falso - 1].res = self.quad_counter + 1
if len(self.break_stack):
bq = self.break_stack.pop()
self.quadruples[bq - 1].res = self.quad_counter + 1
self.quad_counter += 1
@_('FOR variable ass1 EQUALS expression ass2 nc0 UNTIL expression nc1 DO nc2 LCURL statements RCURL nc3')
def non_conditional(self, p):
return 'non_conditional'
# Append the result of the last quadruple to the for_var_dir stack
@_('')
def nc0(self, p):
self.for_var_dir.append(self.quadruples[-1].res)
    # Makes quadruples while the operator stack is not empty, then makes the loop-condition quadruple with '<=' as its operator
@_('')
def nc1(self, p):
made_quad = False
while(len(self.stack_of_stacks[-1])):
ro = self.stack_of_stacks[-2].pop()
lo = self.stack_of_stacks[-2].pop()
op = self.stack_of_stacks[-1].pop()
self.last_type = sm.checkOperation(lo['type'], ro['type'], op)
idx = self.types.index(self.last_type)
num_temps = self.function_table[self.curr_scope]['num_temps']
t_dir = idx * 300 + \
int(num_temps.split('\u001f')[idx]) + 3000
self.function_table[self.curr_scope]['num_temps'] = self.update_num_temps(
num_temps, idx)
if not lo['dir'] in range(4500, 6000) and self.check_var_is_array(lo):
lo_dir = '$' + str(self.last_arr_t.pop())
else:
lo_dir = lo['dir']
if not ro['dir'] in range(4500, 6000) and self.check_var_is_array(ro):
ro_dir = '$' + str(self.last_arr_t.pop())
else:
ro_dir = ro['dir']
self.quadruples.append(
Quadruple(lo_dir, ro_dir, op, t_dir))
self.temp_counter += 1
self.quad_counter += 1
made_quad = True
if made_quad:
last_quad = self.quadruples[-1].res
if (last_quad % 1500) // 300 != 0 and (last_quad % 1500) // 300 != 1:
raise TypeError('Type mismatch')
num_temps = self.function_table[self.curr_scope]['num_temps']
t_dir = 3 * 300 + \
int(num_temps.split('\u001f')[3]) + 3000
self.function_table[self.curr_scope]['num_temps'] = self.update_num_temps(
num_temps, 3)
self.quadruples.append(
Quadruple(self.for_var_dir[-1], last_quad, '<=', t_dir))
self.jumps.append(self.quad_counter)
self.quad_counter += 1
self.temp_counter += 1
else:
var = self.stack_of_stacks[-2].pop()
if (var['dir'] % 1500) // 300 != 0 and (var['dir'] % 1500) // 300 != 1:
raise TypeError('Type mismatch')
num_temps = self.function_table[self.curr_scope]['num_temps']
t_dir = 3 * 300 + \
int(num_temps.split('\u001f')[3]) + 3000
self.function_table[self.curr_scope]['num_temps'] = self.update_num_temps(
num_temps, 3)
if self.check_var_is_array(var):
var_dir = '$' + str(self.last_arr_t.pop())
else:
var_dir = var['dir']
self.quadruples.append(
Quadruple(self.for_var_dir[-1], var_dir, '<=', t_dir))
self.jumps.append(self.quad_counter)
self.quad_counter += 1
self.temp_counter += 1
# Make goto_f quadruple
@_('')
def nc2(self, p):
last_quad = self.quadruples[-1].res
self.quadruples.append(Quadruple(-1, last_quad, 'goto_f', -1))
self.jumps.append(self.quad_counter)
self.quad_counter += 1
    # Makes the goto quadruple and updates the goto_f jump target
@_('')
def nc3(self, p):
falso = self.jumps.pop()
cond = self.jumps.pop()
if not 1 in self.constant_table['int']:
self.constant_table['int'].append(1)
one_dir = self.constant_table['int'].index(1) + 4500
self.quadruples.append(
Quadruple(self.for_var_dir[-1], one_dir, '+', self.for_var_dir[-1]))
self.quad_counter += 1
self.quadruples.append(Quadruple(-1, -1, 'goto', cond))
self.quad_counter += 1
self.quadruples[falso - 1].res = self.quad_counter
if len(self.break_stack):
bq = self.break_stack.pop()
self.quadruples[bq - 1].res = self.quad_counter + 1
self.for_var_dir.pop()
@_('RETURN LPAREN expression fr0 RPAREN SEMI fr1')
def function_returns(self, p):
return 'function_returns'
# Make return quadruple
@_('')
def fr0(self, p):
made_quad = False
while(len(self.stack_of_stacks[-1])):
self.make_and_push_quad()
made_quad = True
if made_quad:
last_quad = self.quadruples[-1]
self.quadruples.append(
Quadruple(last_quad.res, -1, 'return', self.function_table[self.program_name]['vars'][self.curr_scope]['real_dir']))
self.quad_counter += 1
self.stack_of_stacks[-2].pop()
else:
self.quadruples.append(
Quadruple(self.stack_of_stacks[-2].pop()['dir'], -1, 'return', self.function_table[self.program_name]['vars'][self.curr_scope]['real_dir']))
self.quad_counter += 1
    # Sets has_returned to True
@_('')
def fr1(self, p):
self.has_returned = True
@ _('function_or_method vf0 LPAREN func_params RPAREN fp2 fp3 SEMI')
def call_to_void_function(self, p):
return 'call_to_void_function'
# Make gosub quadruple
@ _('')
def fp2(self, p):
self.quadruples.append(
Quadruple(self.called_func, -1, 'gosub', -1))
self.quad_counter += 1
    # Resets the parameter counter to 0
@ _('')
def fp3(self, p):
self.param_counter = 0
    # Makes the era quadruple
@ _('')
def vf0(self, p):
self.called_func, quads = p[-1]
if self.current_class != None:
self.called_func = self.current_class + '.' + self.called_func
self.quadruples.append(Quadruple(self.called_func, -1, 'era', -1))
self.quad_counter += 1
if quads:
for q in quads:
self.quadruples.append(q)
self.quad_counter += 1
@ _('expression fp1 param_list', 'empty')
def func_params(self, p):
return 'func_params'
    # Makes quadruples while the operator stack is not empty, then makes the param quadruples
@ _('')
def fp1(self, p):
if not self.called_func in self.function_table:
for i in range(1, len(self.symstack)):
if hasattr(self.symstack[i * -1], 'lineno'):
lineno = self.symstack[i * -1].lineno
break
self.found_errors = True
print('ERROR: No function\033[1m',
self.called_func, '\033[0mwas found.')
print(' Missing reference found on line', lineno)
return
made_quad = False
while(len(self.stack_of_stacks[-1])):
offset = 800 * len(self.types) * 2
ro = self.stack_of_stacks[-2].pop()
lo = self.stack_of_stacks[-2].pop()
op = self.stack_of_stacks[-1].pop()
self.last_type = sm.checkOperation(lo['type'], ro['type'], op)
idx = self.types.index(self.last_type)
num_temps = self.function_table[self.curr_scope]['num_temps']
self.function_table[self.curr_scope]['num_temps'] = self.update_num_temps(
num_temps, idx)
t_dir = idx * 300 + 3000
self.quadruples.append(
Quadruple(lo['dir'], ro['dir'], op, t_dir))
self.temp_counter += 1
self.quad_counter += 1
made_quad = True
if made_quad:
last_quad = self.quadruples[-1]
if self.param_counter == len(self.function_table[self.called_func]['params']):
self.found_errors = True
print(
'ERROR: Too many parameters passed in call to function on line', self.symstack[-2].lineno)
return
try:
sm.checkAssignment(self.types[int(self.function_table[self.called_func]
['params'][self.param_counter])], self.types[(last_quad.res % 1500) // 300], '=')
except TypeError:
self.found_errors = True
print(
'ERROR: Type mismatch on line', self.symstack[-2].lineno)
print(
' Expected value of type', self.types[int(self.function_table[self.called_func]['params'][self.param_counter])], 'got value of type', self.types[(last_quad.res % 1500) // 300], 'instead')
return
self.quadruples.append(
Quadruple(last_quad.res, -1, 'param', self.param_counter))
self.quad_counter += 1
self.param_counter += 1
else:
val = self.stack_of_stacks[-2].pop()
if self.param_counter == len(self.function_table[self.called_func]['params']):
self.found_errors = True
print(
'ERROR: Too many parameters passed in call to function on line', self.symstack[-2].lineno)
return
if not val:
return
try:
sm.checkAssignment(self.types[int(self.function_table[self.called_func]
['params'][self.param_counter])], self.types[(val['dir'] % 1500) // 300], '=')
except TypeError:
self.found_errors = True
print(
'ERROR: Type mismatch on line', self.symstack[-2].lineno)
print(
' Expected value of type', self.types[int(self.function_table[self.called_func]['params'][self.param_counter])], 'got value of type', self.types[(val['dir'] % 1500) // 300], 'instead')
return
self.quadruples.append(
Quadruple(val['dir'], -1, 'param', self.param_counter))
self.quad_counter += 1
self.param_counter += 1
@ _('MAIN m1_add_to_func_table LPAREN RPAREN LCURL main0 var_declaration statements RCURL main2')
def main(self, p):
return 'main'
    # Updates the jump target of the first goto in the list of quadruples
@ _('')
def main0(self, p):
self.quadruples[0].res = self.quad_counter
    # Makes the end quadruple and deletes the variable tables of the functions and classes
@ _('')
def main2(self, p):
self.quadruples.append(Quadruple(-1, -1, 'end', -1))
del self.function_table[self.program_name]['vars']
del self.function_table['main']['vars']
for class_name in self.class_table:
del self.class_table[class_name]['vars']
# Add main to function table
@ _('')
def m1_add_to_func_table(self, p):
self.curr_scope = 'main'
self.add_to_func_table('main', None)
@ _('')
def empty(self, p):
pass
def error(self, p):
if not p:
return
print('ERROR: Syntax error found on line', p.lineno)
if p.value == 'var':
print(
' All variable declarations must be done before any other statement')
elif p.value == '(':
print(
' Parentheses are not allowed in this position.')
elif p.value == '{':
print(
' Curly brackets are not allowed in this position.')
elif p.value == '[':
print(
' Brackets are not allowed in this position.')
elif p.value == ')':
print(
' Closing parenthesis found without matching opening one.')
elif p.value == '}':
print(
' Closing curly bracket without an opening one.')
elif p.value == ']':
print(
' Closing bracket without an opening one.')
elif p.value == ';':
print(
' Must only be used at the end of statements')
elif p.value == '=':
print(
' Assignment is not allowed here. Perhaps you meant to use ==?')
else:
print(
' Keyword or id misplaced')
if not self.found_errors:
print(
' It\'s possible that all other syntax errors may be fixed by solving this one.')
self.errok()
self.found_errors = True
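        # Panic-mode recovery: discard tokens until just past the next ';'
        # and hand the following token back to the parser so it can resume.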
while True:
tok = next(self.tokens, None)
if tok == None:
raise EOFError()
if tok.type == 'SEMI':
tok = next(self.tokens, None)
return tok
|
# Copyright 2022 Accenture Global Solutions Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import pathlib
import sys
import tracdap.rt.launch as launch
_ROOT_DIR = pathlib.Path(__file__).parent \
.joinpath("../../../..") \
.resolve()
_EXAMPLES_DIR = _ROOT_DIR.joinpath("examples/models/python")
class ChainingExample(unittest.TestCase):
def test_chaining(self):
job_config = _EXAMPLES_DIR.joinpath("chaining/chaining.yaml")
sys_config = _EXAMPLES_DIR.joinpath("sys_config.yaml")
test_dir = str(_EXAMPLES_DIR.joinpath("chaining"))
try:
sys.path.append(test_dir)
launch.launch_job(job_config, sys_config, dev_mode=True)
self.assertTrue(True)
finally:
sys.path.remove(test_dir)
|
#!/usr/bin/env python3
import os
import sys
import random
import numpy as np
src = open("input.txt", "r").readlines()
example = """
be cfbegad cbdgef fgaecd cgeb fdcge agebfd fecdb fabcd edb | fdgacbe cefdb cefbgd gcbe
edbfga begcd cbg gc gcadebf fbgde acbgfd abcde gfcbed gfec | fcgedb cgb dgebacf gc
fgaebd cg bdaec gdafb agbcfd gdcbef bgcad gfac gcb cdgabef | cg cg fdcagb cbg
fbegcd cbd adcefb dageb afcb bc aefdc ecdab fgdeca fcdbega | efabcd cedba gadfec cb
aecbfdg fbg gf bafeg dbefa fcge gcbea fcaegb dgceab fcbdga | gecf egdcabf bgf bfgea
fgeab ca afcebg bdacfeg cfaedg gcfdb baec bfadeg bafgc acf | gebdcfa ecba ca fadegcb
dbcfg fgd bdegcaf fgec aegbdf ecdfab fbedc dacgb gdcebf gf | cefg dcbef fcge gbcadfe
bdfegc cbegaf gecbf dfcage bdacg ed bedf ced adcbefg gebcd | ed bcgafe cdgba cbgef
egadfb cdbfeg cegd fecab cgb gbdefca cg fgcdab egfdb bfceg | gbdfcae bgc cg cgb
gcafb gcf dcaebfg ecagb gf abcdeg gaef cafbge fdbac fegbdc | fgae cfgab fg bagce
""".splitlines()
# example = "acedgfb cdfbe gcdfa fbcad dab cefabd cdfgeb eafb cagedb ab | cdfeb fcadb cdfeb cdbaf".splitlines()
"""
0: 1: 2: 3: 4:
aaaa .... aaaa aaaa ....
b c . c . c . c b c
b c . c . c . c b c
.... .... dddd dddd dddd
e f . f e . . f . f
e f . f e . . f . f
gggg .... gggg gggg ....
5: 6: 7: 8: 9:
aaaa aaaa aaaa aaaa aaaa
b . b . . c b c b c
b . b . . c b c b c
dddd dddd .... dddd dddd
. f e f . f e f . f
. f e f . f e f . f
gggg gggg .... gggg gggg
"""
# src = example
src = [r.strip() for r in src if r.strip()]
dc = [6, 2, 5, 5, 4, 5, 6, 3, 7, 6]
# 0 is a subset of 8
# *1 is a subset of 0, 3, 4, 7, 8, 9 ^[2, 5, 6]
# 2 is a subset of 8
# 3 is a subset of 8, 9
# *4 is a subset of 8, 9
# 5 is a subset of 8, 9
# 6 is a subset of 8
# *7 is a subset of 0, 3, 8, 9 ^[1, 2, 4, 5, 6]
# *8 is a subset of
# 9 is a subset of 8
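# A worked instance of the notes above, using the first example line:
# 'be' (length 2) must be 1 and 'cgeb' (length 4) must be 4; the subset-of-1
# reduction below strips {2, 5, 6} from every pattern containing both of 1's
# segments, so 'fgaecd' (which lacks 'b') ends up as the only pattern whose
# guesses still include 6, and collapse() then pins it to 6.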
part1 = 0
def collapse(digits):
# clean up where we only have a "new" single guess
for v in digits.values():
if len(v) == 1:
for i, j in digits.items():
if len(j) == 1:
continue
j.difference_update(v)
# cleanup where a digit has multiple guesses, but one of the guesses only appears once
guesses = [0 for _ in range(10)]
for d in digits.values():
for v in d:
guesses[v] += 1
for gi, c in enumerate(guesses):
if c > 1:
continue
for i, j in digits.items():
if gi in j:
j.difference_update(j.difference({gi}))
return digits
def get_choices(digits, idx):
choices = []
for k, v in digits.items():
if idx in v:
choices.append(k)
return choices
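# Shape note (a sketch): `digits` maps each scrambled pattern, stored as a
# frozenset of segment letters, to the set of digits it could still be, e.g.
# {frozenset('be'): {1}, frozenset('cbdgef'): {0, 6, 9}, ...}; collapse() and
# get_choices() both operate on this mapping.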
total = 0
for line in src:
scram, outp = line.split(" | ")
scram = [frozenset(x) for x in scram.split()]
outp = [frozenset(x) for x in outp.split()]
for d in outp:
if len(d) in (2, 4, 3, 7):
part1 += 1
digits = {}
for d in scram:
if len(d) == 2:
digits[d] = {1}
one = d
elif len(d) == 4:
digits[d] = {4}
four = d
elif len(d) == 3:
digits[d] = {7}
elif len(d) == 7:
digits[d] = {8}
elif len(d) == 6:
digits[d] = {0, 6, 9}
elif len(d) == 5:
digits[d] = {2, 3, 5}
else:
            assert False, "wut"
# reduce based on if it is a subset of 1
for d in scram:
if one.issubset(d):
digits[d].difference_update({2, 5, 6})
# four must be a subset of 9
for c in get_choices(digits, 9):
if four.issubset(c):
digits[c] = {9}
nine = c
# five must be a subset of nine
for c in get_choices(digits, 5):
if c.issubset(nine):
digits[c] = {5}
digits = collapse(digits)
c = ""
for d in outp:
c += str(list(digits[d])[0])
total += int(c)
print("part1:", part1)
print("part1:", total)
|
# -*- coding: utf-8 -*-
"""
@date Created on Thu May 18 14:35:34 2017
@copyright (C) 2015-2016 EOMYS ENGINEERING.
@author: pierre_b
"""
from os.path import join, isfile
from os import remove
import sys
from unittest import TestCase
from ddt import ddt, data
import mock # for unittest of raw_input
from PyQt5 import QtWidgets
from pyleecan.Classes.MachineSyRM import MachineSyRM
from pyleecan.Classes.MachineIPMSM import MachineIPMSM
from pyleecan.Classes.MachineDFIM import MachineDFIM
from pyleecan.Classes.MachineSCIM import MachineSCIM
from pyleecan.Classes.MachineSIPMSM import MachineSIPMSM
from pyleecan.Classes.MachineWRSM import MachineWRSM
from pyleecan.Classes.MachineSRM import MachineSRM
from pyleecan.GUI.Dialog.DMachineSetup.DMachineSetup import DMachineSetup
from pyleecan.Tests import save_gui_path as save_path
from pyleecan.GUI.Dialog.DMachineSetup.SMachineType.SMachineType import SMachineType
from pyleecan.GUI.Dialog.DMachineSetup.SMagnet.SMagnet import SMagnet
from pyleecan.GUI.Dialog.DMachineSetup.SWindParam.SWindParam import SWindParam
from pyleecan.GUI.Dialog.DMachineSetup.SWindCond.SWindCond import SWindCond
from pyleecan.GUI.Dialog.DMachineSetup.SBar.SBar import SBar
from pyleecan.GUI.Dialog.DMachineSetup.SWSlot.SWSlot import SWSlot
from pyleecan.GUI.Dialog.DMachineSetup.SMHoleMag.SMHoleMag import SMHoleMag
import matplotlib.pyplot as plt
from pyleecan.Tests import DATA_DIR
load_test = list()
load_test.append( # 1
{"type": "SCIM", "index": 0, "name": "SCIM_001", "p": 1, "count": 10}
)
load_test.append( # 2
{"type": "DFIM", "index": 1, "name": "DFIM_001", "p": 2, "count": 12}
)
load_test.append( # 3
{"type": "SyRM", "index": 2, "name": "SynRM_001", "p": 2, "count": 9}
)
load_test.append( # 4
{"type": "SPMSM", "index": 3, "name": "SPMSM_001", "p": 4, "count": 9}
)
load_test.append( # 5
{"type": "SIPMSM", "index": 4, "name": "SIPMSM_008", "p": 4, "count": 9}
)
load_test.append( # 6
{"type": "IPMSM", "index": 5, "name": "machine_IPMSM_A", "p": 5, "count": 9}
)
load_test.append( # 7
{"type": "WRSM", "index": 6, "name": "WRSM_001", "p": 6, "count": 12}
)
load_test.append( # 8
{"type": "SRM", "index": 7, "name": "SRM_test_load", "p": 10, "count": 9}
)
from PyQt5.QtCore import Qt
ENABLE_ITEM = Qt.ItemIsSelectable | Qt.ItemIsEnabled
@ddt
class test_DMachineSetup(TestCase):
"""Test that the widget DMachineSetup behave like it should"""
def setUp(self):
"""Run at the begining of every test to setup the gui"""
self.widget = DMachineSetup(matlib_path="./MaterialData")
@classmethod
def setUpClass(cls):
"""Start the app for the test"""
print("\nStart Test DMachineSetup")
cls.app = QtWidgets.QApplication(sys.argv)
@classmethod
def tearDownClass(cls):
"""Exit the app after the test"""
cls.app.quit()
@data(*load_test)
def test_load(self, test_dict):
"""Check that you can load a machine
"""
return_value = (
join(join(DATA_DIR, "Load_GUI"), test_dict["name"] + ".json"),
"Json (*.json)",
)
with mock.patch(
"PyQt5.QtWidgets.QFileDialog.getOpenFileName", return_value=return_value
):
# To trigger the slot
self.widget.b_load.clicked.emit(True)
# To remember to update when adding a new machine type
self.assertEqual(self.widget.w_step.c_type.count(), 8)
# Check load MachineType
self.assertEqual(type(self.widget.w_step), SMachineType)
self.assertEqual(self.widget.w_step.c_type.currentIndex(), test_dict["index"])
self.assertEqual(self.widget.w_step.c_type.currentText(), test_dict["type"])
self.assertEqual(self.widget.w_step.si_p.value(), test_dict["p"])
self.assertEqual(self.widget.w_step.le_name.text(), test_dict["name"])
# Check that the nav_step is correct
self.assertEqual(self.widget.nav_step.count(), test_dict["count"])
def test_set_save_machine_type(self):
"""Check that the Widget allow to change the machine type and save
"""
# Check that all the machine type are available
self.assertEqual(self.widget.w_step.c_type.count(), 8)
# DFIM
self.widget.w_step.c_type.setCurrentIndex(1)
self.assertEqual(self.widget.w_step.c_type.currentText(), "DFIM")
self.assertEqual(type(self.widget.machine), MachineDFIM)
save_function(self, self.widget, "test_dfim_save")
# SyRM
self.widget.w_step.c_type.setCurrentIndex(2)
self.assertEqual(self.widget.w_step.c_type.currentText(), "SyRM")
self.assertEqual(type(self.widget.machine), MachineSyRM)
save_function(self, self.widget, "test_syrm_save")
# SPMSM
self.widget.w_step.c_type.setCurrentIndex(3)
self.assertEqual(self.widget.w_step.c_type.currentText(), "SPMSM")
self.assertEqual(type(self.widget.machine), MachineSIPMSM)
save_function(self, self.widget, "test_spmsm_save")
# SIPMSM
self.widget.w_step.c_type.setCurrentIndex(4)
self.assertEqual(self.widget.w_step.c_type.currentText(), "SIPMSM")
self.assertEqual(type(self.widget.machine), MachineSIPMSM)
save_function(self, self.widget, "test_sipmsm_save")
# IPMSM
self.widget.w_step.c_type.setCurrentIndex(5)
self.assertEqual(self.widget.w_step.c_type.currentText(), "IPMSM")
self.assertEqual(type(self.widget.machine), MachineIPMSM)
save_function(self, self.widget, "test_ipmsm_save")
# WRSM
self.widget.w_step.c_type.setCurrentIndex(6)
self.assertEqual(self.widget.w_step.c_type.currentText(), "WRSM")
self.assertEqual(type(self.widget.machine), MachineWRSM)
save_function(self, self.widget, "test_wrsm_save")
# SRM
self.widget.w_step.c_type.setCurrentIndex(7)
self.assertEqual(self.widget.w_step.c_type.currentText(), "SRM")
self.assertEqual(type(self.widget.machine), MachineSRM)
save_function(self, self.widget, "test_srm_save")
# SCIM
self.widget.w_step.c_type.setCurrentIndex(0)
self.assertEqual(self.widget.w_step.c_type.currentText(), "SCIM")
self.assertEqual(type(self.widget.machine), MachineSCIM)
def save_function(self, widget, file_name):
"""Function to save a machine from the GUI
"""
file_path = join(save_path, file_name + ".json")
# Check that the file didn't already exist
if isfile(file_path):
remove(file_path)
self.assertFalse(isfile(file_path))
return_value = (file_path, "Json (*.json)")
with mock.patch(
"PyQt5.QtWidgets.QFileDialog.getSaveFileName", return_value=return_value
):
# To trigger the slot
widget.b_save.clicked.emit(True)
    # Check that the file now exists => delete for next test
self.assertTrue(isfile(file_path))
remove(file_path)
    # Check that the GUI has been updated
self.assertEqual(type(widget.w_step), SMachineType)
self.assertEqual(widget.w_step.le_name.text(), file_name)
|
import datetime
from rest_framework import viewsets, status
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from core.permissions import DjangoModelPermissions
from core.visits.serializer import PopulatedVisitSerializer
from core.models import Visit, FrontDeskEvent, FrontDeskEventType
from core.front_desk_events.serializer import FrontDeskEventForQueueSerializer
from django.contrib.auth.models import User
class QueueViewSet(viewsets.ViewSet):
"""
    API endpoint that displays the queue.
    Uses a regular ViewSet to display adjacent model responses in one view;
    hence the permission classes are repeated here instead of using the
    viewsets.py prototype.
"""
# DjangoModelPermissions requires a queryset to function,
# the next line is what the docs suggest as a 'sentinel queryset'
    queryset = FrontDeskEvent.objects.none()
permission_classes = [DjangoModelPermissions, IsAuthenticated]
def retrieve(self, request, program_id=None):
"""
retrieve most recent front desk event for each
visit that is happening today, filtered by program
"""
# filter by visits that are happening today in a certain program
visits_queryset = (
Visit.objects.select_related("participant", "program")
.filter(
program=program_id,
created_at__date=datetime.date.today(),
)
.order_by("urgency", "-created_at")
)
todays_visit_data = PopulatedVisitSerializer(
visits_queryset, many=True, context={"request": request}
).data
active_visits_queue = []
front_desk_events = FrontDeskEvent.objects.select_related("visit").filter(
visit__in=[dict(x)["id"] for x in todays_visit_data]
).order_by("-created_at").values("id", "visit", "event_type", "created_at")
# for each visit, get the most recent front desk event, to glean current visit status
for visit in todays_visit_data:
events = list(
                filter(lambda x: x.get("visit") == visit.get("id"), front_desk_events)
)
if events:
event = events[0]
event_type = event.get("event_type")
if event_type in [
FrontDeskEventType.ARRIVED.name,
FrontDeskEventType.STEPPED_OUT.name,
FrontDeskEventType.CAME_BACK.name,
]:
                    # if the most recent front desk event is an 'active' status, add it to the visit object
visit["status"] = event
# then add it to the 'active visits queue'
active_visits_queue.append(visit)
return Response(active_visits_queue)
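        # Shape note (a sketch; ids are illustrative): each queue entry is the
        # serialized visit dict with a "status" key holding its latest front
        # desk event, e.g. {"id": 7, ..., "status": {"id": 12, "visit": 7,
        # "event_type": "ARRIVED", "created_at": ...}}.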
|
def open_input():
with open("input.txt") as fd:
array = fd.read().splitlines()
array = list(map(int, array))
return array
def part_one(array):
    length = len(array)
    increased = 0
    for i in range(0, length - 1):
if array[i] < array[i + 1]:
increased += 1
print("part one:", increased)
def part_two(array):
    length = len(array)
    increased = 0
    for i in range(0, length - 3):
sum1 = array[i] + array[i + 1] + array[i + 2]
sum2 = array[i + 1] + array[i + 2] + array[i + 3]
if sum1 < sum2:
increased += 1
print("part two:", increased)
if __name__ == "__main__":
array = open_input()
part_one(array)
part_two(array)
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetCustomResourceProviderResult',
'AwaitableGetCustomResourceProviderResult',
'get_custom_resource_provider',
]
@pulumi.output_type
class GetCustomResourceProviderResult:
"""
A manifest file that defines the custom resource provider resources.
"""
def __init__(__self__, actions=None, id=None, location=None, name=None, provisioning_state=None, resource_types=None, tags=None, type=None, validations=None):
if actions and not isinstance(actions, list):
raise TypeError("Expected argument 'actions' to be a list")
pulumi.set(__self__, "actions", actions)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if resource_types and not isinstance(resource_types, list):
raise TypeError("Expected argument 'resource_types' to be a list")
pulumi.set(__self__, "resource_types", resource_types)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if validations and not isinstance(validations, list):
raise TypeError("Expected argument 'validations' to be a list")
pulumi.set(__self__, "validations", validations)
@property
@pulumi.getter
def actions(self) -> Optional[Sequence['outputs.CustomRPActionRouteDefinitionResponse']]:
"""
A list of actions that the custom resource provider implements.
"""
return pulumi.get(self, "actions")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> str:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the resource provider.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceTypes")
def resource_types(self) -> Optional[Sequence['outputs.CustomRPResourceTypeRouteDefinitionResponse']]:
"""
A list of resource types that the custom resource provider implements.
"""
return pulumi.get(self, "resource_types")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def validations(self) -> Optional[Sequence['outputs.CustomRPValidationsResponse']]:
"""
A list of validations to run on the custom resource provider's requests.
"""
return pulumi.get(self, "validations")
class AwaitableGetCustomResourceProviderResult(GetCustomResourceProviderResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetCustomResourceProviderResult(
actions=self.actions,
id=self.id,
location=self.location,
name=self.name,
provisioning_state=self.provisioning_state,
resource_types=self.resource_types,
tags=self.tags,
type=self.type,
validations=self.validations)
def get_custom_resource_provider(resource_group_name: Optional[str] = None,
resource_provider_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetCustomResourceProviderResult:
"""
A manifest file that defines the custom resource provider resources.
:param str resource_group_name: The name of the resource group.
:param str resource_provider_name: The name of the resource provider.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['resourceProviderName'] = resource_provider_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:customproviders/v20180901preview:getCustomResourceProvider', __args__, opts=opts, typ=GetCustomResourceProviderResult).value
return AwaitableGetCustomResourceProviderResult(
actions=__ret__.actions,
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
resource_types=__ret__.resource_types,
tags=__ret__.tags,
type=__ret__.type,
validations=__ret__.validations)
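# A hypothetical usage sketch (the resource names below are illustrative and
# not taken from this file):
#
#     result = get_custom_resource_provider(
#         resource_group_name="example-rg",
#         resource_provider_name="example-provider")
#     pulumi.export("provisioningState", result.provisioning_state)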
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import collections
from ansible.module_utils.six import iteritems, string_types
from ansible_collections.smabot.git.plugins.module_utils.plugins.gitlab_action import GitlabBase
from ansible_collections.smabot.base.plugins.module_utils.utils.utils import ansible_assert
class ActionModule(GitlabBase):
def __init__(self, *args, **kwargs):
super(ActionModule, self).__init__(*args, **kwargs)
self._supports_check_mode = False
self._supports_async = False
@property
def argspec(self):
tmp = super(ActionModule, self).argspec
tmp.update({
'modname': (list(string_types)),
'modargs': ([collections.abc.Mapping], {}),
})
return tmp
def run_specific(self, result):
cmdret = self.exec_gitlab_module(
self.get_taskparam('modname'),
modargs=self.get_taskparam('modargs')
)
result.update(cmdret)
return result
|
import unittest
from okapi.proto.okapi.security.v1 import CreateOberonKeyRequest, CreateOberonTokenRequest, CreateOberonProofRequest, \
VerifyOberonProofRequest, UnBlindOberonTokenRequest, BlindOberonTokenRequest
from okapi.wrapper import Oberon
class KeyTests(unittest.TestCase):
def test_oberon_demo(self):
key = Oberon.create_key(CreateOberonKeyRequest())
data = bytes("alice", "utf8")
nonce = bytes("1234", "utf8")
token = Oberon.create_token(CreateOberonTokenRequest(data=data, sk=key.sk))
proof = Oberon.create_proof(CreateOberonProofRequest(data=data, nonce=nonce, token=token.token))
result = Oberon.verify_proof(VerifyOberonProofRequest(data=data, nonce=nonce, pk=key.pk, proof=proof.proof))
self.assertTrue(result.valid, "Proof should verify")
def test_demo_with_binding(self):
key = Oberon.create_key(CreateOberonKeyRequest())
data = bytes("alice", "utf8")
nonce = bytes("1234", "utf8")
issuer_2fa = bytes("issuer code", "utf8")
token_request = CreateOberonTokenRequest(data=data, sk=key.sk)
token_request.blinding.append(issuer_2fa)
blinded_token = Oberon.create_token(token_request)
# Holder unblinds the token
unblind_request = UnBlindOberonTokenRequest(token=blinded_token.token)
unblind_request.blinding.append(issuer_2fa)
token = Oberon.unblind_token(unblind_request)
# Holder prepares a proof without blinding
proof = Oberon.create_proof(CreateOberonProofRequest(data=data, nonce=nonce, token=token.token))
# Verifier verifies the proof
result = Oberon.verify_proof(VerifyOberonProofRequest(data=data, nonce=nonce, pk=key.pk, proof=proof.proof))
self.assertTrue(result.valid)
# Holder blinds the token with a personal pin
user_pin = bytes("0042", "utf8")
blind_request = BlindOberonTokenRequest(token=token.token)
blind_request.blinding.append(user_pin)
user_blinded_token = Oberon.blind_token(blind_request)
proof_request = CreateOberonProofRequest(data=data, nonce=nonce, token=user_blinded_token.token)
proof_request.blinding.append(user_pin)
proof = Oberon.create_proof(proof_request)
# Verifier verifies the proof
result = Oberon.verify_proof(VerifyOberonProofRequest(data=data, nonce=nonce, pk=key.pk, proof=proof.proof))
self.assertTrue(result.valid)
# Bad actor creates a proof with incorrect blinding pin
proof_request = CreateOberonProofRequest(data=data, nonce=nonce, token=user_blinded_token.token)
proof_request.blinding.append(bytes("invalid pin", "utf8"))
proof = Oberon.create_proof(proof_request)
        # Verifier tries to verify the proof and fails
result = Oberon.verify_proof(VerifyOberonProofRequest(data=data, nonce=nonce, pk=key.pk, proof=proof.proof))
self.assertFalse(result.valid)
|
import numpy, random
class Individual:
    def __init__(self, genome, llimits=[], ulimits=[], type=[], LEN=1, fitness_func=None):
if genome is None:
self.genome = numpy.zeros(LEN,dtype=float)
for gene in range(LEN):
if type[gene] == "integer":
self.genome[gene] = numpy.random.randint(llimits[gene], ulimits[gene])
else:
self.genome[gene] = numpy.random.uniform(llimits[gene], ulimits[gene])
else:
self.genome = genome
self.fitness = fitness_func(self.genome)
def __str__(self):
return "".join(str(int(i)) for i in self.genome)
def crossover(a, b, fitness):
g, h = a.genome.copy(), b.genome.copy()
for pt in range(len(g)):
if numpy.random.random() < 0.5:
g[pt], h[pt] = h[pt], g[pt]
return (Individual(genome=g,fitness_func=fitness), Individual(genome=h,fitness_func=fitness))
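# NOTE: the bit-flip below (g[pt] = not g[pt]) is classic binary-genome
# mutation; for the real/integer genomes generated above, a bounded random
# perturbation would normally be the more appropriate operator.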
def mutate(a, mut_prob,fitness):
g = a.genome.copy()
for pt in range(len(g)):
if numpy.random.random() < mut_prob:
g[pt] = not g[pt]
return Individual(g,fitness_func=fitness)
def stats(pop, gen,threshold):
best = max(pop, key=lambda x: x.fitness)
print("{0} {1:.2f} {2} {3}".format(gen, numpy.mean([i.fitness for i in pop]), best.fitness, str(best)))
return (best.fitness >= threshold)
def roulette(items, n):
total = float(sum(w.fitness for w in items))
i = 0
w, v = items[0].fitness, items[0]
while n:
x = total * (1 - numpy.random.random() ** (1.0 / n))
total -= x
while x > w:
x -= w
i += 1
w, v = items[i].fitness, items[i]
w -= x
yield v
n -= 1
def tournament(items, n, tsize=5):
for i in range(n):
candidates = random.sample(items, tsize)
yield max(candidates, key=lambda x: x.fitness)
def step(pop,cross_prob,mut_prob,fitness):
newpop = []
parents = roulette(pop, len(pop) + 1) # one extra for final xover
while len(newpop) < len(pop):
if numpy.random.random() < cross_prob:
newpop.extend(map(mutate, crossover(next(parents), next(parents),fitness=fitness),[mut_prob,mut_prob],[fitness,fitness]))
else:
newpop.append(mutate(next(parents),mut_prob=mut_prob,fitness=fitness))
return newpop
def run(llimit, ulimit, type, GENERATIONS, CROSSOVER_PROB, POPSIZE, LEN, MUTATION_PROB,FITNESS,THRESHOLD):
numpy.random.seed(100)
pop = [Individual(None,llimit,ulimit,type,LEN,FITNESS) for i in range(POPSIZE)]
print(pop)
stats(pop, 0, THRESHOLD)
for gen in range(1, GENERATIONS):
pop = step(pop,CROSSOVER_PROB,MUTATION_PROB,FITNESS)
if stats(pop, gen, THRESHOLD):
print("Success")
llimit = [0.5,1e-6,1e-6,0]
ulimit = [1.5,0.1,0.1,3]
type = ['real','real','real','integer']
LEN = 4
FITNESS, SUCCESS_THRESHOLD = (numpy.sum, LEN)
run(llimit, ulimit, type, 100, 1, 100, LEN, 0.9, FITNESS, SUCCESS_THRESHOLD)
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.19.15
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes_asyncio.client.configuration import Configuration
class V1TopologySpreadConstraint(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'label_selector': 'V1LabelSelector',
'max_skew': 'int',
'topology_key': 'str',
'when_unsatisfiable': 'str'
}
attribute_map = {
'label_selector': 'labelSelector',
'max_skew': 'maxSkew',
'topology_key': 'topologyKey',
'when_unsatisfiable': 'whenUnsatisfiable'
}
def __init__(self, label_selector=None, max_skew=None, topology_key=None, when_unsatisfiable=None, local_vars_configuration=None): # noqa: E501
"""V1TopologySpreadConstraint - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._label_selector = None
self._max_skew = None
self._topology_key = None
self._when_unsatisfiable = None
self.discriminator = None
if label_selector is not None:
self.label_selector = label_selector
self.max_skew = max_skew
self.topology_key = topology_key
self.when_unsatisfiable = when_unsatisfiable
@property
def label_selector(self):
"""Gets the label_selector of this V1TopologySpreadConstraint. # noqa: E501
:return: The label_selector of this V1TopologySpreadConstraint. # noqa: E501
:rtype: V1LabelSelector
"""
return self._label_selector
@label_selector.setter
def label_selector(self, label_selector):
"""Sets the label_selector of this V1TopologySpreadConstraint.
:param label_selector: The label_selector of this V1TopologySpreadConstraint. # noqa: E501
:type: V1LabelSelector
"""
self._label_selector = label_selector
@property
def max_skew(self):
"""Gets the max_skew of this V1TopologySpreadConstraint. # noqa: E501
MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It's a required field. Default value is 1 and 0 is not allowed. # noqa: E501
:return: The max_skew of this V1TopologySpreadConstraint. # noqa: E501
:rtype: int
"""
return self._max_skew
@max_skew.setter
def max_skew(self, max_skew):
"""Sets the max_skew of this V1TopologySpreadConstraint.
MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It's a required field. Default value is 1 and 0 is not allowed. # noqa: E501
:param max_skew: The max_skew of this V1TopologySpreadConstraint. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and max_skew is None: # noqa: E501
raise ValueError("Invalid value for `max_skew`, must not be `None`") # noqa: E501
self._max_skew = max_skew
@property
def topology_key(self):
"""Gets the topology_key of this V1TopologySpreadConstraint. # noqa: E501
TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a \"bucket\", and try to put balanced number of pods into each bucket. It's a required field. # noqa: E501
:return: The topology_key of this V1TopologySpreadConstraint. # noqa: E501
:rtype: str
"""
return self._topology_key
@topology_key.setter
def topology_key(self, topology_key):
"""Sets the topology_key of this V1TopologySpreadConstraint.
TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a \"bucket\", and try to put balanced number of pods into each bucket. It's a required field. # noqa: E501
:param topology_key: The topology_key of this V1TopologySpreadConstraint. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and topology_key is None: # noqa: E501
raise ValueError("Invalid value for `topology_key`, must not be `None`") # noqa: E501
self._topology_key = topology_key
@property
def when_unsatisfiable(self):
"""Gets the when_unsatisfiable of this V1TopologySpreadConstraint. # noqa: E501
WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered \"Unsatisfiable\" for an incoming pod if and only if every possible node assigment for that pod would violate \"MaxSkew\" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won't make it *more* imbalanced. It's a required field. # noqa: E501
:return: The when_unsatisfiable of this V1TopologySpreadConstraint. # noqa: E501
:rtype: str
"""
return self._when_unsatisfiable
@when_unsatisfiable.setter
def when_unsatisfiable(self, when_unsatisfiable):
"""Sets the when_unsatisfiable of this V1TopologySpreadConstraint.
WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered \"Unsatisfiable\" for an incoming pod if and only if every possible node assigment for that pod would violate \"MaxSkew\" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won't make it *more* imbalanced. It's a required field. # noqa: E501
:param when_unsatisfiable: The when_unsatisfiable of this V1TopologySpreadConstraint. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and when_unsatisfiable is None: # noqa: E501
raise ValueError("Invalid value for `when_unsatisfiable`, must not be `None`") # noqa: E501
self._when_unsatisfiable = when_unsatisfiable
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1TopologySpreadConstraint):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1TopologySpreadConstraint):
return True
return self.to_dict() != other.to_dict()
|
from django.conf.urls.defaults import *
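# NOTE: this urlconf targets very old Django: django.conf.urls.defaults was
# removed in Django 1.6 and patterns() in Django 1.10; newer projects declare
# a plain urlpatterns list of url()/path() entries instead.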
urlpatterns = patterns(
'splango.views',
url(r'^confirm_human/$', 'confirm_human', name="splango-confirm-human"),
url(r'^admin/$', 'experiments_overview', name="splango-admin"),
url(r'^admin/exp/(?P<expname>[^/]+)/$', 'experiment_detail', name="splango-experiment-detail"),
url(r'^admin/exp/(?P<expname>[^/]+)/(?P<report_id>\d+)/$', 'experiment_report', name="splango-experiment-report"),
url(r'^admin/exp/(?P<expname>[^/]+)/(?P<variant>[^/]+)/(?P<goal>[^/]+)/$', 'experiment_log', name="splango-experiment-log"),
)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import random
import numpy as np
import pandas as pd
import h5py
import matplotlib.pyplot as plt
from math import cos, sin, atan2, sqrt, pi, radians, degrees, ceil, isnan
from skimage import io, transform
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
TRAIN_CSV_PATH = './pointdata4/traindata/'
TEST_CSV_PATH = './pointdata4/testdata/'
data_path = './h5/'
train_file_path = data_path + 'initial_train_data.h5'
test_file_path = data_path + 'initial_test_data.h5'
# child-level directories, grouped by rotation angle
label_dirs = [[16, 19], [43,71,129, 260], [95,128,129, 274]]
# parent-level directories, grouped by road class
label_set = [0, 1, 2]
# get the centroid of a 2D point set
def get_centroid(point_set):
c_x, c_y = zip(*point_set)
centroid_x = sum(c_x)/len(c_x)
centroid_y = sum(c_y)/len(c_y)
return centroid_x, centroid_y
# rotate a point counterclockwise about a given center
def n_rotate(angle, valuex, valuey, centerx, centery):
valuex = np.array(valuex)
valuey = np.array(valuey)
nRotatex = (valuex-centerx)*cos(angle) - \
(valuey-centery)*sin(angle) + centerx
nRotatey = (valuex-centerx)*sin(angle) + \
(valuey-centery)*cos(angle) + centery
return nRotatex, nRotatey
# list the csv files under a path
def get_csv_list(path):
csv_file_list = []
file_list = os.listdir(path)
for file_name in file_list:
if file_name.endswith('csv'):
csv_file_list.append(path + "/" + file_name)
return csv_file_list
# read the point-set data from the csv files
def get_csv_data(path_list):
    # create an empty array of fixed shape
sum_data = np.empty([0, 1024, 2], dtype=np.float32)
    # iterate over each csv file
for path in path_list:
        # read each csv file into a NumPy array
data = np.genfromtxt(path, delimiter=',', dtype=np.float32)[:, :2]
data_len = len(data)
empty_len = 1024 - data_len
        # pad to a full 1024 points: the csv data plus rows resampled at random indices
count = 0
while count < empty_len:
data = np.append(
data, [data[random.randint(0, data_len-1)]], axis=0)
count += 1
sum_data = np.append(sum_data, [data], axis=0)
print(sum_data.shape)
return sum_data
# randomly shuffle the point-set data
def exchange_data_index(sum_data, label_data):
cursor_index = 0
max_range = len(sum_data)
while cursor_index < max_range:
random_index = random.randint(0, max_range-1)
temp_sum_data = sum_data[0]
temp_label_data = label_data[0]
sum_data = np.delete(sum_data, 0, axis=0)
label_data = np.delete(label_data, 0, axis=0)
sum_data = np.insert(sum_data, random_index, temp_sum_data, axis=0)
label_data = np.insert(label_data, random_index,
temp_label_data, axis=0)
cursor_index += 1
return sum_data, label_data
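# (numpy.random.permutation over a shared index array would give the same
# shuffle in O(n) instead of the delete/insert loop above)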
def get_label_and_data(root_path, label_dirs):
sum_data = np.empty([0, 1024, 2], dtype=np.float32)
typical_data = np.empty([0], dtype=np.int32)
for data_type, label_dir_set in enumerate(label_dirs):
print(">> 现在进入【第%d类】数据" % (data_type+1))
for rotate_angle in label_dir_set:
print("-- 需要旋转%d度的数据集:" % (rotate_angle))
            # get the list of csv files
csv_list = get_csv_list(
root_path + str(data_type) + '/' + str(rotate_angle))
            # load the point-set data from the csv files
csv_data = get_csv_data(csv_list)
            # iterate over the sample data
for i, sample_data in enumerate(csv_data):
                # compute the centroid of the point set
centroid_x, centroid_y = get_centroid(sample_data)
                # rotate every point in the set about the centroid
for index, coordinate in enumerate(sample_data):
x, y = coordinate
n_x, n_y = n_rotate(
radians(rotate_angle), x, y, centroid_x, centroid_y)
                    # center the rotated coordinates on the centroid
sample_data[index] = [n_x-centroid_x, n_y-centroid_y]
                # write the rotated point set back into the batch
csv_data[i] = sample_data
                # collect the label for this point set
typical_data = np.append(typical_data, [data_type], axis=0)
            # append this batch of samples to the master array: (n,1024,2) onto (m,1024,2)
sum_data = np.append(sum_data, csv_data, axis=0)
return sum_data, typical_data
if __name__ == "__main__":
sum_train_data, train_typical_data = get_label_and_data(
TRAIN_CSV_PATH, label_dirs)
sum_test_data, test_typical_data = get_label_and_data(
TEST_CSV_PATH, label_dirs)
    # randomly shuffle the point-set data
rand_sum_train_data, rand_train_typical_data = exchange_data_index(
sum_train_data, train_typical_data)
rand_sum_test_data, rand_test_typical_data = exchange_data_index(
sum_test_data, test_typical_data)
    if not os.path.exists(data_path):
        os.mkdir(data_path)
    # h5py's 'w' mode truncates any existing file, so the remove/reopen dance
    # (which also leaked the handle returned by open(..., 'w')) is unnecessary
    with h5py.File(train_file_path, 'w') as f:
        f.create_dataset('data', data=rand_sum_train_data)
        f.create_dataset('label', data=rand_train_typical_data)
    with h5py.File(test_file_path, 'w') as f:
        f.create_dataset('data', data=rand_sum_test_data)
        f.create_dataset('label', data=rand_test_typical_data)
|
"""
Premium Question
"""
from collections import deque
__author__ = 'Daniel'
class HitCounter(object):
def __init__(self):
"""
Initialize your data structure here.
calls are being made to the system in chronological order.
It is possible that several hits arrive roughly at the same time.
What if the number of hits per second could be very large? Does your design scale? # use counter
"""
self.q = deque()
def hit(self, timestamp):
"""
Record a hit.
@param timestamp - The current timestamp (in seconds granularity).
:type timestamp: int
:rtype: void
"""
self.pop(timestamp)
self.q.append(timestamp)
def getHits(self, timestamp):
"""
Return the number of hits in the past 5 minutes.
@param timestamp - The current timestamp (in seconds granularity).
:type timestamp: int
:rtype: int
"""
self.pop(timestamp)
return len(self.q)
def pop(self, timestamp):
while self.q and timestamp - self.q[0] >= 300:
self.q.popleft()
# Your HitCounter object will be instantiated and called as such:
# obj = HitCounter()
# obj.hit(timestamp)
# param_2 = obj.getHits(timestamp)
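# A minimal sketch of the counter-based variant hinted at in the docstring
# (an illustrative addition, not part of the original solution): bucket hits
# into a 300-slot ring keyed by timestamp so memory stays O(300) no matter
# how many hits arrive per second.
class ScalableHitCounter(object):
    def __init__(self):
        self.times = [0] * 300   # the second each bucket currently represents
        self.counts = [0] * 300  # hits recorded for that second
    def hit(self, timestamp):
        i = timestamp % 300
        if self.times[i] != timestamp:
            # the bucket holds a stale second; reclaim it
            self.times[i] = timestamp
            self.counts[i] = 0
        self.counts[i] += 1
    def getHits(self, timestamp):
        # count only buckets whose second falls inside the 5-minute window
        return sum(c for t, c in zip(self.times, self.counts)
                   if timestamp - t < 300)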
|
'''
Manage yum packages and repositories. Note that yum package names are case-sensitive.
'''
from __future__ import unicode_literals
from pyinfra.api import operation
from . import files
from .util.packaging import ensure_packages, ensure_rpm, ensure_yum_repo
@operation
def key(state, host, key):
'''
Add yum gpg keys with ``rpm``.
+ key: filename or URL
Note:
        always runs exactly one command; no state checking is performed
Example:
.. code:: python
linux_id = host.fact.linux_distribution['release_meta'].get('ID')
yum.key(
{'Add the Docker CentOS gpg key'},
'https://download.docker.com/linux/{}/gpg'.format(linux_id),
)
'''
yield 'rpm --import {0}'.format(key)
@operation
def repo(
state, host, name, baseurl=None,
present=True, description=None, enabled=True, gpgcheck=True, gpgkey=None,
):
# NOTE: if updating this docstring also update `dnf.repo`
# COMPAT: on v1 rearrange baseurl/present kwargs
'''
Add/remove/update yum repositories.
+ name: URL or name for the ``.repo`` file
+ baseurl: the baseurl of the repo (if ``name`` is not a URL)
+ present: whether the ``.repo`` file should be present
+ description: optional verbose description
+ enabled: whether this repo is enabled
    + gpgcheck: whether to set ``gpgcheck=1``
+ gpgkey: the URL to the gpg key for this repo
``Baseurl``/``description``/``gpgcheck``/``gpgkey``:
These are only valid when ``name`` is a filename (ie not a URL). This is
for manual construction of repository files. Use a URL to download and
install remote repository files.
Examples:
.. code:: python
# Download a repository file
yum.repo(
{'Install Docker-CE repo via URL'},
'https://download.docker.com/linux/centos/docker-ce.repo',
)
# Create the repository file from baseurl/etc
yum.repo(
{'Add the Docker CentOS repo'},
name='DockerCE',
baseurl='https://download.docker.com/linux/centos/7/$basearch/stable',
)
'''
yield ensure_yum_repo(
state, host, files,
name, baseurl, present, description, enabled, gpgcheck, gpgkey,
'yum-config-manager',
)
@operation
def rpm(state, host, source, present=True):
# NOTE: if updating this docstring also update `dnf.rpm`
'''
Add/remove ``.rpm`` file packages.
+ source: filename or URL of the ``.rpm`` package
    + present: whether or not the package should exist on the system
URL sources with ``present=False``:
If the ``.rpm`` file isn't downloaded, pyinfra can't remove any existing
package as the file won't exist until mid-deploy.
Example:
.. code:: python
yum.rpm(
{'Install EPEL rpm to enable EPEL repo'},
'https://dl.fedoraproject.org/pub/epel/epel-release-latest-'
'{{ host.fact.linux_distribution.major }}.noarch.rpm',
)
'''
yield ensure_rpm(state, host, files, source, present, 'yum')
@operation
def update(state, host):
'''
Updates all yum packages.
'''
yield 'yum update -y'
_update = update # noqa: E305 (for use below where update is a kwarg)
@operation
def packages(
state, host, packages=None,
present=True, latest=False, update=False, clean=False, nobest=False,
extra_install_args='', extra_uninstall_args='',
):
'''
Install/remove/update yum packages & updates.
+ packages: list of packages to ensure
+ present: whether the packages should be installed
+ latest: whether to upgrade packages without a specified version
+ update: run yum update
+ clean: run yum clean
    + nobest: add the ``--nobest`` option to the install command
+ extra_install_args: additional arguments to the yum install command
+ extra_uninstall_args: additional arguments to the yum uninstall command
Versions:
Package versions can be pinned like yum: ``<pkg>-<version>``
Examples:
.. code:: python
# Update package list and install packages
yum.packages(
{'Install Vim and Vim enhanced'},
['vim-enhanced', 'vim'],
update=True,
)
# Install the latest versions of packages (always check)
yum.packages(
{'Install latest Vim'},
['vim'],
latest=True,
)
'''
if clean:
yield 'yum clean all'
if update:
yield _update(state, host)
nobest_option = ''
if nobest:
nobest_option = ' --nobest'
if extra_install_args != '':
extra_install_args = ' ' + extra_install_args
if extra_uninstall_args != '':
extra_uninstall_args = ' ' + extra_uninstall_args
yield ensure_packages(
packages, host.fact.rpm_packages, present,
install_command='yum install -y' + nobest_option + extra_install_args,
uninstall_command='yum remove -y' + extra_uninstall_args,
upgrade_command='yum update -y',
version_join='-',
latest=latest,
)
|
from cx_Freeze import setup, Executable
setup(name = "Server" ,
version = "1.0" ,
description = "" ,
executables = [Executable("server.py")])
|
import os
import platform
from collections import OrderedDict
from itertools import chain
from conans.client import defs_to_string, join_arguments
from conans.client.build.cppstd_flags import cppstd_flag
from conans.client.tools import cross_building
from conans.client.tools.oss import get_cross_building_settings
from conans.errors import ConanException
from conans.model.conan_file import ConanFile
from conans.model.version import Version
from conans.util.env_reader import get_env
from conans.util.files import mkdir, get_abs_path
from conans.tools import cpu_count, args_to_string
from conans import tools
from conans.util.log import logger
from conans.util.config_parser import get_bool_from_text
from conans.client.build.compiler_flags import architecture_flag
def _get_env_cmake_system_name():
env_system_name = get_env("CONAN_CMAKE_SYSTEM_NAME", "")
return {"False": False, "True": True, "": None}.get(env_system_name, env_system_name)
class CMake(object):
def __init__(self, conanfile, generator=None, cmake_system_name=True,
parallel=True, build_type=None, toolset=None, make_program=None,
set_cmake_flags=False):
"""
        :param conanfile: ConanFile instance (plain settings are no longer accepted)
:param generator: Generator name to use or none to autodetect
:param cmake_system_name: False to not use CMAKE_SYSTEM_NAME variable,
True for auto-detect or directly a string with the system name
:param parallel: Try to build with multiple cores if available
        :param build_type: Overrides default build type coming from settings
:param toolset: Toolset name to use (such as llvm-vs2014) or none for default one,
applies only to certain generators (e.g. Visual Studio)
:param set_cmake_flags: whether or not to set CMake flags like CMAKE_CXX_FLAGS, CMAKE_C_FLAGS, etc.
it's vital to set for certain projects (e.g. using CMAKE_SIZEOF_VOID_P or CMAKE_LIBRARY_ARCHITECTURE)
"""
if not isinstance(conanfile, ConanFile):
raise ConanException("First argument of CMake() has to be ConanFile. Use CMake(self)")
self._settings = conanfile.settings
self._conanfile = conanfile
self._os = self._settings.get_safe("os")
self._os_build, _, self._os_host, _ = get_cross_building_settings(self._settings)
self._compiler = self._settings.get_safe("compiler")
self._compiler_version = self._settings.get_safe("compiler.version")
self._arch = self._settings.get_safe("arch")
os_ver_str = "os.api_level" if self._os == "Android" else "os.version"
self._op_system_version = self._settings.get_safe(os_ver_str)
self._libcxx = self._settings.get_safe("compiler.libcxx")
self._runtime = self._settings.get_safe("compiler.runtime")
self._build_type = self._settings.get_safe("build_type")
self._cppstd = self._settings.get_safe("cppstd")
self.generator = generator or self._generator()
self.toolset = self._toolset(toolset)
self.build_dir = None
self._cmake_system_name = _get_env_cmake_system_name()
if self._cmake_system_name is None: # Not overwritten using environment
self._cmake_system_name = cmake_system_name
self.parallel = parallel
self._set_cmake_flags = set_cmake_flags
self.definitions = self._get_cmake_definitions()
if build_type and build_type != self._build_type:
# Call the setter to warn and update the definitions if needed
self.build_type = build_type
make_program = os.getenv("CONAN_MAKE_PROGRAM") or make_program
if make_program:
if not tools.which(make_program):
self._conanfile.output.warn("The specified make program '%s' cannot be found"
"and will be ignored" % make_program)
else:
self._conanfile.output.info("Using '%s' as CMAKE_MAKE_PROGRAM" % make_program)
self.definitions["CMAKE_MAKE_PROGRAM"] = make_program
@property
def build_folder(self):
return self.build_dir
@build_folder.setter
def build_folder(self, value):
self.build_dir = value
@property
def build_type(self):
return self._build_type
@build_type.setter
def build_type(self, build_type):
settings_build_type = self._settings.get_safe("build_type")
if build_type != settings_build_type:
self._conanfile.output.warn(
'Set CMake build type "%s" is different than the settings build_type "%s"'
% (build_type, settings_build_type))
self._build_type = build_type
self.definitions.update(self._build_type_definition())
@property
def flags(self):
return defs_to_string(self.definitions)
def _generator(self):
if "CONAN_CMAKE_GENERATOR" in os.environ:
return os.environ["CONAN_CMAKE_GENERATOR"]
if not self._compiler or not self._compiler_version or not self._arch:
if self._os_build == "Windows":
# Not enough settings to set a generator in Windows
return None
return "Unix Makefiles"
if self._compiler == "Visual Studio":
_visuals = {'8': '8 2005',
'9': '9 2008',
'10': '10 2010',
'11': '11 2012',
'12': '12 2013',
'14': '14 2015',
'15': '15 2017'}
base = "Visual Studio %s" % _visuals.get(self._compiler_version,
"UnknownVersion %s" % self._compiler_version)
if self._arch == "x86_64":
return base + " Win64"
elif "arm" in self._arch:
return base + " ARM"
else:
return base
# The generator depends on the build machine, not the target
if self._os_build == "Windows":
return "MinGW Makefiles" # it is valid only under Windows
return "Unix Makefiles"
def _toolset(self, toolset=None):
if toolset:
return toolset
elif self._settings.get_safe("compiler") == "Visual Studio":
subs_toolset = self._settings.get_safe("compiler.toolset")
if subs_toolset:
return subs_toolset
return None
def _cmake_compiler_options(self):
cmake_definitions = OrderedDict()
if str(self._os).lower() == "macos":
if self._arch == "x86":
cmake_definitions["CMAKE_OSX_ARCHITECTURES"] = "i386"
return cmake_definitions
def _cmake_cross_build_defines(self):
ret = OrderedDict()
os_ver = get_env("CONAN_CMAKE_SYSTEM_VERSION", self._op_system_version)
toolchain_file = get_env("CONAN_CMAKE_TOOLCHAIN_FILE", "")
if toolchain_file != "":
logger.info("Setting Cross build toolchain file: %s" % toolchain_file)
ret["CMAKE_TOOLCHAIN_FILE"] = toolchain_file
return ret
if self._cmake_system_name is False:
return ret
# System name and system version
if self._cmake_system_name is not True: # String not empty
ret["CMAKE_SYSTEM_NAME"] = self._cmake_system_name
ret["CMAKE_SYSTEM_VERSION"] = os_ver
else: # detect if we are cross building and the system name and version
if cross_building(self._conanfile.settings): # We are cross building
if self._os != self._os_build:
if self._os: # the_os is the host (regular setting)
ret["CMAKE_SYSTEM_NAME"] = "Darwin" if self._os in ["iOS", "tvOS",
"watchOS"] else self._os
if os_ver:
ret["CMAKE_SYSTEM_VERSION"] = os_ver
else:
ret["CMAKE_SYSTEM_NAME"] = "Generic"
# system processor
cmake_system_processor = os.getenv("CONAN_CMAKE_SYSTEM_PROCESSOR", None)
if cmake_system_processor:
ret["CMAKE_SYSTEM_PROCESSOR"] = cmake_system_processor
if ret: # If enabled cross compile
for env_var in ["CONAN_CMAKE_FIND_ROOT_PATH",
"CONAN_CMAKE_FIND_ROOT_PATH_MODE_PROGRAM",
"CONAN_CMAKE_FIND_ROOT_PATH_MODE_LIBRARY",
"CONAN_CMAKE_FIND_ROOT_PATH_MODE_INCLUDE"]:
value = os.getenv(env_var, None)
if value:
ret[env_var] = value
if self._conanfile and self._conanfile.deps_cpp_info.sysroot:
sysroot_path = self._conanfile.deps_cpp_info.sysroot
else:
sysroot_path = os.getenv("CONAN_CMAKE_FIND_ROOT_PATH", None)
if sysroot_path:
# Needs to be set here, can't be managed in the cmake generator, CMake needs
# to know about the sysroot before any other thing
ret["CMAKE_SYSROOT"] = sysroot_path.replace("\\", "/")
# Adjust Android stuff
if self._os == "Android":
arch_abi_settings = {"armv8": "arm64-v8a",
"armv7": "armeabi-v7a",
"armv7hf": "armeabi-v7a",
"armv6": "armeabi-v6",
"armv5": "armeabi"
}.get(self._arch,
self._arch)
if arch_abi_settings:
ret["CMAKE_ANDROID_ARCH_ABI"] = arch_abi_settings
logger.info("Setting Cross build flags: %s"
% ", ".join(["%s=%s" % (k, v) for k, v in ret.items()]))
return ret
@property
def is_multi_configuration(self):
""" some IDEs are multi-configuration, as Visual. Makefiles or Ninja are single-conf
"""
if "Visual" in self.generator or "Xcode" in self.generator:
return True
# TODO: complete logic
return False
@property
def command_line(self):
args = ['-G "%s"' % self.generator] if self.generator else []
args.append(self.flags)
args.append('-Wno-dev')
if self.toolset:
args.append('-T "%s"' % self.toolset)
return join_arguments(args)
def _build_type_definition(self):
if self._build_type and not self.is_multi_configuration:
return {'CMAKE_BUILD_TYPE': self._build_type}
return {}
@property
def runtime(self):
return defs_to_string(self._runtime_definition())
def _runtime_definition(self):
if self._runtime:
return {"CONAN_LINK_RUNTIME": "/%s" % self._runtime}
return {}
@property
def build_config(self):
""" cmake --build tool have a --config option for Multi-configuration IDEs
"""
if self._build_type and self.is_multi_configuration:
return "--config %s" % self._build_type
return ""
def _get_cmake_definitions(self):
def add_cmake_flag(cmake_flags, name, flag):
"""
            appends to compiler/linker flags if already present, or just sets them
"""
if flag:
if name not in cmake_flags:
cmake_flags[name] = flag
else:
                    cmake_flags[name] += ' ' + flag
return cmake_flags
ret = OrderedDict()
ret.update(self._build_type_definition())
ret.update(self._runtime_definition())
ret.update(self._cmake_compiler_options())
ret.update(self._cmake_cross_build_defines())
ret.update(self._get_cpp_standard_vars())
ret["CONAN_EXPORTED"] = "1"
if self._compiler:
ret["CONAN_COMPILER"] = self._compiler
if self._compiler_version:
ret["CONAN_COMPILER_VERSION"] = str(self._compiler_version)
# Force compiler flags -- TODO: give as environment/setting parameter?
arch_flag = architecture_flag(compiler=self._compiler, arch=self._arch)
ret = add_cmake_flag(ret, 'CONAN_CXX_FLAGS', arch_flag)
ret = add_cmake_flag(ret, 'CONAN_SHARED_LINKER_FLAGS', arch_flag)
ret = add_cmake_flag(ret, 'CONAN_C_FLAGS', arch_flag)
if self._set_cmake_flags:
ret = add_cmake_flag(ret, 'CMAKE_CXX_FLAGS', arch_flag)
ret = add_cmake_flag(ret, 'CMAKE_SHARED_LINKER_FLAGS', arch_flag)
ret = add_cmake_flag(ret, 'CMAKE_C_FLAGS', arch_flag)
if self._libcxx:
ret["CONAN_LIBCXX"] = self._libcxx
# Shared library
try:
ret["BUILD_SHARED_LIBS"] = "ON" if self._conanfile.options.shared else "OFF"
except ConanException:
pass
# Install to package folder
try:
if self._conanfile.package_folder:
ret["CMAKE_INSTALL_PREFIX"] = self._conanfile.package_folder
except AttributeError:
pass
if str(self._os) in ["Windows", "WindowsStore"] and self._compiler == "Visual Studio":
if self.parallel:
cpus = tools.cpu_count()
ret["CONAN_CXX_FLAGS"] = "/MP%s" % cpus
ret["CONAN_C_FLAGS"] = "/MP%s" % cpus
# fpic
if str(self._os) not in ["Windows", "WindowsStore"]:
fpic = self._conanfile.options.get_safe("fPIC")
if fpic is not None:
shared = self._conanfile.options.get_safe("shared")
ret["CONAN_CMAKE_POSITION_INDEPENDENT_CODE"] = "ON" if (fpic or shared) else "OFF"
# Adjust automatically the module path in case the conanfile is using the cmake_find_package
if "cmake_find_package" in self._conanfile.generators:
ret["CMAKE_MODULE_PATH"] = self._conanfile.install_folder.replace("\\", "/")
# Disable CMake export registry #3070 (CMake installing modules in user home's)
ret["CMAKE_EXPORT_NO_PACKAGE_REGISTRY"] = "ON"
return ret
def _get_dirs(self, source_folder, build_folder, source_dir, build_dir, cache_build_folder):
if (source_folder or build_folder) and (source_dir or build_dir):
raise ConanException("Use 'build_folder'/'source_folder' arguments")
def get_dir(folder, origin):
if folder:
if os.path.isabs(folder):
return folder
return os.path.join(origin, folder)
return origin
if source_dir or build_dir: # OLD MODE
build_ret = build_dir or self.build_dir or self._conanfile.build_folder
source_ret = source_dir or self._conanfile.source_folder
else:
build_ret = get_dir(build_folder, self._conanfile.build_folder)
source_ret = get_dir(source_folder, self._conanfile.source_folder)
if self._conanfile.in_local_cache and cache_build_folder:
build_ret = get_dir(cache_build_folder, self._conanfile.build_folder)
return source_ret, build_ret
def _run(self, command):
if self._compiler == 'Visual Studio' and self.generator in ['Ninja', 'NMake Makefiles', 'NMake Makefiles JOM']:
with tools.vcvars(self._settings, force=True, filter_known_paths=False):
self._conanfile.run(command)
else:
self._conanfile.run(command)
def configure(self, args=None, defs=None, source_dir=None, build_dir=None,
source_folder=None, build_folder=None, cache_build_folder=None,
pkg_config_paths=None):
# TODO: Deprecate source_dir and build_dir in favor of xxx_folder
if not self._conanfile.should_configure:
return
args = args or []
defs = defs or {}
source_dir, self.build_dir = self._get_dirs(source_folder, build_folder,
source_dir, build_dir,
cache_build_folder)
mkdir(self.build_dir)
arg_list = join_arguments([
self.command_line,
args_to_string(args),
defs_to_string(defs),
args_to_string([source_dir])
])
if pkg_config_paths:
pkg_env = {"PKG_CONFIG_PATH":
os.pathsep.join(get_abs_path(f, self._conanfile.install_folder)
for f in pkg_config_paths)}
else:
# If we are using pkg_config generator automate the pcs location, otherwise it could
# read wrong files
set_env = "pkg_config" in self._conanfile.generators \
and "PKG_CONFIG_PATH" not in os.environ
pkg_env = {"PKG_CONFIG_PATH": self._conanfile.install_folder} if set_env else {}
with tools.environment_append(pkg_env):
command = "cd %s && cmake %s" % (args_to_string([self.build_dir]), arg_list)
if platform.system() == "Windows" and self.generator == "MinGW Makefiles":
with tools.remove_from_path("sh"):
self._conanfile.run(command)
else:
self._conanfile.run(command)
def build(self, args=None, build_dir=None, target=None):
if not self._conanfile.should_build:
return
args = args or []
build_dir = build_dir or self.build_dir or self._conanfile.build_folder
if target is not None:
args = ["--target", target] + args
if self.generator and self.parallel:
if "Makefiles" in self.generator and "NMake" not in self.generator:
if "--" not in args:
args.append("--")
args.append("-j%i" % cpu_count())
elif "Visual Studio" in self.generator and \
self._compiler_version and Version(self._compiler_version) >= "10":
if "--" not in args:
args.append("--")
args.append("/m:%i" % cpu_count())
arg_list = join_arguments([
args_to_string([build_dir]),
self.build_config,
args_to_string(args)
])
command = "cmake --build %s" % arg_list
self._run(command)
def install(self, args=None, build_dir=None):
if not self._conanfile.should_install:
return
mkdir(self._conanfile.package_folder)
if not self.definitions.get("CMAKE_INSTALL_PREFIX"):
raise ConanException("CMAKE_INSTALL_PREFIX not defined for 'cmake.install()'\n"
"Make sure 'package_folder' is defined")
self.build(args=args, build_dir=build_dir, target="install")
def test(self, args=None, build_dir=None, target=None):
if not self._conanfile.should_test:
return
if not target:
target = "RUN_TESTS" if self.is_multi_configuration else "test"
self.build(args=args, build_dir=build_dir, target=target)
@property
def verbose(self):
try:
verbose = self.definitions["CMAKE_VERBOSE_MAKEFILE"]
return get_bool_from_text(str(verbose))
except KeyError:
return False
@verbose.setter
def verbose(self, value):
self.definitions["CMAKE_VERBOSE_MAKEFILE"] = "ON" if value else "OFF"
def patch_config_paths(self):
"""
changes references to the absolute path of the installed package and its dependencies in
exported cmake config files to the appropriate conan variable. This makes
most (sensible) cmake config files portable.
For example, if a package foo installs a file called "fooConfig.cmake" to
be used by cmake's find_package method, normally this file will contain
absolute paths to the installed package folder, for example it will contain
a line such as:
SET(Foo_INSTALL_DIR /home/developer/.conan/data/Foo/1.0.0/...)
This will cause cmake find_package() method to fail when someone else
installs the package via conan.
This function will replace such mentions to
SET(Foo_INSTALL_DIR ${CONAN_FOO_ROOT})
which is a variable that is set by conanbuildinfo.cmake, so that find_package()
now correctly works on this conan package.
For dependent packages, if a package foo installs a file called "fooConfig.cmake" to
be used by cmake's find_package method and if it depends to a package bar,
normally this file will contain absolute paths to the bar package folder,
for example it will contain a line such as:
SET_TARGET_PROPERTIES(foo PROPERTIES
INTERFACE_INCLUDE_DIRECTORIES
"/home/developer/.conan/data/Bar/1.0.0/user/channel/id/include")
This function will replace such mentions to
SET_TARGET_PROPERTIES(foo PROPERTIES
INTERFACE_INCLUDE_DIRECTORIES
"${CONAN_BAR_ROOT}/include")
If the install() method of the CMake object in the conan file is used, this
function should be called _after_ that invocation. For example:
def build(self):
cmake = CMake(self)
cmake.configure()
cmake.build()
cmake.install()
cmake.patch_config_paths()
"""
if not self._conanfile.should_install:
return
if not self._conanfile.name:
raise ConanException("cmake.patch_config_paths() can't work without package name. "
"Define name in your recipe")
pf = self.definitions.get("CMAKE_INSTALL_PREFIX")
replstr = "${CONAN_%s_ROOT}" % self._conanfile.name.upper()
allwalk = chain(os.walk(self._conanfile.build_folder), os.walk(self._conanfile.package_folder))
for root, _, files in allwalk:
for f in files:
if f.endswith(".cmake"):
path = os.path.join(root, f)
tools.replace_in_file(path, pf, replstr, strict=False)
# patch paths of dependent packages that are found in any cmake files of the current package
path_content = tools.load(path)
for dep in self._conanfile.deps_cpp_info.deps:
from_str = self._conanfile.deps_cpp_info[dep].rootpath
# try to replace only if from str is found
if path_content.find(from_str) != -1:
dep_str = "${CONAN_%s_ROOT}" % dep.upper()
self._conanfile.output.info("Patching paths for %s: %s to %s" % (dep, from_str, dep_str))
tools.replace_in_file(path, from_str, dep_str, strict=False)
def _get_cpp_standard_vars(self):
if not self._cppstd:
return {}
ret = {}
if self._cppstd.startswith("gnu"):
ret["CONAN_CMAKE_CXX_STANDARD"] = self._cppstd[3:]
ret["CONAN_CMAKE_CXX_EXTENSIONS"] = "ON"
else:
ret["CONAN_CMAKE_CXX_STANDARD"] = self._cppstd
ret["CONAN_CMAKE_CXX_EXTENSIONS"] = "OFF"
ret["CONAN_STD_CXX_FLAG"] = cppstd_flag(self._compiler, self._compiler_version,
self._cppstd)
return ret
|
import urllib.parse
import requests
class ERMSError(Exception):
pass
class ERMS(object):
"""
Possible queries:
/object?id=eq.574
/object?id=in.(574,575)
"""
# endpoints
EP_OBJECT = 'object'
EP_IDENTITY = 'identity'
EP_CONSORTIUM = 'consortium'
EP_CONSORTIUM_MEMBER = 'consortium_member'
EP_ACQUISITION = 'acquisition'
EP_PROCUREMENT = 'procurement'
EP_OFFER = 'offer'
EP_OFFER_SPLIT = 'offer_split'
# object classes
CLS_PERSON = 'Person'
CLS_ORGANIZATION = 'Organization'
CLS_PLATFORM = 'Platform'
def __init__(self, base_url="https://erms.czechelib.cz/api/"):
self.base_url = base_url.rstrip('/')
self.session = requests.Session()
@classmethod
def _construct_query_string(cls, value):
if type(value) in (list, tuple, set):
return 'in.({})'.format(','.join(str(_id) for _id in value))
return f'eq.{value}'
def construct_object_url(self, cls=None, object_id=None):
params = {}
if cls:
params['class'] = self._construct_query_string(cls)
if object_id:
params['id'] = self._construct_query_string(object_id)
else:
params['order'] = 'id'
query = urllib.parse.urlencode(params)
return f'{self.base_url}/{self.EP_OBJECT}?{query}'
def fetch_url(self, url):
response = self.session.get(url)
if response.status_code == 200:
return response.json()
raise ERMSError(response)
def fetch_objects(self, cls=None, object_id=None):
url = self.construct_object_url(cls=cls, object_id=object_id)
data = self.fetch_url(url)
return data
def fetch_endpoint(self, endpoint, object_id=None, **kwargs):
url = f'{self.base_url}/{endpoint}'
params = {}
if object_id:
params['id'] = self._construct_query_string(object_id)
for key, value in kwargs.items():
params[key] = self._construct_query_string(value)
if params:
url += '?{}'.format(urllib.parse.urlencode(params))
return self.fetch_url(url)
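# A hypothetical usage sketch (the IDs mirror the example queries in the
# class docstring):
#
#     erms = ERMS()
#     platforms = erms.fetch_objects(cls=ERMS.CLS_PLATFORM)
#     objects = erms.fetch_objects(object_id=[574, 575])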
|
# AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
class Code(Component):
"""A Code component.
Code is a wrapper for the <code> HTML5 element.
For detailed attribute info see:
https://developer.mozilla.org/en-US/docs/Web/HTML/Element/code
Keyword arguments:
- children (a list of or a singular dash component, string or number; optional):
The children of this component.
- id (string; optional):
The ID of this component, used to identify dash components in
callbacks. The ID needs to be unique across all of the components
in an app.
- accessKey (string; optional):
Keyboard shortcut to activate or add focus to the element.
- aria-* (string; optional):
A wildcard aria attribute.
- className (string; optional):
Often used with CSS to style elements with common properties.
- contentEditable (string; optional):
Indicates whether the element's content is editable.
- contextMenu (string; optional):
Defines the ID of a <menu> element which will serve as the
element's context menu.
- data-* (string; optional):
A wildcard data attribute.
- dir (string; optional):
Defines the text direction. Allowed values are ltr (Left-To-Right)
or rtl (Right-To-Left).
- draggable (string; optional):
Defines whether the element can be dragged.
- hidden (a value equal to: 'hidden', 'HIDDEN' | boolean; optional):
Prevents rendering of given element, while keeping child elements,
e.g. script elements, active.
- key (string; optional):
A unique identifier for the component, used to improve performance
by React.js while rendering components See
https://reactjs.org/docs/lists-and-keys.html for more info.
- lang (string; optional):
Defines the language used in the element.
- loading_state (dict; optional):
Object that holds the loading state object coming from
dash-renderer.
`loading_state` is a dict with keys:
- component_name (string; optional):
Holds the name of the component that is loading.
- is_loading (boolean; optional):
Determines if the component is loading or not.
- prop_name (string; optional):
Holds which property is loading.
- n_clicks (number; default 0):
An integer that represents the number of times that this element
has been clicked on.
- n_clicks_timestamp (number; default -1):
An integer that represents the time (in ms since 1970) at which
n_clicks changed. This can be used to tell which button was
changed most recently.
- role (string; optional):
Defines an explicit role for an element for use by assistive
technologies.
- spellCheck (string; optional):
Indicates whether spell checking is allowed for the element.
- style (dict; optional):
Defines CSS styles which will override styles previously set.
- tabIndex (string; optional):
Overrides the browser's default tab order and follows the one
specified instead.
- title (string; optional):
Text to be displayed in a tooltip when hovering over the element."""
_children_props = []
_base_nodes = ["children"]
_namespace = "dash_html_components"
_type = "Code"
@_explicitize_args
def __init__(
self,
children=None,
id=Component.UNDEFINED,
n_clicks=Component.UNDEFINED,
n_clicks_timestamp=Component.UNDEFINED,
key=Component.UNDEFINED,
accessKey=Component.UNDEFINED,
className=Component.UNDEFINED,
contentEditable=Component.UNDEFINED,
contextMenu=Component.UNDEFINED,
dir=Component.UNDEFINED,
draggable=Component.UNDEFINED,
hidden=Component.UNDEFINED,
lang=Component.UNDEFINED,
role=Component.UNDEFINED,
spellCheck=Component.UNDEFINED,
style=Component.UNDEFINED,
tabIndex=Component.UNDEFINED,
title=Component.UNDEFINED,
loading_state=Component.UNDEFINED,
**kwargs
):
self._prop_names = [
"children",
"id",
"accessKey",
"aria-*",
"className",
"contentEditable",
"contextMenu",
"data-*",
"dir",
"draggable",
"hidden",
"key",
"lang",
"loading_state",
"n_clicks",
"n_clicks_timestamp",
"role",
"spellCheck",
"style",
"tabIndex",
"title",
]
self._valid_wildcard_attributes = ["data-", "aria-"]
self.available_properties = [
"children",
"id",
"accessKey",
"aria-*",
"className",
"contentEditable",
"contextMenu",
"data-*",
"dir",
"draggable",
"hidden",
"key",
"lang",
"loading_state",
"n_clicks",
"n_clicks_timestamp",
"role",
"spellCheck",
"style",
"tabIndex",
"title",
]
self.available_wildcard_properties = ["data-", "aria-"]
_explicit_args = kwargs.pop("_explicit_args")
_locals = locals()
_locals.update(kwargs) # For wildcard attrs and excess named props
args = {k: _locals[k] for k in _explicit_args if k != "children"}
for k in []:
if k not in args:
raise TypeError("Required argument `" + k + "` was not specified.")
super(Code, self).__init__(children=children, **args)
|
"""
Train a language model to generate SMILES.
"""
import argparse
import os
import numpy as np
import pandas as pd
import random
import sys
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from tqdm import tqdm
# suppress Chem.MolFromSmiles error output
from rdkit import rdBase
rdBase.DisableLog('rdApp.error')
# set working directory
git_dir = os.path.expanduser("~/git/low-data-generative-models")
python_dir = git_dir + "/python"
os.chdir(python_dir)
# import classes
from models import RNN, OneHotRNN, EarlyStopping
from datasets import SmilesDataset, SelfiesDataset, SmilesCollate
from functions import decrease_learning_rate, print_update, track_loss, \
sample_smiles, write_smiles
### CLI
parser = argparse.ArgumentParser(
description='Chemical structure language model interface')
# input file
parser.add_argument('--smiles_file', type=str,
help='location of the SMILES file to train on')
parser.add_argument('--selfies', dest='selfies', action='store_true')
parser.set_defaults(selfies=False)
# output files
parser.add_argument('--output_dir', type=str,
help='directory to save trained models to')
# RNN parameters
parser.add_argument('--rnn_type', type=str, choices=['RNN', 'LSTM', 'GRU'],
default='GRU', help='type of language model to train')
parser.add_argument('--embedding_size', type=int, default=128,
help='size of vocabulary embedding')
parser.add_argument('--hidden_size', type=int, default=512,
help='size of language model hidden layers')
parser.add_argument('--n_layers', type=int, default=3,
help='number of layers in language model')
parser.add_argument('--dropout', type=float, default=0,
help='amount of dropout (0-1) to apply to model')
parser.add_argument('--bidirectional', type=bool, default=False,
help='for LSTMs only, train a bidirectional model')
parser.add_argument('--nonlinearity', type=str, choices=['tanh', 'relu'],
default='tanh', help='for RNNs only, nonlinearity to use')
parser.add_argument('--tie_weights', dest='tie_weights',
help='require embedding/dense linear layers use the ' +\
'same weights',
action='store_true')
parser.set_defaults(tie_weights=False)
# optimization parameters
parser.add_argument('--learning_rate', type=float, default=0.001,
help='initial learning rate')
parser.add_argument('--learning_rate_decay', default=None, # type=float,
help='amount (0-1) to decrease learning rate by every ' +\
'fixed number of steps')
parser.add_argument('--learning_rate_decay_steps', default=10000, type=int,
help='# of steps between learning rate decrements')
parser.add_argument('--gradient_clip', default=None, # type=float,
help='amount to which to clip the gradients')
# training schedule
parser.add_argument('--seed', type=int, default=0,
help='seed for random number generator')
parser.add_argument('--batch_size', type=int, default=128,
help='batch size')
parser.add_argument('--max_epochs', type=int, default=1000,
help='maximum number of epochs to train for')
parser.add_argument('--patience', type=int, default=100,
help='patience for early stopping')
# sampling from trained models
parser.add_argument('--sample_idx', type=int, default=0,
help='index of the model being trained (zero-indexed)')
parser.add_argument('--sample_every_epochs', type=int,
                    help='if set, sample SMILES from the trained model ' +
'every n epochs')
parser.add_argument('--sample_every_steps', type=int,
                    help='if set, sample SMILES from the trained model ' +
'every n steps')
parser.add_argument('--log_every_epochs', type=int,
help='log training/validation losses every n epochs')
parser.add_argument('--log_every_steps', type=int,
help='log training/validation losses every n steps')
parser.add_argument('--sample_size', type=int, default=100000,
help='size of each sample from the trained model')
# start with pretrained model
parser.add_argument('--pretrain_model', type=str, default=None,
help='load parameters from a pretrained model')
# enforce a larger vocabulary
parser.add_argument('--vocab_file', type=str, default=None,
help='file containing all tokens in vocabulary')
# for use in grid
parser.add_argument('--stop_if_exists', dest='stop_if_exists',
action='store_true')
parser.set_defaults(stop_if_exists=False)
# parse arguments
args = parser.parse_args()
# manually deal with gradient clipping: the flag arrives as a string (its
# type= is disabled above), so cast to float, falling back to None
try:
args.gradient_clip = float(args.gradient_clip)
except (ValueError, TypeError):
args.gradient_clip = None
# manually deal with learning rate decay the same way (string -> float or None)
try:
args.learning_rate_decay = float(args.learning_rate_decay)
except (ValueError, TypeError):
args.learning_rate_decay = None
# log args (makes searching through the logging directory easier)
for arg in vars(args):
print(arg, ": ", getattr(args, arg), "(", type(getattr(args, arg)), ")")
# optionally stop if output file already exists
if args.selfies:
smiles_filename = "sample-" + str(args.sample_idx + 1) + "-SELFIES.smi"
else:
smiles_filename = "sample-" + str(args.sample_idx + 1) + "-SMILES.smi"
smiles_file = os.path.join(args.output_dir, smiles_filename)
if os.path.isfile(smiles_file) and args.stop_if_exists:
print("output file " + smiles_file + " exists: stopping early")
sys.exit()
# make output directories
if not os.path.isdir(args.output_dir):
try:
os.makedirs(args.output_dir)
except FileExistsError:
pass
## seed all RNGs
torch.manual_seed(args.seed)
random.seed(args.seed)
np.random.seed(args.seed)
if torch.cuda.is_available():
print("using cuda")
torch.cuda.manual_seed_all(args.seed)
# set up dataset
if args.selfies:
dataset = SelfiesDataset(selfies_file=args.smiles_file)
else:
dataset = SmilesDataset(smiles_file=args.smiles_file,
vocab_file=args.vocab_file)
# set up batching
loader = DataLoader(dataset,
batch_size=args.batch_size,
shuffle=True,
drop_last=True,
collate_fn=SmilesCollate(dataset.vocabulary))
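# SmilesCollate is assumed to pad each batch of token sequences to a common
# length and return a (padded_batch, lengths) pair; the training loop below
# unpacks exactly that shape.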
# set up model
if args.embedding_size > 0:
model = RNN(vocabulary=dataset.vocabulary,
rnn_type=args.rnn_type,
embedding_size=args.embedding_size,
hidden_size=args.hidden_size,
n_layers=args.n_layers,
dropout=args.dropout,
bidirectional=args.bidirectional,
tie_weights=args.tie_weights,
nonlinearity=args.nonlinearity)
else:
# no embedding layer (one-hot encoding)
model = OneHotRNN(vocabulary=dataset.vocabulary,
rnn_type=args.rnn_type,
hidden_size=args.hidden_size,
n_layers=args.n_layers,
dropout=args.dropout,
bidirectional=args.bidirectional,
nonlinearity=args.nonlinearity)
# optionally, load model parameters from file
if args.pretrain_model is not None:
model.load_state_dict(torch.load(args.pretrain_model))
# set up optimizer
optimizer = optim.Adam(model.parameters(),
betas=(0.9, 0.999), ## default
eps=1e-08, ## default
lr=args.learning_rate)
# set up early stopping
early_stop = EarlyStopping(patience=args.patience)
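# EarlyStopping (imported above) is assumed to track the best validation loss,
# checkpoint the model to the given file, and set `stop` once no improvement
# has been seen for `patience` checks; `step_at_best` and `best_loss` are read
# after training.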
# set up training schedule file
sched_filename = "training_schedule-" + str(args.sample_idx + 1) + ".csv"
sched_file = os.path.join(args.output_dir, sched_filename)
# iterate over epochs
counter = 0
for epoch in range(args.max_epochs):
# iterate over batches
for batch_idx, batch in tqdm(enumerate(loader), total=len(loader)):
batch, lengths = batch
# increment counter
counter += 1
# calculate loss
log_p = model.loss(batch, lengths)
loss = log_p.mean()
# zero gradients, calculate new gradients, and take a step
optimizer.zero_grad()
loss.backward()
# clip gradient
if args.gradient_clip is not None:
nn.utils.clip_grad_norm_(model.parameters(), args.gradient_clip)
optimizer.step()
# check learning rate decay
if args.learning_rate_decay is not None and \
counter % args.learning_rate_decay_steps == 0:
decrease_learning_rate(optimizer,
multiplier=args.learning_rate_decay)
# print update and write training schedule?
if args.log_every_steps is not None:
if counter % args.log_every_steps == 0:
print_update(model, dataset, epoch, batch_idx + 1, loss.item(),
args.batch_size, selfies=args.selfies)
track_loss(sched_file, model, dataset, epoch,
counter, loss.item(), args.batch_size)
# save SMILES?
if args.sample_every_steps is not None:
if counter % args.sample_every_steps == 0:
sample_smiles(args.output_dir, args.sample_idx, model,
args.sample_size, epoch, counter)
# calculate validation loss
validation, lengths = dataset.get_validation(args.batch_size)
validation_loss = model.loss(validation, lengths).mean().detach()
# check early stopping
model_filename = "model-" + str(args.sample_idx + 1) + ".pt"
model_file = os.path.join(args.output_dir, model_filename)
early_stop(validation_loss.item(), model, model_file, counter)
if early_stop.stop:
break
# print update and write training schedule?
if args.log_every_epochs is not None:
        print_update(model, dataset, epoch, 'NA', loss.item(), args.batch_size,
                     selfies=args.selfies)
track_loss(sched_file, model, dataset, epoch,
counter, loss.item(), args.batch_size)
# save SMILES?
if args.sample_every_epochs is not None:
sample_smiles(args.output_dir, args.sample_idx, model,
args.sample_size, epoch, counter)
if early_stop.stop:
break
# append information about final training step
if args.log_every_epochs is not None or args.log_every_steps is not None:
sched = pd.DataFrame({'epoch': [None],
'step': [early_stop.step_at_best],
'outcome': ['training loss'],
'value': [early_stop.best_loss]})
sched.to_csv(sched_file, index=False, mode='a', header=False)
# load the best model
model.load_state_dict(torch.load(model_file))
model.eval() ## enable evaluation mode
# sample a set of SMILES from the final, trained model
sampled_smiles = []
while len(sampled_smiles) < args.sample_size:
sampled_smiles.extend(model.sample(args.batch_size, return_smiles=True))
# write sampled SMILES
write_smiles(sampled_smiles, smiles_file)
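# Example invocation (script name, paths, and settings are all illustrative):
#   python train_model.py \
#       --smiles_file data/chembl_train.smi \
#       --output_dir ~/models/run-1 \
#       --rnn_type GRU --embedding_size 128 --hidden_size 512 \
#       --learning_rate_decay 0.99 --log_every_steps 500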
|
"""Code for checking and inferring types."""
import collections
import logging
import re
import subprocess
from typing import Any, Dict, Union
from pytype import abstract
from pytype import abstract_utils
from pytype import convert_structural
from pytype import debug
from pytype import function
from pytype import metrics
from pytype import special_builtins
from pytype import state as frame_state
from pytype import vm
from pytype.overlays import typing_overlay
from pytype.pytd import builtins
from pytype.pytd import escape
from pytype.pytd import optimize
from pytype.pytd import pytd
from pytype.pytd import pytd_utils
from pytype.pytd import visitors
from pytype.typegraph import cfg
log = logging.getLogger(__name__)
# Most interpreter functions (including lambdas) need to be analyzed as
# stand-alone functions. The exceptions are comprehensions and generators, which
# have names like "<listcomp>" and "<genexpr>".
_SKIP_FUNCTION_RE = re.compile("<(?!lambda).+>$")
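# For example, "<listcomp>" and "<genexpr>" match and are skipped, while
# "<lambda>" is rejected by the negative lookahead and analyzed normally.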
CallRecord = collections.namedtuple(
"CallRecord", ["node", "function", "signatures", "positional_arguments",
"keyword_arguments", "return_value"])
# How deep to follow call chains:
INIT_MAXIMUM_DEPTH = 4 # during module loading
MAXIMUM_DEPTH = 3 # during non-quick analysis
QUICK_CHECK_MAXIMUM_DEPTH = 2 # during quick checking
QUICK_INFER_MAXIMUM_DEPTH = 1 # during quick inference
class _Initializing:
pass
class CallTracer(vm.VirtualMachine):
"""Virtual machine that records all function calls.
Attributes:
exitpoint: A CFG node representing the program exit. Needs to be set before
analyze_types.
"""
_CONSTRUCTORS = ("__new__", "__init__")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._unknowns = {}
self._calls = set()
self._method_calls = set()
# Used by init_class.
self._instance_cache: Dict[Any, Union[_Initializing, cfg.Variable]] = {}
# Used by call_init. Can differ from _instance_cache because we also call
# __init__ on classes not initialized via init_class.
self._initialized_instances = set()
self._interpreter_functions = []
self._interpreter_classes = []
self._analyzed_functions = set()
self._analyzed_classes = set()
self._generated_classes = {}
self.exitpoint = None
def create_varargs(self, node):
value = abstract.Instance(self.convert.tuple_type, self)
value.merge_instance_type_parameter(
node, abstract_utils.T, self.convert.create_new_unknown(node))
return value.to_variable(node)
def create_kwargs(self, node):
key_type = self.convert.primitive_class_instances[str].to_variable(node)
value_type = self.convert.create_new_unknown(node)
kwargs = abstract.Instance(self.convert.dict_type, self)
kwargs.merge_instance_type_parameter(node, abstract_utils.K, key_type)
kwargs.merge_instance_type_parameter(node, abstract_utils.V, value_type)
return kwargs.to_variable(node)
def create_method_arguments(self, node, method, use_defaults=False):
"""Create arguments for the given method.
Creates Unknown objects as arguments for the given method. Note that we
don't need to take parameter annotations into account as
InterpreterFunction.call() will take care of that.
Args:
node: The current node.
method: An abstract.InterpreterFunction.
use_defaults: Whether to use parameter defaults for arguments. When True,
unknown arguments are created with force=False, as it is fine to use
Unsolvable rather than Unknown objects for type-checking defaults.
Returns:
A tuple of a node and a function.Args object.
"""
args = []
num_posargs = method.argcount(node)
num_posargs_no_default = num_posargs - len(method.defaults)
for i in range(num_posargs):
default_idx = i - num_posargs_no_default
if use_defaults and default_idx >= 0:
arg = method.defaults[default_idx]
else:
arg = self.convert.create_new_unknown(node, force=not use_defaults)
args.append(arg)
kws = {}
for key in method.signature.kwonly_params:
if use_defaults and key in method.kw_defaults:
kws[key] = method.kw_defaults[key]
else:
kws[key] = self.convert.create_new_unknown(node, force=not use_defaults)
starargs = self.create_varargs(node) if method.has_varargs() else None
starstarargs = self.create_kwargs(node) if method.has_kwargs() else None
return node, function.Args(posargs=tuple(args),
namedargs=kws,
starargs=starargs,
starstarargs=starstarargs)
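  # Illustration (hypothetical method): for `def f(a, b=1, *, c=2)` with
  # use_defaults=True this yields posargs (Unknown, 1) and namedargs {'c': 2};
  # with use_defaults=False every slot gets a fresh Unknown.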
def call_function_with_args(self, node, val, args):
"""Call a function.
Args:
node: The given node.
val: A cfg.Binding containing the function.
args: A function.Args object.
Returns:
A tuple of (1) a node and (2) a cfg.Variable of the return value.
"""
fvar = val.AssignToNewVariable(node)
with val.data.record_calls():
new_node, ret = self.call_function_in_frame(node, fvar, *args)
return new_node, ret
def call_function_in_frame(self, node, var, args, kwargs,
starargs, starstarargs):
frame = frame_state.SimpleFrame(node=node)
self.push_frame(frame)
log.info("Analyzing %r", [v.name for v in var.data])
state = frame_state.FrameState.init(node, self)
state, ret = self.call_function_with_state(
state, var, args, kwargs, starargs, starstarargs)
self.pop_frame(frame)
return state.node, ret
def _maybe_fix_classmethod_cls_arg(self, node, cls, func, args):
sig = func.signature
if (args.posargs and sig.param_names and
(sig.param_names[0] not in sig.annotations)):
# fix "cls" parameter
return args._replace(
posargs=(cls.AssignToNewVariable(node),) + args.posargs[1:])
else:
return args
def maybe_analyze_method(self, node, val, cls=None):
method = val.data
fname = val.data.name
if isinstance(method, abstract.INTERPRETER_FUNCTION_TYPES):
self._analyzed_functions.add(method.get_first_opcode())
if (not self.options.analyze_annotated and
(method.signature.has_return_annotation or method.has_overloads) and
fname.rsplit(".", 1)[-1] not in self._CONSTRUCTORS):
log.info("%r has annotations, not analyzing further.", fname)
else:
for f in method.iter_signature_functions():
node, args = self.create_method_arguments(node, f)
if f.is_classmethod and cls:
args = self._maybe_fix_classmethod_cls_arg(node, cls, f, args)
node, _ = self.call_function_with_args(node, val, args)
return node
def _call_with_fake_args(self, node0, funcv):
"""Attempt to call the given function with made-up arguments."""
    # TODO(tsudol): If we expand this beyond __init__, we need to handle
    # DictKeyMissing.
nodes = []
rets = []
for funcb in funcv.bindings:
func = funcb.data
log.info("Trying %s with fake arguments", func)
if isinstance(func, abstract.INTERPRETER_FUNCTION_TYPES):
node1, args = self.create_method_arguments(node0, func)
# Once the args are generated, try calling the function.
# call_function will check fallback_to_unsolvable if a DictKeyMissing or
# FailedFunctionCall error is raised when the target function is called.
# DictKeyMissing doesn't trigger call_with_fake_args, so that shouldn't
# be raised again, and generating fake arguments should avoid any
# FailedFunctionCall errors. To prevent an infinite recursion loop, set
# fallback_to_unsolvable to False just in case.
# This means any additional errors that may be raised will be passed to
# the call_function that called this method in the first place.
node2, ret = self.call_function(node1,
funcb.AssignToNewVariable(),
args,
fallback_to_unsolvable=False)
nodes.append(node2)
rets.append(ret)
if nodes:
ret = self.join_variables(node0, rets)
node = self.join_cfg_nodes(nodes)
if ret.bindings:
return node, ret
else:
node = node0
log.info("Unable to generate fake arguments for %s", funcv)
return node, self.new_unsolvable(node)
def analyze_method_var(self, node0, name, var, cls=None):
log.info("Analyzing %s", name)
node1 = node0.ConnectNew(name)
for val in var.bindings:
node2 = self.maybe_analyze_method(node1, val, cls)
node2.ConnectTo(node0)
return node0
def bind_method(self, node, name, methodvar, instance_var):
bound = self.program.NewVariable()
for m in methodvar.Data(node):
if isinstance(m, special_builtins.ClassMethodInstance):
m = m.func.data[0]
is_cls = True
else:
is_cls = (m.isinstance_InterpreterFunction() and m.is_classmethod)
bound.AddBinding(m.property_get(instance_var, is_cls), [], node)
return bound
def _instantiate_binding(self, node0, cls, container):
"""Instantiate a class binding."""
node1, new = cls.data.get_own_new(node0, cls)
if not new or (
any(not isinstance(f, abstract.InterpreterFunction) for f in new.data)):
# This assumes that any inherited __new__ method defined in a pyi file
# returns an instance of the current class.
return node0, cls.data.instantiate(node0, container=container)
instance = self.program.NewVariable()
nodes = []
for b in new.bindings:
self._analyzed_functions.add(b.data.get_first_opcode())
node2, args = self.create_method_arguments(node1, b.data)
args = self._maybe_fix_classmethod_cls_arg(node0, cls, b.data, args)
node3 = node2.ConnectNew()
node4, ret = self.call_function_with_args(node3, b, args)
instance.PasteVariable(ret)
nodes.append(node4)
return self.join_cfg_nodes(nodes), instance
def _instantiate_var(self, node, clsv, container):
"""Build an (dummy) instance from a class, for analyzing it."""
n = self.program.NewVariable()
for cls in clsv.Bindings(node, strict=False):
node, var = self._instantiate_binding(node, cls, container)
n.PasteVariable(var)
return node, n
def _mark_maybe_missing_members(self, values):
"""Set maybe_missing_members to True on these values and their type params.
Args:
values: A list of BaseValue objects. On every instance among
the values, recursively set maybe_missing_members to True on the
instance and its type parameters.
"""
values = list(values)
seen = set()
while values:
v = values.pop(0)
if v not in seen:
seen.add(v)
if isinstance(v, abstract.SimpleValue):
v.maybe_missing_members = True
for child in v.instance_type_parameters.values():
values.extend(child.data)
def init_class(self, node, cls, container=None, extra_key=None):
"""Instantiate a class, and also call __init__.
Calling __init__ can be expensive, so this method caches its created
instances. If you don't need __init__ called, use cls.instantiate instead.
Args:
node: The current node.
cls: The class to instantiate.
container: Optionally, a container to pass to the class's instantiate()
method, so that type parameters in the container's template are
instantiated to TypeParameterInstance.
extra_key: Optionally, extra information about the location at which the
        instantiation occurs. By default, this method keys on the current opcode
and the class, which sometimes isn't enough to disambiguate callers
that shouldn't get back the same cached instance.
Returns:
A tuple of node and instance variable.
"""
key = (self.frame and self.frame.current_opcode, extra_key, cls)
instance = self._instance_cache.get(key)
if not instance or isinstance(instance, _Initializing):
clsvar = cls.to_variable(node)
node, instance = self._instantiate_var(node, clsvar, container)
if key in self._instance_cache:
# We've encountered a recursive pattern such as
# class A:
# def __init__(self, x: "A"): ...
# Calling __init__ again would lead to an infinite loop, so
# we instead create an incomplete instance that will be
# overwritten later. Note that we have to create a new
# instance rather than using the one that we're already in
# the process of initializing - otherwise, setting
# maybe_missing_members to True would cause pytype to ignore
# all attribute errors on self in __init__.
self._mark_maybe_missing_members(instance.data)
else:
self._instance_cache[key] = _Initializing()
node = self.call_init(node, instance)
self._instance_cache[key] = instance
return node, instance
def _call_method(self, node, binding, method_name):
node, method = self.attribute_handler.get_attribute(
node, binding.data.get_class(), method_name, binding)
if method:
bound_method = self.bind_method(
node, method_name, method, binding.AssignToNewVariable())
node = self.analyze_method_var(node, method_name, bound_method)
return node
def _call_init_on_binding(self, node, b):
if isinstance(b.data, abstract.SimpleValue):
for param in b.data.instance_type_parameters.values():
node = self.call_init(node, param)
node = self._call_method(node, b, "__init__")
cls = b.data.get_class()
if isinstance(cls, abstract.InterpreterClass):
      # Call any additional initializers the class has registered.
for method in cls.additional_init_methods:
node = self._call_method(node, b, method)
return node
def call_init(self, node, instance):
# Call __init__ on each binding.
for b in instance.bindings:
if b.data in self._initialized_instances:
continue
self._initialized_instances.add(b.data)
node = self._call_init_on_binding(node, b)
return node
def reinitialize_if_initialized(self, node, instance):
if instance in self._initialized_instances:
self._call_init_on_binding(node, instance.to_binding(node))
def analyze_class(self, node, val):
self._analyzed_classes.add(val.data)
node, instance = self.init_class(node, val.data)
good_instances = [b for b in instance.bindings if val.data == b.data.cls]
if not good_instances:
# __new__ returned something that's not an instance of our class.
instance = val.data.instantiate(node)
node = self.call_init(node, instance)
elif len(good_instances) != len(instance.bindings):
# __new__ returned some extra possibilities we don't need.
instance = self.join_bindings(node, good_instances)
for instance_value in instance.data:
val.data.register_canonical_instance(instance_value)
methods = sorted(val.data.members.items())
while methods:
name, methodvar = methods.pop(0)
if name in self._CONSTRUCTORS:
continue # We already called this method during initialization.
for v in methodvar.data:
if (self.options.bind_properties and
isinstance(v, special_builtins.PropertyInstance)):
for m in (v.fget, v.fset, v.fdel):
if m:
methods.insert(0, (name, m))
b = self.bind_method(node, name, methodvar, instance)
node = self.analyze_method_var(node, name, b, val)
return node
def analyze_function(self, node0, val):
if val.data.is_attribute_of_class:
# We'll analyze this function as part of a class.
log.info("Analyze functions: Skipping class method %s", val.data.name)
else:
node1 = node0.ConnectNew(val.data.name)
node2 = self.maybe_analyze_method(node1, val)
node2.ConnectTo(node0)
return node0
def _should_analyze_as_interpreter_function(self, data):
# We record analyzed functions by opcode rather than function object. The
# two ways of recording are equivalent except for closures, which are
# re-generated when the variables they close over change, but we don't want
# to re-analyze them.
return (isinstance(data, abstract.InterpreterFunction) and
not data.is_overload and
not data.is_class_builder and
data.get_first_opcode() not in self._analyzed_functions and
not _SKIP_FUNCTION_RE.search(data.name))
def analyze_toplevel(self, node, defs):
    for name, var in sorted(defs.items()):  # sort, for determinism
if not self._is_typing_member(name, var):
for value in var.bindings:
if isinstance(value.data, abstract.InterpreterClass):
new_node = self.analyze_class(node, value)
elif (isinstance(value.data, abstract.INTERPRETER_FUNCTION_TYPES) and
not value.data.is_overload):
new_node = self.analyze_function(node, value)
else:
continue
if new_node is not node:
new_node.ConnectTo(node)
# Now go through all functions and classes we haven't analyzed yet.
# These are typically hidden under a decorator.
# Go through classes first so that the `is_attribute_of_class` will
# be set for all functions in class.
for c in self._interpreter_classes:
for value in c.bindings:
if (isinstance(value.data, abstract.InterpreterClass) and
value.data not in self._analyzed_classes):
node = self.analyze_class(node, value)
for f in self._interpreter_functions:
for value in f.bindings:
if self._should_analyze_as_interpreter_function(value.data):
node = self.analyze_function(node, value)
return node
def analyze(self, node, defs, maximum_depth):
assert not self.frame
self.maximum_depth = maximum_depth
self._analyzing = True
node = node.ConnectNew(name="Analyze")
return self.analyze_toplevel(node, defs)
def trace_unknown(self, name, unknown_binding):
self._unknowns[name] = unknown_binding
def trace_call(self, node, func, sigs, posargs, namedargs, result):
"""Add an entry into the call trace.
Args:
node: The CFG node right after this function call.
func: A cfg.Binding of a function that was called.
sigs: The signatures that the function might have been called with.
posargs: The positional arguments, an iterable over cfg.Value.
namedargs: The keyword arguments, a dict mapping str to cfg.Value.
result: A Variable of the possible result values.
"""
log.debug("Logging call to %r with %d args, return %r",
func, len(posargs), result)
args = tuple(posargs)
kwargs = tuple((namedargs or {}).items())
record = CallRecord(node, func, sigs, args, kwargs, result)
if isinstance(func.data, abstract.BoundPyTDFunction):
self._method_calls.add(record)
elif isinstance(func.data, abstract.PyTDFunction):
self._calls.add(record)
def trace_functiondef(self, f):
self._interpreter_functions.append(f)
def trace_classdef(self, c):
self._interpreter_classes.append(c)
def trace_namedtuple(self, nt):
# All namedtuple instances with the same name are equal, so it's fine to
# overwrite previous instances.
self._generated_classes[nt.name] = nt
def pytd_classes_for_unknowns(self):
classes = []
for name, val in self._unknowns.items():
if val in val.variable.Filter(self.exitpoint, strict=False):
classes.append(val.data.to_structural_def(self.exitpoint, name))
return classes
def pytd_for_types(self, defs):
# If a variable is annotated, we'll always output that type.
annotated_names = set()
data = []
pytd_convert = self.convert.pytd_convert
annots = abstract_utils.get_annotations_dict(defs)
for name, t in pytd_convert.annotations_to_instance_types(
self.exitpoint, annots):
annotated_names.add(name)
data.append(pytd.Constant(name, t))
for name, var in defs.items():
if (name in abstract_utils.TOP_LEVEL_IGNORE or name in annotated_names or
self._is_typing_member(name, var)):
continue
options = var.FilteredData(self.exitpoint, strict=False)
if (len(options) > 1 and
not all(isinstance(o, abstract.FUNCTION_TYPES) for o in options)):
if all(isinstance(o, (abstract.ParameterizedClass,
abstract.TypeParameter,
abstract.Union)) for o in options
) and self.options.preserve_union_macros: # type alias
data.append(pytd_utils.JoinTypes(t.to_pytd_def(self.exitpoint, name)
for t in options))
else:
# It's ambiguous whether this is a type, a function or something
# else, so encode it as a constant.
combined_types = pytd_utils.JoinTypes(t.to_type(self.exitpoint)
for t in options)
data.append(pytd.Constant(name, combined_types))
elif options:
for option in options:
try:
d = option.to_pytd_def(self.exitpoint, name) # Deep definition
except NotImplementedError:
d = option.to_type(self.exitpoint) # Type only
if isinstance(d, pytd.NothingType):
if isinstance(option, abstract.Empty):
d = pytd.AnythingType()
else:
assert isinstance(option, typing_overlay.NoReturn)
if isinstance(d, pytd.Type) and not isinstance(d, pytd.TypeParameter):
data.append(pytd.Constant(name, d))
else:
data.append(d)
else:
log.error("No visible options for %s", name)
data.append(pytd.Constant(name, pytd.AnythingType()))
return pytd_utils.WrapTypeDeclUnit("inferred", data)
@staticmethod
def _call_traces_to_function(call_traces, name_transform=lambda x: x):
funcs = collections.defaultdict(pytd_utils.OrderedSet)
for node, func, sigs, args, kws, retvar in call_traces:
# The lengths may be different in the presence of optional and kw args.
arg_names = max((sig.get_positional_names() for sig in sigs), key=len)
for i in range(len(arg_names)):
if not isinstance(func.data, abstract.BoundFunction) or i > 0:
arg_names[i] = function.argname(i)
arg_types = (a.data.to_type(node) for a in args)
ret = pytd_utils.JoinTypes(t.to_type(node) for t in retvar.data)
starargs = None
starstarargs = None
funcs[func.data.name].add(pytd.Signature(
tuple(pytd.Parameter(n, t, False, False, None)
for n, t in zip(arg_names, arg_types)) +
tuple(pytd.Parameter(name, a.data.to_type(node), False, False, None)
for name, a in kws),
starargs, starstarargs,
ret, exceptions=(), template=()))
functions = []
for name, signatures in funcs.items():
functions.append(pytd.Function(name_transform(name), tuple(signatures),
pytd.MethodTypes.METHOD))
return functions
def _is_typing_member(self, name, var):
for module_name in ("typing", "typing_extensions"):
if module_name not in self.loaded_overlays:
continue
module = self.loaded_overlays[module_name].get_module(name)
if name in module.members and module.members[name].data == var.data:
return True
return False
def pytd_functions_for_call_traces(self):
return self._call_traces_to_function(self._calls, escape.pack_partial)
def pytd_classes_for_call_traces(self):
class_to_records = collections.defaultdict(list)
for call_record in self._method_calls:
args = call_record.positional_arguments
if not any(isinstance(a.data, abstract.Unknown) for a in args):
# We don't need to record call signatures that don't involve
# unknowns - there's nothing to solve for.
continue
cls = args[0].data.get_class()
if isinstance(cls, abstract.PyTDClass):
class_to_records[cls].append(call_record)
classes = []
for cls, call_records in class_to_records.items():
full_name = cls.module + "." + cls.name if cls.module else cls.name
classes.append(pytd.Class(
name=escape.pack_partial(full_name),
metaclass=None,
parents=(pytd.NamedType("builtins.object"),), # not used in solver
methods=tuple(self._call_traces_to_function(call_records)),
constants=(),
classes=(),
decorators=(),
slots=None,
template=(),
))
return classes
def pytd_classes_for_namedtuple_instances(self):
return tuple(v.generate_ast() for v in self._generated_classes.values())
def compute_types(self, defs):
classes = (tuple(self.pytd_classes_for_unknowns()) +
tuple(self.pytd_classes_for_call_traces()) +
self.pytd_classes_for_namedtuple_instances())
functions = tuple(self.pytd_functions_for_call_traces())
aliases = () # aliases are instead recorded as constants
ty = pytd_utils.Concat(
self.pytd_for_types(defs),
pytd_utils.CreateModule("unknowns", classes=classes,
functions=functions, aliases=aliases))
ty = ty.Visit(optimize.CombineReturnsAndExceptions())
ty = ty.Visit(optimize.PullInMethodClasses())
ty = ty.Visit(visitors.DefaceUnresolved(
[ty, self.loader.concat_all()], escape.UNKNOWN))
return ty.Visit(visitors.AdjustTypeParameters())
def _check_return(self, node, actual, formal):
if not self.options.report_errors:
return True
views = abstract_utils.get_views([actual], node)
# Check for typevars in the return value first, since bad_matches
# expects not to get any.
bad = [view for view in views
if actual in view and view[actual].data.formal]
if not bad:
bad = self.matcher(node).bad_matches(actual, formal)
if bad:
self.errorlog.bad_return_type(
self.frames, node, formal, actual, bad)
return not bad
def check_types(src, filename, errorlog, options, loader,
deep=True, init_maximum_depth=INIT_MAXIMUM_DEPTH,
maximum_depth=None, **kwargs):
"""Verify the Python code."""
tracer = CallTracer(errorlog=errorlog, options=options,
generate_unknowns=False, loader=loader, **kwargs)
loc, defs = tracer.run_program(src, filename, init_maximum_depth)
snapshotter = metrics.get_metric("memory", metrics.Snapshot)
snapshotter.take_snapshot("analyze:check_types:tracer")
if deep:
if maximum_depth is None:
maximum_depth = (
QUICK_CHECK_MAXIMUM_DEPTH if options.quick else MAXIMUM_DEPTH)
tracer.analyze(loc, defs, maximum_depth=maximum_depth)
snapshotter.take_snapshot("analyze:check_types:post")
_maybe_output_debug(options, tracer.program)
def infer_types(src, errorlog, options, loader,
filename=None, deep=True, init_maximum_depth=INIT_MAXIMUM_DEPTH,
show_library_calls=False, maximum_depth=None, tracer_vm=None,
**kwargs):
"""Given Python source return its types.
Args:
src: A string containing Python source code.
errorlog: Where error messages go. Instance of errors.ErrorLog.
options: config.Options object
loader: A load_pytd.Loader instance to load PYI information.
filename: Filename of the program we're parsing.
deep: If True, analyze all functions, even the ones not called by the main
execution flow.
init_maximum_depth: Depth of analysis during module loading.
show_library_calls: If True, call traces are kept in the output.
maximum_depth: Depth of the analysis. Default: unlimited.
tracer_vm: An instance of CallTracer, in case the caller wants to
instantiate and retain the vm used for type inference.
**kwargs: Additional parameters to pass to vm.VirtualMachine
Returns:
A tuple of (ast: TypeDeclUnit, builtins: TypeDeclUnit)
Raises:
AssertionError: In case of a bad parameter combination.
"""
# If the caller has passed in a vm, use that.
if tracer_vm:
assert isinstance(tracer_vm, CallTracer)
tracer = tracer_vm
else:
tracer = CallTracer(errorlog=errorlog, options=options,
generate_unknowns=options.protocols,
store_all_calls=not deep, loader=loader, **kwargs)
loc, defs = tracer.run_program(src, filename, init_maximum_depth)
log.info("===Done running definitions and module-level code===")
snapshotter = metrics.get_metric("memory", metrics.Snapshot)
snapshotter.take_snapshot("analyze:infer_types:tracer")
if deep:
if maximum_depth is None:
if not options.quick:
maximum_depth = MAXIMUM_DEPTH
elif options.analyze_annotated:
# Since there's no point in analyzing annotated functions for inference,
# the presence of this option means that the user wants checking, too.
maximum_depth = QUICK_CHECK_MAXIMUM_DEPTH
else:
maximum_depth = QUICK_INFER_MAXIMUM_DEPTH
tracer.exitpoint = tracer.analyze(loc, defs, maximum_depth)
else:
tracer.exitpoint = loc
snapshotter.take_snapshot("analyze:infer_types:post")
ast = tracer.compute_types(defs)
ast = tracer.loader.resolve_ast(ast)
if tracer.has_unknown_wildcard_imports or any(
a in defs for a in abstract_utils.DYNAMIC_ATTRIBUTE_MARKERS):
if "__getattr__" not in ast:
ast = pytd_utils.Concat(
ast, builtins.GetDefaultAst(options.python_version))
  # If merged with the other if-statement above, this triggers
  # "ValueError: Unresolved class" when loading from the protocols file.
if options.protocols:
protocols_pytd = tracer.loader.import_name("protocols")
else:
protocols_pytd = None
builtins_pytd = tracer.loader.concat_all()
# Insert type parameters, where appropriate
ast = ast.Visit(visitors.CreateTypeParametersForSignatures())
if options.protocols:
log.info("=========== PyTD to solve =============\n%s",
pytd_utils.Print(ast))
ast = convert_structural.convert_pytd(ast, builtins_pytd, protocols_pytd)
elif not show_library_calls:
log.info("Solving is turned off. Discarding call traces.")
# Rename remaining "~unknown" to "?"
ast = ast.Visit(visitors.RemoveUnknownClasses())
# Remove "~list" etc.:
ast = convert_structural.extract_local(ast)
_maybe_output_debug(options, tracer.program)
return ast, builtins_pytd
def _maybe_output_debug(options, program):
"""Maybe emit debugging output."""
if options.output_cfg or options.output_typegraph:
dot = debug.program_to_dot(program, set([]), bool(options.output_cfg))
svg_file = options.output_cfg or options.output_typegraph
with subprocess.Popen(
["/usr/bin/dot", "-T", "svg", "-o", svg_file],
stdin=subprocess.PIPE, universal_newlines=True) as proc:
(_, stderr) = proc.communicate(dot)
if stderr:
log.info("Failed to create %s: %s", svg_file, stderr)
if options.output_debug:
text = debug.program_to_text(program)
if options.output_debug == "-":
log.info("=========== Program Dump =============\n%s", text)
else:
with options.open_function(options.output_debug, "w") as fi:
fi.write(text)
|
""" pickle can serialized python objects into a stream of bytes and
deserialize bytes back into objects.
Note:
by design, pickle is unsafe!
"""
import pickle
state_path = 'game_state.bin'
class GameState(object):
def __init__(self):
self.level = 0
self.lives = 4
def save_game(state):
with open(state_path, 'wb') as f:
pickle.dump(state, f)
def load_game():
state_after = {}
with open(state_path, 'rb') as f:
state_after = pickle.load(f)
return state_after
def example_one():
"""
>>>
{'level': 1, 'lives': 3}
"""
state = GameState()
state.level += 1
state.lives -= 1
save_game(state)
saved_state = load_game()
print(saved_state.__dict__)
class GameStateNew(object):
def __init__(self):
self.level = 0
self.lives = 4
self.points = 0
def example_two():
"""
>>>
{'level': 0, 'lives': 4, 'points': 0}
"""
state = GameStateNew()
serialized = pickle.dumps(state) # dumps
state_after = pickle.loads(serialized) # loads
print(state_after.__dict__)
def example_three():
"""
    - What happens when we try to load an older saved GameState after the
      definition of GameState has changed to GameStateNew?
"""
state_after = load_game()
try:
assert isinstance(state_after, GameStateNew)
except AssertionError:
print('AssertionError: We knew')
class GameStateDefaults(object):
def __init__(self, level=0, lives=4, points=0):
self.level = level
self.lives = lives
self.points = points
def pickle_game_state(game_state):
kwargs = game_state.__dict__
return unpickle_game_state, (kwargs, )
def unpickle_game_state(kwargs):
return GameStateDefaults(**kwargs)
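# Assumed completion: the two helpers above are never wired up, so register
# them with copyreg; pickling then routes through unpickle_game_state, and any
# fields missing from old payloads fall back to constructor defaults.
import copyreg
copyreg.pickle(GameStateDefaults, pickle_game_state)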
class GameStateDefaultsMagic(object):
def __init__(self, level=0, lives=4, points=0, magic=5):
self.level = level
self.lives = lives
self.points = points
self.magic = magic
def example_four():
"""
>>>
{'level': 0, 'lives': 4, 'points': 1000}
"""
state = GameStateDefaults()
state.points += 1000
serialized = pickle.dumps(state)
state_after = pickle.loads(serialized)
print(state_after.__dict__)
def main():
example_four()
    # TODO: things got unclear from this point onwards; the book goes on to
    # describe removing fields (e.g. lives) from the game state and versioning
    # the serialized format. Moving on to item 45.
if __name__ == '__main__':
main()
|
"""Camera platform that receives images through HTTP POST."""
from __future__ import annotations
import asyncio
from collections import deque
from datetime import timedelta
import logging
import aiohttp
import async_timeout
import voluptuous as vol
from homeassistant.components import webhook
from homeassistant.components.camera import PLATFORM_SCHEMA, STATE_IDLE, Camera
from homeassistant.components.camera.const import DOMAIN
from homeassistant.const import CONF_NAME, CONF_TIMEOUT, CONF_WEBHOOK_ID
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
CONF_BUFFER_SIZE = "buffer"
CONF_IMAGE_FIELD = "field"
DEFAULT_NAME = "Push Camera"
ATTR_FILENAME = "filename"
ATTR_LAST_TRIP = "last_trip"
PUSH_CAMERA_DATA = "push_camera"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_BUFFER_SIZE, default=1): cv.positive_int,
vol.Optional(CONF_TIMEOUT, default=timedelta(seconds=5)): vol.All(
cv.time_period, cv.positive_timedelta
),
vol.Optional(CONF_IMAGE_FIELD, default="image"): cv.string,
vol.Required(CONF_WEBHOOK_ID): cv.string,
}
)
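# Example configuration.yaml entry (platform key and values illustrative):
#
#   camera:
#     - platform: push
#       name: Driveway camera
#       webhook_id: driveway_cam_token
#       buffer: 3
#       field: image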
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the Push Camera platform."""
if PUSH_CAMERA_DATA not in hass.data:
hass.data[PUSH_CAMERA_DATA] = {}
webhook_id = config.get(CONF_WEBHOOK_ID)
cameras = [
PushCamera(
hass,
config[CONF_NAME],
config[CONF_BUFFER_SIZE],
config[CONF_TIMEOUT],
config[CONF_IMAGE_FIELD],
webhook_id,
)
]
async_add_entities(cameras)
async def handle_webhook(hass, webhook_id, request):
"""Handle incoming webhook POST with image files."""
try:
async with async_timeout.timeout(5):
data = dict(await request.post())
except (asyncio.TimeoutError, aiohttp.web.HTTPException) as error:
_LOGGER.error("Could not get information from POST <%s>", error)
return
camera = hass.data[PUSH_CAMERA_DATA][webhook_id]
if camera.image_field not in data:
_LOGGER.warning("Webhook call without POST parameter <%s>", camera.image_field)
return
await camera.update_image(
data[camera.image_field].file.read(), data[camera.image_field].filename
)
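# A producer can then push frames to the generated webhook URL, e.g.
# (hypothetical host and id):
#   curl -F "image=@frame.jpg" \
#        http://homeassistant.local:8123/api/webhook/driveway_cam_token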
class PushCamera(Camera):
"""The representation of a Push camera."""
def __init__(self, hass, name, buffer_size, timeout, image_field, webhook_id):
"""Initialize push camera component."""
super().__init__()
self._name = name
self._last_trip = None
self._filename = None
self._expired_listener = None
self._timeout = timeout
self.queue = deque([], buffer_size)
self._current_image = None
self._image_field = image_field
self.webhook_id = webhook_id
self.webhook_url = webhook.async_generate_url(hass, webhook_id)
async def async_added_to_hass(self):
"""Call when entity is added to hass."""
self.hass.data[PUSH_CAMERA_DATA][self.webhook_id] = self
try:
webhook.async_register(
self.hass, DOMAIN, self.name, self.webhook_id, handle_webhook
)
except ValueError:
_LOGGER.error(
"In <%s>, webhook_id <%s> already used", self.name, self.webhook_id
)
@property
def image_field(self):
"""HTTP field containing the image file."""
return self._image_field
async def update_image(self, image, filename):
"""Update the camera image."""
if self.state == STATE_IDLE:
self._attr_is_recording = True
self._last_trip = dt_util.utcnow()
self.queue.clear()
self._filename = filename
self.queue.appendleft(image)
@callback
def reset_state(now):
"""Set state to idle after no new images for a period of time."""
self._attr_is_recording = False
self._expired_listener = None
_LOGGER.debug("Reset state")
self.async_write_ha_state()
if self._expired_listener:
self._expired_listener()
self._expired_listener = async_track_point_in_utc_time(
self.hass, reset_state, dt_util.utcnow() + self._timeout
)
self.async_write_ha_state()
async def async_camera_image(
self, width: int | None = None, height: int | None = None
) -> bytes | None:
"""Return a still image response."""
if self.queue:
if self.state == STATE_IDLE:
self.queue.rotate(1)
self._current_image = self.queue[0]
return self._current_image
@property
def name(self):
"""Return the name of this camera."""
return self._name
@property
def motion_detection_enabled(self):
"""Camera Motion Detection Status."""
return False
@property
def extra_state_attributes(self):
"""Return the state attributes."""
return {
name: value
for name, value in (
(ATTR_LAST_TRIP, self._last_trip),
(ATTR_FILENAME, self._filename),
)
if value is not None
}
|
import os
import sys
import subprocess
import hydra
from omegaconf import DictConfig
from hydra import slurm_utils
@hydra.main(config_path='/h/nng/conf/robust/config.yaml')
def gen_neighborhood_labels(cfg: DictConfig):
base_path = '/h/nng/data'
model_data_path = os.path.join(base_path, cfg.data.task, cfg.eval.model.data)
eval_data_path = os.path.join(base_path, cfg.data.task, cfg.eval.data)
model_path = os.path.join('/h/nng/slurm', cfg.eval.model.date, slurm_utils.resolve_name(cfg.eval.model.name))
if not os.path.exists(os.path.join(model_path, 'checkpoint_best.pt')):
for f in sorted(os.listdir(model_path))[::-1]:
if os.path.exists(os.path.join(model_path, f, 'checkpoint_best.pt')):
model_path = os.path.join(model_path, f)
break
model_path = os.path.join(model_path, 'checkpoint_best.pt')
bin_path = os.path.join(model_data_path, cfg.data.fdset, cfg.data.bin, 'bin')
t_path = os.path.join(eval_data_path, cfg.data.tdset, 'orig', cfg.eval.split + '.bpe.' + cfg.data.src)
ref_path = os.path.join(eval_data_path, cfg.data.tdset, 'orig', cfg.eval.split + '.raw.' + cfg.data.tgt)
bpe_path = '/h/nng/programs/subword-nmt/subword_nmt'
if cfg.data.fdset == 'iwslt':
        fair_sh = ['fairseq-generate', bin_path,
                   '--path', model_path,
                   '--beam', '10',
                   '--remove-bpe',
                   '--batch-size', '128',
                   '--quiet']
fair_p = subprocess.Popen(fair_sh, stdout=subprocess.PIPE)
output, err = fair_p.communicate()
        print(output.decode())  # communicate() returns bytes
else:
cat_sh = ['cat', t_path]
        fair_sh = ['fairseq-interactive', bin_path,
                   '--path', model_path,
                   '-s', cfg.data.src,
                   '-t', cfg.data.tgt,
                   '--beam', '10',
                   '--remove-bpe',
                   '--buffer-size', '1024',
                   '--max-tokens', '8000']
grep_sh = ['grep', '^H-']
cut_sh = ['cut', '-f', '3-']
detoken_sh = ['sacremoses', 'detokenize', '-l', cfg.data.tgt, '-q']
score_sh = ['sacrebleu', ref_path, '-l', cfg.data.src + '-' + cfg.data.tgt, '-w', '2']
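        # The Popen chain below mirrors this shell pipeline (paths illustrative):
        #   cat $t_path | fairseq-interactive $bin ... | grep '^H-' | cut -f 3- \
        #     | sacremoses detokenize -l $tgt -q | sacrebleu $ref -l $src-$tgt -w 2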
cat_p = subprocess.Popen(cat_sh, stdout=subprocess.PIPE)
fair_p = subprocess.Popen(fair_sh, stdin=cat_p.stdout, stdout=subprocess.PIPE)
cat_p.stdout.close()
grep_p = subprocess.Popen(grep_sh, stdin=fair_p.stdout, stdout=subprocess.PIPE)
fair_p.stdout.close()
cut_p = subprocess.Popen(cut_sh, stdin=grep_p.stdout, stdout=subprocess.PIPE)
grep_p.stdout.close()
detoken_p = subprocess.Popen(detoken_sh, stdin=cut_p.stdout, stdout=subprocess.PIPE)
cut_p.stdout.close()
score_p = subprocess.Popen(score_sh, stdin=detoken_p.stdout, stdout=subprocess.PIPE)
detoken_p.stdout.close()
output, err = score_p.communicate()
        print(output.decode())  # communicate() returns bytes
if __name__ == "__main__":
gen_neighborhood_labels()
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class DeploymentsOperations(object):
"""DeploymentsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.appplatform.v2022_01_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_group_name, # type: str
service_name, # type: str
app_name, # type: str
deployment_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.DeploymentResource"
"""Get a Deployment and its properties.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param app_name: The name of the App resource.
:type app_name: str
:param deployment_name: The name of the Deployment resource.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentResource, or the result of cls(response)
:rtype: ~azure.mgmt.appplatform.v2022_01_01_preview.models.DeploymentResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2022-01-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'appName': self._serialize.url("app_name", app_name, 'str'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/deployments/{deploymentName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
service_name, # type: str
app_name, # type: str
deployment_name, # type: str
deployment_resource, # type: "_models.DeploymentResource"
**kwargs # type: Any
):
# type: (...) -> "_models.DeploymentResource"
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2022-01-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'appName': self._serialize.url("app_name", app_name, 'str'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(deployment_resource, 'DeploymentResource')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DeploymentResource', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DeploymentResource', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('DeploymentResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/deployments/{deploymentName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
service_name, # type: str
app_name, # type: str
deployment_name, # type: str
deployment_resource, # type: "_models.DeploymentResource"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.DeploymentResource"]
"""Create a new Deployment or update an exiting Deployment.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param app_name: The name of the App resource.
:type app_name: str
:param deployment_name: The name of the Deployment resource.
:type deployment_name: str
:param deployment_resource: Parameters for the create or update operation.
:type deployment_resource: ~azure.mgmt.appplatform.v2022_01_01_preview.models.DeploymentResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either DeploymentResource or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.appplatform.v2022_01_01_preview.models.DeploymentResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
deployment_name=deployment_name,
deployment_resource=deployment_resource,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('DeploymentResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'appName': self._serialize.url("app_name", app_name, 'str'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/deployments/{deploymentName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
service_name, # type: str
app_name, # type: str
deployment_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2022-01-01-preview"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'appName': self._serialize.url("app_name", app_name, 'str'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/deployments/{deploymentName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
service_name, # type: str
app_name, # type: str
deployment_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Operation to delete a Deployment.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param app_name: The name of the App resource.
:type app_name: str
:param deployment_name: The name of the Deployment resource.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
deployment_name=deployment_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'appName': self._serialize.url("app_name", app_name, 'str'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/deployments/{deploymentName}'} # type: ignore
def _update_initial(
self,
resource_group_name, # type: str
service_name, # type: str
app_name, # type: str
deployment_name, # type: str
deployment_resource, # type: "_models.DeploymentResource"
**kwargs # type: Any
):
# type: (...) -> "_models.DeploymentResource"
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2022-01-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'appName': self._serialize.url("app_name", app_name, 'str'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(deployment_resource, 'DeploymentResource')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # The guard above ensures the status is 200 or 202; both carry a DeploymentResource body.
        deserialized = self._deserialize('DeploymentResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/deployments/{deploymentName}'} # type: ignore
def begin_update(
self,
resource_group_name, # type: str
service_name, # type: str
app_name, # type: str
deployment_name, # type: str
deployment_resource, # type: "_models.DeploymentResource"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.DeploymentResource"]
"""Operation to update an exiting Deployment.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param app_name: The name of the App resource.
:type app_name: str
:param deployment_name: The name of the Deployment resource.
:type deployment_name: str
:param deployment_resource: Parameters for the update operation.
:type deployment_resource: ~azure.mgmt.appplatform.v2022_01_01_preview.models.DeploymentResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either DeploymentResource or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.appplatform.v2022_01_01_preview.models.DeploymentResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_initial(
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
deployment_name=deployment_name,
deployment_resource=deployment_resource,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('DeploymentResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'appName': self._serialize.url("app_name", app_name, 'str'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/deployments/{deploymentName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
service_name, # type: str
app_name, # type: str
version=None, # type: Optional[List[str]]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.DeploymentResourceCollection"]
"""Handles requests to list all resources in an App.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param app_name: The name of the App resource.
:type app_name: str
:param version: Version of the deployments to be listed.
:type version: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DeploymentResourceCollection or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.appplatform.v2022_01_01_preview.models.DeploymentResourceCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentResourceCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2022-01-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'appName': self._serialize.url("app_name", app_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if version is not None:
query_parameters['version'] = [self._serialize.query("version", q, 'str') if q is not None else '' for q in version]
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('DeploymentResourceCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/deployments'} # type: ignore
def list_for_cluster(
self,
resource_group_name, # type: str
service_name, # type: str
version=None, # type: Optional[List[str]]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.DeploymentResourceCollection"]
"""List deployments for a certain service.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param version: Version of the deployments to be listed.
:type version: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DeploymentResourceCollection or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.appplatform.v2022_01_01_preview.models.DeploymentResourceCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentResourceCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2022-01-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_for_cluster.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if version is not None:
query_parameters['version'] = [self._serialize.query("version", q, 'str') if q is not None else '' for q in version]
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('DeploymentResourceCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_for_cluster.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/deployments'} # type: ignore
def _start_initial(
self,
resource_group_name, # type: str
service_name, # type: str
app_name, # type: str
deployment_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2022-01-01-preview"
accept = "application/json"
# Construct URL
url = self._start_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'appName': self._serialize.url("app_name", app_name, 'str'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_start_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/deployments/{deploymentName}/start'} # type: ignore
def begin_start(
self,
resource_group_name, # type: str
service_name, # type: str
app_name, # type: str
deployment_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Start the deployment.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param app_name: The name of the App resource.
:type app_name: str
:param deployment_name: The name of the Deployment resource.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._start_initial(
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
deployment_name=deployment_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'appName': self._serialize.url("app_name", app_name, 'str'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/deployments/{deploymentName}/start'} # type: ignore
def _stop_initial(
self,
resource_group_name, # type: str
service_name, # type: str
app_name, # type: str
deployment_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2022-01-01-preview"
accept = "application/json"
# Construct URL
url = self._stop_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'appName': self._serialize.url("app_name", app_name, 'str'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/deployments/{deploymentName}/stop'} # type: ignore
def begin_stop(
self,
resource_group_name, # type: str
service_name, # type: str
app_name, # type: str
deployment_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Stop the deployment.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param app_name: The name of the App resource.
:type app_name: str
:param deployment_name: The name of the Deployment resource.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._stop_initial(
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
deployment_name=deployment_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'appName': self._serialize.url("app_name", app_name, 'str'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/deployments/{deploymentName}/stop'} # type: ignore
def _restart_initial(
self,
resource_group_name, # type: str
service_name, # type: str
app_name, # type: str
deployment_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2022-01-01-preview"
accept = "application/json"
# Construct URL
url = self._restart_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'appName': self._serialize.url("app_name", app_name, 'str'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_restart_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/deployments/{deploymentName}/restart'} # type: ignore
def begin_restart(
self,
resource_group_name, # type: str
service_name, # type: str
app_name, # type: str
deployment_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Restart the deployment.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param app_name: The name of the App resource.
:type app_name: str
:param deployment_name: The name of the Deployment resource.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._restart_initial(
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
deployment_name=deployment_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'appName': self._serialize.url("app_name", app_name, 'str'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_restart.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/deployments/{deploymentName}/restart'} # type: ignore
def get_log_file_url(
self,
resource_group_name, # type: str
service_name, # type: str
app_name, # type: str
deployment_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["_models.LogFileUrlResponse"]
"""Get deployment log file URL.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param app_name: The name of the App resource.
:type app_name: str
:param deployment_name: The name of the Deployment resource.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: LogFileUrlResponse, or the result of cls(response)
:rtype: ~azure.mgmt.appplatform.v2022_01_01_preview.models.LogFileUrlResponse or None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.LogFileUrlResponse"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2022-01-01-preview"
accept = "application/json"
# Construct URL
url = self.get_log_file_url.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'appName': self._serialize.url("app_name", app_name, 'str'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('LogFileUrlResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_log_file_url.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/deployments/{deploymentName}/getLogFileUrl'} # type: ignore
def _generate_heap_dump_initial(
self,
resource_group_name, # type: str
service_name, # type: str
app_name, # type: str
deployment_name, # type: str
diagnostic_parameters, # type: "_models.DiagnosticParameters"
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2022-01-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._generate_heap_dump_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'appName': self._serialize.url("app_name", app_name, 'str'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(diagnostic_parameters, 'DiagnosticParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_generate_heap_dump_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/deployments/{deploymentName}/generateHeapDump'} # type: ignore
def begin_generate_heap_dump(
self,
resource_group_name, # type: str
service_name, # type: str
app_name, # type: str
deployment_name, # type: str
diagnostic_parameters, # type: "_models.DiagnosticParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Generate Heap Dump.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param app_name: The name of the App resource.
:type app_name: str
:param deployment_name: The name of the Deployment resource.
:type deployment_name: str
:param diagnostic_parameters: Parameters for the diagnostic operation.
:type diagnostic_parameters: ~azure.mgmt.appplatform.v2022_01_01_preview.models.DiagnosticParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._generate_heap_dump_initial(
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
deployment_name=deployment_name,
diagnostic_parameters=diagnostic_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'appName': self._serialize.url("app_name", app_name, 'str'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_generate_heap_dump.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/deployments/{deploymentName}/generateHeapDump'} # type: ignore
def _generate_thread_dump_initial(
self,
resource_group_name, # type: str
service_name, # type: str
app_name, # type: str
deployment_name, # type: str
diagnostic_parameters, # type: "_models.DiagnosticParameters"
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2022-01-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._generate_thread_dump_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'appName': self._serialize.url("app_name", app_name, 'str'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(diagnostic_parameters, 'DiagnosticParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_generate_thread_dump_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/deployments/{deploymentName}/generateThreadDump'} # type: ignore
def begin_generate_thread_dump(
self,
resource_group_name, # type: str
service_name, # type: str
app_name, # type: str
deployment_name, # type: str
diagnostic_parameters, # type: "_models.DiagnosticParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Generate Thread Dump.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param app_name: The name of the App resource.
:type app_name: str
:param deployment_name: The name of the Deployment resource.
:type deployment_name: str
:param diagnostic_parameters: Parameters for the diagnostic operation.
:type diagnostic_parameters: ~azure.mgmt.appplatform.v2022_01_01_preview.models.DiagnosticParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._generate_thread_dump_initial(
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
deployment_name=deployment_name,
diagnostic_parameters=diagnostic_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'appName': self._serialize.url("app_name", app_name, 'str'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_generate_thread_dump.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/deployments/{deploymentName}/generateThreadDump'} # type: ignore
def _start_jfr_initial(
self,
resource_group_name, # type: str
service_name, # type: str
app_name, # type: str
deployment_name, # type: str
diagnostic_parameters, # type: "_models.DiagnosticParameters"
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2022-01-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._start_jfr_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'appName': self._serialize.url("app_name", app_name, 'str'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(diagnostic_parameters, 'DiagnosticParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_start_jfr_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/deployments/{deploymentName}/startJFR'} # type: ignore
def begin_start_jfr(
self,
resource_group_name, # type: str
service_name, # type: str
app_name, # type: str
deployment_name, # type: str
diagnostic_parameters, # type: "_models.DiagnosticParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Start JFR.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param app_name: The name of the App resource.
:type app_name: str
:param deployment_name: The name of the Deployment resource.
:type deployment_name: str
:param diagnostic_parameters: Parameters for the diagnostic operation.
:type diagnostic_parameters: ~azure.mgmt.appplatform.v2022_01_01_preview.models.DiagnosticParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._start_jfr_initial(
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
deployment_name=deployment_name,
diagnostic_parameters=diagnostic_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'appName': self._serialize.url("app_name", app_name, 'str'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start_jfr.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/deployments/{deploymentName}/startJFR'} # type: ignore
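# --- Illustrative usage sketch (not part of the generated client) ---
# A minimal example, assuming azure-identity is installed and that
# AppPlatformManagementClient exposes the operations above as
# `client.deployments`; the angle-bracket values are placeholders.
if __name__ == "__main__":
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.appplatform.v2022_01_01_preview import AppPlatformManagementClient

    client = AppPlatformManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="<subscription-id>",
    )
    # Each begin_* method returns an LROPoller; .result() blocks until the
    # long-running operation completes (or raises HttpResponseError).
    poller = client.deployments.begin_restart(
        resource_group_name="<resource-group>",
        service_name="<service>",
        app_name="<app>",
        deployment_name="<deployment>",
    )
    poller.result()
    # A poller can be saved and resumed later via the continuation_token
    # keyword documented in the docstrings above.
    token = poller.continuation_token()
    resumed = client.deployments.begin_restart(
        "<resource-group>", "<service>", "<app>", "<deployment>",
        continuation_token=token,
    )
    resumed.wait()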
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: trainer.py
# Author: Qian Ge <geqian1001@gmail.com>
import os
import numpy as np
import tensorflow as tf
def display(global_step,
step,
scaler_sum_list,
name_list,
collection,
summary_val=None,
summary_writer=None,
):
print('[step: {}]'.format(global_step), end='')
for val, name in zip(scaler_sum_list, name_list):
print(' {}: {:.4f}'.format(name, val * 1. / step), end='')
print('')
if summary_writer is not None:
s = tf.Summary()
for val, name in zip(scaler_sum_list, name_list):
s.value.add(tag='{}/{}'.format(collection, name),
simple_value=val * 1. / step)
summary_writer.add_summary(s, global_step)
if summary_val is not None:
summary_writer.add_summary(summary_val, global_step)
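# Example call (illustrative): report the running averages of two sums that
# were accumulated over `step` batches, and also log them to TensorBoard under
# the 'train' tag group:
#   display(global_step, step, [loss_sum, acc_sum], ['loss', 'accuracy'],
#           'train', summary_writer=writer)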
class Trainer(object):
def __init__(self, train_model, valid_model, train_data, init_lr=1e-3):
self._t_model = train_model
self._v_model = valid_model
self._train_data = train_data
self._init_lr = init_lr
self._train_op = train_model.get_train_op()
self._train_loss_op = train_model.get_loss()
self._train_accuracy_op = train_model.get_accuracy()
self._valid_loss_op = valid_model.get_loss()
self._valid_accuracy_op = valid_model.get_accuracy()
# self._train_summary_op = train_model.get_train_summary()
# self._valid_summary_op = train_model.get_valid_summary()
self.global_step = 0
self.epoch_id = 0
def train_epoch(self, sess, keep_prob=1., summary_writer=None):
        # Step schedule: drop the learning rate 10x at epoch 35 and again at epoch 50.
        if self.epoch_id < 35:
self._lr = self._init_lr
elif self.epoch_id < 50:
self._lr = self._init_lr / 10.
else:
self._lr = self._init_lr / 100.
# self._t_model.set_is_training(True)
display_name_list = ['loss', 'accuracy']
cur_summary = None
cur_epoch = self._train_data.epochs_completed
step = 0
loss_sum = 0
acc_sum = 0
self.epoch_id += 1
while cur_epoch == self._train_data.epochs_completed:
self.global_step += 1
step += 1
batch_data = self._train_data.next_batch_dict()
im = batch_data['image']
label = batch_data['label']
_, loss, acc = sess.run(
[self._train_op, self._train_loss_op, self._train_accuracy_op],
feed_dict={self._t_model.image: im,
self._t_model.label: label,
self._t_model.lr: self._lr,
self._t_model.keep_prob: keep_prob})
loss_sum += loss
acc_sum += acc
if step % 100 == 0 or step == 1:
display(self.global_step,
step,
[loss_sum, acc_sum],
display_name_list,
'train',
summary_val=cur_summary,
summary_writer=summary_writer)
print('==== epoch: {}, lr:{} ===='.format(cur_epoch, self._lr))
display(self.global_step,
step,
[loss_sum, acc_sum],
display_name_list,
'train',
summary_val=cur_summary,
summary_writer=summary_writer)
def valid_epoch(self, sess, dataflow, summary_writer=None):
display_name_list = ['loss', 'accuracy']
cur_summary = None
dataflow.reset_epoch()
step = 0
loss_sum = 0
acc_sum = 0
while dataflow.epochs_completed < 1:
step += 1
batch_data = dataflow.next_batch_dict()
im = batch_data['image']
label = batch_data['label']
loss, acc = sess.run(
[self._valid_loss_op, self._valid_accuracy_op],
feed_dict={self._v_model.image: im,
self._v_model.label: label})
loss_sum += loss
acc_sum += acc
print('[Valid]: ', end='')
display(self.global_step,
step,
[loss_sum, acc_sum],
display_name_list,
'valid',
summary_val=cur_summary,
summary_writer=summary_writer)
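# --- Illustrative usage sketch (not part of the original file) ---
# Assumes TF1-style graph code: `train_model`/`valid_model` expose the ops and
# placeholders referenced above, and `train_data`/`valid_data` implement
# next_batch_dict(), epochs_completed and reset_epoch(). All of these names
# are placeholders for objects built elsewhere.
if __name__ == '__main__':
    trainer = Trainer(train_model, valid_model, train_data, init_lr=1e-3)
    writer = tf.summary.FileWriter('./log')
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for _ in range(60):
            trainer.train_epoch(sess, keep_prob=0.5, summary_writer=writer)
            trainer.valid_epoch(sess, valid_data, summary_writer=writer)
    writer.close()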
|
"""NDG XACML ndg namespace package
NERC DataGrid
This is a setuptools namespace_package. DO NOT place any other
code in this file! There is no guarantee that it will be installed
with easy_install. See:
http://peak.telecommunity.com/DevCenter/setuptools#namespace-packages
... for details.
"""
__author__ = "P J Kershaw"
__date__ = "19/02/10"
__copyright__ = "(C) 2010 Science and Technology Facilities Council"
__license__ = "BSD - see LICENSE file in top-level directory"
__contact__ = "Philip.Kershaw@stfc.ac.uk"
__revision__ = '$Id$'
__import__('pkg_resources').declare_namespace(__name__)
|
import os
import torch, torchvision, torchtext
from torch import nn, cuda, backends, FloatTensor, LongTensor, optim
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data import Dataset, TensorDataset
from torch.nn.init import kaiming_uniform, kaiming_normal
from torchvision.transforms import Compose
from torchvision.models import resnet18, resnet34, resnet50, resnet101, resnet152
from torchvision.models import vgg16_bn, vgg19_bn
from torchvision.models import densenet121, densenet161, densenet169, densenet201
from .models.resnext_50_32x4d import resnext_50_32x4d
from .models.resnext_101_32x4d import resnext_101_32x4d
from .models.resnext_101_64x4d import resnext_101_64x4d
from .models.wrn_50_2f import wrn_50_2f
from .models.inceptionresnetv2 import InceptionResnetV2
from .models.inceptionv4 import InceptionV4, inceptionv4  # inception_4 below calls inceptionv4(); assumed to be exported by the module
from .models.nasnet import nasnetalarge
from unet_models import unet11
import warnings
warnings.filterwarnings('ignore', message='Implicit dimension choice', category=UserWarning)
def children(m): return m if isinstance(m, (list, tuple)) else list(m.children())
def save_model(m, p): torch.save(m.state_dict(), p)
def load_model(m, p): m.load_state_dict(torch.load(p, map_location=lambda storage, loc: storage))
def load_pre(pre, f, fn):
m = f()
path = os.path.dirname(__file__)
if pre: load_model(m, f'{path}/weights/{fn}.pth')
return m
def _fastai_model(name, paper_title, paper_href):
def add_docs_wrapper(f):
f.__doc__ = f"""{name} model from
`"{paper_title}" <{paper_href}>`_
Args:
pre (bool): If True, returns a model pre-trained on ImageNet
"""
return f
return add_docs_wrapper
@_fastai_model('Inception 4', 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning',
'https://arxiv.org/pdf/1602.07261.pdf')
def inception_4(pre): return children(inceptionv4(pretrained=pre))[0]
@_fastai_model('Inception-ResNet v2', 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning',
'https://arxiv.org/pdf/1602.07261.pdf')
def inceptionresnet_2(pre): return load_pre(pre, InceptionResnetV2, 'inceptionresnetv2-d579a627')
@_fastai_model('ResNeXt 50', 'Aggregated Residual Transformations for Deep Neural Networks',
'https://arxiv.org/abs/1611.05431')
def resnext50(pre): return load_pre(pre, resnext_50_32x4d, 'resnext_50_32x4d')
@_fastai_model('ResNeXt 101_32', 'Aggregated Residual Transformations for Deep Neural Networks',
'https://arxiv.org/abs/1611.05431')
def resnext101(pre): return load_pre(pre, resnext_101_32x4d, 'resnext_101_32x4d')
@_fastai_model('ResNeXt 101_64', 'Aggregated Residual Transformations for Deep Neural Networks',
'https://arxiv.org/abs/1611.05431')
def resnext101_64(pre): return load_pre(pre, resnext_101_64x4d, 'resnext_101_64x4d')
@_fastai_model('Wide Residual Networks', 'Wide Residual Networks',
'https://arxiv.org/pdf/1605.07146.pdf')
def wrn(pre): return load_pre(pre, wrn_50_2f, 'wrn_50_2f')
@_fastai_model('Densenet-121', 'Densely Connected Convolutional Networks',
'https://arxiv.org/pdf/1608.06993.pdf')
def dn121(pre): return children(densenet121(pre))[0]
@_fastai_model('Densenet-161', 'Densely Connected Convolutional Networks',
'https://arxiv.org/pdf/1608.06993.pdf')
def dn161(pre): return children(densenet161(pre))[0]
@_fastai_model('Densenet-169', 'Densely Connected Convolutional Networks',
'https://arxiv.org/pdf/1608.06993.pdf')
def dn169(pre): return children(densenet169(pre))[0]
@_fastai_model('Densenet-201', 'Densely Connected Convolutional Networks',
'https://arxiv.org/pdf/1608.06993.pdf')
def dn201(pre): return children(densenet201(pre))[0]
@_fastai_model('Vgg-16 with batch norm added', 'Very Deep Convolutional Networks for Large-Scale Image Recognition',
'https://arxiv.org/pdf/1409.1556.pdf')
def vgg16(pre): return children(vgg16_bn(pre))[0]
@_fastai_model('Vgg-19 with batch norm added', 'Very Deep Convolutional Networks for Large-Scale Image Recognition',
'https://arxiv.org/pdf/1409.1556.pdf')
def vgg19(pre): return children(vgg19_bn(pre))[0]
@_fastai_model('Vgg-11 with U-Net', 'TernausNet: U-Net with VGG11 Encoder Pre-Trained on ImageNet for Image Segmentation',
'https://arxiv.org/pdf/1801.05746.pdf')
def ternausnet(pre): return children(unet11(pre))
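# Minimal demonstration of the decorator above (illustrative only; any callable
# works, since _fastai_model merely attaches a formatted docstring):
if __name__ == '__main__':
    @_fastai_model('Toy model', 'A Hypothetical Paper', 'https://example.com')
    def toy(pre): return None
    print(toy.__doc__)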
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .sprin import GlobalInfoProp, SparseSO3Conv
import numpy as np
class ResLayer(torch.nn.Module):
def __init__(self, dim_in, dim_out, bn=False) -> None:
super().__init__()
        assert bn is False  # BatchNorm branches below are kept for reference but are disabled by this assert
self.fc1 = torch.nn.Linear(dim_in, dim_out)
if bn:
self.bn1 = torch.nn.BatchNorm1d(dim_out)
else:
self.bn1 = lambda x: x
self.fc2 = torch.nn.Linear(dim_out, dim_out)
if bn:
self.bn2 = torch.nn.BatchNorm1d(dim_out)
else:
self.bn2 = lambda x: x
if dim_in != dim_out:
self.fc0 = torch.nn.Linear(dim_in, dim_out)
else:
self.fc0 = None
def forward(self, x):
x_res = x if self.fc0 is None else self.fc0(x)
x = F.relu(self.bn1(self.fc1(x)))
x = self.bn2(self.fc2(x))
return x + x_res
class PointEncoder(nn.Module):
def __init__(self, k, spfcs, out_dim, num_layers=2, num_nbr_feats=2) -> None:
super().__init__()
self.k = k
self.spconvs = nn.ModuleList()
self.spconvs.append(SparseSO3Conv(32, num_nbr_feats, out_dim, *spfcs))
self.aggrs = nn.ModuleList()
self.aggrs.append(GlobalInfoProp(out_dim, out_dim // 4))
for _ in range(num_layers - 1):
self.spconvs.append(SparseSO3Conv(32, out_dim + out_dim // 4, out_dim, *spfcs))
self.aggrs.append(GlobalInfoProp(out_dim, out_dim // 4))
def forward(self, pc, pc_normal, dist):
nbrs_idx = torch.topk(dist, self.k, largest=False, sorted=False)[1] #[..., N, K]
pc_nbrs = torch.gather(pc.unsqueeze(-3).expand(*pc.shape[:-1], *pc.shape[-2:]), -2, nbrs_idx[..., None].expand(*nbrs_idx.shape, pc.shape[-1])) #[..., N, K, 3]
pc_nbrs_centered = pc_nbrs - pc.unsqueeze(-2) #[..., N, K, 3]
pc_nbrs_norm = torch.norm(pc_nbrs_centered, dim=-1, keepdim=True)
pc_normal_nbrs = torch.gather(pc_normal.unsqueeze(-3).expand(*pc_normal.shape[:-1], *pc_normal.shape[-2:]), -2, nbrs_idx[..., None].expand(*nbrs_idx.shape, pc_normal.shape[-1])) #[..., N, K, 3]
pc_normal_cos = torch.sum(pc_normal_nbrs * pc_normal.unsqueeze(-2), -1, keepdim=True)
feat = self.aggrs[0](self.spconvs[0](pc_nbrs, torch.cat([pc_nbrs_norm, pc_normal_cos], -1), pc))
for i in range(len(self.spconvs) - 1):
spconv = self.spconvs[i + 1]
aggr = self.aggrs[i + 1]
feat_nbrs = torch.gather(feat.unsqueeze(-3).expand(*feat.shape[:-1], *feat.shape[-2:]), -2, nbrs_idx[..., None].expand(*nbrs_idx.shape, feat.shape[-1]))
feat = aggr(spconv(pc_nbrs, feat_nbrs, pc))
return feat
def forward_nbrs(self, pc, pc_normal, nbrs_idx):
pc_nbrs = torch.gather(pc.unsqueeze(-3).expand(*pc.shape[:-1], *pc.shape[-2:]), -2, nbrs_idx[..., None].expand(*nbrs_idx.shape, pc.shape[-1])) #[..., N, K, 3]
pc_nbrs_centered = pc_nbrs - pc.unsqueeze(-2) #[..., N, K, 3]
pc_nbrs_norm = torch.norm(pc_nbrs_centered, dim=-1, keepdim=True)
pc_normal_nbrs = torch.gather(pc_normal.unsqueeze(-3).expand(*pc_normal.shape[:-1], *pc_normal.shape[-2:]), -2, nbrs_idx[..., None].expand(*nbrs_idx.shape, pc_normal.shape[-1])) #[..., N, K, 3]
pc_normal_cos = torch.sum(pc_normal_nbrs * pc_normal.unsqueeze(-2), -1, keepdim=True)
feat = self.aggrs[0](self.spconvs[0](pc_nbrs, torch.cat([pc_nbrs_norm, pc_normal_cos], -1), pc))
for i in range(len(self.spconvs) - 1):
spconv = self.spconvs[i + 1]
aggr = self.aggrs[i + 1]
feat_nbrs = torch.gather(feat.unsqueeze(-3).expand(*feat.shape[:-1], *feat.shape[-2:]), -2, nbrs_idx[..., None].expand(*nbrs_idx.shape, feat.shape[-1]))
feat = aggr(spconv(pc_nbrs, feat_nbrs, pc))
return feat
class PPFEncoder(nn.Module):
def __init__(self, ppffcs, out_dim) -> None:
super().__init__()
self.res_layers = nn.ModuleList()
for i in range(len(ppffcs) - 1):
dim_in, dim_out = ppffcs[i], ppffcs[i + 1]
self.res_layers.append(ResLayer(dim_in, dim_out, bn=False))
self.final = nn.Linear(ppffcs[-1], out_dim)
def forward(self, pc, pc_normal, feat, dist=None, idxs=None):
if idxs is not None:
return self.forward_with_idx(pc[0], pc_normal[0], feat[0], idxs)[None]
xx = pc.unsqueeze(-2) - pc.unsqueeze(-3)
xx_normed = xx / (dist[..., None] + 1e-7)
outputs = []
for idx in torch.chunk(torch.arange(pc.shape[1]), 5):
feat_chunk = feat[..., idx, :]
target_shape = [*feat_chunk.shape[:-2], feat_chunk.shape[-2], feat.shape[-2], feat_chunk.shape[-1]] # B x NC x N x F
xx_normed_chunk = xx_normed[..., idx, :, :]
ppf = torch.cat([
torch.sum(pc_normal[..., idx, :].unsqueeze(-2) * xx_normed_chunk, -1, keepdim=True),
torch.sum(pc_normal.unsqueeze(-3) * xx_normed_chunk, -1, keepdim=True),
torch.sum(pc_normal[..., idx, :].unsqueeze(-2) * pc_normal.unsqueeze(-3), -1, keepdim=True),
dist[..., idx, :, None],
], -1)
# ppf.zero_()
final_feat = torch.cat([feat_chunk[..., None, :].expand(*target_shape), feat[..., None, :, :].expand(*target_shape), ppf], -1)
output = final_feat
for res_layer in self.res_layers:
output = res_layer(output)
outputs.append(output)
output = torch.cat(outputs, dim=-3)
return self.final(output)
def forward_with_idx(self, pc, pc_normal, feat, idxs):
a_idxs = idxs[:, 0]
b_idxs = idxs[:, 1]
xy = pc[a_idxs] - pc[b_idxs]
xy_norm = torch.norm(xy, dim=-1)
xy_normed = xy / (xy_norm[..., None] + 1e-7)
pnormal_cos = pc_normal[a_idxs] * pc_normal[b_idxs]
ppf = torch.cat([
torch.sum(pc_normal[a_idxs] * xy_normed, -1, keepdim=True),
torch.sum(pc_normal[b_idxs] * xy_normed, -1, keepdim=True),
torch.sum(pnormal_cos, -1, keepdim=True),
xy_norm[..., None],
], -1)
# ppf.zero_()
final_feat = torch.cat([feat[a_idxs], feat[b_idxs], ppf], -1)
output = final_feat
for res_layer in self.res_layers:
output = res_layer(output)
return self.final(output)
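if __name__ == "__main__":
    # Sanity sketch (illustrative only): forward_with_idx consumes the 4-D point
    # pair feature (n_a . d_hat, n_b . d_hat, n_a . n_b, ||d||) built above, so the
    # first PPF layer width must equal 2 * feat_dim + 4.
    pc = torch.randn(16, 3)
    normal = F.normalize(torch.randn(16, 3), dim=-1)
    feat = torch.randn(16, 8)                # stand-in for PointEncoder output
    idxs = torch.randint(0, 16, (32, 2))     # random point pairs
    encoder = PPFEncoder(ppffcs=[2 * 8 + 4, 32], out_dim=5)
    print(encoder.forward_with_idx(pc, normal, feat, idxs).shape)  # torch.Size([32, 5])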
|
import argparse
import json
import numpy as np
import os
import subprocess
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from params_conf import N_MODULES, MIN_NUM_MODULES, STIFF_TABLE
from utils import parse_robot_string
def convert_h_to_arch(h_file, morph_file, best_function, out_dir, seed):
    with open(morph_file) as mf:
        morphologies = json.load(mf)
inverse_morph_dict = {v: k for k, v in morphologies.items()}
archive = np.full(shape=(len(N_MODULES), len(STIFF_TABLE)), fill_value=None)
last_gen = 0
with open(h_file) as hf:
_ = hf.readline() # skip header
for line in hf:
last_gen, m_id, c_id, fit, _ = line.split(',')
last_gen, m_id, c_id, fit = int(last_gen), int(m_id), int(c_id), float(fit)
robot = parse_robot_string(inverse_morph_dict[m_id])
index_1 = len(robot) - MIN_NUM_MODULES
index_2 = STIFF_TABLE.index(robot[0]['stiff'])
value_1 = len(robot)
value_2 = robot[0]['stiff']
if archive[index_1, index_2] is None or fit == best_function(archive[index_1, index_2][4], fit):
archive[index_1, index_2] = (index_1, index_2, value_1, value_2, fit, m_id, c_id)
out_filename = os.path.join(out_dir, 'entity_archive_{}_ngen_{}.csv'.format(seed, last_gen))
with open(out_filename, 'w') as out_file:
out_file.write('1st_dim_indx,2nd_dim_indx,1st_dim:num_modules,2nd_dim:stiffness_value,fitness,e_id,nn_id\n')
for entry in archive.flatten():
if entry is not None:
index_1, index_2, value_1, value_2, fit, m_id, c_id = entry
out_file.write('{},{},{},{},{:.4f},{},{}\n'.format(
index_1, index_2, value_1, value_2, fit, m_id, c_id
))
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Script for converting a history file into an archive based on '
                                                 'morphological features.')
parser.add_argument('res_dirs', metavar='res_dirs', type=str, nargs='+',
help='list of folders containing evolution results')
parser.add_argument('--fit-func-best', metavar='fitness_function_best', type=str, nargs='?', default='min',
help='function used to determine the best fitness, allowed values are: min, max')
parser.add_argument('--owner', metavar='owner', type=str, action='store', nargs='+',
default=None, help='User and group for chown')
args = parser.parse_args()
if args.fit_func_best not in ['min', 'max']:
        raise Exception('The function provided to determine the best fitness '
                        'is not valid')
else:
fit_func_best = min if args.fit_func_best == 'min' else max
if args.res_dirs is not None:
for res_dir in args.res_dirs:
settings_file = os.path.join(res_dir, 'settings.json')
with open(settings_file) as sf:
settings = json.load(sf)
abs_res_dir_path = os.path.abspath(res_dir)
evo_info_dir = os.path.join(abs_res_dir_path, 'evolution_info')
out_dir = os.path.join(abs_res_dir_path, 'archives')
os.makedirs(out_dir, exist_ok=True)
for seed in settings['seeds']:
history_file = os.path.join(evo_info_dir, 'history_{}.csv'.format(seed))
morphologies_file = os.path.join(abs_res_dir_path, 'morphologies', 'morphologies_{}.json'.format(seed))
convert_h_to_arch(history_file, morphologies_file, fit_func_best, out_dir, seed)
if args.owner is not None and len(args.owner) == 2:
            try:
                exec_string = 'chown -R {}:{} {}'.format(args.owner[0], args.owner[1], out_dir)
                c_proc = subprocess.run(exec_string.split(' '), capture_output=True)
            except Exception as e:
                raise Exception('An error occurred during the owner setting') from e
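# Example invocation (hypothetical script name and paths):
#   python convert_history_to_archive.py results/run_a results/run_b --fit-func-best max
# Each res_dir is expected to contain settings.json, evolution_info/history_<seed>.csv
# and morphologies/morphologies_<seed>.json, matching the layout read above.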
|
"""
pyexcel_xlsw
~~~~~~~~~~~~~~~~~~~
The lower level xls file format handler using xlwt
:copyright: (c) 2016-2021 by Onni Software Ltd
:license: New BSD License
"""
import datetime
import xlrd
from xlwt import XFStyle, Workbook
from pyexcel_io import constants
from pyexcel_io.plugin_api import IWriter, ISheetWriter
DEFAULT_DATE_FORMAT = "DD/MM/YY"
DEFAULT_TIME_FORMAT = "HH:MM:SS"
DEFAULT_LONGTIME_FORMAT = "[HH]:MM:SS"
DEFAULT_DATETIME_FORMAT = "%s %s" % (DEFAULT_DATE_FORMAT, DEFAULT_TIME_FORMAT)
EMPTY_SHEET_NOT_ALLOWED = "xlwt does not support a book without any sheets"
class XLSheetWriter(ISheetWriter):
"""
xls sheet writer
"""
    def __init__(self, xls_book, xls_sheet, sheet_name):
        if sheet_name is None:
            sheet_name = constants.DEFAULT_SHEET_NAME
        self._xls_book = xls_book
        self._xls_sheet = xls_sheet
        if self._xls_sheet is None:
            self._xls_sheet = self._xls_book.add_sheet(sheet_name)
        self.current_row = 0
def write_row(self, array):
"""
write a row into the file
"""
for i, value in enumerate(array):
style = None
tmp_array = []
if isinstance(value, datetime.datetime):
tmp_array = [
value.year,
value.month,
value.day,
value.hour,
value.minute,
value.second,
]
value = xlrd.xldate.xldate_from_datetime_tuple(tmp_array, 0)
style = XFStyle()
style.num_format_str = DEFAULT_DATETIME_FORMAT
elif isinstance(value, datetime.timedelta):
value = value.days + value.seconds / 86_400
style = XFStyle()
style.num_format_str = DEFAULT_LONGTIME_FORMAT
elif isinstance(value, datetime.date):
tmp_array = [value.year, value.month, value.day]
value = xlrd.xldate.xldate_from_date_tuple(tmp_array, 0)
style = XFStyle()
style.num_format_str = DEFAULT_DATE_FORMAT
elif isinstance(value, datetime.time):
tmp_array = [value.hour, value.minute, value.second]
value = xlrd.xldate.xldate_from_time_tuple(tmp_array)
style = XFStyle()
style.num_format_str = DEFAULT_TIME_FORMAT
if style:
self._xls_sheet.write(self.current_row, i, value, style)
else:
self._xls_sheet.write(self.current_row, i, value)
self.current_row += 1
def close(self):
pass
class XLSWriter(IWriter):
"""
xls writer
"""
def __init__(
self,
file_alike_object,
_, # file_type not used
encoding="ascii",
style_compression=2,
**keywords,
):
self.file_alike_object = file_alike_object
self.work_book = Workbook(
style_compression=style_compression, encoding=encoding
)
def create_sheet(self, name):
return XLSheetWriter(self.work_book, None, name)
def write(self, incoming_dict):
if incoming_dict:
IWriter.write(self, incoming_dict)
else:
raise NotImplementedError(EMPTY_SHEET_NOT_ALLOWED)
def close(self):
"""
        This call actually saves the file
"""
self.work_book.save(self.file_alike_object)
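if __name__ == "__main__":
    # Usage sketch (assumes pyexcel_io's IWriter.write drives create_sheet() and
    # the row-writing loop above; the file name is arbitrary):
    writer = XLSWriter("demo.xls", "xls")
    writer.write({"Sheet 1": [["name", "when"], ["demo", datetime.date(2021, 1, 1)]]})
    writer.close()  # the save actually happens here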
|
from typing import List, Tuple, Optional
import aiosqlite
from ethgreen.types.blockchain_format.sized_bytes import bytes32
from ethgreen.util.db_wrapper import DBWrapper
class WalletInterestedStore:
"""
Stores coin ids that we are interested in receiving
"""
db_connection: aiosqlite.Connection
db_wrapper: DBWrapper
@classmethod
async def create(cls, wrapper: DBWrapper):
self = cls()
self.db_connection = wrapper.db
self.db_wrapper = wrapper
await self.db_connection.execute("CREATE TABLE IF NOT EXISTS interested_coins(coin_name text PRIMARY KEY)")
await self.db_connection.execute(
"CREATE TABLE IF NOT EXISTS interested_puzzle_hashes(puzzle_hash text PRIMARY KEY, wallet_id integer)"
)
await self.db_connection.commit()
return self
async def _clear_database(self):
cursor = await self.db_connection.execute("DELETE FROM puzzle_hashes")
await cursor.close()
cursor = await self.db_connection.execute("DELETE FROM interested_coins")
await cursor.close()
await self.db_connection.commit()
async def get_interested_coin_ids(self) -> List[bytes32]:
cursor = await self.db_connection.execute("SELECT coin_name FROM interested_coins")
rows_hex = await cursor.fetchall()
return [bytes32(bytes.fromhex(row[0])) for row in rows_hex]
async def add_interested_coin_id(self, coin_id: bytes32, in_transaction: bool = False) -> None:
if not in_transaction:
await self.db_wrapper.lock.acquire()
try:
cursor = await self.db_connection.execute(
"INSERT OR REPLACE INTO interested_coins VALUES (?)", (coin_id.hex(),)
)
await cursor.close()
finally:
if not in_transaction:
await self.db_connection.commit()
self.db_wrapper.lock.release()
async def get_interested_puzzle_hashes(self) -> List[Tuple[bytes32, int]]:
cursor = await self.db_connection.execute("SELECT puzzle_hash, wallet_id FROM interested_puzzle_hashes")
rows_hex = await cursor.fetchall()
return [(bytes32(bytes.fromhex(row[0])), row[1]) for row in rows_hex]
async def get_interested_puzzle_hash_wallet_id(self, puzzle_hash: bytes32) -> Optional[int]:
cursor = await self.db_connection.execute(
"SELECT wallet_id FROM interested_puzzle_hashes WHERE puzzle_hash=?", (puzzle_hash.hex(),)
)
row = await cursor.fetchone()
if row is None:
return None
return row[0]
async def add_interested_puzzle_hash(
self, puzzle_hash: bytes32, wallet_id: int, in_transaction: bool = False
) -> None:
if not in_transaction:
await self.db_wrapper.lock.acquire()
try:
cursor = await self.db_connection.execute(
"INSERT OR REPLACE INTO interested_puzzle_hashes VALUES (?, ?)", (puzzle_hash.hex(), wallet_id)
)
await cursor.close()
finally:
if not in_transaction:
await self.db_connection.commit()
self.db_wrapper.lock.release()
async def remove_interested_puzzle_hash(self, puzzle_hash: bytes32, in_transaction: bool = False) -> None:
if not in_transaction:
await self.db_wrapper.lock.acquire()
try:
cursor = await self.db_connection.execute(
"DELETE FROM interested_puzzle_hashes WHERE puzzle_hash=?", (puzzle_hash.hex(),)
)
await cursor.close()
finally:
if not in_transaction:
await self.db_connection.commit()
self.db_wrapper.lock.release()
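# Usage sketch (illustrative; DBWrapper is assumed to wrap an aiosqlite connection
# and expose `.db` plus an asyncio `lock`, as the methods above rely on):
#
#     connection = await aiosqlite.connect(":memory:")
#     store = await WalletInterestedStore.create(DBWrapper(connection))
#     await store.add_interested_coin_id(coin_id)
#     assert coin_id in await store.get_interested_coin_ids()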
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 14/01/2018 01:04 AM
# @Project : BioQueue
# @Author : Li Yao
# @File : gunzip.py
def get_sub_protocol(db_obj, protocol_parent, step_order_start=1):
steps = list()
steps.append(db_obj(software='gunzip',
parameter='{{InputFile}}',
parent=protocol_parent,
user_id=0,
hash='541df26aff8e4d054a57c7e3717e91ca',
step_order=step_order_start))
return step_order_start+len(steps), steps
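# Usage sketch (hypothetical `Step` model standing in for db_obj):
#   next_order, steps = get_sub_protocol(Step, protocol_parent=protocol, step_order_start=1)
# returns (2, [<gunzip step>]), so callers can chain further sub-protocols from next_order.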
|
import altair as alt
import pandas as pd
from .visitor import visit
from .aggregate import AGG_REPLACEMENTS
@visit.register(alt.JoinAggregateTransform)
def visit_joinaggregate(
transform: alt.JoinAggregateTransform, df: pd.DataFrame
) -> pd.DataFrame:
transform = transform.to_dict()
groupby = transform.get("groupby")
for aggregate in transform["joinaggregate"]:
op = aggregate["op"]
field = aggregate["field"]
col = aggregate["as"]
op = AGG_REPLACEMENTS.get(op, op)
if field == "*" and field not in df.columns:
field = df.columns[0]
if groupby is None:
df[col] = df[field].aggregate(op)
else:
result = df.groupby(groupby)[field].aggregate(op)
result.name = col
df = df.join(result, on=groupby)
return df
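if __name__ == "__main__":
    # Minimal check (illustrative): a per-group mean joined back onto every row.
    df = pd.DataFrame({"g": ["a", "a", "b"], "x": [1.0, 3.0, 5.0]})
    t = alt.JoinAggregateTransform.from_dict(
        {"joinaggregate": [{"op": "mean", "field": "x", "as": "x_mean"}],
         "groupby": ["g"]}
    )
    print(visit(t, df))  # x_mean becomes 2.0, 2.0, 5.0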
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from typing import Any, List, Optional
import attr
import numpy as np
from gym import spaces
from habitat.config import Config
from habitat.core.dataset import SceneState
from habitat.core.logging import logger
from habitat.core.registry import registry
from habitat.core.simulator import AgentState, Sensor, SensorTypes
from habitat.core.utils import not_none_validator
from habitat.tasks.nav.nav import (
NavigationEpisode,
NavigationGoal,
NavigationTask
)
try:
from habitat.datasets.object_nav.object_nav_dataset import (
ObjectNavDatasetV1,
)
except ImportError:
pass
task_cat2mpcat40 = [
3, # ('chair', 2, 0)
5, # ('table', 4, 1)
6, # ('picture', 5, 2)
7, # ('cabinet', 6, 3)
8, # ('cushion', 7, 4)
10, # ('sofa', 9, 5),
11, # ('bed', 10, 6)
13, # ('chest_of_drawers', 12, 7),
14, # ('plant', 13, 8)
15, # ('sink', 14, 9)
18, # ('toilet', 17, 10),
19, # ('stool', 18, 11),
20, # ('towel', 19, 12)
22, # ('tv_monitor', 21, 13)
23, # ('shower', 22, 14)
25, # ('bathtub', 24, 15)
26, # ('counter', 25, 16),
27, # ('fireplace', 26, 17),
33, # ('gym_equipment', 32, 18),
34, # ('seating', 33, 19),
38, # ('clothes', 37, 20),
43, # ('foodstuff', 42, 21),
44, # ('stationery', 43, 22),
45, # ('fruit', 44, 23),
46, # ('plaything', 45, 24),
47, # ('hand_tool', 46, 25),
48, # ('game_equipment', 47, 26),
49, # ('kitchenware', 48, 27)
]
mapping_mpcat40_to_goal21 = {
3: 1,
5: 2,
6: 3,
7: 4,
8: 5,
10: 6,
11: 7,
13: 8,
14: 9,
15: 10,
18: 11,
19: 12,
20: 13,
22: 14,
23: 15,
25: 16,
26: 17,
27: 18,
33: 19,
34: 20,
38: 21,
43: 22, # ('foodstuff', 42, task_cat: 21)
44: 28, # ('stationery', 43, task_cat: 22)
45: 26, # ('fruit', 44, task_cat: 23)
46: 25, # ('plaything', 45, task_cat: 24)
47: 24, # ('hand_tool', 46, task_cat: 25)
48: 23, # ('game_equipment', 47, task_cat: 26)
49: 27, # ('kitchenware', 48, task_cat: 27)
}
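# Example (illustrative): composing the two tables above maps a task category index
# to its 21-class goal id, e.g. task category 0 ('chair') -> mpcat40 id 3 ->
# goal21 id 1: mapping_mpcat40_to_goal21[task_cat2mpcat40[0]] == 1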
@attr.s(auto_attribs=True, kw_only=True)
class AgentStateSpec:
r"""Agent data specifications that capture states of agent and sensor in replay state.
"""
position: Optional[List[float]] = attr.ib(default=None)
rotation: Optional[List[float]] = attr.ib(default=None)
sensor_data: Optional[dict] = attr.ib(default=None)
@attr.s(auto_attribs=True, kw_only=True)
class ReplayActionSpec:
r"""Replay specifications that capture metadata associated with action.
"""
action: str = attr.ib(default=None, validator=not_none_validator)
agent_state: Optional[AgentStateSpec] = attr.ib(default=None)
@attr.s(auto_attribs=True, kw_only=True)
class ObjectGoalNavEpisode(NavigationEpisode):
r"""ObjectGoal Navigation Episode
    :param object_category: Category of the object
"""
object_category: Optional[str] = None
reference_replay: Optional[List[ReplayActionSpec]] = None
scene_state: Optional[List[SceneState]] = None
is_thda: Optional[bool] = False
scene_dataset: Optional[str] = "mp3d"
@property
def goals_key(self) -> str:
r"""The key to retrieve the goals"""
return f"{os.path.basename(self.scene_id)}_{self.object_category}"
@attr.s(auto_attribs=True)
class ObjectViewLocation:
r"""ObjectViewLocation provides information about a position around an object goal
usually that is navigable and the object is visible with specific agent
configuration that episode's dataset was created.
that is target for
navigation. That can be specify object_id, position and object
category. An important part for metrics calculation are view points that
describe success area for the navigation.
Args:
agent_state: navigable AgentState with a position and a rotation where
the object is visible.
iou: an intersection of a union of the object and a rectangle in the
center of view. This metric is used to evaluate how good is the object
view form current position. Higher iou means better view, iou equals
1.0 if whole object is inside of the rectangle and no pixel inside
the rectangle belongs to anything except the object.
"""
agent_state: AgentState
iou: Optional[float]
@attr.s(auto_attribs=True, kw_only=True)
class ObjectGoal(NavigationGoal):
r"""Object goal provides information about an object that is target for
navigation. That can be specify object_id, position and object
category. An important part for metrics calculation are view points that
describe success area for the navigation.
Args:
object_id: id that can be used to retrieve object from the semantic
scene annotation
object_name: name of the object
object_category: object category name usually similar to scene semantic
categories
room_id: id of a room where object is located, can be used to retrieve
room from the semantic scene annotation
room_name: name of the room, where object is located
view_points: navigable positions around the object with specified
proximity of the object surface used for navigation metrics calculation.
The object is visible from these positions.
"""
object_id: str = attr.ib(default=None, validator=not_none_validator)
object_name: Optional[str] = None
object_name_id: Optional[int] = None
object_category: Optional[str] = None
room_id: Optional[str] = None
room_name: Optional[str] = None
view_points: Optional[List[ObjectViewLocation]] = None
@registry.register_sensor
class ObjectGoalSensor(Sensor):
r"""A sensor for Object Goal specification as observations which is used in
ObjectGoal Navigation. The goal is expected to be specified by object_id or
semantic category id.
    For the agent in the simulator the forward direction is along negative-z.
In polar coordinate format the angle returned is azimuth to the goal.
Args:
sim: a reference to the simulator for calculating task observations.
config: a config for the ObjectGoalSensor sensor. Can contain field
            GOAL_SPEC that specifies which id to use for goal specification,
GOAL_SPEC_MAX_VAL the maximum object_id possible used for
observation space definition.
        dataset: an Object Goal navigation dataset that contains a mapping
            from category ids to text.
"""
cls_uuid: str = "objectgoal"
def __init__(
self,
sim,
config: Config,
dataset: "ObjectNavDatasetV1",
*args: Any,
**kwargs: Any,
):
self._sim = sim
self._dataset = dataset
super().__init__(config=config)
def _get_uuid(self, *args: Any, **kwargs: Any) -> str:
return self.cls_uuid
def _get_sensor_type(self, *args: Any, **kwargs: Any):
return SensorTypes.SEMANTIC
def _get_observation_space(self, *args: Any, **kwargs: Any):
sensor_shape = (1,)
max_value = self.config.GOAL_SPEC_MAX_VAL - 1
if self.config.GOAL_SPEC == "TASK_CATEGORY_ID":
max_value = max(
self._dataset.category_to_task_category_id.values()
)
logger.info("max object cat: {}".format(max_value))
logger.info("cats: {}".format(self._dataset.category_to_task_category_id.values()))
return spaces.Box(
low=0, high=max_value, shape=sensor_shape, dtype=np.int64
)
def get_observation(
self,
observations,
*args: Any,
episode: ObjectGoalNavEpisode,
**kwargs: Any,
) -> Optional[int]:
if len(episode.goals) == 0:
logger.error(
f"No goal specified for episode {episode.episode_id}."
)
return None
if not isinstance(episode.goals[0], ObjectGoal):
logger.error(
f"First goal should be ObjectGoal, episode {episode.episode_id}."
)
return None
category_name = episode.object_category
if self.config.GOAL_SPEC == "TASK_CATEGORY_ID":
return np.array(
[self._dataset.category_to_task_category_id[category_name]],
dtype=np.int64,
)
elif self.config.GOAL_SPEC == "OBJECT_ID":
obj_goal = episode.goals[0]
assert isinstance(obj_goal, ObjectGoal) # for type checking
return np.array([obj_goal.object_name_id], dtype=np.int64)
else:
raise RuntimeError(
"Wrong GOAL_SPEC specified for ObjectGoalSensor."
)
@registry.register_task(name="ObjectNav-v1")
class ObjectNavigationTask(NavigationTask):
r"""An Object Navigation Task class for a task specific methods.
Used to explicitly state a type of the task in config.
"""
_is_episode_active: bool
_prev_action: int
def __init__(self, **kwargs) -> None:
super().__init__(**kwargs)
self._is_episode_active = False
def overwrite_sim_config(self, sim_config, episode):
super().overwrite_sim_config(sim_config, episode)
sim_config.defrost()
sim_config.scene_state = episode.scene_state
sim_config.freeze()
return sim_config
def _check_episode_is_active(self, action, *args: Any, **kwargs: Any) -> bool:
return not getattr(self, "is_stop_called", False)
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.23.6
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from kubernetes_asyncio.client.configuration import Configuration
class V2beta1MetricSpec(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'container_resource': 'V2beta1ContainerResourceMetricSource',
'external': 'V2beta1ExternalMetricSource',
'object': 'V2beta1ObjectMetricSource',
'pods': 'V2beta1PodsMetricSource',
'resource': 'V2beta1ResourceMetricSource',
'type': 'str'
}
attribute_map = {
'container_resource': 'containerResource',
'external': 'external',
'object': 'object',
'pods': 'pods',
'resource': 'resource',
'type': 'type'
}
def __init__(self, container_resource=None, external=None, object=None, pods=None, resource=None, type=None, local_vars_configuration=None): # noqa: E501
"""V2beta1MetricSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._container_resource = None
self._external = None
self._object = None
self._pods = None
self._resource = None
self._type = None
self.discriminator = None
if container_resource is not None:
self.container_resource = container_resource
if external is not None:
self.external = external
if object is not None:
self.object = object
if pods is not None:
self.pods = pods
if resource is not None:
self.resource = resource
self.type = type
@property
def container_resource(self):
"""Gets the container_resource of this V2beta1MetricSpec. # noqa: E501
:return: The container_resource of this V2beta1MetricSpec. # noqa: E501
:rtype: V2beta1ContainerResourceMetricSource
"""
return self._container_resource
@container_resource.setter
def container_resource(self, container_resource):
"""Sets the container_resource of this V2beta1MetricSpec.
:param container_resource: The container_resource of this V2beta1MetricSpec. # noqa: E501
:type container_resource: V2beta1ContainerResourceMetricSource
"""
self._container_resource = container_resource
@property
def external(self):
"""Gets the external of this V2beta1MetricSpec. # noqa: E501
:return: The external of this V2beta1MetricSpec. # noqa: E501
:rtype: V2beta1ExternalMetricSource
"""
return self._external
@external.setter
def external(self, external):
"""Sets the external of this V2beta1MetricSpec.
:param external: The external of this V2beta1MetricSpec. # noqa: E501
:type external: V2beta1ExternalMetricSource
"""
self._external = external
@property
def object(self):
"""Gets the object of this V2beta1MetricSpec. # noqa: E501
:return: The object of this V2beta1MetricSpec. # noqa: E501
:rtype: V2beta1ObjectMetricSource
"""
return self._object
@object.setter
def object(self, object):
"""Sets the object of this V2beta1MetricSpec.
:param object: The object of this V2beta1MetricSpec. # noqa: E501
:type object: V2beta1ObjectMetricSource
"""
self._object = object
@property
def pods(self):
"""Gets the pods of this V2beta1MetricSpec. # noqa: E501
:return: The pods of this V2beta1MetricSpec. # noqa: E501
:rtype: V2beta1PodsMetricSource
"""
return self._pods
@pods.setter
def pods(self, pods):
"""Sets the pods of this V2beta1MetricSpec.
:param pods: The pods of this V2beta1MetricSpec. # noqa: E501
:type pods: V2beta1PodsMetricSource
"""
self._pods = pods
@property
def resource(self):
"""Gets the resource of this V2beta1MetricSpec. # noqa: E501
:return: The resource of this V2beta1MetricSpec. # noqa: E501
:rtype: V2beta1ResourceMetricSource
"""
return self._resource
@resource.setter
def resource(self, resource):
"""Sets the resource of this V2beta1MetricSpec.
:param resource: The resource of this V2beta1MetricSpec. # noqa: E501
:type resource: V2beta1ResourceMetricSource
"""
self._resource = resource
@property
def type(self):
"""Gets the type of this V2beta1MetricSpec. # noqa: E501
        type is the type of metric source. It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object. Note: \"ContainerResource\" type is available only when the feature-gate HPAContainerMetrics is enabled # noqa: E501
:return: The type of this V2beta1MetricSpec. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this V2beta1MetricSpec.
        type is the type of metric source. It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object. Note: \"ContainerResource\" type is available only when the feature-gate HPAContainerMetrics is enabled # noqa: E501
:param type: The type of this V2beta1MetricSpec. # noqa: E501
:type type: str
"""
if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V2beta1MetricSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V2beta1MetricSpec):
return True
return self.to_dict() != other.to_dict()
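if __name__ == "__main__":
    # Minimal construction example (illustrative): only `type` is required by the
    # validator above; the per-source fields stay None unless provided.
    spec = V2beta1MetricSpec(type="Resource")
    print(spec.to_dict()["type"])  # "Resource"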
|
_game_fields = [
'cover.image_id',
'first_release_date',
'genres.name',
'involved_companies.developer',
'involved_companies.publisher',
'involved_companies.company.country',
'involved_companies.company.name',
'name',
'platforms.name',
'screenshots.image_id',
'slug',
'summary',
'time_to_beat.normally',
'themes.name',
]
_search_fields = [
'first_release_date',
'name',
'slug',
]
_popular_fields = [
'cover.image_id',
'name',
'popularity',
]
_backdrop_fields = [
'name',
'screenshots.image_id',
'slug',
]
game_fields = ','.join(_game_fields)
search_fields = ','.join(_search_fields)
popular_fields = ','.join(_popular_fields)
backdrop_fields = ','.join(_backdrop_fields)
|
from flask import Flask, render_template, request, jsonify
from pyecharts import options as opts
from pyecharts.charts import Graph
import json
import redis
from flask_cors import CORS
r = redis.Redis(host="127.0.0.1", port=6379)
app = Flask(__name__)
CORS(app, supports_credentials=True)
@app.route("/dockermsg", methods=["POST"])
def dockerMsg():
data = request.json
host = data["host"]
datalist = data["data"]
# print(datalist)
r.set(host, json.dumps(datalist))
return "ok"
@app.route("/getdockermsg", methods=["GET"])
def getDockerMsg():
host = request.args.get("host")
docker = request.args.get("dockerdata")
dockers = json.loads(r.get(host))
tar = None
# print(dockers)
for doc in dockers:
print(doc["NetworkSettings"]["Networks"]["bridge"]["IPAddress"], docker)
if docker == doc["NetworkSettings"]["Networks"]["bridge"]["IPAddress"]:
tar = doc
break
print(tar)
return jsonify(tar)
def graph_base() -> Graph:
nodes = []
links = []
categories = [
{"symbol": "circle", 'name': 'ryu'},
{"symbol": "diamond", 'name': 'host'},
{"symbol": "roundRect", 'name': 'dockerdata'},
]
ryu = opts.GraphNode(name="RYU", symbol_size=40, category=0) # symbol='roundRect'
nodes.append(ryu)
doc_id = 1
    for key in r.keys():
        key = key.decode('utf-8')  # redis returns bytes keys; decode so node names serialize to JSON cleanly
        host = opts.GraphNode(name=key, symbol_size=30, category=1)  # symbol='diamond'
nodes.append(host)
ryuHostLink = opts.GraphLink(source="RYU", target=key)
links.append(ryuHostLink)
dockerlist = json.loads(r.get(key))
for doc in dockerlist:
docName = doc["Names"][0]
            docInfo = key + '/' + doc["NetworkSettings"]["Networks"]["bridge"]["IPAddress"]
new_node = opts.GraphNode(name=str(doc_id) + docName, symbol_size=20, category=2, value=docInfo)
nodes.append(new_node)
hostDocLink = opts.GraphLink(source=key, target=str(doc_id) + docName)
links.append(hostDocLink)
doc_id += 1
linestyle_opts = opts.LineStyleOpts(is_show=True,
width=2,
curve=0.1,
type_="solid",
color="orange",
)
g = (
Graph()
.add("", nodes, links, repulsion=1000, categories=categories,
label_opts=opts.LabelOpts(is_show=True, position="left", color='white'),
linestyle_opts=linestyle_opts)
.set_global_opts(title_opts=opts.TitleOpts(title=""))
)
return g
@app.route("/graphchart", methods=["GET"])
def get_bar_chart():
c = graph_base()
return c.dump_options_with_quotes()
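# Example requests (hypothetical host names and container IPs):
#   curl -X POST http://127.0.0.1:5000/dockermsg \
#        -H 'Content-Type: application/json' -d '{"host": "node1", "data": [...]}'
#   curl 'http://127.0.0.1:5000/getdockermsg?host=node1&dockerdata=172.17.0.2'
#   curl 'http://127.0.0.1:5000/graphchart'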
if __name__ == '__main__':
app.run(host="127.0.0.1", port=5000, debug=True)
|
# Generated by Django 3.0.3 on 2020-06-09 08:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0009_user_username'),
]
operations = [
migrations.CreateModel(
name='RequestLogs',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ip_address', models.CharField(blank=True, max_length=255)),
('user_id', models.IntegerField(blank=True)),
('method_type', models.CharField(blank=True, max_length=50)),
('request_path', models.CharField(blank=True, max_length=255)),
('response_code', models.CharField(blank=True, max_length=15)),
],
),
]
|