##################################################################
# Code for testing the variational Multi-Stage Generative Model. #
##################################################################

from __future__ import print_function, division

# basic python
import cPickle as pickle
from PIL import Image
import numpy as np
import numpy.random as npr
from collections import OrderedDict
import time

# theano business
import theano
import theano.tensor as T

# blocks stuff
from blocks.initialization import Constant, IsotropicGaussian, Orthogonal
from blocks.filter import VariableFilter
from blocks.graph import ComputationGraph
from blocks.roles import PARAMETER
from blocks.model import Model
from blocks.bricks import Tanh, Identity, Rectifier, MLP
from blocks.bricks.cost import BinaryCrossEntropy
from blocks.bricks.recurrent import SimpleRecurrent, LSTM

# phil's sweetness
import utils
from BlocksModels import *
from RAMBlocks import *
from SeqCondGenVariants import *
from DKCode import get_adam_updates, get_adadelta_updates
from load_data import load_udm, load_tfd, load_svhn_gray, load_binarized_mnist
from HelperFuncs import sample_data_masks, shift_and_scale_into_01, \
                        row_shuffle, to_fX, one_hot_np
from MotionRenderers import TrajectoryGenerator, ObjectPainter

RESULT_PATH = "RAM_TEST_RESULTS/"
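# NOTE: result_tag below uses this path as a filename prefix, so the
# RAM_TEST_RESULTS/ directory is assumed to already exist when the test runs.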

###########################################
###########################################
##                                       ##
## Test attention-based image "copying". ##
##                                       ##
###########################################
###########################################

def test_seq_cond_gen_copy(step_type='add', res_tag="AAA"):
    ##############################
    # File tag, for output stuff #
    ##############################
    result_tag = "{}TEST_{}".format(RESULT_PATH, res_tag)

    ##########################
    # Get some training data #
    ##########################
    rng = np.random.RandomState(1234)
    dataset = 'data/mnist.pkl.gz'
    datasets = load_udm(dataset, as_shared=False, zero_mean=False)
    Xtr = datasets[0][0]
    Xva = datasets[1][0]
    Xte = datasets[2][0]
    # merge the validation and training sets, and test on the test set
    #Xtr = np.concatenate((Xtr, Xva), axis=0)
    #Xva = Xte
    Xtr = to_fX(shift_and_scale_into_01(Xtr))
    Xva = to_fX(shift_and_scale_into_01(Xva))
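    # At this point Xtr/Xva hold flat (N, 28*28) images, rescaled into [0, 1]
    # and cast to theano's floatX by the helpers above.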

    # basic params
    batch_size = 128
    traj_len = 20
    im_dim = 28
    obs_dim = im_dim*im_dim

    def sample_batch(np_ary, bs=100):
        row_count = np_ary.shape[0]
        samp_idx = npr.randint(low=0, high=row_count, size=(bs,))
        xb = np_ary.take(samp_idx, axis=0)
        return xb
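    # e.g., xb = sample_batch(Xtr, bs=batch_size) draws a random minibatch by
    # sampling row indices uniformly with replacement.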

    ############################################################
    # Setup some parameters for the Iterative Refinement Model #
    ############################################################
    total_steps = traj_len
    init_steps = 5
    exit_rate = 0.1
    nll_weight = 0.0
    x_dim = obs_dim
    y_dim = obs_dim
    z_dim = 128
    att_spec_dim = 5
    rnn_dim = 512
    mlp_dim = 512
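    # x_dim/y_dim: flattened 28x28 inputs and prediction targets; z_dim: per-step
    # latent dimension; att_spec_dim: size of the attention specification (likely
    # the usual center/scale/intensity parameters); rnn_dim/mlp_dim: sizes of the
    # recurrent and MLP components of the model.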

    def visualize_attention(result, pre_tag="AAA", post_tag="AAA"):
        seq_len = result[0].shape[0]
        samp_count = result[0].shape[1]
        # get generated predictions