text
stringlengths
1
93.6k
def test_seq_cond_gen_impute(step_type='add', res_tag="AAA"):
##############################
# File tag, for output stuff #
##############################
result_tag = "{}TEST_{}".format(RESULT_PATH, res_tag)
##########################
# Get some training data #
##########################
rng = np.random.RandomState(1234)
dataset = 'data/mnist.pkl.gz'
datasets = load_udm(dataset, as_shared=False, zero_mean=False)
Xtr = datasets[0][0]
Xva = datasets[1][0]
Xte = datasets[2][0]
# merge validation set and training set, and test on test set.
#Xtr = np.concatenate((Xtr, Xva), axis=0)
#Xva = Xte
Xtr = to_fX(shift_and_scale_into_01(Xtr))
Xva = to_fX(shift_and_scale_into_01(Xva))
# basic params
drop_prob = 0.0
occ_dim = 16
batch_size = 128
traj_len = 15
im_dim = 28
obs_dim = im_dim*im_dim
def sample_batch(np_ary, bs=100):
    """Return a random batch of `bs` rows drawn (with replacement) from `np_ary`.

    Rows are selected along axis 0, so the result has shape (bs,) + np_ary.shape[1:].
    """
    n_rows = np_ary.shape[0]
    draw = npr.randint(low=0, high=n_rows, size=(bs,))
    # fancy indexing on axis 0 — equivalent to np_ary.take(draw, axis=0)
    return np_ary[draw]
############################################################
# Setup some parameters for the Iterative Refinement Model #
############################################################
total_steps = traj_len
init_steps = 3
exit_rate = 0.2
nll_weight = 0.0
x_dim = obs_dim
y_dim = obs_dim
z_dim = 128
att_spec_dim = 5
rnn_dim = 512
mlp_dim = 512
def visualize_attention(result, pre_tag="AAA", post_tag="AAA"):
    """Write PNG visualizations of a sampled trajectory bundle.

    `result` is indexed as result[k][step, sample, :]; four images are
    written: generated xs, attention maps, attention read-outs, and the
    original input sequences. File names are built from pre_tag/post_tag.
    NOTE(review): assumes each result[k] has shape
    (seq_len, samp_count, obs_dim) — confirm against the producer.
    """
    steps = result[0].shape[0]
    n_samps = result[0].shape[1]

    def _flatten(seq_ary):
        # Lay trajectories out sample-major: each sample's full trajectory
        # becomes a contiguous run of `steps` rows.
        flat = np.zeros((steps * n_samps, obs_dim))
        row = 0
        for samp in range(n_samps):
            for step in range(steps):
                flat[row] = seq_ary[step, samp, :]
                row += 1
        return flat

    # generated predictions
    file_name = "{0:s}_traj_xs_{1:s}.png".format(pre_tag, post_tag)
    utils.visualize_samples(_flatten(result[0]), file_name, num_rows=n_samps)
    # sequential attention maps
    file_name = "{0:s}_traj_att_maps_{1:s}.png".format(pre_tag, post_tag)
    utils.visualize_samples(_flatten(result[1]), file_name, num_rows=n_samps)
    # sequential attention maps (read-out values)
    file_name = "{0:s}_traj_read_outs_{1:s}.png".format(pre_tag, post_tag)
    utils.visualize_samples(_flatten(result[2]), file_name, num_rows=n_samps)
    # original input sequences
    file_name = "{0:s}_traj_xs_in_{1:s}.png".format(pre_tag, post_tag)
    utils.visualize_samples(_flatten(result[3]), file_name, num_rows=n_samps)
    return
rnninits = {
'weights_init': IsotropicGaussian(0.01),
'biases_init': Constant(0.),
}
inits = {
'weights_init': IsotropicGaussian(0.01),
'biases_init': Constant(0.),
}