# (continuation of the trajectory-visualization helper; its `def` line sits
#  above this excerpt, so the body keeps one level of indentation)
    # get the observation trajectories produced along each sampled sequence
    x_samps = np.zeros((seq_len*samp_count, obs_dim))
    idx = 0
    for s1 in range(samp_count):
        for s2 in range(seq_len):
            x_samps[idx] = result[0][s2,s1,:]
            idx += 1
    file_name = "{0:s}_traj_xs_{1:s}.png".format(pre_tag, post_tag)
    utils.visualize_samples(x_samps, file_name, num_rows=samp_count)
    # get sequential attention maps
    seq_samps = np.zeros((seq_len*samp_count, obs_dim))
    idx = 0
    for s1 in range(samp_count):
        for s2 in range(seq_len):
            seq_samps[idx] = result[1][s2,s1,:]
            idx += 1
    file_name = "{0:s}_traj_att_maps_{1:s}.png".format(pre_tag, post_tag)
    utils.visualize_samples(seq_samps, file_name, num_rows=samp_count)
    # get sequential attention maps (read out values)
    seq_samps = np.zeros((seq_len*samp_count, obs_dim))
    idx = 0
    for s1 in range(samp_count):
        for s2 in range(seq_len):
            seq_samps[idx] = result[2][s2,s1,:]
            idx += 1
    file_name = "{0:s}_traj_read_outs_{1:s}.png".format(pre_tag, post_tag)
    utils.visualize_samples(seq_samps, file_name, num_rows=samp_count)
    # get original input sequences
    seq_samps = np.zeros((seq_len*samp_count, obs_dim))
    idx = 0
    for s1 in range(samp_count):
        for s2 in range(seq_len):
            seq_samps[idx] = result[3][s2,s1,:]
            idx += 1
    file_name = "{0:s}_traj_xs_in_{1:s}.png".format(pre_tag, post_tag)
    utils.visualize_samples(seq_samps, file_name, num_rows=samp_count)
    return
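# Aside: each idx loop above is a transpose-and-reshape in disguise. A
# vectorized NumPy equivalent (a sketch; `flatten_trajectories` is not in the
# repo, and it assumes result[k] has shape (seq_len, samp_count, obs_dim)):
def flatten_trajectories(arr):
    # reorder to (samp_count, seq_len, obs_dim) so each sample's steps are
    # contiguous rows, then collapse the first two axes
    return arr.transpose(1, 0, 2).reshape(-1, arr.shape[2])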
rnninits = {
    'weights_init': IsotropicGaussian(0.01),
    'biases_init': Constant(0.),
}
inits = {
    'weights_init': IsotropicGaussian(0.01),
    'biases_init': Constant(0.),
}
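# Note: these dicts are splatted into every brick constructed below (e.g.
# MLP(..., **inits)). In Blocks, `weights_init` and `biases_init` are standard
# brick kwargs, so each weight matrix is drawn from N(0, 0.01^2) and every
# bias starts at zero when the brick is initialized.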
# module for doing local 2d read defined by an attention specification
img_scale = 1.0  # image coords will range over [-img_scale...img_scale]
read_N = 2       # use NxN grid for reader
reader_mlp = FovAttentionReader2d(x_dim=obs_dim,
                                  width=im_dim, height=im_dim, N=read_N,
                                  img_scale=img_scale, att_scale=0.5,
                                  **inits)
read_dim = reader_mlp.read_dim  # total number of "pixels" read by reader
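# For intuition, FovAttentionReader2d belongs to the DRAW-style family of
# differentiable readers: an N x N grid of Gaussian filters is placed on the
# image by an attention spec. The sketch below shows that family's core math;
# the exact spec layout used by the repo's foveated reader is an assumption,
# and `gaussian_glimpse` is an illustrative helper, not repo code.
import numpy as np

def gaussian_glimpse(img, cx, cy, delta, sigma, N):
    # img: (H, W) array; (cx, cy): glimpse center in pixel coords;
    # delta: grid stride; sigma: filter width; returns an (N, N) read.
    H, W = img.shape
    offsets = (np.arange(N) - N / 2.0 + 0.5) * delta
    mu_x = cx + offsets  # x-centers of the grid columns
    mu_y = cy + offsets  # y-centers of the grid rows
    Fx = np.exp(-(np.arange(W)[None, :] - mu_x[:, None]) ** 2 / (2 * sigma ** 2))
    Fy = np.exp(-(np.arange(H)[None, :] - mu_y[:, None]) ** 2 / (2 * sigma ** 2))
    Fx /= np.maximum(Fx.sum(axis=1, keepdims=True), 1e-8)  # normalize filters
    Fy /= np.maximum(Fy.sum(axis=1, keepdims=True), 1e-8)
    return Fy @ img @ Fx.T  # (N, H) @ (H, W) @ (W, N) -> (N, N) glimpse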
# MLP for updating belief state based on con_rnn
writer_mlp = MLP([None, None], [rnn_dim, mlp_dim, obs_dim],
                 name="writer_mlp", **inits)
# MLPs for processing inputs to the LSTMs
con_mlp_in = MLP([Identity()],
                 [z_dim, 4*rnn_dim],
                 name="con_mlp_in", **inits)
var_mlp_in = MLP([Identity()],
                 [(read_dim + read_dim + att_spec_dim + rnn_dim), 4*rnn_dim],
                 name="var_mlp_in", **inits)
gen_mlp_in = MLP([Identity()],
                 [(read_dim + att_spec_dim + rnn_dim), 4*rnn_dim],
                 name="gen_mlp_in", **inits)
# CondNets for turning LSTM outputs into conditionals: over z_gen for the
# generator and variational modules, and over the attention spec for the
# controller
con_mlp_out = CondNet([], [rnn_dim, att_spec_dim],
                      name="con_mlp_out", **inits)
gen_mlp_out = CondNet([], [rnn_dim, z_dim], name="gen_mlp_out", **inits)
var_mlp_out = CondNet([], [rnn_dim, z_dim], name="var_mlp_out", **inits)
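# The CondNet bricks above parameterize diagonal Gaussian conditionals (a mean
# and a log-variance per output dimension). Sampling from such a conditional
# uses the standard reparameterization trick -- a sketch, assuming NumPy
# arrays `mean` and `logvar` (names are illustrative, not from the repo):
def sample_gaussian(mean, logvar, rng=np.random):
    # z = mean + sigma * eps, eps ~ N(0, I); differentiable w.r.t. mean/logvar
    return mean + np.exp(0.5 * logvar) * rng.standard_normal(mean.shape)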
# LSTMs for the controller, generator, and variational (inference) modules
con_rnn = BiasedLSTM(dim=rnn_dim, ig_bias=2.0, fg_bias=2.0,
                     name="con_rnn", **rnninits)
gen_rnn = BiasedLSTM(dim=rnn_dim, ig_bias=2.0, fg_bias=2.0,
                     name="gen_rnn", **rnninits)
var_rnn = BiasedLSTM(dim=rnn_dim, ig_bias=2.0, fg_bias=2.0,
                     name="var_rnn", **rnninits)
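# Why the input MLPs emit 4*rnn_dim values, and what ig_bias/fg_bias do: the
# packed vector holds the four gate pre-activations (input, forget, cell
# candidate, output), each rnn_dim wide, and the positive biases push the
# input/forget gates toward "open" early in training. A minimal NumPy sketch
# of one step (illustrative only; BiasedLSTM's real implementation lives in
# the repo, and W_h here stands in for its recurrent weights):
def lstm_step_sketch(packed, h_prev, c_prev, W_h, ig_bias=2.0, fg_bias=2.0):
    def sigmoid(x):
        return 1.0 / (1.0 + np.exp(-x))
    pre = packed + h_prev @ W_h             # add the recurrent contribution
    i, f, g, o = np.split(pre, 4, axis=-1)  # one rnn_dim slice per gate
    i = sigmoid(i + ig_bias)                # input gate, biased open
    f = sigmoid(f + fg_bias)                # forget gate, biased open
    g = np.tanh(g)                          # candidate cell update
    o = sigmoid(o)                          # output gate
    c_new = f * c_prev + i * g
    h_new = o * np.tanh(c_new)
    return h_new, c_new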
# (the source listing breaks off after gen_mlp_out; the keyword arguments
#  from gen_rnn onward are inferred from the bricks constructed above)
SCG = SeqCondGenRAM(
    x_and_y_are_seqs=False,
    total_steps=total_steps,
    init_steps=init_steps,
    exit_rate=exit_rate,
    nll_weight=nll_weight,
    step_type=step_type,
    x_dim=obs_dim,
    y_dim=obs_dim,
    reader_mlp=reader_mlp,
    writer_mlp=writer_mlp,
    con_mlp_in=con_mlp_in,
    con_mlp_out=con_mlp_out,
    con_rnn=con_rnn,
    gen_mlp_in=gen_mlp_in,
    gen_mlp_out=gen_mlp_out,
    gen_rnn=gen_rnn,
    var_mlp_in=var_mlp_in,
    var_mlp_out=var_mlp_out,
    var_rnn=var_rnn)