# (non-Python residue from dataset extraction, preserved as comments)
# text
# stringlengths
# 1
# 93.6k
# module for doing local 2d read defined by an attention specification
img_scale = 1.0 # image coords will range over [-img_scale...img_scale]
read_N = 2 # use NxN grid for reader
# Foveated attention reader over (im_dim x im_dim) inputs of size obs_dim.
# att_scale=0.5 scales the attention window relative to img_scale
# (presumably shrinking the fovea to half the image scale -- confirm
# against FovAttentionReader2d).
reader_mlp = FovAttentionReader2d(x_dim=obs_dim,
width=im_dim, height=im_dim, N=read_N,
img_scale=img_scale, att_scale=0.5,
**inits)
read_dim = reader_mlp.read_dim # total number of "pixels" read by reader
# MLP for updating belief state based on con_rnn
# layer sizes: rnn_dim -> mlp_dim -> obs_dim; activations [None, None]
# (NOTE(review): presumably None means linear layers -- confirm MLP's
# handling of None activations)
writer_mlp = MLP([None, None], [rnn_dim, mlp_dim, obs_dim], \
name="writer_mlp", **inits)
# mlps for processing inputs to LSTMs
# each *_mlp_in projects its inputs to a 4*rnn_dim vector -- presumably
# the stacked gate pre-activations for an LSTM with rnn_dim units
con_mlp_in = MLP([Identity()], \
[ z_dim, 4*rnn_dim], \
name="con_mlp_in", **inits)
# variational net input: two read-sized vectors plus the attention spec
# and an rnn_dim state vector, concatenated
var_mlp_in = MLP([Identity()], \
[(read_dim + read_dim + att_spec_dim + rnn_dim), 4*rnn_dim], \
name="var_mlp_in", **inits)
# generator net input: one read-sized vector plus attention spec and state
gen_mlp_in = MLP([Identity()], \
[ (read_dim + att_spec_dim + rnn_dim), 4*rnn_dim], \
name="gen_mlp_in", **inits)
# mlps for turning LSTM outputs into conditionals over z_gen
# each CondNet maps an rnn_dim state to the parameters of a conditional
# distribution (over att_spec_dim for the controller, z_dim for gen/var)
con_mlp_out = CondNet([], [rnn_dim, att_spec_dim], \
name="con_mlp_out", **inits)
gen_mlp_out = CondNet([], [rnn_dim, z_dim], name="gen_mlp_out", **inits)
var_mlp_out = CondNet([], [rnn_dim, z_dim], name="var_mlp_out", **inits)
# LSTMs for the actual LSTMs (obviously, perhaps)
# ig_bias/fg_bias=2.0 set positive input/forget gate biases (presumably to
# encourage remembering/admitting information early in training -- confirm
# against BiasedLSTM)
con_rnn = BiasedLSTM(dim=rnn_dim, ig_bias=2.0, fg_bias=2.0, \
name="con_rnn", **rnninits)
gen_rnn = BiasedLSTM(dim=rnn_dim, ig_bias=2.0, fg_bias=2.0, \
name="gen_rnn", **rnninits)
var_rnn = BiasedLSTM(dim=rnn_dim, ig_bias=2.0, fg_bias=2.0, \
name="var_rnn", **rnninits)
# assemble the full sequential conditional generative model from the
# component bricks constructed above
SCG = SeqCondGenIMP(
x_and_y_are_seqs=False, # inputs/targets are static, not sequences
total_steps=total_steps,
init_steps=init_steps,
exit_rate=exit_rate,
nll_weight=nll_weight,
step_type=step_type,
x_dim=obs_dim,
y_dim=obs_dim, # input and target share the same dimensionality
reader_mlp=reader_mlp,
writer_mlp=writer_mlp,
con_mlp_in=con_mlp_in,
con_mlp_out=con_mlp_out,
con_rnn=con_rnn,
gen_mlp_in=gen_mlp_in,
gen_mlp_out=gen_mlp_out,
gen_rnn=gen_rnn,
var_mlp_in=var_mlp_in,
var_mlp_out=var_mlp_out,
var_rnn=var_rnn,
att_noise=0.1) # noise on attention placement -- semantics per SeqCondGenIMP
SCG.initialize()
# time the Theano graph compilation (reported below in minutes)
compile_start_time = time.time()
# build the attention trajectory sampler
SCG.build_attention_funcs()
# quick test of attention trajectory sampler
Xb = sample_batch(Xtr, bs=32) # minibatch of 32 training examples
# random occlusion/dropout masks for the batch -- mask semantics defined
# by sample_data_masks (not visible in this chunk)
Mb = sample_data_masks(Xb, drop_prob=drop_prob, occ_dim=occ_dim)
result = SCG.sample_attention(Xb, Mb)
visualize_attention(result, pre_tag=result_tag, post_tag="b0")
# build the main model functions (i.e. training and cost functions)
SCG.build_model_funcs()
compile_end_time = time.time()
compile_minutes = (compile_end_time - compile_start_time) / 60.0
print("THEANO COMPILE TIME (MIN): {}".format(compile_minutes))
# TEST SAVE/LOAD FUNCTIONALITY
# roundtrip the parameters through disk to catch serialization breakage
# before spending time on training
param_save_file = "{}_params.pkl".format(result_tag)
SCG.save_model_params(param_save_file)
SCG.load_model_params(param_save_file)
################################################################
# Apply some updates, to check that they aren't totally broken #
################################################################
print("Beginning to train the model...")
# Open the results log in text mode: the results file holds formatted str
# output, and binary mode ('wb', as originally written) raises TypeError
# on str writes under Python 3. 'w' behaves identically for text on
# Python 2 as well.
out_file = open("{}_results.txt".format(result_tag), 'w')
out_file.flush()
# rolling window of the 10 most recent per-update costs
costs = [0. for i in range(10)]
# base SGD hyperparameters; both are warm-up-scaled inside the loop below
learn_rate = 0.0001
momentum = 0.95
# main SGD training loop (loop body continues beyond this chunk)
for i in range(250000):
    # warm-up schedules: linearly ramp the learning rate to full strength
    # over the first 5k updates and the momentum over the first 10k
    lr_scale = min(1.0, ((i+1) / 5000.0))
    mom_scale = min(1.0, ((i+1) / 10000.0))
    # after warm-up, decay the base learning rate by 5% every 10k updates
    if (((i + 1) % 10000) == 0):
        learn_rate = learn_rate * 0.95
    # set sgd and objective function hyperparams for this update
    SCG.set_sgd_params(lr=lr_scale*learn_rate, mom_1=mom_scale*momentum, mom_2=0.99)