General optimization function for Theano. Parameters: params - parameters; gradients - gradients; config - training config. Returns: Theano updates. :type config: deepy.TrainerConfig or dict
def optimize_updates(params, gradients, config=None, shapes=None):
    """
    General optimization function for Theano.
    Parameters:
        params - parameters
        gradients - gradients
        config - training config
    Returns:
        Theano updates
    :type config: deepy.TrainerConfig or dict
    """
    if config and isinstance(config, dict):
        config = TrainerConfig(config)

    # Clipping
    if config:
        clip_value = config.get("gradient_clipping", None)
        if clip_value:
            clip_constant = T.constant(clip_value, dtype=FLOATX)
            if config.avoid_compute_embed_norm:
                grad_norm = multiple_l2_norm([t[1] for t in zip(params, gradients)
                                              if not t[0].name.startswith("W_embed")])
            else:
                grad_norm = multiple_l2_norm(gradients)
            isnan = T.or_(T.isnan(grad_norm), T.isinf(grad_norm))
            multiplier = ifelse(grad_norm < clip_constant,
                                T.constant(1., dtype=FLOATX), clip_constant / (grad_norm + EPSILON))

            # Clip
            clipped_gradients = []
            for param, g in zip(params, gradients):
                g = multiplier * g
                if config.avoid_nan:
                    g = T.switch(isnan, np.float32(0.1) * param, g)
                if config.gradient_tolerance:
                    g = ifelse(grad_norm > config.gradient_tolerance, T.zeros_like(g) + EPSILON, g)
                clipped_gradients.append(g)
            gradients = clipped_gradients

    # Regularization
    if config and config.weight_l2:
        regularized_gradients = []
        for param, grad in zip(params, gradients):
            grad = grad + (2 * config.weight_l2 * param)
            regularized_gradients.append(grad)
        gradients = regularized_gradients

    # Avoid NaN without computing the norm.
    # This is not recommended.
    if config and config.avoid_nan and not config.gradient_clipping:
        logging.info("avoid NaN gradients")
        new_gradients = []
        for grad in gradients:
            new_grad = ifelse(T.isnan(grad).any(), T.zeros_like(grad) + EPSILON, grad)
            new_gradients.append(new_grad)
        gradients = new_gradients

    # Find method
    method = "SGD"
    if config:
        method = config.get("method", method).upper()
    # Get function
    func = None
    if method in ["SGD", "ADAGRAD", "ADADELTA", "FINETUNING_ADAGRAD"]:
        from cores.ada_family import ada_family_core
        func = ada_family_core
    elif method == "ADAM":
        from cores.adam import adam_core
        func = adam_core
    elif method == "RMSPROP":
        from cores.rmsprop import rmsprop_core
        func = rmsprop_core
    elif method == "MOMENTUM":
        from cores.momentum import momentum_core
        func = momentum_core

    if not func:
        raise NotImplementedError("method '%s' is not supported" % method)

    logging.info("optimize method=%s parameters=%s" % (method, str(params)))

    free_parameters = []
    return_vals = wrap_core(func, config, params, gradients)
    if type(return_vals) == list and type(return_vals[0]) == list:
        updates, free_parameters = return_vals
    else:
        updates = return_vals

    # No free param recording
    if config and not config.record_free_params:
        free_parameters = []

    # Weight bound (guard config here as well, since config may be None)
    if config and config.weight_bound:
        logging.info("apply weight bound of %.2f" % config.weight_bound)
        new_updates = []
        for param, update_value in updates:
            bounded_value = (update_value * (T.abs_(update_value) <= config.weight_bound) +
                             config.weight_bound * (update_value > config.weight_bound) +
                             -config.weight_bound * (update_value < -config.weight_bound))
            new_updates.append((param, bounded_value))
        updates = new_updates
    return updates, free_parameters
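For intuition, the clipping branch above rescales every gradient by one factor computed from the global L2 norm. A minimal numpy sketch of that rule (the helper name clip_by_global_norm is illustrative, not part of deepy):
import numpy as np

def clip_by_global_norm(gradients, clip_value, epsilon=1e-7):
    # Global L2 norm over all gradient arrays.
    norm = np.sqrt(sum((g ** 2).sum() for g in gradients))
    # Rescale only when the norm exceeds the threshold.
    multiplier = 1.0 if norm < clip_value else clip_value / (norm + epsilon)
    return [g * multiplier for g in gradients]

grads = [np.ones((3, 3)) * 10, np.ones(5) * 10]
clipped = clip_by_global_norm(grads, clip_value=5.0)
print(np.sqrt(sum((g ** 2).sum() for g in clipped)))  # ~5.0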
Create an optimizing function that receives gradients. Parameters: params - parameters; config - training configuration. Returns: an updating function that receives gradients.
def optimize_function(params, config=None):
    """
    Create an optimizing function that receives gradients.
    Parameters:
        params - parameters
        config - training configuration
    Returns:
        an updating function that receives gradients
    """
    gs = [dim_to_var(p.ndim) for p in params]
    updates, _ = optimize_updates(params, gs, config)
    return theano.function(gs, [], updates=updates)
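A hedged usage sketch: the compiled function takes one gradient array per parameter and applies a single update step (names below are illustrative):
# Assumed setup: `params` are Theano shared variables of a compiled model.
update_fn = optimize_function(params, config)
grad_values = compute_gradients_somehow()  # hypothetical helper returning numpy arrays, one per param
update_fn(*grad_values)                    # applies one optimization step in place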
Return the updates used in training.
def _learning_updates(self):
    """
    Return the updates used in training.
    """
    params = self.training_params()
    gradients = self.get_gradients(params)
    return self.optimization_updates(params, gradients)
Get parameters to be optimized.
def training_params(self):
    """
    Get parameters to be optimized.
    """
    params = self.network.parameters
    # Freeze parameters
    if self.config.fixed_parameters:
        logging.info("fixed parameters: %s" % ", ".join(map(str, self.config.fixed_parameters)))
        params = [p for p in params if p not in self.config.fixed_parameters]
    return params
Return updates from optimization.
def optimization_updates(self, params, gradients):
    """
    Return updates from optimization.
    """
    updates, free_parameters = optimize_updates(params, gradients, self.config)
    self.network.free_parameters.extend(free_parameters)
    logging.info("Added %d free parameters for optimization" % len(free_parameters))
    return updates
Get the learning function.
def learning_function(self):
    """
    Get the learning function.
    """
    network_updates = list(self.network.updates) + list(self.network.training_updates)
    learning_updates = list(self._learning_updates())
    update_list = network_updates + learning_updates
    logging.info("network updates: %s" % " ".join(map(str, [x[0] for x in network_updates])))
    logging.info("learning updates: %s" % " ".join(map(str, [x[0] for x in learning_updates])))
    variables = self.network.input_variables + self.network.target_variables
    givens = None
    return theano.function(
        variables,
        map(lambda v: theano.Out(v, borrow=True), self.training_variables),
        updates=update_list, allow_input_downcast=True,
        mode=self.config.get("theano_mode", None),
        givens=givens)
Parameters: x_t - 28x28 image; l_p - 2x1 focus vector. Returns: 4x12 matrix.
def _glimpse_sensor(self, x_t, l_p):
    """
    Parameters:
        x_t - 28x28 image
        l_p - 2x1 focus vector
    Returns:
        4x12 matrix
    """
    # Turn l_p into the top-left point of the rectangle
    l_p = l_p * 14 + 14 - 2
    l_p = T.cast(T.round(l_p), "int32")
    l_p = l_p * (l_p >= 0)
    l_p = l_p * (l_p < 24) + (l_p >= 24) * 23
    l_p2 = l_p - 2
    l_p2 = l_p2 * (l_p2 >= 0)
    l_p2 = l_p2 * (l_p2 < 20) + (l_p2 >= 20) * 19
    l_p3 = l_p - 6
    l_p3 = l_p3 * (l_p3 >= 0)
    l_p3 = l_p3 * (l_p3 < 16) + (l_p3 >= 16) * 15
    glimpse_1 = x_t[l_p[0]: l_p[0] + 4][:, l_p[1]: l_p[1] + 4]
    glimpse_2 = x_t[l_p2[0]: l_p2[0] + 8][:, l_p2[1]: l_p2[1] + 8]
    glimpse_2 = theano.tensor.signal.downsample.max_pool_2d(glimpse_2, (2, 2))
    glimpse_3 = x_t[l_p3[0]: l_p3[0] + 16][:, l_p3[1]: l_p3[1] + 16]
    glimpse_3 = theano.tensor.signal.downsample.max_pool_2d(glimpse_3, (4, 4))
    return T.concatenate([glimpse_1, glimpse_2, glimpse_3])
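The sensor crops three concentric patches (4x4, 8x8, 16x16) around the focus point and pools the larger two down to 4x4 before concatenating. A plain-numpy sketch of the same idea (hypothetical helper; clamping and pooling simplified):
import numpy as np

def glimpse(image, row, col):
    # Crop three concentric patches and pool each down to 4x4.
    patches = []
    for size, pool in [(4, 1), (8, 2), (16, 4)]:
        r = min(max(row - size // 2, 0), image.shape[0] - size)
        c = min(max(col - size // 2, 0), image.shape[1] - size)
        p = image[r:r + size, c:c + size]
        # Max-pool over pool x pool blocks via reshaping.
        p = p.reshape(size // pool, pool, size // pool, pool).max(axis=(1, 3))
        patches.append(p)
    return np.concatenate(patches)  # shape (12, 4)

print(glimpse(np.random.rand(28, 28), 14, 14).shape)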
Parameters: x_t - 28x28 image; l_p - 2x1 focus vector. Returns: 7x14 matrix.
def _refined_glimpse_sensor(self, x_t, l_p):
    """
    Parameters:
        x_t - 28x28 image
        l_p - 2x1 focus vector
    Returns:
        7x14 matrix
    """
    # Turn l_p into the top-left point of the rectangle
    l_p = l_p * 14 + 14 - 4
    l_p = T.cast(T.round(l_p), "int32")
    l_p = l_p * (l_p >= 0)
    l_p = l_p * (l_p < 21) + (l_p >= 21) * 20
    glimpse_1 = x_t[l_p[0]: l_p[0] + 7][:, l_p[1]: l_p[1] + 7]
    # glimpse_2 = theano.tensor.signal.downsample.max_pool_2d(x_t, (4, 4))
    # return T.concatenate([glimpse_1, glimpse_2])
    return glimpse_1
Parameters: x_t - 28x28 image; l_p - 2x1 focus vector. Returns: 4x12 matrix.
def _glimpse_network(self, x_t, l_p):
    """
    Parameters:
        x_t - 28x28 image
        l_p - 2x1 focus vector
    Returns:
        4x12 matrix
    """
    sensor_output = self._refined_glimpse_sensor(x_t, l_p)
    sensor_output = T.flatten(sensor_output)
    h_g = self._relu(T.dot(sensor_output, self.W_g0))
    h_l = self._relu(T.dot(l_p, self.W_g1))
    g = self._relu(T.dot(h_g, self.W_g2_hg) + T.dot(h_l, self.W_g2_hl))
    return g
Parameters: h_t - 256x1 vector. Returns: 10x1 vector.
def _action_network(self, h_t):
    """
    Parameters:
        h_t - 256x1 vector
    Returns:
        10x1 vector
    """
    z = self._relu(T.dot(h_t, self.W_a) + self.B_a)
    return self._softmax(z)
Get baseline model. Parameters: model - model path. Returns: network.
def get_network(model=None, std=0.005, disable_reinforce=False, random_glimpse=False):
    """
    Get baseline model.
    Parameters:
        model - model path
    Returns:
        network
    """
    network = NeuralClassifier(input_dim=28 * 28)
    network.stack_layer(FirstGlimpseLayer(std=std, disable_reinforce=disable_reinforce,
                                          random_glimpse=random_glimpse))
    if model and os.path.exists(model):
        network.load_params(model)
    return network
Compute first glimpse position using down-sampled image.
def _first_glimpse_sensor(self, x_t):
    """
    Compute first glimpse position using down-sampled image.
    """
    downsampled_img = theano.tensor.signal.downsample.max_pool_2d(x_t, (4, 4))
    downsampled_img = downsampled_img.flatten()
    first_l = T.dot(downsampled_img, self.W_f)
    if self.disable_reinforce:
        wf_grad = self.W_f
        if self.random_glimpse:
            first_l = self.srng.uniform((2,), low=-1.7, high=1.7)
    else:
        sampled_l_t = self._sample_gaussian(first_l, self.cov)
        sampled_pdf = self._multi_gaussian_pdf(disconnected_grad(sampled_l_t), first_l)
        wf_grad = T.grad(T.log(sampled_pdf), self.W_f)
        first_l = sampled_l_t
    return first_l, wf_grad
Parameters: x_t - 28x28 image; l_p - 2x1 focus vector; h_p - 256x1 vector. Returns: h_t, 256x1 vector.
def _core_network(self, l_p, h_p, x_t):
    """
    Parameters:
        x_t - 28x28 image
        l_p - 2x1 focus vector
        h_p - 256x1 vector
    Returns:
        h_t, 256x1 vector
    """
    g_t = self._glimpse_network(x_t, l_p)
    h_t = self._tanh(T.dot(g_t, self.W_h_g) + T.dot(h_p, self.W_h) + self.B_h)
    l_t = self._location_network(h_t)
    if not self.disable_reinforce:
        sampled_l_t = self._sample_gaussian(l_t, self.cov)
        sampled_pdf = self._multi_gaussian_pdf(disconnected_grad(sampled_l_t), l_t)
        wl_grad = T.grad(T.log(sampled_pdf), self.W_l)
    else:
        sampled_l_t = l_t
        wl_grad = self.W_l
    if self.random_glimpse and self.disable_reinforce:
        sampled_l_t = self.srng.uniform((2,), low=-1.7, high=1.7)
    a_t = self._action_network(h_t)
    return sampled_l_t, h_t, a_t, wl_grad
All code that creates parameters should be put into the 'setup' function.
def prepare(self):
    """
    All code that creates parameters should be put into the 'setup' function.
    """
    self.output_dim = 10
    self.encoder = Chain(self.input_dim).stack(Dense(self.internal_layer_size, 'tanh'))
    self.decoder = Chain(self.internal_layer_size).stack(Dense(self.input_dim))
    self.classifier = Chain(self.internal_layer_size).stack(Dense(50, 'tanh'),
                                                            Dense(self.output_dim),
                                                            Softmax())
    self.register_inner_layers(self.encoder, self.decoder, self.classifier)
    self.target_input = T.ivector('target')
    self.register_external_inputs(self.target_input)
Build the computation graph here.
def compute_tensor(self, x):
    """
    Build the computation graph here.
    """
    internal_variable = self.encoder.compute_tensor(x)
    decoding_output = self.decoder.compute_tensor(internal_variable)
    classification_output = self.classifier.compute_tensor(internal_variable)
    auto_encoder_cost = AutoEncoderCost(decoding_output, x).get()
    classification_cost = CrossEntropyCost(classification_output, self.target_input).get()
    final_cost = 0.01 * auto_encoder_cost + classification_cost
    error_rate = ErrorRateCost(classification_output, self.target_input).get()
    self.register_monitors(("err", error_rate),
                           ("encoder_cost", auto_encoder_cost),
                           ("classify_cost", classification_cost))
    return final_cost
Process all data with the given function. The scheme of the function should be x, y -> x, y.
def map(self, func):
    """
    Process all data with the given function.
    The scheme of the function should be x, y -> x, y.
    """
    if self._train_set:
        self._train_set = map(func, self._train_set)
    if self._valid_set:
        self._valid_set = map(func, self._valid_set)
    if self._test_set:
        self._test_set = map(func, self._test_set)
Make targets be one-hot vectors.
def vectorize_target(self, size):
    """
    Make targets be one-hot vectors.
    """
    if self._train_set:
        self._train_set = self._vectorize_set(self._train_set, size)
    if self._valid_set:
        self._valid_set = self._vectorize_set(self._valid_set, size)
    if self._test_set:
        self._test_set = self._vectorize_set(self._test_set, size)
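The _vectorize_set internals are not shown in this excerpt, but one-hot vectorization of integer targets presumably reduces to something like this numpy sketch:
import numpy as np

def onehot_targets(targets, size):
    # Map each integer label to a one-hot row vector of length `size`.
    out = np.zeros((len(targets), size), dtype="float32")
    out[np.arange(len(targets)), targets] = 1.0
    return out

print(onehot_targets([2, 0, 1], size=3))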
Print dataset statistics.
def report(self):
    """
    Print dataset statistics.
    """
    logging.info("%s train=%d valid=%d test=%d" % (
        self.__class__.__name__,
        len(list(self._train_set)) if self._train_set else 0,
        len(list(self._valid_set)) if self._valid_set else 0,
        len(list(self._test_set)) if self._test_set else 0))
We train over mini-batches and evaluate periodically.
def train(self, train_set, valid_set=None, test_set=None, train_size=None):
    '''We train over mini-batches and evaluate periodically.'''
    iteration = 0
    while True:
        if not iteration % self.config.test_frequency and test_set:
            try:
                self.test(iteration, test_set)
            except KeyboardInterrupt:
                logging.info('interrupted!')
                break
        if not iteration % self.validation_frequency and valid_set:
            try:
                if not self.evaluate(iteration, valid_set):
                    logging.info('patience elapsed, bailing out')
                    break
            except KeyboardInterrupt:
                logging.info('interrupted!')
                break
        train_message = ""
        try:
            train_message = self.train_func(train_set)
        except KeyboardInterrupt:
            logging.info('interrupted!')
            break
        if not iteration % self.config.monitor_frequency:
            logging.info('monitor (iter=%i) %s', iteration + 1, train_message)
        iteration += 1
        if hasattr(self.network, "iteration_callback"):
            self.network.iteration_callback()
        yield train_message
    if valid_set:
        self.set_params(self.best_params)
    if test_set:
        self.test(0, test_set)
Sample outputs from LM.
def sample(self, input, steps):
    """
    Sample outputs from LM.
    """
    inputs = [[onehot(self.input_dim, x) for x in input]]
    for _ in range(steps):
        target = self.compute(inputs)[0, -1].argmax()
        input.append(target)
        inputs[0].append(onehot(self.input_dim, target))
    return input
:param x: (batch, time, vec)
def compute_tensor(self, x):
    """
    :param x: (batch, time, vec)
    """
    # Target class
    class_matrix = self.target_tensor // self.output_size
    class_vector = class_matrix.reshape((-1,))
    # Target index
    target_matrix = self.target_tensor % self.output_size
    target_vector = target_matrix.reshape((-1,))
    # Input matrix
    input_matrix = x.reshape((-1, self.input_dim))
    # Output matrix
    output_tensor3d = self.output_layer.compute_tensor(x)
    output_matrix = output_tensor3d.reshape((-1, self.class_size, self.output_size))
    arange_vec = self.arange_cache[:output_matrix.shape[0]]
    sub_output_matrix = output_matrix[arange_vec, class_vector]
    # Softmax
    softmax_output_matrix = self.softmax_layer.compute_tensor(sub_output_matrix)
    # Class prediction
    class_output_matrix = self.class_layer.compute_tensor(x)
    # Costs
    output_cost = LMCost(softmax_output_matrix, target_vector).get()
    class_cost = LMCost(class_output_matrix, class_matrix).get()
    final_cost = output_cost + class_cost
    return final_cost
Compute the alignment weights based on the previous state.
def compute_alignments(self, prev_state, precomputed_values, mask=None):
    """
    Compute the alignment weights based on the previous state.
    """
    WaSp = T.dot(prev_state, self.Wa)
    UaH = precomputed_values
    # At test time UaH will be (time, output_dim)
    if UaH.ndim == 2:
        preact = WaSp[:, None, :] + UaH[None, :, :]
    else:
        preact = WaSp[:, None, :] + UaH
    act = T.activate(preact, 'tanh')
    align_scores = T.dot(act, self.Va)  # ~ (batch, time)
    if mask:
        mask = (1 - mask) * -99.00
        if align_scores.ndim == 3:
            align_scores += mask[None, :]
        else:
            align_scores += mask
    align_weights = T.nnet.softmax(align_scores)
    return align_weights
Compute the context vector with soft attention.
def compute_context_vector(self, prev_state, inputs, precomputed_values=None, mask=None):
    """
    Compute the context vector with soft attention.
    """
    precomputed_values = precomputed_values if precomputed_values else self.precompute(inputs)
    align_weights = self.compute_alignments(prev_state, precomputed_values, mask)
    context_vector = T.sum(align_weights[:, :, None] * inputs, axis=1)
    return context_vector
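Together, these two methods implement additive (Bahdanau-style) soft attention: score each input position against the previous state, softmax over time, then take the weighted sum. A single-example numpy sketch (shapes and weight names are illustrative):
import numpy as np

def soft_attention(prev_state, inputs, Wa, Ua, Va):
    # inputs: (time, dim); prev_state: (state_dim,)
    scores = np.dot(np.tanh(np.dot(prev_state, Wa) + np.dot(inputs, Ua)), Va)  # (time,)
    weights = np.exp(scores - scores.max())
    weights /= weights.sum()        # softmax over time
    return np.dot(weights, inputs)  # context vector: (dim,)

time, dim, state_dim, align_dim = 5, 4, 3, 6
rng = np.random.RandomState(0)
context = soft_attention(rng.rand(state_dim), rng.rand(time, dim),
                         rng.rand(state_dim, align_dim), rng.rand(dim, align_dim),
                         rng.rand(align_dim))
print(context.shape)  # (4,)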
Train the model in a multi-GPU environment.
def train(self, train_set, valid_set=None, test_set=None, train_size=None):
    """
    Train the model in a multi-GPU environment.
    """
    from platoon.channel import Worker
    from platoon.param_sync import EASGD, ASGD
    server_port = self._port
    param_map = self.create_param_map()
    # Initialize the worker
    worker = Worker(control_port=server_port)
    if self.config.learning_rate:
        worker.send_req({'init_schedule': self._schedule_params})
    self.sync_hyperparams(worker.send_req('sync_hyperparams')['sync_hyperparams'])
    easgd_alpha = worker.send_req('get_easgd_alpha')
    if self._using_easgd:
        self.logger.info("using EASGD with alpha={}".format(easgd_alpha))
    else:
        self.logger.info("using ASGD rule")
    rule = EASGD(easgd_alpha) if self._using_easgd else ASGD()
    worker.init_shared_params(param_map.values(), param_sync_rule=rule)
    worker.send_req({
        "set_names": None,
        "training_names": self.training_names,
        "evaluation_names": self.evaluation_names
    })
    # Load all training batches; this consumes vast memory
    self.logger.info("started process {}".format(os.getpid()))
    self.logger.info("(proc {}) load training data".format(os.getpid()))
    train_batches = list(train_set)
    network_callback = bool(self.network.training_callbacks)
    trainer_callback = bool(self._iter_controllers)
    # Start from valid, so the performance when a worker joins can be known
    worker.copy_to_local()
    if valid_set:
        self._run_valid(self.epoch, valid_set, dry_run=True)
        self.fix_costs()
    worker.send_req({
        "valid_done": None,
        "valid_costs": self.last_run_costs,
        "auto_save": self.config.auto_save
    })
    worker.copy_to_local()
    # Begin the loop
    while True:
        resp = worker.send_req('next')
        if resp == 'stop':
            break
        elif resp == 'wait':
            time.sleep(1)
        elif resp == 'get_num_batches':
            worker.send_req({'get_num_batches_done': len(train_batches)})
        elif 'eval' in resp:
            self.best_cost = resp['best_valid_cost']
            worker.copy_to_local()
            valid_costs = None
            test_costs = None
            if valid_set:
                self._run_valid(self.epoch, valid_set)
                self.fix_costs()
                valid_costs = self.last_run_costs
            if test_set:
                self._run_test(self.epoch, test_set)
                self.fix_costs()
                test_costs = self.last_run_costs
            worker.send_req({
                "eval_done": None,
                "valid_costs": valid_costs,
                "test_costs": test_costs,
                "auto_save": self.config.auto_save
            })
        elif 'valid' in resp:
            self.best_cost = resp['best_valid_cost']
            worker.copy_to_local()
            if valid_set:
                self._run_valid(self.epoch, valid_set, dry_run=True)
                self.fix_costs()
            worker.send_req({
                "valid_done": None,
                "valid_costs": self.last_run_costs,
                "auto_save": self.config.auto_save
            })
        elif 'train' in resp:
            batch_ids = resp['train']
            batch_costs = [[] for _ in self.training_names]
            for batch_id in batch_ids:
                x = train_batches[batch_id]
                cost_x = self.learn(*x)
                for i, cost in enumerate(cost_x):
                    batch_costs[i].append(cost)
                self.last_cost = cost_x[0]
            if network_callback:
                self.network.training_callback()
            if trainer_callback:
                for func in self._iter_controllers:
                    func(self)
            worker.sync_params(synchronous=True)
            worker.send_req({'train_done': None,
                             'costs': [float(np.mean(c)) for c in batch_costs]})
        elif 'sync_hyperparams' in resp:
            self.sync_hyperparams(resp['sync_hyperparams'])
    worker.close()
    return []
A utility function for concatenation.
def concatenate(vars, axis=-1):
    """
    A utility function for concatenation.
    """
    from deepy.core.neural_var import NeuralVariable
    if isinstance(vars[0], NeuralVariable):
        concat_var = Concatenate(axis=axis).compute(*vars)
        if axis == -1 or axis == vars[0].tensor.ndim - 1:
            concat_var.output_dim = sum([x.output_dim for x in vars], 0)
    else:
        concat_var = TT.concatenate(vars, axis)
    return concat_var
Wrap a Theano tensor into a variable for defining a neural network. :param last_dim: last dimension of tensor; 0 indicates that the last dimension is flexible. :rtype: deepy.core.neural_var.NeuralVariable
def var(tensor_type, last_dim=0, test_shape=None):
    """
    Wrap a Theano tensor into a variable for defining a neural network.
    :param last_dim: last dimension of tensor; 0 indicates that the last dimension is flexible
    :rtype: deepy.core.neural_var.NeuralVariable
    """
    # Create tensor
    from deepy.core.neural_var import NeuralVariable
    from deepy.core.env import env
    from theano.tensor.var import TensorVariable
    if isinstance(tensor_type, NeuralVariable):
        var = tensor_type
        if last_dim != 0:
            var.output_dim = last_dim
    elif isinstance(tensor_type, TensorVariable):
        var = NeuralVariable(tensor_type, dim=last_dim)
    elif isinstance(tensor_type, str):
        theano_tensor = getattr(TT, tensor_type)()
        var = NeuralVariable(theano_tensor, dim=last_dim)
    else:
        raise Exception("tensor_type shall be a string or a NeuralVariable")
    # Set test value
    if test_shape:
        if type(test_shape) != list and type(test_shape) != tuple:
            # Maybe it's a value
            var.set_test_value(test_shape)
        else:
            test_val = env.numpy_rand.rand(*test_shape)
            if len(test_shape) > 0:
                test_val = test_val.astype(var.tensor.dtype)
            elif var.tensor.dtype.startswith("int"):
                test_val = 1
            var.set_test_value(test_val)
    else:
        # Create a general test_shape
        dims = [(d + 1) * 3 for d in range(var.tensor.ndim)]
        if var.dim() != 0:
            dims[-1] = var.dim()
        test_val = env.numpy_rand.rand(*dims)
        if len(dims) > 0:
            test_val = test_val.astype(var.tensor.dtype)
        elif var.tensor.dtype.startswith("int"):
            test_val = 1
        var.set_test_value(test_val)
    return var
Pad sequences to the given length on the left or right side.
def _pad(self, side, length):
    """
    Pad sequences to the given length on the left or right side.
    """
    if self._train_set:
        self._train_set = pad_dataset(self._train_set, side, length)
    if self._valid_set:
        self._valid_set = pad_dataset(self._valid_set, side, length)
    if self._test_set:
        self._test_set = pad_dataset(self._test_set, side, length)
RMSPROP optimization core.
def rmsprop_core(params, gradients, momentum=0.9, learning_rate=0.01):
    """
    RMSPROP optimization core.
    """
    for param, grad in zip(params, gradients):
        rms_ = theano.shared(np.zeros_like(param.get_value()), name=param.name + '_rms')
        rms = momentum * rms_ + (1 - momentum) * grad * grad
        yield rms_, rms
        yield param, param - learning_rate * grad / T.sqrt(rms + 1e-8)
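The generator yields (shared variable, update) pairs: an exponential moving average of squared gradients, then the normalized parameter step. The same rule in plain numpy (a sketch, not the deepy API):
import numpy as np

def rmsprop_step(param, grad, rms, momentum=0.9, learning_rate=0.01, eps=1e-8):
    # Moving average of squared gradients, then a normalized step.
    rms = momentum * rms + (1 - momentum) * grad * grad
    param = param - learning_rate * grad / np.sqrt(rms + eps)
    return param, rms

w, g, cache = np.ones(3), np.array([0.5, -2.0, 0.1]), np.zeros(3)
w, cache = rmsprop_step(w, g, cache)
print(w)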
Pad data set to specified length. Parameters: length - max length; adjust to the max length in the batch if length is -1.
def pad_dataset(subset, side="right", length=-1):
    """
    Pad data set to specified length.
    Parameters:
        length - max length; adjust to the max length in the batch if length is -1
    """
    assert length == -1 or length > 0
    if type(subset[0][0][0]) in [float, int, np.int64, np.int32, np.float32]:
        return _pad_2d(subset, side, length)
    else:
        return _pad_3d(subset, side, length)
Prepare for one epoch. Returns: bool: False if training should stop.
def prepare_epoch(self):
    """
    Prepare for one epoch.
    Returns:
        bool: False if training should stop.
    """
    self.epoch += 1
    if self.epoch >= self.epoch_start_halving and ((self.epoch - self.epoch_start_halving) % self._halving_freq == 0):
        self._lr *= 0.5
    self._current_iter = 0
    self._iters_from_last_valid = 0
    self._train_costs = []
    self.prepared_worker_pool.clear()
    self.batch_pool = range(self.num_train_batches)
    self.rand.shuffle(self.batch_pool)
    if self.epoch > self.end_at:
        self.log("Training is done, wait all workers to stop")
        return False
    else:
        self.log("start epoch {} with lr={}".format(self.epoch, self._lr))
        return True
Handles a control_request received from a worker. Returns: string or dict: response
def handle_control(self, req, worker_id, req_info):
    """
    Handles a control request received from a worker.
    Returns:
        string or dict: response
            'stop' - the worker should quit
            'wait' - wait for 1 second
            'eval' - evaluate on valid and test set to start a new epoch
            'sync_hyperparams' - set learning rate
            'valid' - evaluate on valid and test set, then save the params
            'train' - train next batches
    """
    if self.start_time is None:
        self.start_time = time.time()
    response = ""

    if req == 'next':
        if self.num_train_batches == 0:
            response = "get_num_batches"
        elif self._done:
            response = "stop"
            self.worker_is_done(worker_id)
        elif self._evaluating:
            response = 'wait'
        elif not self.batch_pool:
            # End of one iter
            if self._train_costs:
                with self._lock:
                    sys.stdout.write("\r")
                    sys.stdout.flush()
                    mean_costs = []
                    for i in range(len(self._training_names)):
                        mean_costs.append(np.mean([c[i] for c in self._train_costs]))
                    self.log("train (epoch={:2d}) {}".format(
                        self.epoch,
                        self.get_monitor_string(zip(self._training_names, mean_costs)))
                    )
            response = {'eval': None, 'best_valid_cost': self._best_valid_cost}
            self._evaluating = True
        else:
            # Continue training
            if worker_id not in self.prepared_worker_pool:
                response = {"sync_hyperparams": self.feed_hyperparams()}
                self.prepared_worker_pool.add(worker_id)
            elif self._iters_from_last_valid >= self._valid_freq:
                response = {'valid': None, 'best_valid_cost': self._best_valid_cost}
                self._iters_from_last_valid = 0
            else:
                response = {"train": self.feed_batches()}
    elif 'eval_done' in req:
        with self._lock:
            self._evaluating = False
            sys.stdout.write("\r")
            sys.stdout.flush()
            if 'test_costs' in req and req['test_costs']:
                self.log("test (epoch={:2d}) {} (worker {})".format(
                    self.epoch,
                    self.get_monitor_string(req['test_costs']),
                    worker_id)
                )
            # Note: the original checked req['test_costs'] here, which looks like a
            # copy-paste slip; the valid branch should depend on valid_costs.
            if 'valid_costs' in req and req['valid_costs']:
                valid_J = req['valid_costs'][0][1]
                if valid_J < self._best_valid_cost:
                    self._best_valid_cost = valid_J
                    star_str = "*"
                else:
                    star_str = ""
                self.log("valid (epoch={:2d}) {} {} (worker {})".format(
                    self.epoch,
                    self.get_monitor_string(req['valid_costs']),
                    star_str,
                    worker_id))
                # if star_str and 'auto_save' in req and req['auto_save']:
                #     self.log("(worker {}) save the model to {}".format(
                #         worker_id,
                #         req['auto_save']
                #     ))
            continue_training = self.prepare_epoch()
            self._epoch_start_time = time.time()
            if not continue_training:
                self._done = True
                self.log("training time {:.4f}s".format(time.time() - self.start_time))
                response = "stop"
    elif 'valid_done' in req:
        with self._lock:
            sys.stdout.write("\r")
            sys.stdout.flush()
            if 'valid_costs' in req:
                valid_J = req['valid_costs'][0][1]
                if valid_J < self._best_valid_cost:
                    self._best_valid_cost = valid_J
                    star_str = "*"
                else:
                    star_str = ""
                self.log("valid ( dryrun ) {} {} (worker {})".format(
                    self.get_monitor_string(req['valid_costs']),
                    star_str,
                    worker_id
                ))
                # if star_str and 'auto_save' in req and req['auto_save']:
                #     self.log("(worker {}) save the model to {}".format(
                #         worker_id,
                #         req['auto_save']
                #     ))
    elif 'train_done' in req:
        costs = req['costs']
        self._train_costs.append(costs)
        sys.stdout.write("\x1b[2K\r> %d%% | J=%.2f | %.1f batch/s" % (
            self._current_iter * 100 / self.num_train_batches,
            costs[0],
            float(len(self._train_costs) * self.sync_freq) / (time.time() - self._epoch_start_time)))
        sys.stdout.flush()
    elif 'get_num_batches_done' in req:
        self.num_train_batches = req['get_num_batches_done']
    elif 'get_easgd_alpha' in req:
        response = self._easgd_alpha
    elif 'sync_hyperparams' in req:
        response = {"sync_hyperparams": self.feed_hyperparams()}
    elif 'init_schedule' in req:
        with self._lock:
            sys.stdout.write("\r")
            sys.stdout.flush()
            self.log("worker {} connected".format(worker_id))
            if self.epoch == 0:
                schedule_params = req['init_schedule']
                sch_str = " ".join("{}={}".format(a, b) for (a, b) in schedule_params.items())
                self.log("initialize the schedule with {}".format(sch_str))
                for key, val in schedule_params.items():
                    if not val:
                        continue
                    if key == 'learning_rate':
                        self._lr = val
                    elif key == 'start_halving_at':
                        self.epoch_start_halving = val
                    elif key == 'halving_freq':
                        self._halving_freq = val
                    elif key == 'end_at':
                        self.end_at = val
                    elif key == 'sync_freq':
                        self.sync_freq = val
                    elif key == 'valid_freq':
                        self._valid_freq = val
    elif 'set_names' in req:
        self._training_names = req['training_names']
        self._evaluation_names = req['evaluation_names']
    return response
Report elapsed time.
def report(self):
    """
    Report elapsed time.
    """
    if not self.end_time:
        self.end()
    print("Time: {} mins".format((self.end_time - self.start_time) / 60))
Compare to previous records and return whether the given cost is a new best. :return: True if the given cost is a new best.
def compare(self, cost_map):
    """
    Compare to previous records and return whether the given cost is a new best.
    :return: True if the given cost is a new best
    """
    cri_val = cost_map[self._criteria]
    if self._best_criteria is None:
        self._best_criteria = cri_val
        return True
    else:
        if self._smaller_is_better and cri_val < self._best_criteria:
            self._best_criteria = cri_val
            return True
        elif not self._smaller_is_better and cri_val > self._best_criteria:
            self._best_criteria = cri_val
            return True
        else:
            return False
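A brief usage sketch, assuming a hypothetical monitor configured with criteria "err" and smaller-is-better, which exercises each branch of the logic above:
monitor.compare({"err": 0.20})  # True: first record becomes the best
monitor.compare({"err": 0.25})  # False: worse than 0.20
monitor.compare({"err": 0.15})  # True: new best, best_criteria updated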
Run the model with validation data and return costs.
def run(self, data_x):
    """
    Run the model with validation data and return costs.
    """
    output_vars = self.compute(*data_x)
    return self._extract_costs(output_vars)
This function will be called after each iteration.
def invoke(self):
    """
    This function will be called after each iteration.
    """
    self._counter += 1
    if self._counter % self._freq == 0:
        cnt = 0.
        sum_map = defaultdict(float)
        for x in self._trainer.get_data(self._data_split):
            val_map = self.run(x)
            if not isinstance(val_map, dict):
                raise Exception("Monitor.run must return a dict.")
            for k, val in val_map.items():
                sum_map[k] += val
            cnt += 1
        for k in sum_map:
            sum_map[k] /= cnt
        new_best = self.compare(sum_map)
        self._trainer.report(sum_map, self._data_split, new_best=new_best)
        if new_best:
            self._trainer.save_checkpoint(self._save_path)
Create inner loop variables.
def _build_loop_vars(self):
    """
    Create inner loop variables.
    """
    from theano.tensor.var import TensorVariable
    from deepy.core.neural_var import NeuralVariable
    if not self._loop_vars:
        self._ordered_out_keys = self._outputs.keys()
        seq_keys = self._sequences.keys()
        filled_out_keys = [k for k in self._ordered_out_keys if self._outputs[k]]
        nonseq_keys = self._non_sequences.keys()
        dummy_tensors, self._scan_local_vars = get_dummy_args(
            sequences=[self._sequences[k].tensor for k in seq_keys],
            outputs_info=[self._outputs[k].tensor for k in self._ordered_out_keys],
            non_sequences=[self._non_sequences[k].tensor for k in nonseq_keys],
            **self._kwargs
        )
        dummy_map = dict(zip(seq_keys + filled_out_keys + nonseq_keys, dummy_tensors))
        arg_map = self._sequences.copy()
        arg_map.update(self._outputs)
        arg_map.update(self._non_sequences)
        self._loop_vars = LoopVars()
        for k, dummy_tensor in dummy_map.items():
            dummy_var = NeuralVariable(dummy_tensor, dim=arg_map[k].dim())
            self._loop_vars[k] = dummy_var
Internal scan with dummy input variables.
def _scan_step(self, vars):
    """
    Internal scan with dummy input variables.
    """
    from neural_var import NeuralVariable
    if not self._loop_vars:
        raise Exception("The loop is not initialized. To initialize the loop, use `with loop as vars`")
    replace_map = {}
    for k, var in vars.items():
        if var is not None:
            replace_map[self._dummy_nodes[k].tensor] = var.tensor
    outputs = {}
    for k in self._outputs:
        if k not in self._loop_vars:
            raise Exception("{} can not be found in loop vars.".format(k))
        output_node = theano.clone(self._loop_vars[k].tensor, replace_map)
        outputs[k] = NeuralVariable(output_node, self._loop_vars[k].dim())
    return outputs
Get the outputs of the loop. Return specific variables by passing the keys to the arguments. :rtype: MapDict
def get_outputs(self, *args):
    """
    Get the outputs of the loop.
    Return specific variables by passing the keys to the arguments.
    :rtype: MapDict
    """
    if args:
        output_vars = map(self._scan_outputs.get, args)
        if len(output_vars) == 1:
            return output_vars[0]
        else:
            return output_vars
    else:
        return self._scan_outputs
Momentum SGD optimization core.
def momentum_core(params, gradients, momentum=0.9, learning_rate=0.01):
    """
    Momentum SGD optimization core.
    """
    free_parameters = []
    updates = []
    for param, grad in zip(params, gradients):
        delta = learning_rate * grad
        velocity = theano.shared(np.zeros_like(param.get_value()), name=param.name + '_vel')
        updates.append((velocity, momentum * velocity - delta))
        updates.append((param, param + velocity))
        free_parameters.append(velocity)
    return updates, free_parameters
Execute then_branch when training.
def iftrain(self, then_branch, else_branch):
    """
    Execute `then_branch` when training.
    """
    return ifelse(self._training_flag, then_branch, else_branch, name="iftrain")
Switch training mode. :param flag: switch on training mode when flag is True.
def switch_training(self, flag):
    """
    Switch training mode.
    :param flag: switch on training mode when flag is True.
    """
    if self._is_training == flag:
        return
    self._is_training = flag
    if flag:
        self._training_flag.set_value(1)
    else:
        self._training_flag.set_value(0)
Nesterov's Accelerated Gradient (NAG). See http://www.cs.toronto.edu/~fritz/absps/momentum.pdf. Still unfinished.
def nag_core(params, J, momentum=0.9, learning_rate=0.01):
    """
    Nesterov's Accelerated Gradient (NAG).
    See http://www.cs.toronto.edu/~fritz/absps/momentum.pdf .
    Still unfinished.
    """
    # TODO: this requires some refactoring.
    for param in params:
        step = theano.shared(np.zeros_like(param.get_value()), name=param.name + '_step')
        velocity = theano.shared(np.zeros_like(param.get_value()), name=param.name + '_vel')
        yield step, momentum * velocity
        yield param, param + step
        yield velocity, step - learning_rate * T.grad(J, param)
        yield param, param + velocity - step
Skip N batches in the training.
def skip(self, n_batches, n_epochs=0):
    """
    Skip N batches in the training.
    """
    logging.info("skip %d epochs and %d batches" % (n_epochs, n_batches))
    self._skip_batches = n_batches
    self._skip_epochs = n_epochs
Load parameters for the training. This method can load free parameters and resume the training progress.
def load_params(self, path, exclude_free_params=False):
    """
    Load parameters for the training.
    This method can load free parameters and resume the training progress.
    """
    self.network.load_params(path, exclude_free_params=exclude_free_params)
    self.best_params = self.copy_params()
    # Resume the progress
    if self.network.train_logger.progress() > 0 or self.network.train_logger.epoch() > 0:
        self.skip(self.network.train_logger.progress(), self.network.train_logger.epoch() - 1)
Add iteration callback functions (each receives the trainer as an argument). :param controllers: can be a TrainingController or a function. :type controllers: list of TrainingController
def add_iter_controllers(self, *controllers):
    """
    Add iteration callback functions (each receives the trainer as an argument).
    :param controllers: can be a `TrainingController` or a function.
    :type controllers: list of TrainingController
    """
    for controller in controllers:
        if isinstance(controller, TrainingController):
            controller.bind(self)
        self._iter_controllers.append(controller)
Add epoch callback functions. :param controllers: can be a TrainingController or a function.
def add_epoch_controllers(self, *controllers):
    """
    Add epoch callback functions.
    :param controllers: can be a `TrainingController` or a function.
    """
    for controller in controllers:
        if isinstance(controller, TrainingController):
            controller.bind(self)
        self._epoch_controllers.append(controller)
Train the model and return costs.
def train(self, train_set, valid_set=None, test_set=None, train_size=None):
    """
    Train the model and return costs.
    """
    self._epoch = 0
    while True:
        if self._skip_epochs > 0:
            logging.info("skipping one epoch ...")
            self._skip_epochs -= 1
            self._epoch += 1
            yield None
            continue
        # Test
        if not self._epoch % self.config.test_frequency and test_set:
            try:
                self._run_test(self._epoch, test_set)
            except KeyboardInterrupt:
                logging.info('interrupted!')
                break
        # Validate
        if not self._epoch % self.validation_frequency and valid_set:
            try:
                if not self._run_valid(self._epoch, valid_set):
                    logging.info('patience elapsed, bailing out')
                    break
            except KeyboardInterrupt:
                logging.info('interrupted!')
                break
        # Train one step
        try:
            costs = self._run_train(self._epoch, train_set, train_size)
        except KeyboardInterrupt:
            logging.info('interrupted!')
            break
        # Check costs
        if np.isnan(costs[0][1]):
            logging.info("NaN detected in costs, rollback to last parameters")
            self.set_params(*self.checkpoint)
        else:
            self._epoch += 1
            self.network.epoch_callback()
        yield dict(costs)
    if valid_set and self.config.get("save_best_parameters", True):
        self.set_params(*self.best_params)
    if test_set:
        self._run_test(-1, test_set)
Run one training iteration.
def _run_train(self, epoch, train_set, train_size=None):
    """
    Run one training iteration.
    """
    self.network.train_logger.record_epoch(epoch + 1)
    costs = self.train_step(train_set, train_size)
    if not epoch % self.config.monitor_frequency:
        self.report(dict(costs), "train", epoch)
    self.last_run_costs = costs
    return costs
Run one validation iteration; return True if training should continue.
def _run_valid(self, epoch, valid_set, dry_run=False, save_path=None):
    """
    Run one validation iteration; return True if training should continue.
    """
    costs = self.valid_step(valid_set)
    # This is the same as: (J_i - J_f) / J_i > min_improvement
    _, J = costs[0]
    new_best = False
    if self.best_cost - J > self.best_cost * self.min_improvement:
        # Save the best cost and parameters
        self.best_params = self.copy_params()
        new_best = True
        if not dry_run:
            self.best_cost = J
            self.best_epoch = epoch
            self.save_checkpoint(save_path)
    self.report(dict(costs), type="valid", epoch=0 if dry_run else epoch, new_best=new_best)
    self.last_run_costs = costs
    return epoch - self.best_epoch < self.patience
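Note the improvement test is relative: a validation cost J only counts as better when it beats the previous best by min_improvement as a fraction of that best. A tiny numeric illustration:
best_cost, min_improvement = 2.00, 0.05
print(best_cost - 1.95 > best_cost * min_improvement)  # False: only 2.5% better
print(best_cost - 1.85 > best_cost * min_improvement)  # True: 7.5% better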
Report the scores and record them in the log.
def report(self, score_map, type="valid", epoch=-1, new_best=False):
    """
    Report the scores and record them in the log.
    """
    type_str = type
    if len(type_str) < 5:
        type_str += " " * (5 - len(type_str))
    info = " ".join("%s=%.2f" % el for el in score_map.items())
    current_epoch = epoch if epoch > 0 else self.current_epoch()
    epoch_str = "epoch={}".format(current_epoch + 1)
    if epoch < 0:
        epoch_str = "dryrun"
    sys.stdout.write("\r")
    sys.stdout.flush()
    marker = " *" if new_best else ""
    message = "{} ({}) {}{}".format(type_str, epoch_str, info, marker)
    self.network.train_logger.record(message)
    logging.info(message)
Get specified split of data.
def get_data(self, data_split="train"):
    """
    Get specified split of data.
    """
    if data_split == 'train':
        return self._current_train_set
    elif data_split == 'valid':
        return self._current_valid_set
    elif data_split == 'test':
        return self._current_test_set
    else:
        return None
Run until the end. :param epoch_controllers: deprecated
def run(self, train_set, valid_set=None, test_set=None, train_size=None, epoch_controllers=None):
    """
    Run until the end.
    :param epoch_controllers: deprecated
    """
    epoch_controllers = epoch_controllers if epoch_controllers else []
    epoch_controllers += self._epoch_controllers
    if isinstance(train_set, Dataset):
        dataset = train_set
        train_set = dataset.train_set()
        valid_set = dataset.valid_set()
        test_set = dataset.test_set()
        train_size = dataset.train_size()
    self._current_train_set = train_set
    self._current_valid_set = valid_set
    self._current_test_set = test_set
    if epoch_controllers:
        for controller in epoch_controllers:
            controller.bind(self)
    timer = Timer()
    for _ in self.train(train_set, valid_set=valid_set, test_set=test_set, train_size=train_size):
        if epoch_controllers:
            for controller in epoch_controllers:
                controller.invoke()
        if self._ended:
            break
    if self._report_time:
        timer.report()
:type bunch_stack: list of list of int
def _cut_to_pieces(self, bunch_stack):
    """
    :type bunch_stack: list of list of int
    """
    stack_len = len(bunch_stack[0])
    for i in xrange(0, stack_len, self.fragment_length):
        yield np.array(map(lambda stack: stack[i: i + self.fragment_length], bunch_stack))
:type bunch_stack: list of list
def _pad_zeros(self, bunch_stack):
    """
    :type bunch_stack: list of list
    """
    min_len = min(map(len, bunch_stack))
    for i in range(len(bunch_stack)):
        bunch_stack[i] = bunch_stack[i][:min_len]
Apply a function to tensors.
def apply(self, func, dim=None):
    """
    Apply a function to tensors.
    """
    output_dim = dim if dim else self.output_dim
    return NeuralVariable(func(self.tensor), output_dim)
Rprop optimizer. See http://sci2s.ugr.es/keel/pdf/algorithm/articulo/2003-Neuro-Igel-IRprop+.pdf.
def rprop_core(params, gradients, rprop_increase=1.01, rprop_decrease=0.99,
               rprop_min_step=0, rprop_max_step=100, learning_rate=0.01):
    """
    Rprop optimizer.
    See http://sci2s.ugr.es/keel/pdf/algorithm/articulo/2003-Neuro-Igel-IRprop+.pdf.
    """
    for param, grad in zip(params, gradients):
        grad_tm1 = theano.shared(np.zeros_like(param.get_value()), name=param.name + '_grad')
        step_tm1 = theano.shared(np.zeros_like(param.get_value()) + learning_rate, name=param.name + '_step')
        test = grad * grad_tm1
        same = T.gt(test, 0)
        diff = T.lt(test, 0)
        step = T.minimum(rprop_max_step, T.maximum(rprop_min_step, step_tm1 * (
            T.eq(test, 0) + same * rprop_increase + diff * rprop_decrease)))
        grad = grad - diff * grad
        yield param, param - T.sgn(grad) * step
        yield grad_tm1, grad
        yield step_tm1, step
Report usage of training parameters.
def report(self):
    """
    Report usage of training parameters.
    """
    if self.logger:
        self.logger.info("accessed parameters:")
        for key in self.used_parameters:
            self.logger.info(" - %s %s" % (key, "(undefined)" if key in self.undefined_parameters else ""))
Create a parameters block. :param layers: register some layers in the block. :param name: specify the name of this block.
def new_block(self, *layers, **kwargs):
    """
    Create a parameters block.
    :param layers: register some layers in the block
    :param name: specify the name of this block
    """
    from deepy.layers.block import Block
    block = Block(*layers, **kwargs)
    return block
An alias of deepy.tensor.var.
def var(self, tensor_type, last_dim=0, test_shape=None):
    """
    An alias of deepy.tensor.var.
    """
    from deepy.tensor import var
    return var(tensor_type, last_dim=last_dim, test_shape=test_shape)
Create vars given a dataset and set test values. Useful when dataset is already defined.
def create_vars_from_data(self, dataset, split="train"):
    """
    Create vars given a dataset and set test values.
    Useful when dataset is already defined.
    """
    from deepy.core.neural_var import NeuralVariable
    vars = []
    if split == "valid":
        data_split = dataset.valid_set()
    elif split == "test":
        data_split = dataset.test_set()
    else:
        data_split = dataset.train_set()
    first_data_piece = list(data_split)[0]
    for i, numpy_tensor in enumerate(first_data_piece):
        if numpy_tensor.dtype == "int64":
            numpy_tensor = numpy_tensor.astype("int32")
        if numpy_tensor.dtype == "float64":
            numpy_tensor = numpy_tensor.astype(env.FLOATX)
        type_map = {
            0: "scalar",
            1: "vector",
            2: "matrix",
            3: "tensor3",
            4: "tensor4",
            5: "tensor5",
        }
        tensor_type = type_map[numpy_tensor.ndim] if numpy_tensor.ndim in type_map else type_map[0]
        if numpy_tensor.dtype.kind == "i":
            tensor_type = "i" + tensor_type
        theano_tensor = getattr(TT, tensor_type)("input_{}_{}".format(i + 1, tensor_type))
        last_dim = numpy_tensor.shape[-1]
        var = NeuralVariable(theano_tensor, dim=last_dim)
        var.set_test_value(numpy_tensor)
        vars.append(var)
    return vars
A loop function; the usage is identical to the Theano one. :type block: deepy.layers.Block
def scan(self, func, sequences=None, outputs=None, non_sequences=None, block=None, **kwargs):
    """
    A loop function; the usage is identical to the Theano one.
    :type block: deepy.layers.Block
    """
    results, updates = Scanner(func, sequences, outputs, non_sequences,
                               neural_computation=True, **kwargs).compute()
    if block and updates:
        if type(updates) == dict:
            updates = updates.items()
        block.register_updates(*updates)
    return results
Start a loop. Usage: with deepy.graph.loop(sequences={"x": x}, outputs={"o": None}) as vars: vars.o = vars.x + 1; then loop_outputs = deepy.graph.loop_outputs(); result = loop_outputs.o.
def loop(self, sequences=None, outputs=None, non_sequences=None, block=None, **kwargs):
    """
    Start a loop.
    Usage:
        with deepy.graph.loop(sequences={"x": x}, outputs={"o": None}) as vars:
            vars.o = vars.x + 1
        loop_outputs = deepy.graph.loop_outputs()
        result = loop_outputs.o
    """
    from loop import Loop
    return Loop(sequences, outputs, non_sequences, block, **kwargs)
Get a trainer to optimize the given model. :rtype: deepy.trainers.GeneralNeuralTrainer
def get_trainer(self, model, method='sgd', config=None, annealer=None, validator=None):
    """
    Get a trainer to optimize the given model.
    :rtype: deepy.trainers.GeneralNeuralTrainer
    """
    from deepy.trainers import GeneralNeuralTrainer
    return GeneralNeuralTrainer(model, method=method, config=config,
                                annealer=annealer, validator=validator)
Create a shared Theano scalar value.
def shared(self, value, name=None):
    """
    Create a shared Theano scalar value.
    """
    if type(value) == int:
        final_value = np.array(value, dtype="int32")
    elif type(value) == float:
        final_value = np.array(value, dtype=env.FLOATX)
    else:
        final_value = value
    return theano.shared(final_value, name=name)
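A short usage sketch (assuming `graph` is the object exposing this method): ints become int32 shared scalars, floats use the configured FLOATX, and either can be changed later without recompiling.
lr = graph.shared(0.01, name="learning_rate")  # float -> env.FLOATX
counter = graph.shared(0, name="counter")      # int -> int32
lr.set_value(0.005)                            # adjust without recompiling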
Load parameters from file to fill all blocks sequentially. :type blocks: list of deepy.layers.Block
def fill_parameters(self, path, blocks, exclude_free_params=False, check_parameters=False):
    """
    Load parameters from file to fill all blocks sequentially.
    :type blocks: list of deepy.layers.Block
    """
    if not os.path.exists(path):
        raise Exception("model {} does not exist".format(path))
    # Decide which parameters to load
    normal_params = sum([nn.parameters for nn in blocks], [])
    all_params = sum([nn.all_parameters for nn in blocks], [])
    # Load parameters
    if path.endswith(".gz"):
        opener = gzip.open if path.lower().endswith('.gz') else open
        handle = opener(path, 'rb')
        saved_params = pickle.load(handle)
        handle.close()
        # Write parameters
        if len(all_params) != len(saved_params):
            logging.warning(
                "parameters in the network: {}, parameters in the dumped model: {}".format(
                    len(all_params), len(saved_params)))
        for target, source in zip(all_params, saved_params):
            if not exclude_free_params or target not in normal_params:
                target.set_value(source)
    elif path.endswith(".npz"):
        arrs = np.load(path)
        # Write parameters
        if len(all_params) != len(arrs.keys()):
            logging.warning(
                "parameters in the network: {}, parameters in the dumped model: {}".format(
                    len(all_params), len(arrs.keys())))
        for target, idx in zip(all_params, range(len(arrs.keys()))):
            if not exclude_free_params or target not in normal_params:
                source = arrs['arr_%d' % idx]
                target.set_value(source)
    else:
        raise Exception("File format of %s is not supported, use '.gz' or '.npz' or '.uncompressed.gz'" % path)
Return size of training data (optional). :rtype: number
def train_size(self):
    """
    Return size of training data (optional).
    :rtype: number
    """
    train_set = self.train_set()
    if isinstance(train_set, collections.Iterable):
        return len(list(train_set))
    else:
        return None
Run it; return whether to end training.
def invoke(self):
    """
    Run it; return whether to end training.
    """
    self._iter += 1
    if self._iter - max(self._trainer.best_iter, self._annealed_iter) >= self._patience:
        if self._annealed_times >= self._anneal_times:
            logging.info("ending")
            self._trainer.exit()
        else:
            self._trainer.set_params(*self._trainer.best_params)
            self._learning_rate.set_value(self._learning_rate.get_value() * 0.5)
            self._annealed_times += 1
            self._annealed_iter = self._iter
            logging.info("annealed learning rate to %f" % self._learning_rate.get_value())
Run it; return whether to end training.
def invoke(self):
    """
    Run it; return whether to end training.
    """
    self._iter += 1
    logging.info("{} epochs left to run".format(self._patience - self._iter))
    if self._iter >= self._patience:
        self._trainer.exit()
Perform the reparameterization trick for latent variables. :param layer_size: the size of the latent variable.
def stack_reparameterization_layer(self, layer_size):
    """
    Perform the reparameterization trick for latent variables.
    :param layer_size: the size of the latent variable
    """
    self.rep_layer = ReparameterizationLayer(layer_size, sample=self.sample)
    self.stack_encoders(self.rep_layer)
Stack encoding layers; this must be done before stacking decoding layers.
def stack_encoders(self, *layers):
    """
    Stack encoding layers; this must be done before stacking decoding layers.
    """
    self.stack(*layers)
    self.encoding_layes.extend(layers)
Stack decoding layers.
def stack_decoders(self, *layers):
    """
    Stack decoding layers.
    """
    self.stack(*layers)
    self.decoding_layers.extend(layers)
Encode given input.
def encode(self, x):
    """
    Encode given input.
    """
    if not self.encoding_network:
        self.encoding_network = NeuralNetwork(self.input_dim, self.input_tensor)
        self.encoding_network.input_variables = self.input_variables
        for layer in self.encoding_layes:
            self.encoding_network.stack_layer(layer, no_setup=True)
    return self.encoding_network.compute(*x)
Decode given representation.
def decode(self, x):
    """
    Decode given representation.
    """
    if not self.rep_dim:
        raise Exception("rep_dim must be set to decode.")
    if not self.decoding_network:
        self.decoding_network = NeuralNetwork(self.rep_dim)
        for layer in self.decoding_layers:
            self.decoding_network.stack_layer(layer, no_setup=True)
    return self.decoding_network.compute(x)
This function creates a 2d gaussian kernel with the standard deviation denoted by sigma.
def create_2d_gaussian(dim, sigma):
    """
    This function creates a 2d gaussian kernel with the standard deviation
    denoted by sigma.
    :param dim: integer denoting a side (1-d) of gaussian kernel
    :param sigma: floating point indicating the standard deviation
    :returns: a numpy 2d array
    """
    # check if the dimension is odd
    if dim % 2 == 0:
        raise ValueError("Kernel dimension should be odd")
    # initialize the kernel
    kernel = np.zeros((dim, dim), dtype=np.float16)
    # calculate the center point
    center = dim / 2
    # calculate the variance
    variance = sigma ** 2
    # calculate the normalization coefficient
    coeff = 1. / (2 * variance)
    # create the kernel
    for x in range(0, dim):
        for y in range(0, dim):
            x_val = abs(x - center)
            y_val = abs(y - center)
            numerator = x_val ** 2 + y_val ** 2
            denom = 2 * variance
            kernel[x, y] = coeff * np.exp(-1. * numerator / denom)
    # normalize so the kernel sums to one
    return kernel / sum(sum(kernel))
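A quick sanity check of the kernel; both properties follow from the final normalization and the centered exponent:
k = create_2d_gaussian(7, sigma=1.5)
print(k.shape)             # (7, 7)
print(abs(k.sum() - 1.0))  # ~0 after normalization
print(k.argmax())          # 24 == 3 * 7 + 3, the center cell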
This method performs elastic transformations on an image by convolving with a gaussian kernel. :param image: a numpy nd array. :param kernel_dim: dimension (1-D) of the gaussian kernel. :param sigma: standard deviation of the kernel. :param alpha: a multiplicative factor for image after convolution. :param negated: a flag indicating whether the image is negated or not. :returns: a nd array transformed image.
def elastic_distortion(image, kernel_dim=21, sigma=6, alpha=30, negated=True):
    """
    This method performs elastic transformations on an image by convolving
    with a gaussian kernel.
    :param image: a numpy nd array
    :param kernel_dim: dimension (1-D) of the gaussian kernel
    :param sigma: standard deviation of the kernel
    :param alpha: a multiplicative factor for image after convolution
    :param negated: a flag indicating whether the image is negated or not
    :returns: a nd array transformed image
    """
    # check if the image is a negated one
    if not negated:
        image = 255 - image
    # check if kernel dimension is odd
    if kernel_dim % 2 == 0:
        raise ValueError("Kernel dimension should be odd")
    # create an empty image
    result = np.zeros(image.shape)
    # create random displacement fields
    displacement_field_x = np.array([[env.numpy_rand.random_integers(-1, 1) for x in xrange(image.shape[0])]
                                     for y in xrange(image.shape[1])]) * alpha
    displacement_field_y = np.array([[env.numpy_rand.random_integers(-1, 1) for x in xrange(image.shape[0])]
                                     for y in xrange(image.shape[1])]) * alpha
    # create the gaussian kernel
    kernel = create_2d_gaussian(kernel_dim, sigma)
    # convolve the fields with the gaussian kernel
    displacement_field_x = convolve2d(displacement_field_x, kernel)
    displacement_field_y = convolve2d(displacement_field_y, kernel)
    # make the distorted image by averaging each pixel value to the neighbouring
    # four pixels based on displacement fields
    for row in xrange(image.shape[1]):
        for col in xrange(image.shape[0]):
            low_ii = row + int(math.floor(displacement_field_x[row, col]))
            high_ii = row + int(math.ceil(displacement_field_x[row, col]))
            low_jj = col + int(math.floor(displacement_field_y[row, col]))
            high_jj = col + int(math.ceil(displacement_field_y[row, col]))
            if low_ii < 0 or low_jj < 0 or high_ii >= image.shape[1] - 1 \
                    or high_jj >= image.shape[0] - 1:
                continue
            res = image[low_ii, low_jj] / 4 + image[low_ii, high_jj] / 4 + \
                  image[high_ii, low_jj] / 4 + image[high_ii, high_jj] / 4
            result[row, col] = res
    return result
Stack a neural layer. :type layer: NeuralLayer. :param no_setup: whether the layer is already initialized.
def stack_layer(self, layer, no_setup=False):
    """
    Stack a neural layer.
    :type layer: NeuralLayer
    :param no_setup: whether the layer is already initialized
    """
    if layer.name:
        layer.name += "%d" % (len(self.layers) + 1)
    if not self.layers:
        layer.init(self.input_dim, no_prepare=no_setup)
    else:
        layer.init(self.layers[-1].output_dim, no_prepare=no_setup)
    self._output = layer.compute_tensor(self._output)
    self._test_output = layer.compute_tensor(self._test_output)
    self._hidden_outputs.append(self._output)
    self.register_layer(layer)
    self.layers.append(layer)
Register the layer so that its parameters will be trained, but the output of the layer will not be stacked.
def register_layer(self, layer):
    """
    Register the layer so that its parameters will be trained,
    but the output of the layer will not be stacked.
    """
    if type(layer) == Block:
        layer.fix()
    self.parameter_count += layer.parameter_count
    self.parameters.extend(layer.parameters)
    self.free_parameters.extend(layer.free_parameters)
    self.training_monitors.extend(layer.training_monitors)
    self.testing_monitors.extend(layer.testing_monitors)
    self.updates.extend(layer.updates)
    self.training_updates.extend(layer.training_updates)
    self.input_variables.extend(layer.external_inputs)
    self.target_variables.extend(layer.external_targets)
    self.training_callbacks.extend(layer.training_callbacks)
    self.testing_callbacks.extend(layer.testing_callbacks)
    self.epoch_callbacks.extend(layer.epoch_callbacks)
Monitor the outputs of each layer. Useful for troubleshooting convergence problems.
def monitor_layer_outputs(self):
    """
    Monitor the outputs of each layer.
    Useful for troubleshooting convergence problems.
    """
    for layer, hidden in zip(self.layers, self._hidden_outputs):
        self.training_monitors.append(('mean(%s)' % (layer.name), abs(hidden).mean()))
Return all parameters.
def all_parameters(self):
    """
    Return all parameters.
    """
    params = []
    params.extend(self.parameters)
    params.extend(self.free_parameters)
    return params
Set up variables.
def setup_variables(self):
    """
    Set up variables.
    """
    if self.input_tensor:
        if type(self.input_tensor) == int:
            x = dim_to_var(self.input_tensor, name="x")
        else:
            x = self.input_tensor
    else:
        x = T.matrix('x')
    self.input_variables.append(x)
    self._output = x
    self._test_output = x
Return network output.
def compute(self, *x):
    """
    Return network output.
    """
    self._compile()
    outs = self._compute(*x)
    if self._output_keys:
        return MapDict(dict(zip(self._output_keys, outs)))
    else:
        return outs
Save parameters to file.
def save_params(self, path, new_thread=False):
    """
    Save parameters to file.
    """
    save_logger.info(path)
    param_variables = self.all_parameters
    params = [p.get_value().copy() for p in param_variables]
    if new_thread:
        thread = Thread(target=save_network_params, args=(params, path))
        thread.start()
    else:
        save_network_params(params, path)
    self.train_logger.save(path)
Load parameters from file.
def load_params(self, path, exclude_free_params=False):
    """
    Load parameters from file.
    """
    if not os.path.exists(path):
        return
    logging.info("loading parameters from %s" % path)
    # Decide which parameters to load
    if exclude_free_params:
        params_to_load = self.parameters
    else:
        params_to_load = self.all_parameters
    # Load parameters
    if path.endswith(".gz"):
        opener = gzip.open if path.lower().endswith('.gz') else open
        handle = opener(path, 'rb')
        saved_params = pickle.load(handle)
        handle.close()
        # Write parameters
        for target, source in zip(params_to_load, saved_params):
            logging.info('%s: setting value %s', target.name, source.shape)
            target.set_value(source)
    elif path.endswith(".npz"):
        arrs = np.load(path)
        # Write parameters
        for target, idx in zip(params_to_load, range(len(arrs.keys()))):
            source = arrs['arr_%d' % idx]
            logging.info('%s: setting value %s', target.name, source.shape)
            target.set_value(source)
    else:
        raise Exception("File format of %s is not supported, use '.gz' or '.npz' or '.uncompressed.gz'" % path)
    self.train_logger.load(path)
Print network statistics.
def report(self):
    """
    Print network statistics.
    """
    logging.info("network inputs: %s", " ".join(map(str, self.input_variables)))
    logging.info("network targets: %s", " ".join(map(str, self.target_variables)))
    logging.info("network parameters: %s", " ".join(map(str, self.all_parameters)))
    logging.info("parameter count: %d", self.parameter_count)
Initialize the layer. :param no_prepare: avoid calling the preparation function.
def init(self, input_dim=0, input_dims=None, no_prepare=False):
    """
    Initialize the layer.
    :param no_prepare: avoid calling the preparation function
    """
    if self.initialized:
        return
    # configure input dimensions
    if input_dims:
        self.input_dims = input_dims
        self.input_dim = input_dims[0]
    else:
        self.input_dim = input_dim
        self.input_dims = [input_dims]
    # set default output dimension
    if self.output_dim == 0:
        self.output_dim = self.input_dim
    self.initialized = True
    # call prepare
    if not no_prepare:
        self.prepare()
    return self
Compute based on NeuralVariable. :type inputs: list of NeuralVariable. :return: NeuralVariable.
def compute(self, *inputs, **kwargs):
    """
    Compute based on NeuralVariable.
    :type inputs: list of NeuralVariable
    :return: NeuralVariable
    """
    from deepy.core.neural_var import NeuralVariable
    from deepy.core.graph import graph
    if type(inputs[0]) != NeuralVariable:
        raise SystemError("The input of `compute` must be NeuralVar")
    dims = [t.dim() for t in inputs]
    if len(inputs) == 1:
        self.init(input_dim=dims[0])
    else:
        self.init(input_dims=dims)
    # Check block
    if self.parameters and not self._linked_block:
        self.belongs_to(graph.default_block())
    # convert kwargs
    train_kwargs, _, _ = convert_to_theano_var(kwargs)
    output = self.compute_tensor(*[t.tensor for t in inputs], **train_kwargs)
    if type(output) != list and type(output) != tuple:
        return NeuralVariable(output, dim=self.output_dim)
    else:
        return [NeuralVariable(*item) for item in zip(output, self.output_dims)]
Let the given block or network manage the parameters of this layer. :param block: Block or NeuralNetwork. :return: NeuralLayer.
def belongs_to(self, block):
    """
    Let the given block or network manage the parameters of this layer.
    :param block: Block or NeuralNetwork
    :return: NeuralLayer
    """
    if self._linked_block:
        raise SystemError("The layer {} already belongs to {}".format(self.name, self._linked_block.name))
    self._linked_block = block
    block.register_layer(self)
    return self
Register parameters.
def register_parameters(self, *parameters):
    """
    Register parameters.
    """
    for param in parameters:
        self.parameter_count += np.prod(param.get_value().shape)
    self.parameters.extend(parameters)
Register updates that will be executed in each iteration.
def register_updates(self, *updates):
    """
    Register updates that will be executed in each iteration.
    """
    for key, node in updates:
        if key not in self._registered_updates:
            self.updates.append((key, node))
            self._registered_updates.add(key)
Register updates that will only be executed in training phase.
def register_training_updates(self, *updates):
    """
    Register updates that will only be executed in the training phase.
    """
    for key, node in updates:
        if key not in self._registered_training_updates:
            self.training_updates.append((key, node))
            self._registered_training_updates.add(key)
Register monitors; each should be a tuple of (name, Theano variable).
def register_monitors(self, *monitors):
    """
    Register monitors; each should be a tuple of (name, Theano variable).
    """
    for key, node in monitors:
        if key not in self._registered_monitors:
            node *= 1.0  # Avoid CudaNdarray
            self.training_monitors.append((key, node))
            self.testing_monitors.append((key, node))
            self._registered_monitors.add(key)
Get the L2 norm of multiple tensors. This function is taken from blocks.
def multiple_l2_norm(tensors):
    """
    Get the L2 norm of multiple tensors.
    This function is taken from blocks.
    """
    # Another way of doing this; it is unclear which one is faster:
    # return T.sqrt(sum(T.sum(t ** 2) for t in tensors))
    flattened = [T.as_tensor_variable(t).flatten() for t in tensors]
    flattened = [(t if t.ndim > 0 else t.dimshuffle('x')) for t in flattened]
    joined = T.join(0, *flattened)
    return T.sqrt(T.sqr(joined).sum())
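The same quantity in plain numpy, as a mental model: flatten each tensor, concatenate, and take one L2 norm over the result (a sketch, not the Theano graph above):
import numpy as np

def multiple_l2_norm_np(arrays):
    # One global norm over all entries of all arrays.
    joined = np.concatenate([np.asarray(a).ravel() for a in arrays])
    return np.sqrt((joined ** 2).sum())

print(multiple_l2_norm_np([np.ones((2, 2)), np.array(3.0)]))  # sqrt(4 + 9)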
Dumps one element to file_obj, a file opened in write mode.
def dump_one(elt_to_pickle, file_obj):
    """
    dumps one element to file_obj, a file opened in write mode
    """
    pickled_elt_str = dumps(elt_to_pickle)
    file_obj.write(pickled_elt_str)
    # record separator is a blank line
    # (since pickled_elt_str might contain its own newlines)
    file_obj.write('\n\n')
Load contents from file_obj, returning a generator that yields one element at a time.
def load(file_obj):
    """
    load contents from file_obj, returning a generator that
    yields one element at a time
    """
    cur_elt = []
    for line in file_obj:
        cur_elt.append(line)
        if line == '\n':
            pickled_elt_str = ''.join(cur_elt)
            cur_elt = []
            try:
                elt = loads(pickled_elt_str)
            except ValueError:
                continue
            yield elt
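A round-trip sketch for this pair of helpers, assuming `dumps`/`loads` here are the text-protocol pickle functions (records are separated by blank lines, so the blank-line separator written by dump_one is what load keys on):
# Write several records, then stream them back one at a time.
with open("records.pkl", "w") as f:
    for record in [{"a": 1}, [1, 2, 3], "hello"]:
        dump_one(record, f)
with open("records.pkl") as f:
    for record in load(f):
        print(record)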
Fix the block and register all the parameters of sub layers.
def fix(self):
    """
    Fix the block and register all the parameters of sub layers.
    :return:
    """
    if not self.fixed:
        for layer in self.layers:
            if not layer.initialized:
                raise Exception("All sub layers in a block must be initialized when fixing it.")
            self.register_inner_layers(layer)
        self.fixed = True
Register one connected layer. :type layer: NeuralLayer.
def register_layer(self, layer):
    """
    Register one connected layer.
    :type layer: NeuralLayer
    """
    if self.fixed:
        raise Exception("After a block is fixed, no more layers can be registered.")
    self.layers.append(layer)
Load parameters to the block.
def load_params(self, path, exclude_free_params=False):
    """
    Load parameters to the block.
    """
    from deepy.core import graph
    from deepy.core.comp_graph import ComputationalGraph
    model = graph.compile(blocks=[self])
    model.load_params(path, exclude_free_params=exclude_free_params)
Compute one step in the RNN. :return: one variable for RNN and GRU, multiple variables for LSTM.
def compute_step(self, state, lstm_cell=None, input=None, additional_inputs=None):
    """
    Compute one step in the RNN.
    :return: one variable for RNN and GRU, multiple variables for LSTM
    """
    if not self.initialized:
        input_dim = None
        if input and hasattr(input.tag, 'last_dim'):
            input_dim = input.tag.last_dim
        self.init(input_dim)
    input_map = self.merge_inputs(input, additional_inputs=additional_inputs)
    input_map.update({"state": state, "lstm_cell": lstm_cell})
    output_map = self.compute_new_state(input_map)
    outputs = [output_map.pop("state")]
    outputs += output_map.values()
    for tensor in outputs:
        tensor.tag.last_dim = self.hidden_size
    if len(outputs) == 1:
        return outputs[0]
    else:
        return outputs
:type input_var: T.var. :rtype: dict.
def get_initial_states(self, input_var, init_state=None):
    """
    :type input_var: T.var
    :rtype: dict
    """
    initial_states = {}
    for state in self.state_names:
        if state != "state" or not init_state:
            if self._input_type == 'sequence' and input_var.ndim == 2:
                init_state = T.alloc(np.cast[env.FLOATX](0.), self.hidden_size)
            else:
                init_state = T.alloc(np.cast[env.FLOATX](0.), input_var.shape[0], self.hidden_size)
        initial_states[state] = init_state
    return initial_states
:type input_var: T.var. :rtype: dict.
def get_step_inputs(self, input_var, states=None, mask=None, additional_inputs=None):
    """
    :type input_var: T.var
    :rtype: dict
    """
    step_inputs = {}
    if self._input_type == "sequence":
        if not additional_inputs:
            additional_inputs = []
        if mask:
            step_inputs['mask'] = mask.dimshuffle(1, 0)
        step_inputs.update(self.merge_inputs(input_var, additional_inputs=additional_inputs))
    else:
        # step_inputs["mask"] = mask.dimshuffle((1, 0)) if mask else None
        if additional_inputs:
            step_inputs.update(self.merge_inputs(None, additional_inputs=additional_inputs))
    if states:
        for name in self.state_names:
            step_inputs[name] = states[name]
    return step_inputs