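# Setup sketch (not from the original file): this benchmark fragment starts
# mid-script, so the names it uses are defined earlier. A minimal
# reconstruction, with illustrative sizes, might look like:
#
#   import time
#   import numpy as np
#   from keras.layers import Input
#   from keras.models import Model
#   from huffmax import Huffmax
#
#   nb_classes, nb_samples, batch_size = 1000, 1000, 16
#   vector = Input((100,))
#   X = np.random.random((nb_samples, 100))
#   Y_huffmax = np.random.randint(0, nb_classes, size=(nb_samples, 1))
#   times = {}
#   mode = 0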
# Build the Huffmax classifier: it scores only the nodes on the Huffman-code
# path of the target class, so the target class index is a second input.
target_class = Input((1,))
probability = Huffmax(nb_classes, verbose=True, mode=mode)([vector, target_class])
huffmax_model = Model(input=[vector, target_class], output=probability)
huffmax_model.compile(loss='mse', optimizer='sgd')

# Warm-up prediction on one sample before timing starts.
huffmax_model.predict([X[:1], Y_huffmax[:1]])

# Time one training pass. The regression target is all ones: the model should
# assign probability 1 to the true class it is given as input.
start_time = time.time()
huffmax_model.fit([X, Y_huffmax], np.ones((nb_samples, 1)), batch_size=batch_size)
end_time = time.time()
times['Huffmax (mode ' + str(mode) + ')'] = end_time - start_time
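# Baseline sketch (not from the original file): the `times` dict suggests the
# script compares several methods; a plain-softmax benchmark timed the same
# way (assuming keras.layers.Dense) could look like:
#
#   probability = Dense(nb_classes, activation='softmax')(vector)
#   softmax_model = Model(input=vector, output=probability)
#   softmax_model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd')
#   start_time = time.time()
#   softmax_model.fit(X, Y_huffmax, batch_size=batch_size)
#   times['Softmax'] = time.time() - start_time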
# Report the recorded timings.
for key in times.keys():
    print(key + ' : ' + str(times[key]))
# <FILESEP>
import math

import numpy as np
import torch
from torch import nn
from torch.autograd import Variable
from torch.nn import functional as F
from torch.nn import init
from torch.nn.utils import rnn
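
# ---------------------------------------------------------------------------
# NOTE: EncoderRNN and BiAttention are used by SPModel below, but their
# definitions are not part of this excerpt. The minimal sketches here are
# reconstructions from the call sites, not the original implementations.
# From the calls, EncoderRNN appears to take
#   (input_size, num_units, nlayers, concat, bidir, dropout, return_last),
# and BiAttention appears to take (input_size, dropout) and return
# input_size * 4 features, consistent with nn.Linear(config.hidden * 8, ...)
# being applied to its output over hidden * 2 features.
# ---------------------------------------------------------------------------

class EncoderRNN(nn.Module):
    """Sketch: a (bi)directional GRU returning per-step hidden states.

    `concat` (layer-output concatenation in the original) and sequence
    packing via torch.nn.utils.rnn are omitted in this sketch.
    """

    def __init__(self, input_size, num_units, nlayers, concat, bidir, dropout, return_last):
        super().__init__()
        self.return_last = return_last
        self.dropout = nn.Dropout(dropout)
        self.rnn = nn.GRU(input_size, num_units, nlayers,
                          batch_first=True, bidirectional=bidir)

    def forward(self, input, input_lengths=None):
        # input: (batch, seq_len, input_size). input_lengths is accepted for
        # API compatibility with the call sites but ignored here.
        output, hidden = self.rnn(self.dropout(input))
        if self.return_last:
            return hidden[-1]
        return output  # (batch, seq_len, num_units * num_directions)


class BiAttention(nn.Module):
    """Sketch: BiDAF-style context-query bi-attention with trilinear scores."""

    def __init__(self, input_size, dropout):
        super().__init__()
        self.dropout = nn.Dropout(dropout)
        self.input_linear = nn.Linear(input_size, 1, bias=False)
        self.memory_linear = nn.Linear(input_size, 1, bias=False)
        self.dot_scale = nn.Parameter(
            torch.full((input_size,), 1.0 / input_size ** 0.5))

    def forward(self, input, memory, mask):
        # input: (batch, n, d) context; memory: (batch, m, d) query;
        # mask: (batch, m) float, 1 for real query tokens, 0 for padding.
        bsz, n, m = input.size(0), input.size(1), memory.size(1)
        input = self.dropout(input)
        memory = self.dropout(memory)

        # Trilinear similarity: w1.x + w2.y + (x * s).y for every pair.
        att = (self.input_linear(input)
               + self.memory_linear(memory).view(bsz, 1, m)
               + torch.bmm(input * self.dot_scale,
                           memory.permute(0, 2, 1).contiguous()))
        att = att - 1e30 * (1 - mask[:, None])  # mask out query padding

        # Context-to-query and query-to-context attention, as in BiDAF.
        weight_one = F.softmax(att, dim=-1)
        output_one = torch.bmm(weight_one, memory)
        weight_two = F.softmax(att.max(dim=-1)[0], dim=-1).view(bsz, 1, n)
        output_two = torch.bmm(weight_two, input)

        # Concatenation gives 4 * input_size features per context position.
        return torch.cat([input, output_one, input * output_one,
                          output_two * output_one], dim=-1)
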
class SPModel(nn.Module):
    def __init__(self, config, word_mat, char_mat):
        super().__init__()
        self.config = config
        self.word_dim = config.glove_dim

        # Word embeddings: initialized from pretrained GloVe vectors and frozen.
        self.word_emb = nn.Embedding(len(word_mat), len(word_mat[0]), padding_idx=0)
        self.word_emb.weight.data.copy_(torch.from_numpy(word_mat))
        self.word_emb.weight.requires_grad = False

        # Character embeddings (trainable) followed by a width-5 CNN; each
        # word's character representation is max-pooled over positions.
        self.char_emb = nn.Embedding(len(char_mat), len(char_mat[0]), padding_idx=0)
        self.char_emb.weight.data.copy_(torch.from_numpy(char_mat))
        self.char_cnn = nn.Conv1d(config.char_dim, config.char_hidden, 5)
        self.char_hidden = config.char_hidden
        self.hidden = config.hidden

        # Shared encoder over [word; char] features for context and question.
        self.rnn = EncoderRNN(config.char_hidden + self.word_dim, config.hidden, 1, True, True, 1 - config.keep_prob, False)

        # Context-question bi-attention, projected back down to `hidden` dims.
        self.qc_att = BiAttention(config.hidden * 2, 1 - config.keep_prob)
        self.linear_1 = nn.Sequential(
            nn.Linear(config.hidden * 8, config.hidden),
            nn.ReLU()
        )

        # Self-attention block over the attended context.
        self.rnn_2 = EncoderRNN(config.hidden, config.hidden, 1, False, True, 1 - config.keep_prob, False)
        self.self_att = BiAttention(config.hidden * 2, 1 - config.keep_prob)
        self.linear_2 = nn.Sequential(
            nn.Linear(config.hidden * 8, config.hidden),
            nn.ReLU()
        )

        # Prediction heads: supporting-fact scoring, answer start/end
        # pointers, and answer-type classification (3 classes).
        self.rnn_sp = EncoderRNN(config.hidden, config.hidden, 1, False, True, 1 - config.keep_prob, False)
        self.linear_sp = nn.Linear(config.hidden * 2, 1)
        self.rnn_start = EncoderRNN(config.hidden * 3, config.hidden, 1, False, True, 1 - config.keep_prob, False)
        self.linear_start = nn.Linear(config.hidden * 2, 1)
        self.rnn_end = EncoderRNN(config.hidden * 3, config.hidden, 1, False, True, 1 - config.keep_prob, False)
        self.linear_end = nn.Linear(config.hidden * 2, 1)
        self.rnn_type = EncoderRNN(config.hidden * 3, config.hidden, 1, False, True, 1 - config.keep_prob, False)
        self.linear_type = nn.Linear(config.hidden * 2, 3)

        # Cached band mask for start/end outer products (see get_output_mask).
        self.cache_S = 0

    def get_output_mask(self, outer):
        # Band mask over (start, end) pairs: start <= end <= start + 15,
        # i.e. candidate answer spans are capped at 16 tokens. The mask is
        # cached and sliced for any sequence length up to cache_S.
        S = outer.size(1)
        if S <= self.cache_S:
            return Variable(self.cache_mask[:S, :S], requires_grad=False)
        self.cache_S = S
        np_mask = np.tril(np.triu(np.ones((S, S)), 0), 15)
        self.cache_mask = outer.data.new(S, S).copy_(torch.from_numpy(np_mask))
        return Variable(self.cache_mask, requires_grad=False)
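
    # Illustration (not from the original file): np.tril(np.triu(ones, 0), k)
    # keeps entries with i <= j <= i + k. For S = 4 and k = 2:
    #
    #   np.tril(np.triu(np.ones((4, 4)), 0), 2)
    #   -> [[1., 1., 1., 0.],
    #       [0., 1., 1., 1.],
    #       [0., 0., 1., 1.],
    #       [0., 0., 0., 1.]]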

    def forward(self, context_idxs, ques_idxs, context_char_idxs, ques_char_idxs, context_lens, start_mapping, end_mapping, all_mapping, return_yp=False):
        para_size, ques_size, char_size, bsz = context_idxs.size(1), ques_idxs.size(1), context_char_idxs.size(2), context_idxs.size(0)

        # Padding masks (index 0 is the padding token).
        context_mask = (context_idxs > 0).float()
        ques_mask = (ques_idxs > 0).float()

        # Character-level features: embed each word's characters, run the
        # char CNN, and max-pool over character positions.
        context_ch = self.char_emb(context_char_idxs.contiguous().view(-1, char_size)).view(bsz * para_size, char_size, -1)
        ques_ch = self.char_emb(ques_char_idxs.contiguous().view(-1, char_size)).view(bsz * ques_size, char_size, -1)
        context_ch = self.char_cnn(context_ch.permute(0, 2, 1).contiguous()).max(dim=-1)[0].view(bsz, para_size, -1)
        ques_ch = self.char_cnn(ques_ch.permute(0, 2, 1).contiguous()).max(dim=-1)[0].view(bsz, ques_size, -1)

        # Word-level features, concatenated with the character features.
        context_word = self.word_emb(context_idxs)
        ques_word = self.word_emb(ques_idxs)
        context_output = torch.cat([context_word, context_ch], dim=2)
        ques_output = torch.cat([ques_word, ques_ch], dim=2)

        # Encode both sequences, then attend from context to question.
        context_output = self.rnn(context_output, context_lens)
        ques_output = self.rnn(ques_output)
        output = self.qc_att(context_output, ques_output, ques_mask)
        output = self.linear_1(output)
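
# Usage sketch (not from the original file): SPModel expects a config object
# exposing glove_dim, char_dim, char_hidden, hidden, and keep_prob, plus
# pretrained word/char embedding matrices as numpy float32 arrays whose
# widths match glove_dim and char_dim, e.g.:
#
#   from types import SimpleNamespace
#   config = SimpleNamespace(glove_dim=300, char_dim=8, char_hidden=100,
#                            hidden=80, keep_prob=0.8)
#   word_mat = np.zeros((vocab_size, 300), dtype=np.float32)
#   char_mat = np.zeros((char_vocab_size, 8), dtype=np.float32)
#   model = SPModel(config, word_mat, char_mat)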