import ast
import json

import numpy as np
import pandas as pd
from nltk.tokenize import WhitespaceTokenizer
from sklearn.preprocessing import LabelEncoder

import torch
import torch.nn as nn
import torch.optim as optim
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader, TensorDataset

from Multilabel_task_head import MultiLabelTaskHead
from singlelabel_task_head import SingleLabelTaskHead
from base_network import base_network
from multi_task import MultiTaskModel

# Hyperparameters.
batch_size = 32
num_epochs = 1000
max_seq_length = 128
input_size = 128
device = 'cpu'
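
# If a GPU is available, switching to it is a one-line change (a suggestion,
# not in the original script):
#   device = 'cuda' if torch.cuda.is_available() else 'cpu'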

# Load the raw training and validation sentences.
with open('data/data_Xtrain.json', 'r') as file:
    X_train = json.load(file)

with open('data/data_Xval.json', 'r') as file:
    Xval = json.load(file)

y_train = pd.read_csv('data/data_ytrain.csv')

# Single-label target: the annotated intent behind each lie.
y_train_s = y_train['Intent Of Lie (Gaining Advantage/Gaining Esteem/Avoiding Punishment/Avoiding Embarrassment/Protecting Themselves)']

# Multi-label target: stored in the CSV as stringified lists
# (e.g. "['a', 'b']"), so parse them back into Python lists.
y_train_m1 = y_train['ordered_list_1']
y_train_m1 = y_train_m1.apply(ast.literal_eval)

# Encode the string intent labels as integer class indices.
le = LabelEncoder()
y_train_s = le.fit_transform(y_train_s)

X_train = np.array(X_train)
y_train_s = np.array(y_train_s)

print(X_train.shape)

# Whitespace-tokenize each sentence and truncate to max_seq_length tokens.
tokenizer = WhitespaceTokenizer()
tokenized_sentences = [tokenizer.tokenize(sentence)[:max_seq_length]
                       for sentence in X_train]

# Build the vocabulary; index 0 is reserved for padding / unknown tokens.
vocab = {token: i + 1 for i, token in enumerate(
    set(token for sent in tokenized_sentences for token in sent))}

# Map tokens to indices and right-pad each sequence to max_seq_length with 0s.
indexed_sequences = [torch.tensor([vocab.get(token, 0) for token in sent]
                                  + [0] * (max_seq_length - len(sent)))
                     for sent in tokenized_sentences]

# Every sequence is already max_seq_length long, so pad_sequence here simply
# stacks them into a single (num_sentences, max_seq_length) tensor.
padded_sequences = pad_sequence(
    indexed_sequences, batch_first=True, padding_value=0)
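
# Note: vocab is built from a set, so the token-to-index mapping can differ
# between runs. Enumerating sorted(set(...)) instead would make the mapping
# deterministic (a suggestion, not the original behavior).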

# padded_sequences is already a LongTensor, so only the labels need converting.
X_train = padded_sequences
y_train_s = torch.tensor(y_train_s)

# Move everything to the target device once, so batches need no per-batch copy.
X_train = X_train.to(device)
y_train_s = y_train_s.to(device)

dataset_train = TensorDataset(X_train, y_train_s)
dataloader_train = DataLoader(
    dataset_train, batch_size=batch_size, shuffle=True)

# A shared base network feeds two task heads: a single-label intent classifier
# and a multi-label head. len(vocab) + 1 (the +1 covers the padding index)
# replaces the hardcoded 7700 + 1 so the embedding always matches the actual
# vocabulary size.
task_heads = [
    SingleLabelTaskHead(input_size=input_size, output_size=10, device=device),
    MultiLabelTaskHead(input_size=input_size, output_size=10, device=device),
]
model = MultiTaskModel(
    base_network(input_size=len(vocab) + 1, embedding_size=128,
                 output_size=128, hidden_size=128, num_layers=2, dropout=0.5,
                 bidirectional=False, device=device),
    task_heads,
    device=device,
)

optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_fn = nn.CrossEntropyLoss()
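
# nn.CrossEntropyLoss suits the single-label head. If the multi-label head is
# ever trained on multi-hot targets, nn.BCEWithLogitsLoss would be the usual
# choice; see the sketch at the end of this script.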

def train(model, dataloader_train, optimizer, criterion, epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(dataloader_train):
        optimizer.zero_grad()
        task_outputs = model(data)
        # Only the single-label target is available here, so zipping the head
        # outputs against [target] computes a loss for the first
        # (single-label) head only; the multi-label head is not trained.
        losses = [criterion(output, label)
                  for output, label in zip(task_outputs, [target])]
        loss = sum(losses)
        loss.backward()
        optimizer.step()
        if batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(dataloader_train.dataset),
                100. * batch_idx / len(dataloader_train), loss.item()))


for epoch in range(1, num_epochs + 1):
    train(model, dataloader_train, optimizer, loss_fn, epoch)
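
# --- Hedged sketch (not part of the original script) -------------------------
# y_train_m1 is parsed above but never used, even though the model includes a
# MultiLabelTaskHead. Assuming that head is meant to predict ordered_list_1,
# one plausible way to prepare its targets is to multi-hot encode the lists
# with sklearn's MultiLabelBinarizer and train that head with
# nn.BCEWithLogitsLoss:
#
#     from sklearn.preprocessing import MultiLabelBinarizer
#     mlb = MultiLabelBinarizer()
#     y_train_m1_hot = torch.tensor(mlb.fit_transform(y_train_m1),
#                                   dtype=torch.float32)
#     multilabel_criterion = nn.BCEWithLogitsLoss()
#
# train() would then pair each head's output with its own target and loss
# instead of zipping against [target].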