# Main entry point: wires the shared base network and the task heads together and runs training.
import ast
import json

import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from nltk.tokenize import WhitespaceTokenizer
from sklearn.preprocessing import LabelEncoder
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader, TensorDataset

from Multilabel_task_head import MultiLabelTaskHead
from singlelabel_task_head import SingleLabelTaskHead
from base_network import base_network
from multi_task import MultiTaskModel
# Hyperparameters
batch_size = 32
num_epochs = 1000
max_seq_length = 128  # sentences are truncated/padded to this many tokens
input_size = 128      # feature size fed to each task head
device = 'cpu'
# Load the data
with open('data/data_Xtrain.json', 'r') as file:
    X_train = json.load(file)
with open('data/data_Xval.json', 'r') as file:
    Xval = json.load(file)
y_train = pd.read_csv('data/data_ytrain.csv')
# Single-label target: the annotated intent of the lie
y_train_s = y_train['Intent Of Lie (Gaining Advantage/Gaining Esteem/Avoiding Punishment/Avoiding Embarrassment/Protecting Themselves)']
# yval = pd.read_csv('data/data_yval.csv')
# Multi-label target: stored as stringified lists, so parse them back into Python lists
y_train_m1 = y_train['ordered_list_1'].apply(ast.literal_eval)
# Encode the string intent labels as integer class indices
le = LabelEncoder()
y_train_s = le.fit_transform(y_train_s)
# Convert the data into numpy arrays
X_train = np.array(X_train)
# Xval = np.array(Xval)
y_train_s = np.array(y_train_s)
# yval = np.array(yval)
print(X_train.shape)
# Tokenize and pad the data
tokenizer = WhitespaceTokenizer()
tokenized_sentences = [tokenizer.tokenize(sentence)[:max_seq_length]
                       for sentence in X_train]
# Build a vocabulary; index 0 is reserved for padding (and doubles as the fallback
# for tokens not in the vocab)
vocab = {token: i + 1
         for i, token in enumerate(set(token for sent in tokenized_sentences
                                       for token in sent))}
# Map tokens to indices and pad every sequence out to max_seq_length
indexed_sequences = [torch.tensor([vocab.get(token, 0) for token in sent]
                                  + [0] * (max_seq_length - len(sent)))
                     for sent in tokenized_sentences]
padded_sequences = pad_sequence(indexed_sequences, batch_first=True, padding_value=0)
# attention_mask = torch.where(padded_sequences != 0, torch.tensor(1), torch.tensor(0))
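# Quick sanity check (not in the original script): every sequence above is padded
# to exactly max_seq_length tokens, so the batch should be (num_samples, max_seq_length).
assert padded_sequences.shape[1] == max_seq_length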
# padded_sequences is already a tensor, so avoid re-wrapping it in torch.tensor()
X_train = padded_sequences.to(device)
y_train_s = torch.tensor(y_train_s, dtype=torch.long).to(device)
dataset_train = TensorDataset(X_train, y_train_s)
dataloader_train = DataLoader(
    dataset_train, batch_size=batch_size, shuffle=True)
# ### define for validation
# dataset_val = TensorDataset(Xval, yval)
# dataloader_val = DataLoader(dataset_val, batch_size=batch_size, shuffle=True)
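# Validation wiring (sketch): data_yval.csv is assumed to mirror data_ytrain.csv.
# Xval would need the same tokenize/index/pad pipeline as X_train above, reusing
# the training vocab so indices line up; the names below are hypothetical:
#   tokenized_val = [tokenizer.tokenize(s)[:max_seq_length] for s in Xval]
#   indexed_val = [torch.tensor([vocab.get(t, 0) for t in sent]
#                               + [0] * (max_seq_length - len(sent)))
#                  for sent in tokenized_val]
#   Xval_padded = pad_sequence(indexed_val, batch_first=True, padding_value=0)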
# One head per task: a single-label classifier and a multi-label classifier,
# both consuming the 128-dim representation from the shared base network.
task_heads = [
    SingleLabelTaskHead(input_size=128, output_size=10, device=device),
    MultiLabelTaskHead(input_size=128, output_size=10, device=device),
]
# The embedding table must cover every vocab index plus the padding index 0,
# hence len(vocab) + 1 (hardcoded as 7700 + 1 in the original run).
model = MultiTaskModel(
    base_network(input_size=len(vocab) + 1, embedding_size=128, output_size=128,
                 hidden_size=128, num_layers=2, dropout=0.5, bidirectional=False,
                 device=device),
    task_heads, device=device)
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_fn = nn.CrossEntropyLoss()
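# Note (sketch): CrossEntropyLoss matches the single-label head; the multi-label
# head would normally pair with nn.BCEWithLogitsLoss on multi-hot float targets, e.g.:
#   task_criteria = [nn.CrossEntropyLoss(), nn.BCEWithLogitsLoss()]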
def train(model, dataloader_train, optimizer, criterion, epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(dataloader_train):
        optimizer.zero_grad()
        task_outputs = model(data)
        # Only the single-label target is batched, so zip pairs it with the
        # first task head's output; the multi-label head is not trained here.
        losses = [criterion(output, label)
                  for output, label in zip(task_outputs, [target])]
        loss = sum(losses)
        loss.backward()
        optimizer.step()
        if batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(dataloader_train.dataset),
                100. * batch_idx / len(dataloader_train), loss.item()))
for epoch in range(1, num_epochs + 1):
    train(model, dataloader_train, optimizer, loss_fn, epoch)
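# Persisting weights (sketch, not in the original script): save the state dict
# once training finishes; the filename here is a placeholder.
# torch.save(model.state_dict(), 'multitask_model.pt')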