import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1"

import torch
from sklearn.metrics import f1_score, classification_report
from transformers import CamembertTokenizer, CamembertForSequenceClassification

from loadDataSet import loadData, labels_to_numeric, flatten_labels
from helpers import get_device

def get_prediction(text, tokenizer, model, max_len, device):
    # Tokenize the text into a padded/truncated tensor sequence
    inputs = tokenizer(text, padding=True, truncation=True, max_length=max_len, return_tensors="pt").to(device)
    # Run inference without tracking gradients
    with torch.no_grad():
        outputs = model(**inputs)
    # Turn the logits into per-class probabilities with a softmax
    probs = outputs[0].softmax(dim=1)
    # Return the full distribution; the caller applies argmax for the label
    return probs
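
# Example usage (hypothetical sentence; tokenizer/model/device are set up
# in the __main__ block below):
#   probs = get_prediction("Une phrase d'exemple.", tokenizer, model, 128, device)
#   pred_class = probs.argmax().item()  # 0 - BE, 1 - CA, 2 - CH, 3 - FR
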
if __name__ == "__main__":
    # Device
    device = get_device()
    # Paths
    base_path = "../code/"  # previously "/home/mgaman/projects/french_dialect/data/Corpus/"
    test_path = base_path + "test_slices.txt"
    # Load the data
    testSamples, testLabels = loadData("test", test_path)
    print("Test size: %d" % len(testSamples))
    # BERT directory
    bert_dir = './bert_models_saved/best_model/'
    # Load the CamemBERT tokenizer (SentencePiece-based; it has no
    # lowercasing option, so the former do_lowercase flag was a no-op)
    print('Loading CamemBERT tokenizer...')
    tokenizer = CamembertTokenizer.from_pretrained(bert_dir)
    # Compute max sentence length
    # Sentence length: Max = 512; Min = 3; Average = 445.26780711145534
    #compute_max_sent_length(tokenizer, trainSamples)
    # Model path
    model_path = os.path.join(bert_dir, "pytorch_model.bin")
    # Load the architecture, then the fine-tuned weights
    model = CamembertForSequenceClassification.from_pretrained("camembert-base", num_labels=4, output_hidden_states=True)
    model = model.float()
    model.load_state_dict(torch.load(model_path, map_location=device))
    model.to(device)
    # Switch to eval mode so dropout is disabled during inference
    model.eval()
    # Max sequence length for tokenization; note the recorded average above
    # is ~445 tokens, so 128 truncates most samples
    max_len = 128
    # Labels to numeric format
    # 0 - BE, 1 - CA, 2 - CH, 3 - FR
    testLabels = labels_to_numeric(testLabels)
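    # Sanity check: the evaluation below assumes samples and labels align
    assert len(testSamples) == len(testLabels)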
    # Predict
    preds_all_labels = []
    preds_proba = []
    preds = []
    for i in range(len(testSamples)):
        # Get the prediction for the i-th sample
        pred = get_prediction(testSamples[i], tokenizer, model, max_len, device)
        # Get the list of pred probs for each class
        pred_list = pred.cpu().detach().numpy().tolist()[0]
        preds_all_labels.append(pred_list)
        # Get the prediction probability for the most likely class
        pred_proba = pred.max().item()
        preds_proba.append(pred_proba)
        # Get the predicted class index
        pred_index = pred.argmax().item()
        preds.append(pred_index)
    print(classification_report(testLabels, preds, digits=6, target_names=["BE", "CA", "CH", "FR"]))
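    # A confusion matrix complements the report above with raw per-dialect
    # error counts (sketch; sklearn is already a dependency of this script)
    from sklearn.metrics import confusion_matrix
    print(confusion_matrix(testLabels, preds))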
    # --- Alternative evaluation path with the project's custom model ---
    # NOTE: CustomBERTModel, predict, compute_accuracy, input_ids_test and
    # test_dataloader are assumed to be provided by the project's training
    # code; they are not defined or imported in this script.
    batch_size = 32
    model_path = os.path.join(bert_dir, "bert.model")
    best_model = CustomBERTModel()
    # Load the saved weights (assuming bert.model stores a state_dict; if the
    # whole model was pickled, use best_model = torch.load(model_path) instead)
    best_model.load_state_dict(torch.load(model_path, map_location=device))
    best_model.to(device)
    # Predict
    test_gt, test_preds = predict(best_model, input_ids_test, test_dataloader, device)
    # Accuracy
    compute_accuracy(test_gt, test_preds)
    # Flatten the per-batch outputs into flat label lists
    test_gt_flat, test_preds_flat = flatten_labels(test_gt, test_preds)
    # Macro F1 score
    f1_macro = f1_score(test_gt_flat, test_preds_flat, average='macro')
    print("F1 macro: ", f1_macro)
    # Write preds in file
    #writePredictions(train_preds, os.path.join(".", "predictions_train.csv"))
    #writePredictions(val_preds, os.path.join(".", "predictions_validation.csv"))
    #writePredictions(test_preds, os.path.join(".", "predictions_test.csv"))
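    # Sketch of persisting the class probabilities without the project's
    # writePredictions helper (hypothetical output format, using pandas):
    # import pandas as pd
    # pd.DataFrame(preds_all_labels, columns=["BE", "CA", "CH", "FR"]).to_csv(
    #     os.path.join(".", "predictions_test.csv"), index=False)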