Datasets:
Tasks:
Token Classification
Modalities:
Text
Formats:
parquet
Sub-tasks:
named-entity-recognition
Languages:
English
Size:
1K - 10K
License:
scoring scripts
Browse files
scoring-scripts/compute_MCC.py
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from sklearn.metrics import matthews_corrcoef
import numpy as np


def compute_MCC(references_dataset, predictions_dataset, ref_col='ner_tags', pred_col='pred_ner_tags'):
    """Compute the Matthews correlation coefficient between two tag datasets.

    Both arguments are expected to be dataset objects exposing a
    ``.sort(column)`` method and column indexing (e.g. a HuggingFace
    ``datasets.Dataset``) with a ``'unique_id'`` column, a ``'tokens'``
    column, and per-example tag-sequence columns — TODO confirm against
    callers.

    Parameters
    ----------
    references_dataset : dataset with the gold tags in `ref_col`.
    predictions_dataset : dataset with the predicted tags in `pred_col`.
    ref_col : str, name of the reference tag column (default 'ner_tags').
    pred_col : str, name of the prediction tag column (default 'pred_ner_tags').

    Returns
    -------
    float : the token-level MCC score over all examples.

    Raises
    ------
    ValueError : if the token sequences of the two datasets do not match
        after sorting by 'unique_id'.
    """
    # Sort both datasets by id so examples line up pairwise.
    references_dataset = references_dataset.sort('unique_id')
    predictions_dataset = predictions_dataset.sort('unique_id')

    # Validate alignment explicitly: `assert` is stripped under `python -O`,
    # so raise a real exception instead.
    if references_dataset['tokens'] != predictions_dataset['tokens']:
        raise ValueError(
            "Token sequences of references and predictions do not match."
        )

    # MCC is a token-level metric, so the per-example tag lists
    # have to be flattened into single 1-D arrays.
    flat_ref_tags = np.concatenate(references_dataset[ref_col])
    flat_pred_tags = np.concatenate(predictions_dataset[pred_col])

    return matthews_corrcoef(y_true=flat_ref_tags, y_pred=flat_pred_tags)
|
scoring-scripts/compute_seqeval.py
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from datasets import load_metric
from ast import literal_eval


def compute_seqeval(references_dataset, predictions_dataset, ref_col='ner_tags', pred_col='pred_ner_tags'):
    """Compute seqeval (entity-level P/R/F1/accuracy) scores for NER tags.

    Both arguments are expected to be dataset objects exposing a
    ``.sort(column)`` method and column indexing (e.g. a HuggingFace
    ``datasets.Dataset``) with 'unique_id' and 'tokens' columns and
    per-example IOB2 tag-sequence columns — TODO confirm against callers.

    Parameters
    ----------
    references_dataset : dataset with the gold tags in `ref_col`.
    predictions_dataset : dataset with the predicted tags in `pred_col`.
    ref_col : str, name of the reference tag column (default 'ner_tags').
    pred_col : str, name of the prediction tag column (default 'pred_ner_tags').

    Returns
    -------
    dict : seqeval results with all numeric values as plain Python
        numbers, so the dict can be serialized to JSON.

    Raises
    ------
    ValueError : if the token sequences of the two datasets do not match
        after sorting by 'unique_id'.
    """
    # Sort both datasets by id so examples line up pairwise.
    references_dataset = references_dataset.sort('unique_id')
    predictions_dataset = predictions_dataset.sort('unique_id')

    # Load the huggingface metric function.
    seqeval = load_metric('seqeval')

    # Validate alignment explicitly: `assert` is stripped under `python -O`,
    # so raise a real exception instead.
    if references_dataset['tokens'] != predictions_dataset['tokens']:
        raise ValueError(
            "Token sequences of references and predictions do not match."
        )

    # NOTE(review): tags are assumed to already be in IOB2 format; seqeval
    # is told so via `scheme`, but no conversion is performed here.

    # Compute entity-level scores.
    seqeval_results = seqeval.compute(predictions=predictions_dataset[pred_col],
                                      references=references_dataset[ref_col],
                                      scheme='IOB2',
                                      suffix=False,
                                      )

    # Convert numpy scalars to builtin numbers so the result can be
    # serialized to JSON. The previous `literal_eval(str(results))`
    # round-trip breaks when numpy's repr is not a valid Python literal
    # (e.g. `np.float64(0.5)` under numpy >= 2.0), so convert recursively.
    return _to_builtin(seqeval_results)


def _to_builtin(obj):
    # Recursively replace numpy scalars with plain Python numbers.
    if isinstance(obj, dict):
        return {key: _to_builtin(value) for key, value in obj.items()}
    if isinstance(obj, (list, tuple)):
        return [_to_builtin(value) for value in obj]
    if hasattr(obj, 'item'):  # numpy scalar exposes .item() -> builtin type
        return obj.item()
    return obj
|