import math


def evaluate_annotations(go, real_annots, pred_annots):
    # CAFA-style evaluation of predicted GO annotations against the ground
    # truth: protein-centric precision/recall (combined into an F-measure),
    # plus remaining uncertainty (ru) and misinformation (mi), whose Euclidean
    # norm is the semantic distance S. `go` must expose get_ic(term), the
    # information content of a GO term.
    total = 0
    p = 0.0
    r = 0.0
    p_total = 0
    ru = 0.0
    mi = 0.0
    fps = []
    fns = []
    for i in range(len(real_annots)):
        # Proteins with no true annotations are skipped entirely.
        if len(real_annots[i]) == 0:
            continue
        tp = set(real_annots[i]).intersection(set(pred_annots[i]))
        fp = pred_annots[i] - tp
        fn = real_annots[i] - tp
        # Misinformation: total information content of the false positives.
        for go_id in fp:
            mi += go.get_ic(go_id)
        # Remaining uncertainty: total information content of the false negatives.
        for go_id in fn:
            ru += go.get_ic(go_id)
        fps.append(fp)
        fns.append(fn)
        tpn = len(tp)
        fpn = len(fp)
        fnn = len(fn)
        total += 1
        recall = tpn / (1.0 * (tpn + fnn))
        r += recall
        # Precision is averaged only over proteins with at least one prediction.
        if len(pred_annots[i]) > 0:
            p_total += 1
            precision = tpn / (1.0 * (tpn + fpn))
            p += precision
    ru /= total
    mi /= total
    r /= total
    if p_total > 0:
        p /= p_total
    f = 0.0
    if p + r > 0:
        f = 2 * p * r / (p + r)
    # Semantic distance: Euclidean distance from the origin in (ru, mi) space.
    s = math.sqrt(ru * ru + mi * mi)
    return f, p, r, s, ru, mi, fps, fns


if __name__ == '__main__':
    main()  # main() is defined in the surrounding script, not in this excerpt
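
# A minimal usage sketch of the metric above (not part of the original
# script). `ToyGO` and the sample annotation sets are illustrative stand-ins;
# a real run uses an ontology object whose get_ic() returns information
# content values computed from annotation frequencies.
class ToyGO:
    def __init__(self, ic):
        self.ic = ic

    def get_ic(self, go_id):
        return self.ic.get(go_id, 0.0)


toy_go = ToyGO({'GO:0000001': 2.0, 'GO:0000002': 1.0, 'GO:0000003': 3.0})
real = [{'GO:0000001', 'GO:0000002'}]   # ground-truth annotations per protein
pred = [{'GO:0000001', 'GO:0000003'}]   # predicted annotations per protein
f, p, r, s, ru, mi, fps, fns = evaluate_annotations(toy_go, real, pred)
# p = r = f = 0.5; ru = ic(GO:0000002) = 1.0; mi = ic(GO:0000003) = 3.0;
# s = sqrt(1.0**2 + 3.0**2) ≈ 3.162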
# <FILESEP>
from pickle import load
from utils.model import *
from utils.load_data import loadTrainData, loadValData, data_generator
from tensorflow.keras.callbacks import ModelCheckpoint
from config import config, rnnConfig
import random

# Setting random seed for reproducibility of results
random.seed(config['random_seed'])

"""
*Some simple sanity checks on the configuration
"""
assert type(config['num_of_epochs']) is int, 'Please provide an integer value for `num_of_epochs` parameter in config.py file'
assert type(config['max_length']) is int, 'Please provide an integer value for `max_length` parameter in config.py file'
assert type(config['batch_size']) is int, 'Please provide an integer value for `batch_size` parameter in config.py file'
assert type(config['beam_search_k']) is int, 'Please provide an integer value for `beam_search_k` parameter in config.py file'
assert type(config['random_seed']) is int, 'Please provide an integer value for `random_seed` parameter in config.py file'
assert type(rnnConfig['embedding_size']) is int, 'Please provide an integer value for `embedding_size` parameter in config.py file'
assert type(rnnConfig['LSTM_units']) is int, 'Please provide an integer value for `LSTM_units` parameter in config.py file'
assert type(rnnConfig['dense_units']) is int, 'Please provide an integer value for `dense_units` parameter in config.py file'
assert type(rnnConfig['dropout']) is float, 'Please provide a float value for `dropout` parameter in config.py file'
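
# The checks above pin down the shape of config.py. For reference, a
# hypothetical config.py consistent with them; every value below is an
# illustrative assumption, not the project's actual settings:
config = {
    'num_of_epochs': 20,
    'max_length': 40,             # maximum caption length in tokens
    'batch_size': 64,
    'beam_search_k': 3,
    'random_seed': 1,
    'model_type': 'inceptionv3',  # CNN encoder identifier, used in file names
    'tokenizer_path': 'model_data/tokenizer.pkl',
    'model_data_path': 'model_data/',
}
rnnConfig = {
    'embedding_size': 300,
    'LSTM_units': 256,
    'dense_units': 256,
    'dropout': 0.3,
}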
"""
*Load Data
*X1 : Image features
*X2 : Text features (captions)
"""
X1train, X2train, max_length = loadTrainData(config)
X1val, X2val = loadValData(config)

"""
*Load the tokenizer
"""
tokenizer = load(open(config['tokenizer_path'], 'rb'))
vocab_size = len(tokenizer.word_index) + 1  # +1 because index 0 is reserved for padding

"""
*Now that we have the image features from the CNN model, we need to feed them to an RNN model.
*Define the RNN model
"""
# model = RNNModel(vocab_size, max_length, rnnConfig, config['model_type'])
model = AlternativeRNNModel(vocab_size, max_length, rnnConfig, config['model_type'])
print('RNN Model (Decoder) Summary : ')
model.summary()  # summary() prints the layers itself and returns None, so no print() wrapper

"""
*Train the model, saving checkpoints as it goes
"""
num_of_epochs = config['num_of_epochs']
batch_size = config['batch_size']
# Batches per epoch: round up so the final partial batch is included
steps_train = len(X2train)//batch_size
if len(X2train)%batch_size != 0:
    steps_train = steps_train+1
steps_val = len(X2val)//batch_size
if len(X2val)%batch_size != 0:
    steps_val = steps_val+1
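
# The two if-blocks above implement ceiling division. An equivalent,
# more compact formulation (a stylistic alternative, same values):
import math
assert steps_train == math.ceil(len(X2train) / batch_size)
assert steps_val == math.ceil(len(X2val) / batch_size)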
model_save_path = config['model_data_path']+"model_"+str(config['model_type'])+"_epoch-{epoch:02d}_train_loss-{loss:.4f}_val_loss-{val_loss:.4f}.hdf5"
# With save_best_only=True, a checkpoint is written only when val_loss improves
checkpoint = ModelCheckpoint(model_save_path, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
callbacks = [checkpoint]
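
# The excerpt ends before the actual training call. A minimal sketch of that
# final step, assuming data_generator(images, captions, tokenizer, max_length,
# batch_size, random_seed) yields endless (inputs, targets) batches; the exact
# signature in utils/load_data.py is an assumption here, not confirmed by the
# excerpt:
model.fit(data_generator(X1train, X2train, tokenizer, max_length, batch_size, config['random_seed']),
          epochs=num_of_epochs,
          steps_per_epoch=steps_train,
          validation_data=data_generator(X1val, X2val, tokenizer, max_length, batch_size, config['random_seed']),
          validation_steps=steps_val,
          callbacks=callbacks,
          verbose=1)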