Update app.py
app.py CHANGED
@@ -31,6 +31,8 @@ def predict(context,question):
     sparse_end_time = time.perf_counter()
     sparse_duration = (sparse_end_time - sparse_start_time) * 1000
     sparse_answer = sparse_predictions['answer']
+    sparse_score = sparse_predictions['score']
+    sparse_start = sparse_predictions['start']

     # dense_start_time = time.perf_counter()
     # dense_predictions = dense_qa_pipeline(context=context,question=question)
@@ -38,7 +40,7 @@ def predict(context,question):
     # dense_duration = (dense_end_time - dense_start_time) * 1000
     # dense_answer = dense_predictions['answer']

-    return sparse_answer,sparse_duration #,dense_answer,dense_duration
+    return sparse_answer,sparse_duration,sparse_score,sparse_start #,dense_answer,dense_duration

 md = """This prediction model is designed to answer a question about a given input text--reading comprehension. The model does not just answer questions in general -- it only works from the text that you provide. However, automated reading comprehension can be a valuable task.

@@ -53,9 +55,10 @@ Author of Hugging Face Space: Benjamin Consolvo, AI Solutions Engineer Manager a
 # predict()
 context=gr.Text(lines=10,label="Context")
 question=gr.Text(label="Question")
-sparse_answer=gr.Text(label="
-sparse_duration=gr.Text(label="
-
+sparse_answer=gr.Text(label="Answer")
+sparse_duration=gr.Text(label="Latency (ms)")
+sparse_score=gr.Text(label="Probability score")
+sparse_start=gr.Text(label="Starting character")
 # dense_answer=gr.Text(label="Dense Answer")
 # dense_duration=gr.Text(label="Dense latency (ms)")

@@ -66,7 +69,7 @@ iface = gr.Interface(
     fn=predict,
     inputs=[context,question],
     # outputs=[sparse_answer,sparse_duration,dense_answer,dense_duration],
-    outputs=[sparse_answer,sparse_duration],
+    outputs=[sparse_answer,sparse_score,sparse_start,sparse_duration],
     examples=[[apple_context,apple_question]],
     title = "Question & Answer with Sparse BERT using the SQuAD dataset",
     description = md,
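For context, here is a minimal, self-contained sketch of what app.py plausibly looks like after this commit. Only the lines visible in the diff are taken from the Space; the sparse_qa_pipeline construction, the model checkpoint name, the abridged md text, and the apple_context/apple_question example values are stand-ins for code the diff does not show. One detail worth noting: gr.Interface matches a function's return values to its outputs positionally, and as committed the return order (answer, duration, score, start) differs from the outputs order (answer, score, start, duration), so the sketch aligns the two.

import time

import gradio as gr
from transformers import pipeline

# Stand-in checkpoint: the Space's actual sparse BERT SQuAD model is not named in the diff.
sparse_qa_pipeline = pipeline("question-answering",
                              model="distilbert-base-cased-distilled-squad")

def predict(context, question):
    # Time only the sparse model's inference and report it in milliseconds.
    sparse_start_time = time.perf_counter()
    sparse_predictions = sparse_qa_pipeline(context=context, question=question)
    sparse_end_time = time.perf_counter()
    sparse_duration = (sparse_end_time - sparse_start_time) * 1000

    # The question-answering pipeline returns a dict with 'answer', 'score', 'start', and 'end'.
    sparse_answer = sparse_predictions['answer']
    sparse_score = sparse_predictions['score']   # model confidence in [0, 1]
    sparse_start = sparse_predictions['start']   # index of the answer's first character in the context

    # Return values in the same order as the outputs list below (Gradio matches them by position).
    return sparse_answer, sparse_score, sparse_start, sparse_duration

# Abridged description; the Space's full md text is longer.
md = "This prediction model answers a question using only the context text you provide."

# Placeholder example values; apple_context and apple_question are defined elsewhere in app.py.
apple_context = "Apple Inc. was founded in 1976 by Steve Jobs, Steve Wozniak, and Ronald Wayne."
apple_question = "Who founded Apple?"

context = gr.Text(lines=10, label="Context")
question = gr.Text(label="Question")
sparse_answer = gr.Text(label="Answer")
sparse_duration = gr.Text(label="Latency (ms)")
sparse_score = gr.Text(label="Probability score")
sparse_start = gr.Text(label="Starting character")

iface = gr.Interface(
    fn=predict,
    inputs=[context, question],
    outputs=[sparse_answer, sparse_score, sparse_start, sparse_duration],
    examples=[[apple_context, apple_question]],
    title="Question & Answer with Sparse BERT using the SQuAD dataset",
    description=md,
)

if __name__ == "__main__":
    iface.launch()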