update desc and layout
app.py
CHANGED
@@ -82,10 +82,12 @@ def predict(btn_upload, counter,image_hid, input, history):
    return history, history, "uploaded_image.png", counter, image_hid

#Blocks Layout
-with gr.Blocks(css="#chatbot-component .overflow-y-auto{height:500px}") as demo:
-    with gr.
-
-
+with gr.Blocks() as demo: #css="#chatbot-component .overflow-y-auto{height:500px}"
+    with gr.Row():
+        with gr.Column():
+            #with gr.Accordion("See details"):
+            gr.HTML("""<div style="text-align: center; max-width: 700px; margin: 0 auto;">
+                <div
                 style="
                   display: inline-flex;
                   align-items: center;
@@ -105,18 +107,17 @@ with gr.Blocks(css="#chatbot-component .overflow-y-auto{height:500px}") as demo:
            This model was contributed by <a href="https://twitter.com/NielsRogge" target="_blank">nielsr</a>.
            The BLIP-2 model was proposed in <a href="https://arxiv.org/abs/2301.12597" target="_blank">BLIP-2: Bootstrapping Language-Image Pre-training with Frozen Image Encoders and Large Language Models</a>
            by Junnan Li, Dongxu Li, Silvio Savarese, Steven Hoi.<br><br>
-            </p>
-            </div>""")
+            </p></div>""")

-
-
-
-
-
-
-
-
-
+        with gr.Column(elem_id = "column_container"):
+            #text_in = gr.Textbox(value='', placeholder="Type your questions here and press enter", elem_id = "input_prompt", visible=False, label='Great! Now you can ask questions to get more information about the image')
+            btn_upload = gr.UploadButton("Upload image!", file_types=["image"], file_count="single", elem_id="upload_button")
+            chatbot = gr.Chatbot(elem_id = 'chatbot-component', label='Conversational with Images')
+            text_in = gr.Textbox(value='', placeholder="Type your questions here and press enter", elem_id = "input_prompt", visible=False, label='Great! Now you can ask questions to get more information about the image')
+            state_in = gr.State()
+            counter_out = gr.Number(visible=False, value=0, precision=0)
+            text_out = gr.Textbox(visible=False) #getting imag name out
+            image_hid = gr.Image(visible=False) #, type='pil')

    #Using Event Listeners
    btn_upload.upload(predict, [btn_upload, counter_out, image_hid, text_in, state_in], [chatbot, state_in, text_out, counter_out, image_hid])
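Taken together, the new layout nests two gr.Column blocks inside a gr.Row: the left column carries the HTML description of BLIP-2, the right column carries the upload button, the chatbot, and the hidden helper components (state, counter, image-name textbox, hidden image), and the upload event routes all of them through predict(). Below is a minimal runnable sketch of that wiring, assuming Gradio 3.x; dummy_predict and the shortened HTML header are placeholders that only mirror the input/output order of the Space's BLIP-2-backed predict(), not its actual logic.

# Minimal sketch of the updated layout and event wiring (assumes Gradio 3.x).
# dummy_predict is a hypothetical stand-in for the Space's BLIP-2-backed predict();
# it only reproduces the input/output signature seen in the diff.
import gradio as gr

def dummy_predict(btn_upload, counter, image_hid, input, history):
    history = history or []
    # Placeholder reply; the real Space answers with BLIP-2 here.
    history = history + [(input or "Image uploaded", "BLIP-2 response goes here")]
    return history, history, "uploaded_image.png", counter, image_hid

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            # Shortened stand-in for the Space's HTML description block.
            gr.HTML("""<div style="text-align: center; max-width: 700px; margin: 0 auto;">
                       Chat about an image with BLIP-2</div>""")
        with gr.Column(elem_id="column_container"):
            btn_upload = gr.UploadButton("Upload image!", file_types=["image"],
                                         file_count="single", elem_id="upload_button")
            chatbot = gr.Chatbot(elem_id="chatbot-component", label="Conversational with Images")
            text_in = gr.Textbox(visible=False,
                                 label="Great! Now you can ask questions to get more information about the image")
            state_in = gr.State()                  # conversation history
            counter_out = gr.Number(visible=False, value=0, precision=0)
            text_out = gr.Textbox(visible=False)   # holds the saved image name
            image_hid = gr.Image(visible=False)    # holds the uploaded image

    # Same listener as in the diff: uploading an image triggers the prediction function.
    btn_upload.upload(dummy_predict,
                      [btn_upload, counter_out, image_hid, text_in, state_in],
                      [chatbot, state_in, text_out, counter_out, image_hid])

demo.launch()

Note how the same hidden components appear both as inputs and outputs of the upload event; that is how the Space threads the counter, the saved image, and the chat history through successive calls.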