enabled blip large
app.py
CHANGED
@@ -11,8 +11,8 @@ torch.hub.download_url_to_file('http://images.cocodataset.org/val2017/0000000397
 torch.hub.download_url_to_file('https://huggingface.co/datasets/nielsr/textcaps-sample/resolve/main/stop_sign.png', 'stop_sign.png')
 torch.hub.download_url_to_file('https://cdn.openai.com/dall-e-2/demos/text2im/astronaut/horse/photo/0.jpg', 'astronaut.jpg')
 
-git_processor_base = AutoProcessor.from_pretrained("microsoft/git-base-coco")
-git_model_base = AutoModelForCausalLM.from_pretrained("microsoft/git-base-coco")
+# git_processor_base = AutoProcessor.from_pretrained("microsoft/git-base-coco")
+# git_model_base = AutoModelForCausalLM.from_pretrained("microsoft/git-base-coco")
 
 # git_processor_large_coco = AutoProcessor.from_pretrained("microsoft/git-large-coco")
 # git_model_large_coco = AutoModelForCausalLM.from_pretrained("microsoft/git-large-coco")
@@ -20,11 +20,11 @@ git_model_base = AutoModelForCausalLM.from_pretrained("microsoft/git-base-coco")
 # git_processor_large_textcaps = AutoProcessor.from_pretrained("microsoft/git-large-r-textcaps")
 # git_model_large_textcaps = AutoModelForCausalLM.from_pretrained("microsoft/git-large-r-textcaps")
 
-blip_processor_base = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
-blip_model_base = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
+# blip_processor_base = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
+# blip_model_base = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
 
-
-
+blip_processor_large = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
+blip_model_large = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
 
 # blip2_processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
 # blip2_model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b", torch_dtype=torch.float16)
@@ -43,11 +43,11 @@ blip_model_base = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
-git_model_base.to(device)
-blip_model_base.to(device)
+# git_model_base.to(device)
+# blip_model_base.to(device)
 # git_model_large_coco.to(device)
 # git_model_large_textcaps.to(device)
-
+blip_model_large.to(device)
 # vitgpt_model.to(device)
 # coca_model.to(device)
 # blip2_model.to(device)
@@ -76,15 +76,15 @@ def generate_caption_coca(model, transform, image):
 
 
 def generate_captions(image):
-    caption_git_base = generate_caption(git_processor_base, git_model_base, image)
+    # caption_git_base = generate_caption(git_processor_base, git_model_base, image)
 
     # caption_git_large_coco = generate_caption(git_processor_large_coco, git_model_large_coco, image)
 
     # caption_git_large_textcaps = generate_caption(git_processor_large_textcaps, git_model_large_textcaps, image)
 
-    caption_blip_base = generate_caption(blip_processor_base, blip_model_base, image)
+    # caption_blip_base = generate_caption(blip_processor_base, blip_model_base, image)
 
-
+    caption_blip_large = generate_caption(blip_processor_large, blip_model_large, image)
 
     # caption_vitgpt = generate_caption(vitgpt_processor, vitgpt_model, image, vitgpt_tokenizer)
 
@@ -95,22 +95,22 @@ def generate_captions(image):
     # caption_blip2_8_bit = generate_caption(blip2_processor_8_bit, blip2_model_8_bit, image, use_float_16=True).strip()
 
     # return caption_git_large_coco, caption_git_large_textcaps, caption_blip_large, caption_coca, caption_blip2_8_bit
-    return
+    return caption_blip_large
 
 
 
 examples = [["cats.jpg"], ["stop_sign.png"], ["astronaut.jpg"]]
 # outputs = [gr.outputs.Textbox(label="Caption generated by GIT-large fine-tuned on COCO"), gr.outputs.Textbox(label="Caption generated by GIT-large fine-tuned on TextCaps"), gr.outputs.Textbox(label="Caption generated by BLIP-large"), gr.outputs.Textbox(label="Caption generated by CoCa"), gr.outputs.Textbox(label="Caption generated by BLIP-2 OPT 6.7b")]
 outputs = [
-    gr.outputs.Textbox(label="Caption generated by GIT-base fine-tuned on COCO"),
+    # gr.outputs.Textbox(label="Caption generated by GIT-base fine-tuned on COCO"),
     # gr.outputs.Textbox(label="Caption generated by GIT-large fine-tuned on COCO"),
     # gr.outputs.Textbox(label="Caption generated by GIT-large fine-tuned on TextCaps"),
-    gr.outputs.Textbox(label="Caption generated by BLIP-base"),
-
+    # gr.outputs.Textbox(label="Caption generated by BLIP-base"),
+    gr.outputs.Textbox(label="Caption generated by BLIP-large"),
     # gr.outputs.Textbox(label="Caption generated by vitgpt")
 ]
 
-title = "Interactive demo:
+title = "Interactive demo: blip-large"
 description = "Gradio Demo to compare GIT, BLIP, CoCa, and BLIP-2, 4 state-of-the-art vision+language models. To use it, simply upload your image and click 'submit', or click one of the examples to load them. Read more at the links below."
 article = "<p style='text-align: center'><a href='https://huggingface.co/docs/transformers/main/model_doc/blip' target='_blank'>BLIP docs</a> | <a href='https://huggingface.co/docs/transformers/main/model_doc/git' target='_blank'>GIT docs</a></p>"
 
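For reference, the generate_caption helper that the new caption_blip_large = generate_caption(blip_processor_large, blip_model_large, image) line calls is defined in a part of app.py that this diff does not show. The following is only a minimal sketch of what such a helper typically looks like with the transformers BLIP API; the real helper in app.py also takes a tokenizer argument and a use_float_16 flag for other models (as the commented-out calls suggest), so the signature and generation settings below are assumptions.

import torch
from PIL import Image
from transformers import AutoProcessor, BlipForConditionalGeneration

device = "cuda" if torch.cuda.is_available() else "cpu"

# Same checkpoint this commit enables in app.py.
blip_processor_large = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
blip_model_large = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large").to(device)

def generate_caption(processor, model, image):
    # Turn the PIL image into pixel values, generate caption token ids, decode to text.
    inputs = processor(images=image, return_tensors="pt").to(device)
    generated_ids = model.generate(pixel_values=inputs.pixel_values, max_length=50)
    return processor.batch_decode(generated_ids, skip_special_tokens=True)[0]

print(generate_caption(blip_processor_large, blip_model_large, Image.open("cats.jpg")))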
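The gr.Interface call that ties generate_captions, outputs, examples, title, description, and article together also sits below the shown hunks. A hedged sketch of how these names are typically wired up in the old-style Gradio API that the gr.outputs.Textbox lines imply; the exact input component and launch options used in app.py are assumptions.

import gradio as gr

interface = gr.Interface(
    fn=generate_captions,                # after this commit, returns only caption_blip_large
    inputs=gr.inputs.Image(type="pil"),  # assumed input component, matching the gr.outputs style above
    outputs=outputs,                     # single Textbox labelled "Caption generated by BLIP-large"
    examples=examples,
    title=title,
    description=description,
    article=article,
)
interface.launch()

Because generate_captions now returns exactly one value and outputs contains exactly one enabled Textbox, the function's return arity and the output component count stay consistent after this commit.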