import os
from io import BytesIO

import gradio as gr
import requests
import torch
from diffusers import StableDiffusionUpscalePipeline
from gradio_client import Client
from PIL import Image

# Hugging Face token read from the environment; needed to call the duplicated IF Space
HF_TOKEN = os.environ.get('HF_TOKEN')

# Gradio clients for the DeepFloyd IF and PickScore Spaces
client_if = Client("ysharma/IF", hf_token=HF_TOKEN)
client_pick = Client("yuvalkirstain/PickScore")
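
# Optional: gradio_client's Client.view_api() lists a Space's callable endpoints and
# their expected parameters; this is one way to find the "/generate64" route and the
# fn_index values used below. Uncomment to inspect:
# client_if.view_api()
# client_pick.view_api()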

# Load the Stable Diffusion x4 upscaling model and scheduler
model_id = "stabilityai/stable-diffusion-x4-upscaler"
pipeline_upscale = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
pipeline_upscale = pipeline_upscale.to("cuda")
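# Optional: on smaller GPUs, attention slicing trades a little speed for a lower
# memory footprint of the upscaler (a standard diffusers pipeline option):
# pipeline_upscale.enable_attention_slicing()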


def get_IF_op(prompt, neg_prompt):
    print("inside get_IF_op")
    # Call the IF Space's /generate64 endpoint to generate a gallery of 4 low-res images
    filepaths = client_if.predict(prompt, neg_prompt, 1, 4, 7.0, 'smart100', 50, api_name="/generate64")
    folder_path = filepaths[0]
    # Collect the generated image paths, skipping the gallery's captions.json metadata file
    file_list = os.listdir(folder_path)
    file_list = [os.path.join(folder_path, f) for f in file_list if f != 'captions.json']
    print(f"^^file list is: {file_list}")
    return file_list


def get_pickscores(prompt, image_tmps):
    print("inside get_pickscores")
    # Get the PickScore predictions; the Space scores two images per call,
    # so two calls cover all four generations
    probabilities1 = client_pick.predict(prompt, image_tmps[0], image_tmps[1], fn_index=0)
    probabilities2 = client_pick.predict(prompt, image_tmps[2], image_tmps[3], fn_index=0)
    probabilities_all = list(probabilities1) + list(probabilities2)
    # Keep the image with the highest probability
    max_score = max(probabilities_all)
    max_score_index = probabilities_all.index(max_score)
    best_match_image = image_tmps[max_score_index]
    return best_match_image


def get_upscale_op(prompt, gallery_if):
    print("inside get_upscale_op")
    print(f"^^gallery_if is: {gallery_if}")
    image_tmps = [val['name'] for val in gallery_if]
    # Let PickScore choose the best of the four generations
    best_match_image = get_pickscores(prompt, image_tmps)
    # Upscale the best pick with the Stable Diffusion x4 upscaler
    low_res_img = Image.open(best_match_image).convert("RGB")
    low_res_img = low_res_img.resize((128, 128))
    upscaled_image = pipeline_upscale(prompt=prompt, image=low_res_img).images[0]
    #upscaled_image.save("upsampled.png")
    return upscaled_image


theme = gr.themes.Monochrome(
    neutral_hue="cyan",
    radius_size="md",
    spacing_size="sm",
)

title = """<h1 align="center">🔥Gradio pipeline to use DeepFloyd IF more effectively!</h1><br>
<h2 align="center">Demo built using the <a href="https://huggingface.co/spaces/DeepFloyd/IF">DeepFloyd IF</a> and <a href="https://huggingface.co/spaces/yuvalkirstain/PickScore">Pick-A-Pic PickScore</a> models.</h2>
<h2 align="center">💪💪The Gradio Client library lets you use the Gradio demos of these two cutting-edge models as API endpoints</h2>"""

description = """<br><br><h4>Steps to build this pipeline:
- Duplicate the DeepFloyd IF Space to avoid the queue
- Create a Client for the duplicated Space using the Gradio Python client
- Generate an initial 4-image gallery using the client and a prompt
- Create a Client for the PickScore Space using the Gradio Python client
- Feed the image gallery into the PickScore client
- Get probabilities for the images, choose the image with the highest probability, and display it
</h4>"""

with gr.Blocks(theme=theme) as demo:
    gr.HTML(title)
    gr.HTML('''<center><a href="https://huggingface.co/spaces/ysharma/Effectively_Using_IF?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>Duplicate the Space to skip the queue and run in a private space</center>''')
    with gr.Row(variant='compact'):
        with gr.Column(scale=4):
            prompt = gr.Textbox(label='Prompt')
            neg_prompt = gr.Textbox(label='Negative Prompt')
        with gr.Column(scale=1):
            b1 = gr.Button("Generate 'IF' Output").style(full_width=True)
    with gr.Row(variant='compact'):
        with gr.Column():
            gallery_if = gr.Gallery(label='IF Space outputs').style(columns=4, object_fit="contain", preview=True, height='auto')
            b2 = gr.Button("Get the best generation using Pick-A-Pic")
            image_pickapic = gr.Image(label="PickAPic Evaluated Output").style(height=450)
    gr.Markdown(description)

    # Wire up the events: generate the IF gallery, then pick and upscale the best image
    b1.click(get_IF_op, [prompt, neg_prompt], gallery_if)
    prompt.submit(get_IF_op, [prompt, neg_prompt], gallery_if)
    b2.click(get_upscale_op, [prompt, gallery_if], image_pickapic)

demo.queue(concurrency_count=2, max_size=10)
demo.launch(debug=True)
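
# Optional: once this Space is running, it can itself be driven from Python with
# gradio_client, just like the IF and PickScore Spaces above. The Space name below
# matches the "Duplicate Space" link in the UI; check view_api() for the exact
# endpoint names and signatures before relying on them:
# from gradio_client import Client
# c = Client("ysharma/Effectively_Using_IF")
# print(c.view_api())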