import gradio as gr
import os
import io
import requests, json
from PIL import Image
import base64

from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv())  # read local .env file
hf_api_key = os.environ['HF_API_KEY']
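# Note (an assumption about the runtime environment, not shown in the Space
# itself): both variables read by this script -- HF_API_KEY here and
# HF_API_TTI_STABILITY_AI below -- must be defined, either in the local .env
# file or as Space secrets, e.g.
#
#   HF_API_KEY=hf_xxxxxxxxxxxxxxxx
#   HF_API_TTI_STABILITY_AI=<URL of your text-to-image inference endpoint>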
# Text-to-image endpoint
def get_completion(inputs, parameters=None, ENDPOINT_URL=os.environ['HF_API_TTI_STABILITY_AI']):
    headers = {
        "Authorization": f"Bearer {hf_api_key}",
        "Content-Type": "application/json"
    }
    data = {"inputs": inputs}
    if parameters is not None:
        data.update({"parameters": parameters})
    response = requests.post(ENDPOINT_URL, headers=headers, data=json.dumps(data))
    # Check the content type of the response
    content_type = response.headers.get('Content-Type', '')
    print(content_type)
    if 'application/json' in content_type:
        return json.loads(response.content.decode("utf-8"))
    elif 'image/' in content_type:
        return response.content  # return raw image data
    response.raise_for_status()  # raise an error for unexpected content types
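# Optional quick check (a sketch, not part of the app): the wrapper above can
# be exercised from a Python shell before wiring up the UI, assuming the two
# environment variables are set and the endpoint returns raw image bytes.
#
#   out = get_completion("a photograph of an astronaut riding a horse",
#                        {"num_inference_steps": 25, "guidance_scale": 7})
#   if not isinstance(out, dict):             # a dict means the endpoint returned JSON (e.g. an error)
#       Image.open(io.BytesIO(out)).show()    # raw bytes -> PIL image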
# A helper function to convert a base64-encoded image (as some endpoints
# return it) into a PIL image
def base64_to_pil(img_base64):
    base64_decoded = base64.b64decode(img_base64)
    byte_stream = io.BytesIO(base64_decoded)
    pil_image = Image.open(byte_stream)
    return pil_image
def generate(prompt, negative_prompt, steps, guidance, width, height):
    params = {
        "negative_prompt": negative_prompt,
        "num_inference_steps": steps,
        "guidance_scale": guidance,
        "width": width,
        "height": height
    }
    output = get_completion(prompt, params)
    # Check if the output is an image (bytes) or JSON (dict)
    if isinstance(output, dict):
        raise ValueError("Expected an image but received JSON: {}".format(output))
    # If output is raw image data, convert it to a PIL image
    result_image = Image.open(io.BytesIO(output))
    return result_image
with gr.Blocks() as demo:
    gr.Markdown("# Image Generation with stable-diffusion-xl-base-1.0")
    with gr.Row():
        with gr.Column(scale=4):
            prompt = gr.Textbox(label="Your prompt")  # Give the prompt some real estate
        with gr.Column(scale=1, min_width=50):
            btn = gr.Button("Submit")  # Submit button side by side!
    with gr.Accordion("Advanced options", open=False):  # Hide the advanced options
        negative_prompt = gr.Textbox(label="Negative prompt")
        with gr.Row():
            with gr.Column():
                steps = gr.Slider(label="Inference Steps", minimum=1, maximum=100, value=25,
                                  info="In how many steps will the denoiser denoise the image?")
                guidance = gr.Slider(label="Guidance Scale", minimum=1, maximum=20, value=7,
                                     info="Controls how much the text prompt influences the result")
            with gr.Column():
                width = gr.Slider(label="Width", minimum=64, maximum=1024, step=64, value=512)
                height = gr.Slider(label="Height", minimum=64, maximum=1024, step=64, value=512)
    output = gr.Image(label="Result")  # Move the output up too
    btn.click(fn=generate, inputs=[prompt, negative_prompt, steps, guidance, width, height], outputs=[output])
gr.close_all()
demo.launch(share=True)