Remove wrong model id
app.py CHANGED
@@ -12,29 +12,7 @@ from cool_models import make_models
 
 help_text = """"""
 
-example_instructions = [
-    "Make it a picasso painting",
-    "as if it were by modigliani",
-    "convert to a bronze statue",
-    "Turn it into an anime.",
-    "have it look like a graphic novel",
-    "make him gain weight",
-    "what would he look like bald?",
-    "Have him smile",
-    "Put him in a cocktail party.",
-    "move him at the beach.",
-    "add dramatic lighting",
-    "Convert to black and white",
-    "What if it were snowing?",
-    "Give him a leather jacket",
-    "Turn him into a cyborg!",
-    "make him wear a beanie",
-]
-
-model_id = "timbrooks/instruct-pix2pix"
-
 def main():
-    # pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(model_id, torch_dtype=torch.float16, safety_checker=None).to("cuda")
     segmodel, model, diffusion, ldm, bert, clip_model, model_params = make_models()
 
     def generate(
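For reference, the deleted comment referred to the diffusers InstructPix2Pix pipeline, which this Space never actually used (its models come from make_models() instead). A minimal sketch of what that load would look like, assuming the diffusers and torch packages and a CUDA device:

import torch
from diffusers import StableDiffusionInstructPix2PixPipeline

# Sketch of the removed, commented-out load; this Space uses make_models() instead.
model_id = "timbrooks/instruct-pix2pix"  # the unused id this commit deletes
pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    model_id, torch_dtype=torch.float16, safety_checker=None
).to("cuda")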
@@ -61,7 +39,6 @@ def main():
             from_text, instruction, negative_prompt, input_image.convert('RGB'), seed, guidance_scale, clip_guidance_scale, cutn, l2_sim_lambda
         )
 
-        # edited_image = input_image
         return [seed, edited_image_1]
 
     def reset():
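The generate wrapper returns [seed, edited_image_1], so its Gradio outputs list pairs a seed component with an image component. A hypothetical wiring sketch under that assumption (the component names below are illustrative, not taken from this Space's code; only the argument order mirrors the call shown in the hunk):

# Hypothetical sketch: wiring the Generate button to the generate() wrapper.
# Component names are illustrative, not from this Space.
generate_button.click(
    fn=generate,
    inputs=[
        from_text, instruction, negative_prompt, input_image, seed,
        guidance_scale, clip_guidance_scale, cutn, l2_sim_lambda,
    ],
    outputs=[seed, edited_image_1],
)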
@@ -75,10 +52,7 @@ def main():
 RDM: Region-Aware Diffusion for Zero-shot Text-driven Image Editing
 </h1>
 <p>For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings.
-
-<a href="https://huggingface.co/spaces/timbrooks/instruct-pix2pix?duplicate=true">
-<img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
-<p/>""")
+</p>""")
     with gr.Row():
         with gr.Column(scale=1, min_width=100):
             generate_button = gr.Button("Generate")
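The surviving context lines sketch the page layout: an HTML heading followed by a row of columns holding the controls. A self-contained sketch of that structure, assuming gr.Blocks (everything beyond the lines visible in the hunks is omitted):

import gradio as gr

# Minimal layout sketch assembled from the diff's context lines; assumes
# gr.Blocks. The real Space defines more components inside the row.
with gr.Blocks() as demo:
    gr.HTML("""<h1>
RDM: Region-Aware Diffusion for Zero-shot Text-driven Image Editing
</h1>
<p>For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings.
</p>""")
    with gr.Row():
        with gr.Column(scale=1, min_width=100):
            generate_button = gr.Button("Generate")

demo.launch()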