Spaces (status: Runtime error)

Commit: update

Files changed:
- app.py +11 -11
- inference.py +0 -22
- teaser.jpg +0 -0 (new file)
app.py CHANGED

@@ -24,9 +24,6 @@ import gradio as gr
 import torch
 
 from inference import inference_fn
-# from inference_custom_diffusion import InferencePipeline
-# from trainer import Trainer
-# from uploader import upload
 
 
 # def parse_args() -> argparse.Namespace:

@@ -49,9 +46,12 @@ It is recommended to upgrade to GPU in Settings after duplicating this space to
 DETAILDESCRIPTION='''
 ReVersion
 '''
-
-
-
+DETAILDESCRIPTION='''
+ReVersion: <R> represents the learned text token for a relation. Use <R> in your prompt for relation-specific generation.
+<center>
+<img src="teaser.jpg" width="600" align="center">
+</center>
+'''
 # DETAILDESCRIPTION='''
 # Custom Diffusion allows you to fine-tune text-to-image diffusion models, such as Stable Diffusion, given a few images of a new concept (~4-20).
 # We fine-tune only a subset of model parameters, namely key and value projection matrices, in the cross-attention layers and the modifier token used to represent the object.

@@ -124,16 +124,16 @@ def create_inference_demo(func: inference_fn) -> gr.Blocks:
             #                     placeholder='Example: "<R>"')
 
             with gr.Accordion('Other Parameters', open=False):
+                num_samples = gr.Slider(label='Number of Images to Generate',
+                                        minimum=4,
+                                        maximum=8,
+                                        step=2,
+                                        value=6)
                 guidance_scale = gr.Slider(label='Classifier-Free Guidance Scale',
                                            minimum=0,
                                            maximum=50,
                                            step=0.1,
                                            value=7.5)
-                num_samples = gr.Slider(label='Number of Images to Generate',
-                                        minimum=0,
-                                        maximum=10.,
-                                        step=1,
-                                        value=10)
                 ddim_steps = gr.Slider(label='Number of DDIM Sampling Steps',
                                        minimum=10,
                                        maximum=100,
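For orientation, here is a minimal sketch of how these sliders typically get wired to inference_fn inside create_inference_demo. The prompt textbox, run button, result image, click wiring, and the ddim_steps default are assumptions for illustration, not code shown in this commit (inference_fn in inference.py also takes a model_id, omitted here):

import gradio as gr

from inference import inference_fn


def create_inference_demo(func=inference_fn) -> gr.Blocks:
    with gr.Blocks() as demo:
        # The prompt should contain the learned relation token <R>.
        prompt = gr.Textbox(label='Prompt', placeholder='Example: "cat <R> box"')  # assumed input
        with gr.Accordion('Other Parameters', open=False):
            num_samples = gr.Slider(label='Number of Images to Generate',
                                    minimum=4, maximum=8, step=2, value=6)
            guidance_scale = gr.Slider(label='Classifier-Free Guidance Scale',
                                       minimum=0, maximum=50, step=0.1, value=7.5)
            ddim_steps = gr.Slider(label='Number of DDIM Sampling Steps',
                                   minimum=10, maximum=100, step=1, value=50)  # default assumed
        run_button = gr.Button('Generate')  # assumed control
        result = gr.Image(label='Result')   # assumed output
        run_button.click(fn=func,
                         inputs=[prompt, num_samples, guidance_scale, ddim_steps],
                         outputs=result)   # argument order is an assumption
    return demo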
inference.py CHANGED

@@ -54,12 +54,6 @@ def inference_fn(
         pipe = StableDiffusionPipeline.from_pretrained(os.path.join('experiments', model_id), torch_dtype=torch.float16).to('cuda')
     else:
         pipe = StableDiffusionPipeline.from_pretrained(os.path.join('experiments', model_id)).to('cpu')
-    # # make directory to save images
-    # image_root_folder = os.path.join('experiments', model_id, 'inference')
-    # os.makedirs(image_root_folder, exist_ok=True)
-
-    # if prompt is None and args.template_name is None:
-    #     raise ValueError("please input a single prompt through '--prompt' or select a batch of prompts using '--template_name'.")
 
     # single text prompt
     if prompt is not None:

@@ -67,33 +61,17 @@ def inference_fn(
     else:
         prompt_list = []
 
-    # if args.template_name is not None:
-    #     # read the selected text prompts for generation
-    #     prompt_list.extend(inference_templates[args.template_name])
-
     for prompt in prompt_list:
         # insert relation prompt <R>
         # prompt = prompt.lower().replace("<r>", "<R>").format(placeholder_string)
         prompt = prompt.lower().replace("<r>", "<R>").format("<R>")
 
-
-        # # make sub-folder
-        # image_folder = os.path.join(image_root_folder, prompt, 'samples')
-        # os.makedirs(image_folder, exist_ok=True)
-
        # batch generation
         images = pipe(prompt, num_inference_steps=ddim_steps, guidance_scale=guidance_scale, num_images_per_prompt=num_samples).images
 
-        # # save generated images
-        # for idx, image in enumerate(images):
-        #     image_name = f"{str(idx).zfill(4)}.png"
-        #     image_path = os.path.join(image_folder, image_name)
-        #     image.save(image_path)
-
         # save a grid of images
         image_grid = make_image_grid(images, rows=2, cols=math.ceil(num_samples/2))
         print(image_grid)
-        # image_grid_path = os.path.join(image_root_folder, prompt, f'{prompt}.png')
 
     return image_grid
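One detail in the kept code path is worth spelling out: each prompt is first lowercased (which also folds a typed <R> down to <r>), the token's casing is then restored, and any {} placeholder left over from the prompt templates is filled with the relation token. A short worked example (the sample prompts are hypothetical):

prompt = 'Cat <r> Box'
prompt = prompt.lower()                # 'cat <r> box'  (user casing normalized)
prompt = prompt.replace("<r>", "<R>")  # 'cat <R> box'  (relation token restored)
prompt = prompt.format("<R>")          # no-op without braces; 'cat {} box' would become 'cat <R> box'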
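The diff does not show where make_image_grid is defined; recent versions of diffusers ship a diffusers.utils.make_image_grid with this signature, but as a self-contained illustration here is a minimal PIL sketch of such a helper, an assumption rather than the repository's actual implementation:

from PIL import Image


def make_image_grid(images, rows, cols):
    # Paste PIL images into a rows x cols canvas, row-major.
    # Assumes uniform image sizes and len(images) <= rows * cols.
    w, h = images[0].size
    grid = Image.new('RGB', (cols * w, rows * h))
    for idx, image in enumerate(images):
        grid.paste(image, ((idx % cols) * w, (idx // cols) * h))
    return grid

With the updated slider, num_samples is always even (4, 6, or 8), so rows=2 and cols=math.ceil(num_samples/2) multiply out to exactly num_samples and the grid has no empty cells.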
teaser.jpg ADDED