Update app.py
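Comments out the spaces import and the @spaces.GPU decorators, and launches the Gradio demo with share = True, presumably so the app can run outside of Hugging Face ZeroGPU Spaces (for example, locally) while still exposing a public link.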
app.py CHANGED
@@ -1,4 +1,4 @@
-import spaces
+#import spaces
 import contextlib
 import gc
 import json
@@ -173,7 +173,7 @@ examples = [
     global pipeline
     global MultiResNetModel
 
-
+#@spaces.GPU
 def load_ckpt(input_style):
     global pipeline
     global MultiResNetModel
@@ -264,7 +264,7 @@ cur_input_style = "GrayImage(ScreenStyle)"
 load_ckpt(cur_input_style)
 cur_input_style = None
 
-
+#@spaces.GPU
 def fix_random_seeds(seed):
     random.seed(seed)
     np.random.seed(seed)
@@ -280,7 +280,7 @@ def process_multi_images(files):
         imgs.append(img)
     return imgs
 
-
+#@spaces.GPU
 def extract_lines(image):
     src = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2GRAY)
 
@@ -305,7 +305,7 @@ def extract_lines(image):
     torch.cuda.empty_cache()
     return outimg
 
-
+#@spaces.GPU
 def to_screen_image(input_image):
     global opt
     global ScreenModel
@@ -321,7 +321,7 @@ def to_screen_image(input_image):
     torch.cuda.empty_cache()
     return image_pil
 
-
+#@spaces.GPU
 def extract_line_image(query_image_, input_style, resolution):
     if resolution == "640x640":
         tar_width = 640
@@ -348,7 +348,7 @@ def extract_line_image(query_image_, input_style, resolution):
     torch.cuda.empty_cache()
     return input_context, extracted_line, input_context
 
-
+#@spaces.GPU(duration=180)
 def colorize_image(VAE_input, input_context, reference_images, resolution, seed, input_style, num_inference_steps):
     if VAE_input is None or input_context is None:
         gr.Info("Please preprocess the image first")
@@ -543,4 +543,4 @@ with gr.Blocks() as demo:
     # )
 
 
-demo.launch()
+demo.launch(share = True)
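For context, a minimal sketch of the pattern these decorators implement, and one hedged alternative to commenting them out. On ZeroGPU Spaces, spaces.GPU marks functions that need a GPU so that hardware is attached only while the decorated call runs, and duration=180 extends the per-call allocation for the slow colorization step. Everything below (the gpu alias and the heavy_step function) is illustrative, not code from app.py:

# Illustrative sketch, not part of app.py: fall back to a no-op decorator
# when the spaces package is unavailable (i.e. running outside HF Spaces).
try:
    import spaces
    gpu = spaces.GPU               # on Spaces: attaches a GPU per call
except ImportError:
    def gpu(func=None, **kwargs):  # locally: leave the function unchanged
        if func is None:           # used with arguments, e.g. @gpu(duration=180)
            return lambda f: f
        return func                # used bare, e.g. @gpu

@gpu(duration=180)                 # hypothetical stand-in for colorize_image
def heavy_step(x):
    return x                       # placeholder body

With a guard like this the decorators could stay active in both environments; the commit takes the simpler route of disabling them outright. Note also that share = True asks Gradio to open a temporary public share link in addition to the local server.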