Spaces: Running on Zero
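
# Minimal ZeroGPU demo: the same tensor computation can be triggered either
# through a @spaces.GPU-decorated function (GPU allocated on demand) or on
# the CPU, so the reported device can be compared in the Gradio UI.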
import os

import gradio as gr
import spaces
import torch

from app.logger_config import logger as logging
from app.utils import (
    debug_current_device,
    get_current_device,
)
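

# On ZeroGPU Spaces, the @spaces.GPU decorator requests a GPU only for the
# duration of the decorated call; the rest of the process runs on the CPU.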
@spaces.GPU
def gpu_compute(name):
    logging.debug("=== Start of gpu_compute() ===")
    debug_current_device()
    tensor, device_name = compute(name)
    logging.debug("=== End of gpu_compute() ===")
    return f"Tensor: {tensor.cpu().numpy()} | Device: {device_name}"


def cpu_compute(name):
    logging.debug("=== Start of cpu_compute() ===")
    debug_current_device()
    tensor, device_name = compute(name)
    logging.debug("=== End of cpu_compute() ===")
    return f"Tensor: {tensor.cpu().numpy()} | Device: {device_name}"


def compute(name):
    # Get device info
    device, device_name = get_current_device()

    # Create a tensor
    tensor = torch.tensor([len(name)], dtype=torch.float32, device=device)
    logging.debug(f"Tensor created: {tensor}")

    # Optional: free GPU memory
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        logging.debug("GPU cache cleared")

    return tensor, device_name
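

# Gradio UI: one text input, one text output, and two buttons that route the
# same input through gpu_compute (ZeroGPU) or cpu_compute.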
block = gr.Blocks()

with block as demo:
    with gr.Row():
        input_text = gr.Text()
        output_text = gr.Text()
    with gr.Row():
        gpu_button = gr.Button("GPU compute")
        cpu_button = gr.Button("CPU compute")
    gpu_button.click(fn=gpu_compute, inputs=[input_text], outputs=[output_text])
    cpu_button.click(fn=cpu_compute, inputs=[input_text], outputs=[output_text])

# Render the assembled layout inside the Blocks instance that gets launched.
with gr.Blocks() as demo:
    block.render()


if __name__ == "__main__":
    demo.queue(max_size=10, api_open=False).launch(show_api=False)
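
The helpers imported from app.utils are not part of this file. Below is a minimal sketch of what they might look like, assuming get_current_device() returns a (torch.device, name) pair and debug_current_device() only logs, as their usage in compute() suggests:

import logging

import torch

logger = logging.getLogger(__name__)


def get_current_device():
    # Prefer CUDA when a GPU is visible (e.g. inside a @spaces.GPU call), else CPU.
    if torch.cuda.is_available():
        return torch.device("cuda"), torch.cuda.get_device_name(0)
    return torch.device("cpu"), "CPU"


def debug_current_device():
    # Log which device the process currently sees.
    device, device_name = get_current_device()
    logger.debug(f"Current device: {device} ({device_name})")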