# Project logger from app.logger_config, aliased as "logging" so the familiar
# logging.info/logging.debug calls route through it
from app.logger_config import logger as logging
import gradio as gr
import spaces
import torch

logging.info("-----------info------------")
logging.debug("-----------debug------------")
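
# On a Hugging Face ZeroGPU Space, @spaces.GPU attaches a GPU to the process
# only while the decorated call runs; outside such a Space the decorator is
# effectively a no-op.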
@spaces.GPU(progress=gr.Progress(track_tqdm=True))
def greet(name):
logging.debug("=== Start of greet() ===")
# Check GPU availability
has_gpu = torch.cuda.is_available()
logging.debug(f"GPU available: {has_gpu}")
    if has_gpu:
        device = torch.device("cuda")
        device_name = torch.cuda.get_device_name(0)
        memory_allocated = torch.cuda.memory_allocated(0) / (1024 ** 2)
        memory_reserved = torch.cuda.memory_reserved(0) / (1024 ** 2)
        memory_total = torch.cuda.get_device_properties(0).total_memory / (1024 ** 2)
        capability = torch.cuda.get_device_capability(0)
        current_device = torch.cuda.current_device()

        # Detailed GPU logs (allocated = memory used by live tensors;
        # reserved = memory held by PyTorch's caching allocator)
        logging.debug(f"GPU name          : {device_name}")
        logging.debug(f"Current device ID : {current_device}")
        logging.debug(f"CUDA capability   : {capability}")
        logging.debug(f"Memory allocated  : {memory_allocated:.2f} MB")
        logging.debug(f"Memory reserved   : {memory_reserved:.2f} MB")
        logging.debug(f"Total memory      : {memory_total:.2f} MB")
    else:
        device = torch.device("cpu")
        device_name = "CPU (no GPU detected)"
        logging.debug("No GPU detected, using CPU")

    # Create a small tensor on the selected device from the input length
    tensor = torch.tensor([len(name)], dtype=torch.float32, device=device)
    logging.debug(f"Tensor created: {tensor}")

    # Optional GPU cleanup: empty_cache() returns cached, unused blocks to the
    # driver but does not free live tensors
    if has_gpu:
        torch.cuda.empty_cache()
        logging.debug("GPU cache cleared")

    logging.debug("=== End of greet() ===")
    return f"Tensor: {tensor.cpu().numpy()} | Device: {device_name}"
demo = gr.Interface(
    fn=greet,
    inputs="text",
    outputs="text",
    title="GPU Info Demo",
    description="Returns a tensor and the name of the GPU used",
)

demo.launch()
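
# A possible variant (assumption: this file runs as app.py on a Space):
# enabling Gradio's queue before launching serializes concurrent @spaces.GPU
# requests, e.g.:
#
#     demo.queue().launch()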