Update app.py
app.py CHANGED

@@ -76,7 +76,7 @@ def run_comparison(iterations: int, seed: int, use_deterministic: bool, device:
     """
 
     # Set device environment variable for subprocess
-    # On Hugging Face Spaces
+    # On Hugging Face Spaces with GPU, try to use CUDA
     if device == "cuda":
         try:
             import torch
@@ -85,18 +85,21 @@ def run_comparison(iterations: int, seed: int, use_deterministic: bool, device:
             try:
                 # Try to get device name to verify GPU works
                 gpu_name = torch.cuda.get_device_name(0)
-                …
+                gpu_count = torch.cuda.device_count()
+                print(f"✅ GPU available: {gpu_name} (Count: {gpu_count})")
+                # Keep device as "cuda"
             except Exception as e:
-                print(f"⚠️ GPU detection failed: {e}…
-                …
+                print(f"⚠️ GPU detection failed: {e}")
+                print(" Attempting to use CUDA anyway (may work)...")
+                # Don't fallback immediately - let it try
         else:
-            print("⚠️ CUDA not available, …
+            print("⚠️ CUDA not available, falling back to CPU")
             device = "cpu"
     except ImportError:
-        print("⚠️ PyTorch not available, …
+        print("⚠️ PyTorch not available, falling back to CPU")
         device = "cpu"
     except Exception as e:
-        print(f"⚠️ GPU check error: {e}, …
+        print(f"⚠️ GPU check error: {e}, falling back to CPU")
         device = "cpu"
 
     # Set environment variable for subprocess to pick up
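Review note: the hunk above keeps device = "cuda" only when torch can actually reach a GPU, and otherwise falls back to "cpu" before the comparison subprocess is launched (the surrounding context sets an environment variable for the subprocess to pick up). Below is a minimal sketch of that pattern for reference; the helper name resolve_device, the env variable MENTORFLOW_DEVICE, and the script name compare_strategies.py are illustrative assumptions, not taken from app.py.

    # Minimal sketch (assumed names noted above) of the device-resolution pattern.
    import os
    import subprocess

    def resolve_device(requested: str) -> str:
        """Return "cuda" only if torch can actually see a GPU; otherwise "cpu"."""
        if requested != "cuda":
            return requested
        try:
            import torch
            if not torch.cuda.is_available():
                print("⚠️ CUDA not available, falling back to CPU")
                return "cpu"
            # Reading the device name verifies the GPU is really usable
            print(f"✅ GPU available: {torch.cuda.get_device_name(0)}")
            return "cuda"
        except ImportError:
            print("⚠️ PyTorch not available, falling back to CPU")
            return "cpu"
        except Exception as e:
            print(f"⚠️ GPU check error: {e}, falling back to CPU")
            return "cpu"

    device = resolve_device("cuda")
    # Hand the decision to the worker process through its environment
    # (env variable and script names are hypothetical).
    env = {**os.environ, "MENTORFLOW_DEVICE": device}
    subprocess.run(["python", "compare_strategies.py"], env=env, check=True)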
@@ -168,25 +171,33 @@ def run_comparison(iterations: int, seed: int, use_deterministic: bool, device:
 
 
 def check_gpu():
-    """Check if GPU is available."""
+    """Check if GPU is available on Hugging Face Spaces."""
     try:
         import torch
+
+        # Check CUDA availability
         if torch.cuda.is_available():
             try:
                 gpu_name = torch.cuda.get_device_name(0)
                 gpu_count = torch.cuda.device_count()
-                …
+                cuda_version = torch.version.cuda
+                return f"✅ GPU Available: {gpu_name} (Count: {gpu_count}, CUDA: {cuda_version})"
             except Exception as e:
-                …
+                # GPU might be available but not immediately accessible
+                return f"✅ GPU Detected (accessing: {str(e)[:50]}...)"
         else:
-            # …
+            # On Hugging Face Spaces, check environment
             if os.getenv("SPACE_ID"):
-                …
-                …
+                # Check if GPU hardware is allocated
+                hf_hardware = os.getenv("SPACE_HARDWARE", "unknown")
+                if "gpu" in hf_hardware.lower() or "t4" in hf_hardware.lower() or "l4" in hf_hardware.lower():
+                    return f"⚠️ GPU Hardware ({hf_hardware}) allocated but not accessible yet. Try running anyway."
+                return f"⚠️ No GPU on this Space (hardware: {hf_hardware}). Please configure GPU tier."
+            return "⚠️ No GPU available, will use CPU"
     except ImportError:
         return "⚠️ PyTorch not installed"
     except Exception as e:
-        return f"⚠️ …
+        return f"⚠️ GPU check error: {str(e)}"
 
 
 # Create Gradio interface
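Review note: when torch reports no CUDA device, the new check_gpu() falls back to the SPACE_ID and SPACE_HARDWARE environment variables that Hugging Face Spaces sets, so the status string can distinguish "GPU tier allocated but not visible yet" from "CPU-only Space". Because the function returns a plain string, it can be surfaced directly in the UI. The wiring below (a gr.Markdown status plus a refresh button) is an illustrative sketch, not necessarily how app.py does it, and it assumes check_gpu from the code above is in scope.

    import gradio as gr
    # Assumes check_gpu() as defined in app.py (shown above) is importable here.

    with gr.Blocks() as status_demo:
        gpu_status = gr.Markdown(value=check_gpu())        # probed once at build time
        refresh = gr.Button("Re-check GPU")
        refresh.click(fn=check_gpu, outputs=gpu_status)    # re-run the probe on demand

    status_demo.launch()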
@@ -240,10 +251,10 @@ with gr.Blocks(title="MentorFlow - Strategy Comparison") as demo:
             )
 
             device = gr.Radio(
-                choices=["…
-                value="…
+                choices=["cuda", "cpu"],
+                value="cuda", # Default to GPU for HF Spaces with Nvidia 4xL4
                 label="Device",
-                info="…
+                info="GPU (cuda) recommended for Nvidia 4xL4, CPU fallback available"
             )
 
         with gr.Column():
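Review note: defaulting the Radio to "cuda" is consistent with the fallback logic earlier in the diff, since run_comparison downgrades to CPU on its own when the GPU turns out to be unusable. For context, here is a hedged sketch of how the selector might feed run_comparison; only the gr.Radio arguments and the run_comparison signature come from the diff, while the other component names, placeholder defaults, button, and output Textbox are assumptions.

    import gradio as gr
    # Assumes run_comparison(iterations, seed, use_deterministic, device) from app.py
    # is in scope and returns a printable result.

    with gr.Blocks(title="MentorFlow - Strategy Comparison") as demo:
        with gr.Row():
            with gr.Column():
                iterations = gr.Number(value=1000, label="Iterations", precision=0)
                seed = gr.Number(value=42, label="Seed", precision=0)
                use_deterministic = gr.Checkbox(value=True, label="Deterministic mode")
                device = gr.Radio(
                    choices=["cuda", "cpu"],
                    value="cuda",
                    label="Device",
                    info="GPU (cuda) recommended for Nvidia 4xL4, CPU fallback available",
                )
                run_btn = gr.Button("Run comparison")
            with gr.Column():
                results = gr.Textbox(label="Results", lines=20)

        run_btn.click(
            fn=run_comparison,
            inputs=[iterations, seed, use_deterministic, device],
            outputs=results,
        )

    demo.launch()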