import base64
import hashlib
import hmac
import os
import random
import time

import torch

from app.logger_config import logger as logging

def debug_current_device():
    """Safely logs GPU or CPU information without crashing on stateless GPU."""
    logging.debug("=== Debugging current device ===")

    try:
        if torch.cuda.is_available():
            device_name = torch.cuda.get_device_name(0)
            memory_allocated = torch.cuda.memory_allocated(0) / (1024 ** 2)
            memory_reserved = torch.cuda.memory_reserved(0) / (1024 ** 2)
            memory_total = torch.cuda.get_device_properties(0).total_memory / (1024 ** 2)
            capability = torch.cuda.get_device_capability(0)
            current_device = torch.cuda.current_device()
            logging.debug(f"GPU name          : {device_name}")
            logging.debug(f"Current device ID : {current_device}")
            logging.debug(f"CUDA capability   : {capability}")
            logging.debug(f"Memory allocated  : {memory_allocated:.2f} MB")
            logging.debug(f"Memory reserved   : {memory_reserved:.2f} MB")
            logging.debug(f"Total memory      : {memory_total:.2f} MB")
        else:
            logging.debug("No GPU detected, running on CPU")

    except RuntimeError as e:
        # Handles Hugging Face Spaces “Stateless GPU” restriction
        if "CUDA must not be initialized" in str(e):
            logging.warning("⚠️ Skipping CUDA info: Stateless GPU environment detected.")
        else:
            logging.error(f"Unexpected CUDA error: {e}")


def get_current_device():
    """Return a (device, device_name) tuple, falling back to CPU when CUDA is unusable."""
    try:
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        device_name = torch.cuda.get_device_name(0) if torch.cuda.is_available() else "CPU"
        # Allocate a tiny probe tensor so CUDA problems surface here rather than later.
        torch.tensor([0], dtype=torch.float32, device=device)
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
            logging.debug("GPU cache cleared")
    except RuntimeError as e:
        if "CUDA must not be initialized" in str(e):
            # Hugging Face Spaces "Stateless GPU" restriction: stay on CPU in the main process.
            device = torch.device("cpu")
            device_name = "CPU (stateless GPU mode)"
        else:
            logging.error(f"Unexpected CUDA error, falling back to CPU: {e}")
            device = torch.device("cpu")
            device_name = "CPU (fallback after CUDA error)"
    return device, device_name
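
# Illustrative usage (not part of the original module): callers unpack the tuple and move
# their model and tensors onto the selected device. `MyModel` is a hypothetical class name.
#
#   device, device_name = get_current_device()
#   logging.info(f"Running inference on {device_name}")
#   model = MyModel().to(device)
#   batch = torch.randn(1, 3, 224, 224, device=device)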



def generate_coturn_config():
    """
    Generate a complete Coturn configuration with dynamic authentication (use-auth-secret).
    Returns:
        dict: coturn_config object ready to be used by the WebRTC client.
    """

    secret_key = os.getenv("TURN_SECRET_KEY", "your_secret_key")
    ttl = int(os.getenv("TURN_TTL", 3600))
    turn_url = os.getenv("TURN_URL", "turn:*******")
    turn_s_url = os.getenv("TURN_S_URL", "turns:*****")
    user = os.getenv("TURN_USER", "client")

    # TURN REST credentials (coturn's use-auth-secret mode): the username embeds an expiry
    # timestamp as "<expiry>:<user>" and the password is base64(HMAC-SHA1(secret, username)).
    timestamp = int(time.time()) + ttl
    username = f"{timestamp}:{user}"
    password = base64.b64encode(
        hmac.new(secret_key.encode(), username.encode(), hashlib.sha1).digest()
    ).decode()
 
    coturn_config = {
        "iceServers": [
            {
                "urls": [
                    f"{turn_url}",
                    f"{turn_s_url}",
                ],
                "username": username,
                "credential": password,
            }
        ]
    }
    return coturn_config
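

# Illustrative sketch (not part of the original module): a coturn server configured with
# `use-auth-secret` and the same static-auth-secret recomputes the HMAC produced above and
# rejects credentials whose embedded timestamp has expired. The helper below mirrors that
# check in Python for local testing; `verify_turn_credential` is a hypothetical name.
def verify_turn_credential(username: str, credential: str, secret_key: str) -> bool:
    """Recompute the use-auth-secret HMAC and check that the credential has not expired."""
    expected = base64.b64encode(
        hmac.new(secret_key.encode(), username.encode(), hashlib.sha1).digest()
    ).decode()
    expiry = int(username.split(":", 1)[0])
    return hmac.compare_digest(expected, credential) and expiry > int(time.time())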


def raise_function():
    """Raise an error randomly (1 out of 10 times)."""
    if random.randint(1, 50) == 1:
        raise RuntimeError("Random failure triggered!")
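

if __name__ == "__main__":
    # Minimal smoke test (not part of the original module): exercises each helper once.
    # It assumes the TURN_* environment variables are either set or left to the placeholder
    # defaults above.
    debug_current_device()
    device, device_name = get_current_device()
    logging.debug(f"Selected device: {device} ({device_name})")
    logging.debug(f"Coturn config: {generate_coturn_config()}")
    try:
        raise_function()
    except RuntimeError as e:
        logging.warning(f"raise_function triggered: {e}")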