Archime committed on
Commit
b5c329a
·
1 Parent(s): c984053

add logging

Browse files
Files changed (5) hide show
  1. .gitignore +5 -0
  2. app.py +40 -9
  3. app/__init__.py +0 -0
  4. app/logger_config.py +59 -0
  5. requirements.txt +2 -1
.gitignore ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ .env
2
+ .vscode/settings.json
3
+ app/__pycache__/
4
+ __pycache__/
5
+ logs/app.log
app.py CHANGED
@@ -1,23 +1,54 @@
 
 
1
  import gradio as gr
2
  import spaces
3
  import torch
4
 
5
- @spaces.GPU
 
 
 
 
 
6
  def greet(name):
7
- # Détection du GPU
8
- if torch.cuda.is_available():
9
- device_name = torch.cuda.get_device_name(0)
 
 
 
 
10
  device = torch.device("cuda")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
  else:
12
- device_name = "CPU (aucun GPU détecté)"
13
  device = torch.device("cpu")
 
 
14
 
15
- # Crée un tenseur sur GPU ou CPU
16
  tensor = torch.tensor([len(name)], dtype=torch.float32, device=device)
 
 
 
 
 
 
17
 
18
- # Retourne une info lisible
19
- result = f"Tensor: {tensor.cpu().numpy()} | Device: {device_name}"
20
- return result
21
 
22
  demo = gr.Interface(
23
  fn=greet,
 
1
# NOTE(review): the project logger is imported under the name "logging", which
# shadows the stdlib logging module for the rest of this file — confirm this
# alias is intentional before adding stdlib-logging calls here.
from app.logger_config import logger as logging
import os
import gradio as gr
import spaces
import torch


# Startup probes: the info line is always emitted; the debug line appears only
# when logger_config was initialized with DEBUG enabled.
logging.info("-----------info------------")
logging.debug("-----------debug------------")
12
@spaces.GPU(progress=gr.Progress(track_tqdm=True))
def greet(name):
    """Build a one-element tensor from the length of *name* and report the device.

    When CUDA is available the tensor lives on the GPU and detailed GPU
    diagnostics (name, capability, memory usage) are written to the debug log;
    otherwise the CPU is used. Returns a human-readable summary string.
    """
    logging.debug("=== Start of greet() ===")

    use_cuda = torch.cuda.is_available()
    logging.debug(f"GPU available: {use_cuda}")

    if use_cuda:
        device = torch.device("cuda")
        device_name = torch.cuda.get_device_name(0)
        # Report memory figures in megabytes.
        mb = 1024 ** 2
        allocated_mb = torch.cuda.memory_allocated(0) / mb
        reserved_mb = torch.cuda.memory_reserved(0) / mb
        total_mb = torch.cuda.get_device_properties(0).total_memory / mb
        logging.debug(f"GPU name : {device_name}")
        logging.debug(f"Current device ID : {torch.cuda.current_device()}")
        logging.debug(f"CUDA capability : {torch.cuda.get_device_capability(0)}")
        logging.debug(f"Memory allocated : {allocated_mb:.2f} MB")
        logging.debug(f"Memory reserved : {reserved_mb:.2f} MB")
        logging.debug(f"Total memory : {total_mb:.2f} MB")
    else:
        device = torch.device("cpu")
        device_name = "CPU (no GPU detected)"
        logging.debug("No GPU detected, using CPU")

    tensor = torch.tensor([len(name)], dtype=torch.float32, device=device)
    logging.debug(f"Tensor created: {tensor}")

    if use_cuda:
        # Release cached allocator blocks so this demo does not pin GPU memory.
        torch.cuda.empty_cache()
        logging.debug("GPU cache cleared")

    logging.debug("=== End of greet() ===")
    return f"Tensor: {tensor.cpu().numpy()} | Device: {device_name}"
 
52
 
53
  demo = gr.Interface(
54
  fn=greet,
app/__init__.py ADDED
File without changes
app/logger_config.py ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# logger_config.py — central logging setup for the app.
#
# Exposes a module-level ``logger`` named "app_logger" that writes to both the
# console and a size-rotated file under logs/. Verbosity is controlled by the
# DEBUG environment variable, loaded from a .env file via python-dotenv.

import logging
import os
from logging.handlers import RotatingFileHandler

from dotenv import load_dotenv, find_dotenv

# Load .env first so DEBUG can be set without touching the real environment.
load_dotenv(find_dotenv())

# DEBUG=true (case-insensitive) switches the whole app to debug-level logging.
DEBUG = os.getenv("DEBUG", "false").lower() == "true"

# Create "logs" directory if it doesn't exist.
os.makedirs("logs", exist_ok=True)

# Single effective level shared by the logger and both handlers.
_LEVEL = logging.DEBUG if DEBUG else logging.INFO

logger = logging.getLogger("app_logger")  # unique name
logger.setLevel(_LEVEL)

# Avoid duplicate handlers when this module is imported more than once.
if not logger.handlers:
    formatter = logging.Formatter("%(asctime)s | %(levelname)s | %(module)s | %(message)s")

    # Console handler
    console_handler = logging.StreamHandler()
    console_handler.setLevel(_LEVEL)
    console_handler.setFormatter(formatter)
    logger.addHandler(console_handler)

    # Rotating file handler: 2 MB per file, 5 backups kept.
    file_handler = RotatingFileHandler("logs/app.log", maxBytes=2 * 1024 * 1024, backupCount=5)
    file_handler.setLevel(_LEVEL)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)

# Startup message
if DEBUG:
    logger.debug("Logger initialized (DEBUG)")
else:
    logger.info("Logger initialized (Production Mode)")
40
+ # --- Logger Silencing Function ---
41
+
42
+ def silence_logger(logger_name: str | None, level=logging.ERROR):
43
+ """Sets the log level and removes StreamHandlers for a given logger."""
44
+ target_logger = logging.getLogger(logger_name)
45
+ target_logger.setLevel(level)
46
+
47
+ for handler in target_logger.handlers[:]:
48
+ if isinstance(handler, logging.StreamHandler):
49
+ target_logger.removeHandler(handler)
50
+
51
+ # Ensure messages don't propagate to the parent (root) logger
52
+ target_logger.propagate = False
53
+
54
+ # silence_logger(None, logging.CRITICAL)
55
+ # logger.propagate = True
56
+
57
+ # silence_logger("nemo_logger", logging.CRITICAL)
58
+ # silence_logger("pytorch_lightning", logging.ERROR)
59
+
requirements.txt CHANGED
@@ -1,3 +1,4 @@
1
  gradio
2
  spaces
3
- torch
 
 
1
  gradio
2
  spaces
3
+ torch
4
+ python-dotenv