|
|
|
|
|
""" |
|
|
Elizabeth Training Monitor |
|
|
Monitors training processes and automatically restarts if needed |
|
|
""" |
|
|
|
|
|
import os |
|
|
import sys |
|
|
import time |
|
|
import subprocess |
|
|
import signal |
|
|
import logging |
|
|
from datetime import datetime |
|
|
from pathlib import Path |
|
|
|
|
|
|
|
|
# Create the log directory *before* configuring logging: logging.FileHandler
# opens its file eagerly and raises FileNotFoundError if the parent directory
# is missing (TrainingMonitor.__init__ makes it too, but only after import).
os.makedirs("/workspace/elizabeth_logs", exist_ok=True)

logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        # Persist monitor output to disk and mirror it to stdout.
        logging.FileHandler('/workspace/elizabeth_logs/training_monitor.log'),
        logging.StreamHandler(sys.stdout)
    ]
)

# Module-level logger shared by everything in this file.
logger = logging.getLogger(__name__)
|
|
|
|
|
class TrainingMonitor:
    """Monitor and manage Elizabeth training processes.

    Launches the training script as a child process, streams its output into
    the monitor log, and restarts it (up to ``max_restarts`` times) whenever
    it exits with a non-zero status.
    """

    def __init__(self):
        # Path of the script launched by start_training().
        self.training_script = "/workspace/elizabeth-repo/src/elizabeth_main.py"
        # Maximum number of automatic restarts before giving up.
        self.max_restarts = 10
        # Seconds to wait between a failure and the next restart attempt.
        self.restart_delay = 30
        # subprocess.Popen handle for the running training process (or None).
        self.process = None
        # How many restarts have been performed so far.
        self.restart_count = 0

        os.makedirs("/workspace/elizabeth_logs", exist_ok=True)

    def start_training(self):
        """Start the training process.

        Returns:
            bool: True if the child process was spawned, False on error.
        """
        try:
            logger.info("Starting Elizabeth training session...")

            # Merge stderr into stdout: reading two pipes with blocking
            # readline() (as monitor_process does) can deadlock when the
            # child writes only to one stream, and an unread pipe buffer
            # filling up can stall the child itself. A single merged stream
            # avoids both hazards. ``text=True`` already implies
            # universal newlines, so the redundant flag is dropped.
            self.process = subprocess.Popen(
                [
                    sys.executable, self.training_script,
                    "--interactive",
                    "--version", "v0.0.2"
                ],
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                text=True,
                bufsize=1,  # line-buffered so output appears promptly
            )

            logger.info(f"Training process started with PID: {self.process.pid}")
            return True

        except Exception as e:
            logger.error(f"Failed to start training: {e}")
            return False

    def monitor_process(self):
        """Stream the child's output to the log until it exits.

        Returns:
            int: the child's exit code, or -1 if monitoring itself failed.
        """
        try:
            # Iterating the pipe yields lines as they arrive and ends
            # cleanly at EOF (when the child closes its end), so no
            # polling/sleep loop is needed.
            if self.process.stdout:
                for line in self.process.stdout:
                    line = line.strip()
                    if line:
                        logger.info(f"TRAINING: {line}")

            # EOF reached: reap the child and report its exit status.
            return_code = self.process.wait()
            if return_code != 0:
                logger.warning(f"Training process exited with code: {return_code}")
            return return_code

        except Exception as e:
            logger.error(f"Monitoring error: {e}")
            return -1

    def graceful_shutdown(self):
        """Gracefully shutdown the training process.

        Sends SIGTERM, waits up to 10 seconds, then falls back to SIGKILL.
        No-op when no process has been started.
        """
        if self.process:
            try:
                logger.info("Sending graceful shutdown signal...")
                self.process.terminate()

                try:
                    # wait(timeout=...) replaces a hand-rolled poll/sleep
                    # loop and also reaps the child, avoiding a zombie.
                    self.process.wait(timeout=10)
                except subprocess.TimeoutExpired:
                    logger.warning("Process not terminating, forcing kill...")
                    self.process.kill()
                    self.process.wait()  # reap after SIGKILL

                logger.info("Training process shutdown complete")

            except Exception as e:
                logger.error(f"Shutdown error: {e}")

    def run_monitoring_loop(self):
        """Main monitoring loop with automatic restarts.

        Restarts the training process after abnormal exits until it either
        completes successfully (exit code 0) or ``max_restarts`` is reached.
        Ctrl-C triggers a graceful shutdown.
        """
        logger.info("🚀 Starting Elizabeth Training Monitor")
        logger.info(f"Max restarts: {self.max_restarts}")
        logger.info(f"Restart delay: {self.restart_delay}s")

        while self.restart_count <= self.max_restarts:
            try:
                if not self.start_training():
                    logger.error("Failed to start training process")
                    break

                return_code = self.monitor_process()

                if return_code == 0:
                    logger.info("Training completed successfully")
                    break
                elif self.restart_count < self.max_restarts:
                    self.restart_count += 1
                    logger.warning(f"Restarting training ({self.restart_count}/{self.max_restarts})...")
                    logger.info(f"Waiting {self.restart_delay} seconds before restart...")
                    time.sleep(self.restart_delay)
                else:
                    logger.error("Max restart attempts reached")
                    break

            except KeyboardInterrupt:
                logger.info("Received interrupt signal, shutting down...")
                break
            except Exception as e:
                logger.error(f"Unexpected error in monitoring loop: {e}")
                self.restart_count += 1
                if self.restart_count <= self.max_restarts:
                    logger.info(f"Restarting after error... ({self.restart_count}/{self.max_restarts})")
                    time.sleep(self.restart_delay)
                else:
                    break

        # Always clean up the child (no-op if it already exited).
        self.graceful_shutdown()
        logger.info("Training monitor shutting down")

    def get_status(self):
        """Get current monitoring status.

        Returns:
            dict: restart counters, whether the child is alive, its PID
            (None when not started), and an ISO-8601 timestamp.
        """
        return {
            "restart_count": self.restart_count,
            "max_restarts": self.max_restarts,
            # bool(...) so callers always get True/False, never None
            # (the bare `and` expression yields None when process is unset).
            "process_active": bool(self.process is not None and self.process.poll() is None),
            "process_pid": self.process.pid if self.process else None,
            "timestamp": datetime.now().isoformat()
        }
|
|
|
|
|
def main():
    """Command line interface"""
    # Imported lazily so merely importing this module stays cheap.
    import argparse

    parser = argparse.ArgumentParser(description="Elizabeth Training Monitor")
    parser.add_argument("--start", action="store_true", help="Start monitoring")
    parser.add_argument("--status", action="store_true", help="Show status")
    parser.add_argument("--stop", action="store_true", help="Stop monitoring")
    parser.add_argument("--max-restarts", type=int, default=10, help="Max restart attempts")
    parser.add_argument("--restart-delay", type=int, default=30, help="Restart delay in seconds")
    opts = parser.parse_args()

    # Build a monitor and apply the CLI-tunable limits.
    monitor = TrainingMonitor()
    monitor.max_restarts = opts.max_restarts
    monitor.restart_delay = opts.restart_delay

    # Dispatch on the requested action; each branch returns early.
    if opts.start:
        monitor.run_monitoring_loop()
        return
    if opts.status:
        print("Training Monitor Status:")
        for key, value in monitor.get_status().items():
            print(f"  {key}: {value}")
        return
    if opts.stop:
        monitor.graceful_shutdown()
        print("Shutdown signal sent")
        return
    print("No action specified. Use --help for options.")


if __name__ == "__main__":
    main()