"""
Elizabeth Training Manager

Advanced training management with multiple training modes and robust monitoring.
"""

import os
import sys
import time
import subprocess
import logging
import json
from datetime import datetime

# Create the log directory before the FileHandler below tries to open a file in it.
os.makedirs('/workspace/elizabeth_logs', exist_ok=True)

logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('/workspace/elizabeth_logs/training_manager.log'),
        logging.StreamHandler(sys.stdout)
    ]
)
logger = logging.getLogger(__name__)

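# A log line produced by the format above looks like (illustrative values):
#   2024-01-01 12:00:00,000 - __main__ - INFO - Training process started with PID: 12345
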

class TrainingManager:
    """Advanced training management for Elizabeth"""

    def __init__(self):
        self.script_path = "/workspace/elizabeth-repo/src/elizabeth_main.py"
        self.max_restarts = 20       # maximum automatic restart attempts
        self.restart_delay = 30      # seconds to wait between restarts
        self.process = None          # subprocess.Popen handle for the child
        self.stdin_file = None       # input file handle for modes that pipe prompts
        self.restart_count = 0
        self.training_mode = "interactive"

        # Per-mode launch configurations for elizabeth_main.py.
        self.training_configs = {
            "interactive": {
                "args": ["--interactive", "--version", "v0.0.2"],
                "description": "Interactive session with human guidance"
            },
            # NOTE: "autonomous" currently reuses the interactive arguments.
            "autonomous": {
                "args": ["--interactive", "--version", "v0.0.2"],
                "description": "Fully autonomous learning mode"
            },
            "learning": {
                "args": ["--version", "v0.0.2"],
                "input_file": "/workspace/training_data/learning_prompts.txt",
                "description": "Focused learning from training data"
            }
        }
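
        # "learning" mode pipes input_file to the child's stdin. The prompt
        # format is defined by elizabeth_main.py; a plausible layout (assumed,
        # not confirmed here) is one prompt per line, e.g.:
        #   Summarize the previous conversation.
        #   Explain gradient checkpointing in one paragraph.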

        # Make sure the log and training-data directories exist.
        os.makedirs("/workspace/elizabeth_logs", exist_ok=True)
        os.makedirs("/workspace/training_data", exist_ok=True)

        self.set_environment()

    def set_environment(self):
        """Set training environment variables"""
        # Pass through an existing HF_TOKEN (empty string if unset).
        os.environ["HF_TOKEN"] = os.getenv("HF_TOKEN", "")
        # Accelerated Hugging Face downloads (requires the hf_transfer package).
        os.environ["HUGGINGFACE_HUB_ENABLE_HF_TRANSFER"] = "1"
        # Unbuffered child output so it can be streamed line by line.
        os.environ["PYTHONUNBUFFERED"] = "1"

    def start_training(self, mode="interactive"):
        """Start a training session with the specified mode"""
        try:
            config = self.training_configs.get(mode, self.training_configs["interactive"])

            logger.info(f"Starting {mode} training session...")
            logger.info(f"Description: {config['description']}")

            cmd = [sys.executable, self.script_path] + config["args"]

            # Modes with an input_file feed prompts to the child via stdin.
            stdin = None
            if "input_file" in config and os.path.exists(config["input_file"]):
                stdin = open(config["input_file"], "r")
                self.stdin_file = stdin  # kept so graceful_shutdown() can close it

            self.process = subprocess.Popen(
                cmd,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                text=True,   # text=True already implies universal newlines
                bufsize=1,   # line-buffered
                stdin=stdin
            )

            logger.info(f"Training process started with PID: {self.process.pid}")
            logger.info(f"Command: {' '.join(cmd)}")

            return True

        except Exception as e:
            logger.error(f"Failed to start {mode} training: {e}")
            return False

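    # With the default "interactive" mode the composed command is, e.g.
    # (interpreter path varies by environment):
    #   python /workspace/elizabeth-repo/src/elizabeth_main.py --interactive --version v0.0.2
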
    def monitor_training(self, timeout=3600):
        """Monitor the training process, returning a status string"""
        start_time = time.time()

        try:
            while True:
                # Enforce the overall session timeout.
                if time.time() - start_time > timeout:
                    logger.warning(f"Training timeout after {timeout} seconds")
                    return "timeout"

                # Has the child exited?
                return_code = self.process.poll()
                if return_code is not None:
                    logger.info(f"Training process completed with code: {return_code}")
                    return "completed"

                # Relay child output. Note: readline() blocks until the child
                # writes a line, so a silent child can delay the checks above.
                if self.process.stdout:
                    output = self.process.stdout.readline()
                    if output:
                        logger.info(f"TRAINING_OUT: {output.strip()}")

                if self.process.stderr:
                    error = self.process.stderr.readline()
                    if error:
                        logger.error(f"TRAINING_ERR: {error.strip()}")

                # After five minutes, verify the process still exists;
                # signal 0 checks for existence without delivering a signal.
                if time.time() - start_time > 300:
                    try:
                        os.kill(self.process.pid, 0)
                    except OSError:
                        logger.warning("Process appears to be unresponsive")
                        return "stalled"

                time.sleep(1)

        except Exception as e:
            logger.error(f"Monitoring error: {e}")
            return "error"

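    # A sketch of a non-blocking alternative to the readline() relay above:
    # a daemon thread per stream forwards lines to the logger, so the
    # timeout/poll checks never stall. Names are illustrative, not part of
    # the original design.
    #
    #   import threading
    #
    #   def _pump(stream, log_fn, prefix):
    #       for line in iter(stream.readline, ''):
    #           log_fn(f"{prefix}: {line.strip()}")
    #
    #   threading.Thread(target=_pump,
    #                    args=(self.process.stdout, logger.info, "TRAINING_OUT"),
    #                    daemon=True).start()
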
    def graceful_shutdown(self):
        """Gracefully shut down training"""
        if self.process:
            try:
                logger.info("Initiating graceful shutdown...")

                # Ask the child to exit (SIGTERM on POSIX).
                self.process.terminate()

                # Give it up to 10 seconds to exit on its own.
                for _ in range(10):
                    if self.process.poll() is not None:
                        break
                    time.sleep(1)

                # Escalate to SIGKILL if it is still running.
                if self.process.poll() is None:
                    logger.warning("Process not terminating, forcing kill...")
                    self.process.kill()

                logger.info("Training shutdown complete")

            except Exception as e:
                logger.error(f"Shutdown error: {e}")

        # Close any input file that was opened for the child's stdin.
        if self.stdin_file:
            self.stdin_file.close()
            self.stdin_file = None

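    # The poll-and-sleep loop above could also use the standard library's
    # built-in timeout:
    #
    #   try:
    #       self.process.wait(timeout=10)
    #   except subprocess.TimeoutExpired:
    #       self.process.kill()
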
    def run_continuous_training(self):
        """Main continuous training loop"""
        logger.info("🚀 Starting Elizabeth Continuous Training Manager")
        logger.info(f"Mode: {self.training_mode}")
        logger.info(f"Max restarts: {self.max_restarts}")

        training_sessions = []

        while self.restart_count <= self.max_restarts:
            session_start = datetime.now()

            try:
                if not self.start_training(self.training_mode):
                    logger.error("Failed to start training session")
                    break

                result = self.monitor_training()
                session_end = datetime.now()
                duration = (session_end - session_start).total_seconds()

                session_info = {
                    "start": session_start.isoformat(),
                    "end": session_end.isoformat(),
                    "duration": duration,
                    "result": result,
                    "restart_count": self.restart_count,
                    "pid": self.process.pid if self.process else None
                }
                training_sessions.append(session_info)

                logger.info(f"Session completed: {result}, Duration: {duration:.1f}s")

                if result == "completed":
                    logger.info("Training session completed successfully")
                    break
                elif self.restart_count < self.max_restarts:
                    self.restart_count += 1
                    logger.warning(f"Restarting training ({self.restart_count}/{self.max_restarts})...")
                    logger.info(f"Waiting {self.restart_delay} seconds before restart...")

                    self.save_session_history(training_sessions)
                    time.sleep(self.restart_delay)
                else:
                    logger.error("Max restart attempts reached")
                    break

            except KeyboardInterrupt:
                logger.info("Received interrupt signal")
                break
            except Exception as e:
                logger.error(f"Unexpected error: {e}")
                self.restart_count += 1
                if self.restart_count <= self.max_restarts:
                    logger.info(f"Restarting after error... ({self.restart_count}/{self.max_restarts})")
                    time.sleep(self.restart_delay)
                else:
                    break

        self.graceful_shutdown()
        self.save_session_history(training_sessions)

        logger.info("Training manager shutting down")
        logger.info(f"Total sessions: {len(training_sessions)}")
        logger.info(f"Total restarts: {self.restart_count}")

    def save_session_history(self, sessions):
        """Save training session history"""
        try:
            history_file = "/workspace/elizabeth_logs/training_history.json"
            with open(history_file, 'w') as f:
                json.dump({
                    "sessions": sessions,
                    "total_sessions": len(sessions),
                    "total_restarts": self.restart_count,
                    "last_update": datetime.now().isoformat()
                }, f, indent=2)
        except Exception as e:
            logger.error(f"Failed to save session history: {e}")

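    # The resulting training_history.json has the following shape
    # (illustrative values):
    #
    #   {
    #     "sessions": [
    #       {"start": "2024-01-01T12:00:00", "end": "2024-01-01T12:30:00",
    #        "duration": 1800.0, "result": "timeout",
    #        "restart_count": 0, "pid": 12345}
    #     ],
    #     "total_sessions": 1,
    #     "total_restarts": 0,
    #     "last_update": "2024-01-01T12:30:30"
    #   }
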
    def get_status(self):
        """Get current status"""
        return {
            "training_mode": self.training_mode,
            "restart_count": self.restart_count,
            "max_restarts": self.max_restarts,
            # bool() so this reads False rather than None when no process exists
            "process_active": bool(self.process and self.process.poll() is None),
            "process_pid": self.process.pid if self.process else None,
            "timestamp": datetime.now().isoformat()
        }


def main():
    """Command line interface"""
    import argparse

    parser = argparse.ArgumentParser(description="Elizabeth Training Manager")
    parser.add_argument("--start", action="store_true", help="Start continuous training")
    parser.add_argument("--mode", choices=["interactive", "autonomous", "learning"],
                        default="interactive", help="Training mode")
    parser.add_argument("--status", action="store_true", help="Show status")
    parser.add_argument("--stop", action="store_true", help="Stop training")
    parser.add_argument("--max-restarts", type=int, default=20, help="Max restart attempts")
    parser.add_argument("--restart-delay", type=int, default=30, help="Restart delay in seconds")

    args = parser.parse_args()

    manager = TrainingManager()
    manager.max_restarts = args.max_restarts
    manager.restart_delay = args.restart_delay
    manager.training_mode = args.mode

    if args.start:
        manager.run_continuous_training()
    elif args.status:
        status = manager.get_status()
        print("Training Manager Status:")
        for key, value in status.items():
            print(f"  {key}: {value}")
    elif args.stop:
        # NOTE: this only terminates a process started by this manager
        # instance; a fresh instance has nothing to stop.
        manager.graceful_shutdown()
        print("Shutdown signal sent")
    else:
        print("No action specified. Use --help for options.")


if __name__ == "__main__":
    main()