"""Checkpoint management for training."""
import json
from pathlib import Path
from typing import Dict, Any, Optional, List
from datetime import datetime
import logging

logger = logging.getLogger(__name__)


class CheckpointManager:
    """
    Manages model checkpoints during training.
    
    Handles saving, loading, and cleanup of checkpoints.
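    
    Example (illustrative; assumes a model exposing save_checkpoint and
    load_checkpoint methods, as required by the methods below):
    
        manager = CheckpointManager(save_interval=10)
        if manager.should_save(episode):
            manager.save_checkpoint(model, episode, metrics={"reward": reward})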
    """
    
    def __init__(
        self,
        checkpoint_dir: str = "checkpoints",
        max_checkpoints: int = 5,
        save_interval: int = 10
    ):
        """
        Initialize checkpoint manager.
        
        Args:
            checkpoint_dir: Directory to save checkpoints
            max_checkpoints: Maximum number of checkpoints to keep
            save_interval: Save checkpoint every N episodes
        """
        self.checkpoint_dir = Path(checkpoint_dir)
        self.checkpoint_dir.mkdir(parents=True, exist_ok=True)
        
        self.max_checkpoints = max_checkpoints
        self.save_interval = save_interval
        
        self.checkpoint_history: List[Dict[str, Any]] = []
        
        logger.info(f"CheckpointManager initialized: dir={checkpoint_dir}, max={max_checkpoints}, interval={save_interval}")
    
    def should_save(self, episode: int) -> bool:
        """
        Check if checkpoint should be saved at this episode.
        
        Args:
            episode: Current episode number
        
        Returns:
            True if should save checkpoint
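        
        Example:
            With save_interval=10, episodes 10, 20, 30, ... return True;
            episode 0 never triggers a save.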
        """
        if episode == 0:
            return False
        
        return episode % self.save_interval == 0
    
    def save_checkpoint(
        self,
        model,
        episode: int,
        metrics: Optional[Dict[str, Any]] = None,
        is_best: bool = False
    ) -> str:
        """
        Save a checkpoint.
        
        Args:
            model: Model to save
            episode: Current episode number
            metrics: Optional training metrics
            is_best: Whether this is the best model so far
        
        Returns:
            Path to saved checkpoint
        """
        # Create checkpoint filename
        if is_best:
            filename = "best_model.pt"
        else:
            filename = f"checkpoint_episode_{episode}.pt"
        
        checkpoint_path = self.checkpoint_dir / filename
        
        # Prepare metadata
        metadata = {
            'episode': episode,
            'timestamp': datetime.now().isoformat(),
            'is_best': is_best
        }
        
        if metrics:
            metadata['metrics'] = metrics
        
        # Save checkpoint
        model.save_checkpoint(str(checkpoint_path), metadata=metadata)
        
        # Record in history
        self.checkpoint_history.append({
            'path': str(checkpoint_path),
            'episode': episode,
            'timestamp': metadata['timestamp'],
            'is_best': is_best
        })
        
        logger.info(f"Checkpoint saved: {checkpoint_path}")
        
        # Cleanup old checkpoints
        if not is_best:
            self._cleanup_old_checkpoints()
        
        return str(checkpoint_path)
    
    def load_checkpoint(
        self,
        model,
        checkpoint_path: Optional[str] = None,
        load_best: bool = False
    ) -> Dict[str, Any]:
        """
        Load a checkpoint.
        
        Args:
            model: Model to load checkpoint into
            checkpoint_path: Optional specific checkpoint path
            load_best: If True, load best model
        
        Returns:
            Checkpoint metadata
        """
        if load_best:
            best_path = self.checkpoint_dir / "best_model.pt"
            if not best_path.exists():
                raise FileNotFoundError(f"Best model not found: {best_path}")
            checkpoint_path = str(best_path)
        elif checkpoint_path is None:
            # Fall back to the most recent episode checkpoint
            checkpoint_path = self._get_latest_checkpoint()
            if checkpoint_path is None:
                raise FileNotFoundError("No checkpoints found")
        
        metadata = model.load_checkpoint(checkpoint_path)
        
        logger.info(f"Checkpoint loaded: {checkpoint_path}")
        logger.info(f"Episode: {metadata.get('episode', 'unknown')}")
        
        return metadata
    
    def _get_latest_checkpoint(self) -> Optional[str]:
        """
        Get path to most recent checkpoint.
        
        Returns:
            Path to latest checkpoint or None
        """
        checkpoints = sorted(
            self.checkpoint_dir.glob("checkpoint_episode_*.pt"),
            key=lambda p: p.stat().st_mtime,
            reverse=True
        )
        
        if checkpoints:
            return str(checkpoints[0])
        
        return None
    
    def _cleanup_old_checkpoints(self) -> None:
        """Remove old checkpoints, keeping only the most recent N."""
        # Get all episode checkpoints (not best model)
        checkpoints = sorted(
            self.checkpoint_dir.glob("checkpoint_episode_*.pt"),
            key=lambda p: p.stat().st_mtime,
            reverse=True
        )
        
        # Remove old checkpoints
        if len(checkpoints) > self.max_checkpoints:
            for old_checkpoint in checkpoints[self.max_checkpoints:]:
                old_checkpoint.unlink()
                logger.debug(f"Removed old checkpoint: {old_checkpoint}")
    
    def list_checkpoints(self) -> List[Dict[str, Any]]:
        """
        List all available checkpoints.
        
        Returns:
            List of checkpoint information
        """
        checkpoints = []
        
        for checkpoint_file in self.checkpoint_dir.glob("*.pt"):
            stat = checkpoint_file.stat()
            checkpoints.append({
                'path': str(checkpoint_file),
                'name': checkpoint_file.name,
                'size_mb': stat.st_size / (1024 * 1024),
                'modified': datetime.fromtimestamp(stat.st_mtime).isoformat()
            })
        
        return sorted(checkpoints, key=lambda x: x['modified'], reverse=True)
    
    def get_checkpoint_history(self) -> List[Dict[str, Any]]:
        """
        Get checkpoint history.
        
        Returns:
            List of checkpoint records
        """
        return self.checkpoint_history
    
    def save_training_state(
        self,
        state: Dict[str, Any],
        filename: str = "training_state.json"
    ) -> None:
        """
        Save training state to JSON.
        
        Args:
            state: Training state dictionary
            filename: Output filename
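        
        Example:
            manager.save_training_state({"episode": 120, "epsilon": 0.1})
            # keys are illustrative; any JSON-serializable dict works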
        """
        state_path = self.checkpoint_dir / filename
        
        with open(state_path, 'w') as f:
            json.dump(state, f, indent=2)
        
        logger.info(f"Training state saved: {state_path}")
    
    def load_training_state(
        self,
        filename: str = "training_state.json"
    ) -> Dict[str, Any]:
        """
        Load training state from JSON.
        
        Args:
            filename: State filename
        
        Returns:
            Training state dictionary
        """
        state_path = self.checkpoint_dir / filename
        
        if not state_path.exists():
            raise FileNotFoundError(f"Training state not found: {state_path}")
        
        with open(state_path, 'r') as f:
            state = json.load(f)
        
        logger.info(f"Training state loaded: {state_path}")
        
        return state
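

# ---------------------------------------------------------------------------
# Minimal usage sketch. The stub model below is hypothetical: CheckpointManager
# only requires that the model expose save_checkpoint(path, metadata=...) and
# load_checkpoint(path) -> metadata, whatever the real implementation is.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)

    class _StubModel:
        """Stand-in model that persists only metadata, as JSON, for the demo."""

        def save_checkpoint(self, path: str, metadata: Optional[Dict[str, Any]] = None) -> None:
            with open(path, 'w') as f:
                json.dump(metadata or {}, f)

        def load_checkpoint(self, path: str) -> Dict[str, Any]:
            with open(path, 'r') as f:
                return json.load(f)

    manager = CheckpointManager(checkpoint_dir="checkpoints_demo", max_checkpoints=3, save_interval=10)
    model = _StubModel()

    for episode in range(1, 51):
        if manager.should_save(episode):
            manager.save_checkpoint(model, episode, metrics={'loss': 1.0 / episode})

    # Cleanup keeps only the 3 most recent episode checkpoints on disk.
    for info in manager.list_checkpoints():
        print(f"{info['name']}: {info['size_mb']:.4f} MB")

    # With no explicit path, the most recent checkpoint is loaded.
    metadata = manager.load_checkpoint(model)
    print("Resumed from episode", metadata.get('episode'))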