def train(
self,
trainer=None,
**kwargs,
):
"""
Trains the model using the specified dataset and training configuration.
This method facilitates model training with a range of customizable settings. It supports training with a
custom trainer or the default training approach. The method handles scenarios such as resuming training
from a checkpoint, integrating with Ultralytics HUB, and updating model and configuration after training.
When using Ultralytics HUB, if the session has a loaded model, the method prioritizes HUB training
arguments and warns if local arguments are provided. It checks for pip updates and combines default
configurations, method-specific defaults, and user-provided arguments to configure the training process.
Args:
trainer (BaseTrainer | None): Custom trainer instance for model training. If None, uses default.
**kwargs (Any): Arbitrary keyword arguments for training configuration. Common options include:
data (str): Path to dataset configuration file.
epochs (int): Number of training epochs.
batch (int): Batch size for training.
imgsz (int): Input image size.
device (str): Device to run training on (e.g., 'cuda', 'cpu').
workers (int): Number of worker threads for data loading.
optimizer (str): Optimizer to use for training.
lr0 (float): Initial learning rate.
patience (int): Number of epochs to wait without improvement before early stopping.
Returns:
(Dict | None): Training metrics if available and training is successful; otherwise, None.
Raises:
AssertionError: If the model is not a PyTorch model.
PermissionError: If there is a permission issue with the HUB session.
ModuleNotFoundError: If the HUB SDK is not installed.
Examples:
>>> model = YOLO("yolo11n.pt")
>>> results = model.train(data="coco8.yaml", epochs=3)
"""
self._check_is_pytorch_model()
if hasattr(self.session, "model") and self.session.model.id: # Ultralytics HUB session with loaded model
if any(kwargs):
LOGGER.warning("WARNING ⚠️ using HUB training arguments, ignoring local training arguments.")
kwargs = self.session.train_args # overwrite kwargs
checks.check_pip_update_available()
overrides = yaml_load(checks.check_yaml(kwargs["cfg"])) if kwargs.get("cfg") else self.overrides
custom = {
# NOTE: handle the case when 'cfg' includes 'data'.
"data": overrides.get("data") or DEFAULT_CFG_DICT["data"] or TASK2DATA[self.task],
"model": self.overrides["model"],
"task": self.task,
} # method defaults
args = {**overrides, **custom, **kwargs, "mode": "train"} # highest priority args on the right
if args.get("resume"):
args["resume"] = self.ckpt_path
self.trainer = (trainer or self._smart_load("trainer"))(overrides=args, _callbacks=self.callbacks)
if not args.get("resume"): # manually set model only if not resuming
self.trainer.model = self.trainer.get_model(weights=self.model if self.ckpt else None, cfg=self.model.yaml)
self.model = self.trainer.model
self.trainer.hub_session = self.session # attach optional HUB session
self.trainer.train()
# Update model and cfg after training
if RANK in {-1, 0}:
ckpt = self.trainer.best if self.trainer.best.exists() else self.trainer.last
self.model, _ = attempt_load_one_weight(ckpt)
self.overrides = self.model.args
self.metrics = getattr(self.trainer.validator, "metrics", None) # TODO: no metrics returned by DDP
return self.metrics
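# Illustrative standalone sketch (not part of the class; all values assumed) of the
# argument merge used in train() above: later dicts win on key collisions, so user
# kwargs override the method defaults, which override the existing model overrides.
_overrides = {"imgsz": 640, "data": "coco8.yaml"}
_custom = {"data": "coco8.yaml", "task": "detect"}
_kwargs = {"imgsz": 1280, "epochs": 3}
_args = {**_overrides, **_custom, **_kwargs, "mode": "train"}
assert _args == {"imgsz": 1280, "data": "coco8.yaml", "task": "detect", "epochs": 3, "mode": "train"}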
def tune(
self,
use_ray=False,
iterations=10,
*args,
**kwargs,
):
"""
Conducts hyperparameter tuning for the model, with an option to use Ray Tune.
This method supports two modes of hyperparameter tuning: using Ray Tune or a custom tuning method.
When Ray Tune is enabled, it leverages the 'run_ray_tune' function from the ultralytics.utils.tuner module.
Otherwise, it uses the internal 'Tuner' class for tuning. The method combines default, overridden, and
custom arguments to configure the tuning process.
Args:
use_ray (bool): If True, uses Ray Tune for hyperparameter tuning. Defaults to False.
iterations (int): The number of tuning iterations to perform. Defaults to 10.
*args (List): Variable length argument list for additional arguments.
**kwargs (Dict): Arbitrary keyword arguments. These are combined with the model's overrides and defaults.
Returns:
(Dict): A dictionary containing the results of the hyperparameter search.
Raises:
AssertionError: If the model is not a PyTorch model.
Examples:
>>> model = YOLO("yolo11n.pt")
>>> results = model.tune(use_ray=True, iterations=20)
>>> print(results)
"""
self._check_is_pytorch_model()
if use_ray:
from ultralytics.utils.tuner import run_ray_tune
return run_ray_tune(self, max_samples=iterations, *args, **kwargs)
else:
from .tuner import Tuner
custom = {} # method defaults
args = {**self.overrides, **custom, **kwargs, "mode": "train"} # highest priority args on the right
return Tuner(args=args, _callbacks=self.callbacks)(model=self, iterations=iterations)
def _apply(self, fn) -> "Model":
"""
Applies a function to model tensors that are not parameters or registered buffers.
This method extends the functionality of the parent class's _apply method by additionally resetting the
predictor and updating the device in the model's overrides. It's typically used for operations like
moving the model to a different device or changing its precision.
Args:
fn (Callable): A function to be applied to the model's tensors. This is typically a method like
to(), cpu(), cuda(), half(), or float().
Returns:
(Model): The model instance with the function applied and updated attributes.
Raises:
AssertionError: If the model is not a PyTorch model.
Examples:
>>> model = Model("yolo11n.pt")
>>> model = model._apply(lambda t: t.cuda()) # Move model to GPU
"""
self._check_is_pytorch_model()
self = super()._apply(fn) # noqa
self.predictor = None # reset predictor as device may have changed
self.overrides["device"] = self.device # was str(self.device) i.e. device(type='cuda', index=0) -> 'cuda:0'
return self
@property
def names(self) -> dict:
"""
Retrieves the class names associated with the loaded model.
This property returns the class names if they are defined in the model. It checks the class names for validity
using the 'check_class_names' function from the ultralytics.nn.autobackend module. If the predictor is not
initialized, it sets it up before retrieving the names.
Returns:
(Dict[int, str]): A dict of class names associated with the model.
Raises:
AttributeError: If the model or predictor does not have a 'names' attribute.
Examples:
>>> model = YOLO("yolo11n.pt")
>>> print(model.names)
{0: 'person', 1: 'bicycle', 2: 'car', ...}
"""
from ultralytics.nn.autobackend import check_class_names
if hasattr(self.model, "names"):
return check_class_names(self.model.names)
if not self.predictor: # export formats will not have predictor defined until predict() is called
self.predictor = self._smart_load("predictor")(overrides=self.overrides, _callbacks=self.callbacks)
self.predictor.setup_model(model=self.model, verbose=False)
return self.predictor.model.names
@property
def device(self) -> torch.device:
"""
Retrieves the device on which the model's parameters are allocated.
This property determines the device (CPU or GPU) where the model's parameters are currently stored. It is
applicable only to models that are instances of nn.Module.
Returns:
(torch.device): The device (CPU/GPU) of the model.
Raises:
AttributeError: If the model is not a PyTorch nn.Module instance.
Examples:
>>> model = YOLO("yolo11n.pt")
>>> print(model.device)
device(type='cuda', index=0) # if CUDA is available
>>> model = model.to("cpu")
>>> print(model.device)
device(type='cpu')
"""
return next(self.model.parameters()).device if isinstance(self.model, nn.Module) else None
@property
def transforms(self):
"""
Retrieves the transformations applied to the input data of the loaded model.
This property returns the transformations if they are defined in the model. The transforms
typically include preprocessing steps like resizing, normalization, and data augmentation
that are applied to input data before it is fed into the model.
Returns:
(object | None): The transform object of the model if available, otherwise None.
Examples:
>>> model = YOLO("yolo11n.pt")
>>> transforms = model.transforms
>>> if transforms:
... print(f"Model transforms: {transforms}")
... else:
... print("No transforms defined for this model.")
"""
return self.model.transforms if hasattr(self.model, "transforms") else None
def add_callback(self, event: str, func) -> None:
"""
Adds a callback function for a specified event.
This method allows registering custom callback functions that are triggered on specific events during
model operations such as training or inference. Callbacks provide a way to extend and customize the
behavior of the model at various stages of its lifecycle.
Args:
event (str): The name of the event to attach the callback to. Must be a valid event name recognized
by the Ultralytics framework.
func (Callable): The callback function to be registered. This function will be called when the
specified event occurs.
Raises:
ValueError: If the event name is not recognized or is invalid.
Examples:
>>> def on_train_start(trainer):
... print("Training is starting!")
>>> model = YOLO("yolo11n.pt")
>>> model.add_callback("on_train_start", on_train_start)
>>> model.train(data="coco8.yaml", epochs=1)
"""
self.callbacks[event].append(func)
def clear_callback(self, event: str) -> None:
"""
Clears all callback functions registered for a specified event.
This method removes all custom and default callback functions associated with the given event.
It resets the callback list for the specified event to an empty list, effectively removing all
registered callbacks for that event.
Args:
event (str): The name of the event for which to clear the callbacks. This should be a valid event name
recognized by the Ultralytics callback system.
Examples:
>>> model = YOLO("yolo11n.pt")
>>> model.add_callback("on_train_start", lambda: print("Training started"))
>>> model.clear_callback("on_train_start")
>>> # All callbacks for 'on_train_start' are now removed
Notes:
- This method affects both custom callbacks added by the user and default callbacks
provided by the Ultralytics framework.
- After calling this method, no callbacks will be executed for the specified event
until new ones are added.
- Use with caution as it removes all callbacks, including essential ones that might
be required for proper functioning of certain operations.
"""
self.callbacks[event] = []
def reset_callbacks(self) -> None:
"""
Resets all callbacks to their default functions.
This method reinstates the default callback functions for all events, removing any custom callbacks that were
previously added. It iterates through all default callback events and replaces the current callbacks with the
default ones.
The default callbacks are defined in the 'callbacks.default_callbacks' dictionary, which contains predefined
functions for various events in the model's lifecycle, such as on_train_start, on_epoch_end, etc.
This method is useful when you want to revert to the original set of callbacks after making custom
modifications, ensuring consistent behavior across different runs or experiments.
Examples:
>>> model = YOLO("yolo11n.pt")
>>> model.add_callback("on_train_start", custom_function)
>>> model.reset_callbacks()
# All callbacks are now reset to their default functions
"""
for event in callbacks.default_callbacks.keys():
self.callbacks[event] = [callbacks.default_callbacks[event][0]]
@staticmethod
def _reset_ckpt_args(args: dict) -> dict:
"""
Resets specific arguments when loading a PyTorch model checkpoint.
This static method filters the input arguments dictionary to retain only a specific set of keys that are
considered important for model loading. It's used to ensure that only relevant arguments are preserved
when loading a model from a checkpoint, discarding any unnecessary or potentially conflicting settings.
Args:
args (dict): A dictionary containing various model arguments and settings.
Returns:
(dict): A new dictionary containing only the specified include keys from the input arguments.
Examples:
>>> original_args = {"imgsz": 640, "data": "coco.yaml", "task": "detect", "batch": 16, "epochs": 100}
>>> reset_args = Model._reset_ckpt_args(original_args)
>>> print(reset_args)
{'imgsz': 640, 'data': 'coco.yaml', 'task': 'detect'}
"""
include = {"imgsz", "data", "task", "single_cls"} # only remember these arguments when loading a PyTorch model
return {k: v for k, v in args.items() if k in include}
# def __getattr__(self, attr):
# """Raises error if object has no requested attribute."""
# name = self.__class__.__name__
# raise AttributeError(f"'{name}' object has no attribute '{attr}'. See valid attributes below.\n{self.__doc__}")
def _smart_load(self, key: str):
"""
Loads the appropriate module based on the model task.
This method dynamically selects and returns the correct module (model, trainer, validator, or predictor)
based on the current task of the model and the provided key. It uses the task_map attribute to determine
the correct module to load.
Args:
key (str): The type of module to load. Must be one of 'model', 'trainer', 'validator', or 'predictor'.
Returns:
(object): The loaded module corresponding to the specified key and current task.
Raises:
NotImplementedError: If the specified key is not supported for the current task.
Examples:
>>> model = Model(task="detect")
>>> predictor = model._smart_load("predictor")
>>> trainer = model._smart_load("trainer")
Notes:
- This method is typically used internally by other methods of the Model class.
- The task_map attribute should be properly initialized with the correct mappings for each task.
"""
try:
return self.task_map[self.task][key]
except Exception as e:
name = self.__class__.__name__
mode = inspect.stack()[1][3] # get the function name.
raise NotImplementedError(
emojis(f"WARNING ⚠️ '{name}' model does not support '{mode}' mode for '{self.task}' task yet.")
) from e
@property
def task_map(self) -> dict:
"""
Provides a mapping from model tasks to corresponding classes for different modes.
This property method returns a dictionary that maps each supported task (e.g., detect, segment, classify)
to a nested dictionary. The nested dictionary contains mappings for different operational modes
(model, trainer, validator, predictor) to their respective class implementations.
The mapping allows for dynamic loading of appropriate classes based on the model's task and the
desired operational mode. This facilitates a flexible and extensible architecture for handling
various tasks and modes within the Ultralytics framework.
Returns:
(Dict[str, Dict[str, Any]]): A dictionary where keys are task names (str) and values are
nested dictionaries. Each nested dictionary has keys 'model', 'trainer', 'validator', and
'predictor', mapping to their respective class implementations.
Examples:
>>> model = Model()
>>> task_map = model.task_map
>>> detect_class_map = task_map["detect"]
>>> segment_class_map = task_map["segment"]
Note:
The actual implementation of this method may vary depending on the specific tasks and
classes supported by the Ultralytics framework. The docstring provides a general
description of the expected behavior and structure.
"""
raise NotImplementedError("Please provide task map for your model!")
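# Illustrative shape of the mapping a subclass is expected to return (the class names
# shown are the Ultralytics detection-task classes; other tasks follow the same pattern):
# {
#     "detect": {
#         "model": DetectionModel,
#         "trainer": DetectionTrainer,
#         "validator": DetectionValidator,
#         "predictor": DetectionPredictor,
#     },
#     ...
# }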
def _setup_train(self, world_size):
"""Builds dataloaders and optimizer on the correct rank process."""
# Model
self.run_callbacks("on_pretrain_routine_start")
ckpt = self.setup_model()
self.model = self.model.to(self.device)
self.set_model_attributes()
# Freeze layers
freeze_list = (
self.args.freeze
if isinstance(self.args.freeze, list)
else range(self.args.freeze)
if isinstance(self.args.freeze, int)
else []
)
always_freeze_names = [".dfl"] # always freeze these layers
freeze_layer_names = [f"model.{x}." for x in freeze_list] + always_freeze_names
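# Example (freeze value assumed): with args.freeze=2, freeze_list is range(2), so
# freeze_layer_names == ["model.0.", "model.1.", ".dfl"] and any parameter whose name
# contains one of these substrings has requires_grad set to False in the loop below.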
for k, v in self.model.named_parameters():
# v.register_hook(lambda x: torch.nan_to_num(x)) # NaN to 0 (commented for erratic training results)
if any(x in k for x in freeze_layer_names):
LOGGER.info(f"Freezing layer '{k}'")
v.requires_grad = False
elif not v.requires_grad and v.dtype.is_floating_point: # only floating point Tensor can require gradients
LOGGER.info(
f"WARNING ⚠️ setting 'requires_grad=True' for frozen layer '{k}'. "
"See ultralytics.engine.trainer for customization of frozen layers."
)
v.requires_grad = True
# Check AMP
self.amp = torch.tensor(self.args.amp).to(self.device) # True or False
if self.amp and RANK in {-1, 0}: # Single-GPU and DDP
callbacks_backup = callbacks.default_callbacks.copy() # backup callbacks as check_amp() resets them
self.amp = torch.tensor(check_amp(self.model), device=self.device)
callbacks.default_callbacks = callbacks_backup # restore callbacks
if RANK > -1 and world_size > 1: # DDP
dist.broadcast(self.amp, src=0) # broadcast the tensor from rank 0 to all other ranks (returns None)
self.amp = bool(self.amp) # as boolean
self.scaler = (
torch.amp.GradScaler("cuda", enabled=self.amp) if TORCH_2_4 else torch.cuda.amp.GradScaler(enabled=self.amp)
)
if world_size > 1:
self.model = nn.parallel.DistributedDataParallel(self.model, device_ids=[RANK], find_unused_parameters=True)
# Check imgsz
gs = max(int(self.model.stride.max() if hasattr(self.model, "stride") else 32), 32) # grid size (max stride)
self.args.imgsz = check_imgsz(self.args.imgsz, stride=gs, floor=gs, max_dim=1)
self.stride = gs # for multiscale training
# Batch size
if self.batch_size < 1 and RANK == -1: # single-GPU only, estimate best batch size
self.args.batch = self.batch_size = check_train_batch_size(
model=self.model,
imgsz=self.args.imgsz,
amp=self.amp,
batch=self.batch_size,
)
# Dataloaders
batch_size = self.batch_size // max(world_size, 1)
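# Example: with self.batch_size=16 and world_size=2 (two-GPU DDP), each rank's
# dataloader below uses a per-device batch of 8; single-device runs keep the full batch.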
self.train_loader = self.get_dataloader(self.trainset, batch_size=batch_size, rank=LOCAL_RANK, mode="train")
if RANK in {-1, 0}:
# Note: When training DOTA dataset, double batch size could get OOM on images with >2000 objects.
self.test_loader = self.get_dataloader(
self.testset, batch_size=batch_size if self.args.task == "obb" else batch_size * 2, rank=-1, mode="val"
)
self.validator = self.get_validator()
metric_keys = self.validator.metrics.keys + self.label_loss_items(prefix="val")
self.metrics = dict(zip(metric_keys, [0] * len(metric_keys)))
self.ema = ModelEMA(self.model)
if self.args.plots:
self.plot_training_labels()
# Optimizer
self.accumulate = max(round(self.args.nbs / self.batch_size), 1) # accumulate loss before optimizing
weight_decay = self.args.weight_decay * self.batch_size * self.accumulate / self.args.nbs # scale weight_decay
iterations = math.ceil(len(self.train_loader.dataset) / max(self.batch_size, self.args.nbs)) * self.epochs
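# Worked example of the decay scaling above (values assumed, typical defaults): with
# weight_decay=0.0005, nbs=64 and batch_size=16, accumulate = max(round(64/16), 1) = 4
# and the effective decay stays 0.0005 * 16 * 4 / 64 = 0.0005; with batch_size=128,
# accumulate = 1 and the decay scales up to 0.0005 * 128 * 1 / 64 = 0.001.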
self.optimizer = self.build_optimizer(
model=self.model,
name=self.args.optimizer,
lr=self.args.lr0,
momentum=self.args.momentum,
decay=weight_decay,
iterations=iterations,
)
# Scheduler
self._setup_scheduler()
self.stopper, self.stop = EarlyStopping(patience=self.args.patience), False
self.resume_training(ckpt)
self.scheduler.last_epoch = self.start_epoch - 1 # do not move
self.run_callbacks("on_pretrain_routine_end")
def save_model(self):
"""Save model training checkpoints with additional metadata."""
import io
# Serialize ckpt to a byte buffer once (faster than repeated torch.save() calls)
buffer = io.BytesIO()
torch.save(
{
"epoch": self.epoch,
"best_fitness": self.best_fitness,
"model": None, # resume and final checkpoints derive from EMA
"ema": deepcopy(self.ema.ema).half(),
"updates": self.ema.updates,
"optimizer": convert_optimizer_state_dict_to_fp16(deepcopy(self.optimizer.state_dict())),
"train_args": vars(self.args), # save as dict
"train_metrics": {**self.metrics, **{"fitness": self.fitness}},
"train_results": self.read_results_csv(),
"date": datetime.now().isoformat(),
"version": __version__,
"license": "AGPL-3.0 (https://ultralytics.com/license)",
"docs": "https://docs.ultralytics.com",
},
buffer,
)
serialized_ckpt = buffer.getvalue() # get the serialized content to save
# Save checkpoints
self.last.write_bytes(serialized_ckpt) # save last.pt
if self.best_fitness == self.fitness:
self.best.write_bytes(serialized_ckpt) # save best.pt
if (self.save_period > 0) and (self.epoch % self.save_period == 0):
(self.wdir / f"epoch{self.epoch}.pt").write_bytes(serialized_ckpt) # save epoch, i.e. 'epoch3.pt'
# if self.args.close_mosaic and self.epoch == (self.epochs - self.args.close_mosaic - 1):
# (self.wdir / "last_mosaic.pt").write_bytes(serialized_ckpt) # save mosaic checkpoint
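# Illustrative sketch (assumed usage, not part of this trainer) of reading back a
# checkpoint written above; the key names match the dict serialized into the buffer:
#   import torch
#   ckpt = torch.load("last.pt", map_location="cpu", weights_only=False)  # full pickle incl. EMA module
#   print(ckpt["epoch"], ckpt["best_fitness"], ckpt["train_args"], ckpt["version"])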
def get_dataset(self):
"""
Get train and val dataset paths from the data dict, running the appropriate dataset checks for the task.
Raises RuntimeError if the dataset cannot be loaded or validated.
"""
try:
if self.args.task == "classify":
data = check_cls_dataset(self.args.data)
elif self.args.data.split(".")[-1] in {"yaml", "yml"} or self.args.task in {
"detect",
"segment",
"pose",
"obb",
}:
data = check_det_dataset(self.args.data)
if "yaml_file" in data:
self.args.data = data["yaml_file"] # for validating 'yolo train data=url.zip' usage
except Exception as e:
raise RuntimeError(emojis(f"Dataset '{clean_url(self.args.data)}' error ❌ {e}")) from e
self.data = data
return data["train"], data.get("val") or data.get("test")
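# Illustrative shape (keys assumed, values abbreviated) of the data dict built above
# for a detection-style task:
#   {"train": ".../train/images", "val": ".../val/images", "nc": 80,
#    "names": {0: "person", 1: "bicycle", ...}, "yaml_file": ".../coco8.yaml"}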
def setup_model(self):
"""Load/create/download model for any task."""
if isinstance(self.model, torch.nn.Module): # if model is loaded beforehand. No setup needed
return
cfg, weights = self.model, None
ckpt = None
if str(self.model).endswith(".pt"):
weights, ckpt = attempt_load_one_weight(self.model)
cfg = weights.yaml
elif isinstance(self.args.pretrained, (str, Path)):
weights, _ = attempt_load_one_weight(self.args.pretrained)
self.model = self.get_model(cfg=cfg, weights=weights, verbose=RANK == -1) # calls Model(cfg, weights)
return ckpt
def optimizer_step(self):
"""Perform a single step of the training optimizer with gradient clipping and EMA update."""
self.scaler.unscale_(self.optimizer) # unscale gradients
torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=10.0) # clip gradients
self.scaler.step(self.optimizer)
self.scaler.update()
self.optimizer.zero_grad()
if self.ema:
self.ema.update(self.model)
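# The method above follows the standard torch AMP update pattern (outline only; the
# backward call happens elsewhere in the training loop):
#   scaler.scale(loss).backward()   # backward on the scaled loss
#   scaler.unscale_(optimizer)      # unscale so clipping sees true gradient magnitudes
#   clip_grad_norm_(model.parameters(), max_norm=10.0)
#   scaler.step(optimizer); scaler.update(); optimizer.zero_grad()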
def preprocess_batch(self, batch):
"""Allows custom preprocessing model inputs and ground truths depending on task type."""
return batch
def validate(self):
"""
Runs validation on the test set using self.validator.
The returned metrics dict is expected to contain a "fitness" key.
"""
metrics = self.validator(self)
fitness = metrics.pop("fitness", -self.loss.detach().cpu().numpy()) # use loss as fitness measure if not found
if not self.best_fitness or self.best_fitness < fitness:
self.best_fitness = fitness
return metrics, fitness
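# Note: if the validator's metrics dict has no "fitness" key, the negative detached
# training loss is used in its place above, so a lower loss still ranks as better fitness.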
def get_model(self, cfg=None, weights=None, verbose=True):
"""Get model and raise NotImplementedError for loading cfg files."""
raise NotImplementedError("This task trainer doesn't support loading cfg files")
def get_validator(self):
"""Returns a NotImplementedError when the get_validator function is called."""
raise NotImplementedError("get_validator function not implemented in trainer")
def get_dataloader(self, dataset_path, batch_size=16, rank=0, mode="train"):
"""Returns dataloader derived from torch.data.Dataloader."""
raise NotImplementedError("get_dataloader function not implemented in trainer")
def build_dataset(self, img_path, mode="train", batch=None):
"""Build dataset."""
raise NotImplementedError("build_dataset function not implemented in trainer")
def label_loss_items(self, loss_items=None, prefix="train"):
"""
Returns a loss dict with labelled training loss items tensor.
Note:
This is not needed for classification but necessary for segmentation & detection
"""
return {"loss": loss_items} if loss_items is not None else ["loss"]
def set_model_attributes(self):
"""To set or update model parameters before training."""
self.model.names = self.data["names"]
def build_targets(self, preds, targets):
"""Builds target tensors for training YOLO model."""
pass
def progress_string(self):
"""Returns a string describing training progress."""
return ""
# TODO: may need to put these following functions into callback
def plot_training_samples(self, batch, ni):
"""Plots training samples during YOLO training."""
pass
def plot_training_labels(self):
"""Plots training labels for YOLO model."""
pass
def save_metrics(self, metrics):
"""Saves training metrics to a CSV file."""
keys, vals = list(metrics.keys()), list(metrics.values())
n = len(metrics) + 2 # number of cols
s = "" if self.csv.exists() else (("%s," * n % tuple(["epoch", "time"] + keys)).rstrip(",") + "\n") # header
t = time.time() - self.train_time_start
with open(self.csv, "a") as f:
f.write(s + ("%.6g," * n % tuple([self.epoch + 1, t] + vals)).rstrip(",") + "\n")
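# Illustrative output (metric name assumed): with metrics={"metrics/mAP50(B)": 0.45},
# the first call writes the header "epoch,time,metrics/mAP50(B)" and then rows such as
# "1,12.3,0.45", where epoch is 1-based and time is seconds since training started.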
def plot_metrics(self):
"""Plot and display metrics visually."""
pass
def on_plot(self, name, data=None):
"""Registers plots (e.g. to be consumed in callbacks)."""
path = Path(name)
self.plots[path] = {"data": data, "timestamp": time.time()}
def final_eval(self):
"""Performs final evaluation and validation for object detection YOLO model."""
ckpt = {}
for f in self.last, self.best:
if f.exists():
if f is self.last:
ckpt = strip_optimizer(f)
elif f is self.best:
k = "train_results" # update best.pt train_metrics from last.pt
strip_optimizer(f, updates={k: ckpt[k]} if k in ckpt else None)
LOGGER.info(f"\nValidating {f}...")
self.validator.args.plots = self.args.plots
self.metrics = self.validator(model=f)
self.metrics.pop("fitness", None)
self.run_callbacks("on_fit_epoch_end")
def check_resume(self, overrides):
"""Check if resume checkpoint exists and update arguments accordingly."""
resume = self.args.resume
if resume:
try:
exists = isinstance(resume, (str, Path)) and Path(resume).exists()
last = Path(check_file(resume) if exists else get_latest_run())
# Check that resume data YAML exists, otherwise strip to force re-download of dataset
ckpt_args = attempt_load_weights(last).args
if not Path(ckpt_args["data"]).exists():
ckpt_args["data"] = self.args.data
resume = True
self.args = get_cfg(ckpt_args)
self.args.model = self.args.resume = str(last) # reinstate model
for k in (
"imgsz",
"batch",
"device",
"close_mosaic",
): # allow arg updates to reduce memory or update device on resume
if k in overrides:
setattr(self.args, k, overrides[k])
except Exception as e:
raise FileNotFoundError(
"Resume checkpoint not found. Please pass a valid checkpoint to resume from, "
"i.e. 'yolo train resume model=path/to/last.pt'"
) from e
self.resume = resume