verl__base_config.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from dataclasses import FrozenInstanceError, dataclass, fields
from typing import Any
# BaseConfig class inherits from collections.abc.Mapping, which means it can act like a dictionary
@dataclass
class BaseConfig(collections.abc.Mapping):
"""The BaseConfig provides dict-like interface for a dataclass config.
By default all fields in the config is not mutable, unless specified in
"_mutable_fields". The BaseConfig class implements the Mapping Abstract Base Class.
This allows instances of this class to be used like dictionaries.
"""
_mutable_fields = set()
_target_: str = ""
def __setattr__(self, name: str, value):
"""Set the value of an attribute. Check if the attr is mutable before setting the value."""
# If the field already exists, it's considered frozen unless it's in _mutable_fields
if name in self.__dict__ and name not in getattr(self, "_mutable_fields", set()):
raise FrozenInstanceError(f"Field '{name}' is frozen and cannot be modified")
super().__setattr__(name, value)
def get(self, key: str, default: Any = None) -> Any:
"""Get the value associated with the given key. If the key does not exist, return the default value.
Args:
key (str): The attribute name to retrieve.
default (Any, optional): The value to return if the attribute does not exist. Defaults to None.
Returns:
Any: The value of the attribute or the default value.
"""
try:
return getattr(self, key)
except AttributeError:
return default
def __getitem__(self, key: str):
"""Implement the [] operator for the class. Allows accessing attributes like dictionary items.
Args:
key (str): The attribute name to retrieve.
Returns:
Any: The value of the attribute.
Raises:
AttributeError: If the attribute does not exist.
TypeError: If the key type is not string
"""
return getattr(self, key)
def __iter__(self):
"""Implement the iterator protocol. Allows iterating over the attribute names of the instance.
Yields:
str: The name of each field in the dataclass.
"""
for f in fields(self):
yield f.name
def __len__(self):
"""
Return the number of fields in the dataclass.
Returns:
int: The number of fields in the dataclass.
"""
return len(fields(self))
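# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of the dict-like and frozen-field behavior above.
# `ExampleConfig` and its fields are hypothetical, defined only for demonstration.
if __name__ == "__main__":
    @dataclass
    class ExampleConfig(BaseConfig):
        _mutable_fields = {"lr"}
        lr: float = 1e-3
        batch_size: int = 8

    cfg = ExampleConfig()
    assert cfg["batch_size"] == 8            # __getitem__: dict-style access
    assert cfg.get("missing", 42) == 42      # get() falls back to the default
    assert set(cfg) == {"_target_", "lr", "batch_size"}  # __iter__ yields field names
    assert len(cfg) == 3                     # __len__ counts dataclass fields
    cfg.lr = 5e-4                            # allowed: "lr" is listed in _mutable_fields
    try:
        cfg.batch_size = 16                  # frozen: not in _mutable_fields
    except FrozenInstanceError:
        pass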
|
verl__checkpoint_engine__base.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
from abc import ABC, abstractmethod
from typing import Any, Generator, TypedDict
import ray
import torch
from verl.single_controller.base import Worker
from verl.single_controller.base.decorator import Dispatch, register
from verl.single_controller.ray import RayClassWithInitArgs, RayWorkerGroup
from verl.utils.distributed import initialize_global_process_group_ray
from verl.utils.ray_utils import auto_await
from verl.workers.config import HFModelConfig, RolloutConfig
from verl.workers.rollout import BaseRollout, RolloutReplica, get_rollout_class
class TensorMeta(TypedDict):
name: str
shape: torch.Size
dtype: torch.dtype
offset: int
class CheckpointEngineRegistry:
"""Checkpoint engine registry."""
_registry: dict[str, type["CheckpointEngine"]] = {}
def register(backend: str):
"""Register a checkpoint engine.
Args:
backend: The backend of the checkpoint engine.
"""
def wrapper(cls: type["CheckpointEngine"]):
CheckpointEngineRegistry._registry[backend] = cls
return cls
return wrapper
@classmethod
def get(cls, backend: str) -> type["CheckpointEngine"]:
"""Get the checkpoint engine class.
Args:
backend: The backend of the checkpoint engine.
Returns:
The checkpoint engine class.
"""
return cls._registry[backend]
@classmethod
def new(cls, backend: str, *args, **kwargs) -> "CheckpointEngine":
"""Create a new checkpoint engine instance.
Args:
backend: The backend of the checkpoint engine.
*args: Variable length arguments passed to the checkpoint engine constructor.
**kwargs: Arbitrary keyword arguments passed to the checkpoint engine constructor.
Returns:
A new checkpoint engine instance.
"""
if backend not in cls._registry:
raise ValueError(f"Checkpoint engine {backend} not registered")
return cls._registry[backend](*args, **kwargs)
class CheckpointEngine(ABC):
"""CheckpointEngine is an abstraction to transfer weights from trainer to rollout.
In trainer process:
>>> trainer = EngineRegistry.new(...) # FSDP, Megatron, VeOmini, TorchTitan, ...
>>> engine = CheckpointEngine.new(...) # NCCLCheckpointEngine, NIXLCheckpointEngine, ...
>>> await engine.send_weights(trainer.get_per_tensor_param())
In rollout process:
>>> engine = CheckpointEngine.new(...)
>>> server_adapter = ServerAdapter()
>>> await server_adapter.update_weights(engine.get_weights()) # update weights via cuda ipc
"""
@abstractmethod
def prepare(self) -> dict[str, Any]:
"""Prepare checkpoint engine before each step send_weights/receive_weights.
1. Allocate weight bucket.
2. [Optional] Register weight bucket for RDMA.
3. Return metadata to build communication topology: master ip:port, register RDMA description, etc.
Args:
worker_group: The worker group that the checkpoint engine will be used.
Returns:
A dictionary that contains the metadata of the worker group.
"""
raise NotImplementedError
@classmethod
@abstractmethod
def build_topology(
cls, trainer_world_size: int, rollout_world_size: int, metadata: list[dict]
) -> tuple[dict[str, list[Any]], dict[str, list[Any]]]:
"""Build communication topology between all workers.
Args:
trainer_world_size: The world size of the trainer worker group.
rollout_world_size: The world size of the rollout replica.
metadata: A list of the metadata returned by `prepare` from all workers.
Returns:
A tuple of two dictionaries that contain the communication topology for the trainer and rollout worker groups.
Each dict value should be a list whose length equals the world size of the corresponding worker group; the
entries are dispatched to `init_process_group`.
```
world_size = rollout.world_size + trainer.world_size
kwargs = {
"rank": list(range(world_size)),
"world_size": [world_size] * world_size,
"master_metadata": [metadata[0]] * world_size,
}
```
"""
raise NotImplementedError
@abstractmethod
def init_process_group(self, **kwargs):
"""Init process group for checkpoint engine.
Args:
**kwargs: Keyword arguments from `build_topology`.
"""
raise NotImplementedError
@abstractmethod
def finalize(self):
"""Finalize checkpoint engine after each step send_weights/receive_weights.
1. Free weight bucket.
1. [Optional] Deregister weight bucket for RDMA.
2. [Optional] Destroy process group.
"""
raise NotImplementedError
@abstractmethod
async def send_weights(self, weights: Generator[tuple[str, torch.Tensor], None, None]):
"""Send the weights of the model.
Args:
weights: A generator that yields the name of the weight tensor and the tensor itself.
"""
raise NotImplementedError
@abstractmethod
async def receive_weights(self) -> Generator[tuple[str, torch.Tensor], None, None]:
"""Receive the weights of the model.
Yields:
A tuple of the name of the weight tensor and the tensor itself.
"""
raise NotImplementedError
class CheckpointEngineWithCache(CheckpointEngine):
"""Checkpoint engine with local cache: shm, disk, etc. This allow to synchronize weights without interrupting
rollout ongoing requests (partial rollout). After requests exhausted, rollout can get weights from local cache.
Laminar: https://arxiv.org/abs/2510.12633
"""
@abstractmethod
async def get_weights(self) -> Generator[tuple[str, torch.Tensor], None, None]:
"""Get the weights of the model from local cache.
Yields:
A tuple of the name of the weight tensor and the tensor itself.
"""
raise NotImplementedError
@CheckpointEngineRegistry.register("naive")
class ColocatedCheckpointEngine(CheckpointEngine):
"""Checkpoint engine for trainer and rollout colocated on same GPU.
In trainer process:
>>> engine = ColocatedCheckpointEngine()
>>> trainer = Trainer()
>>> server_adapter = ServerAdapter()
>>> engine.send_weights(trainer.get_per_tensor_param())
>>> server_adapter.update_weights(engine.receive_weights())
"""
def __init__(self, bucket_size: int, is_master: bool = False) -> None:
self.bucket_size = bucket_size
self.is_master = is_master
def prepare(self):
raise NotImplementedError
def init_process_group(self, **kwargs):
raise NotImplementedError
def finalize(self):
raise NotImplementedError
@classmethod
def build_topology(cls, *args, **kwargs):
raise NotImplementedError
def send_weights(self, weights: Generator[tuple[str, torch.Tensor], None, None]):
"""Send the weights of the model.
Args:
weights: A generator that yields the name of the weight tensor and the tensor itself.
"""
self.weights = weights
def receive_weights(self) -> Generator[tuple[str, torch.Tensor], None, None]:
"""Receive the weights of the model.
Yields:
A tuple of the name of the weight tensor and the tensor itself.
"""
yield from self.weights
self.weights = None
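# --- Illustrative usage sketch (not part of the original module) ---
# Shows how the registry resolves a backend name and how the colocated ("naive")
# engine hands a weight generator from the trainer side to the rollout side
# within the same process. The tiny "model" below is hypothetical.
def _example_colocated_weight_sync():
    # bucket_size is unused by the colocated engine; 1 MiB is an arbitrary value.
    engine = CheckpointEngineRegistry.new("naive", bucket_size=1 << 20)
    assert isinstance(engine, CheckpointEngineRegistry.get("naive"))

    def fake_weights():
        yield "layer.weight", torch.zeros(4, 4)
        yield "layer.bias", torch.zeros(4)

    engine.send_weights(fake_weights())        # trainer side: stash the generator
    received = dict(engine.receive_weights())  # rollout side: drain it once
    assert set(received) == {"layer.weight", "layer.bias"}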
class CheckpointEngineWorker(Worker):
"""CheckpointEngineWorker colocated with inference engine's WorkerProc on same GPU.
Args:
rollout_config: The rollout configuration.
model_config: The model configuration.
server_adapter: The server adapter to update weights.
"""
def __init__(
self,
rollout_config: RolloutConfig,
model_config: HFModelConfig,
server_adapter: BaseRollout = None,
) -> None:
self.rollout_config = rollout_config
self.model_config = model_config
# sglang and trt-llm need device_mesh for internal communication
initialize_global_process_group_ray(timeout_second=None, backend="cpu:gloo")
self.server_adapter: BaseRollout = server_adapter or get_rollout_class(
rollout_config.name, rollout_config.mode
)(config=rollout_config, model_config=model_config, device_mesh=None)
backend = rollout_config.checkpoint_engine.backend
bucket_size = rollout_config.checkpoint_engine.update_weights_bucket_megabytes << 20
engine_kwargs = rollout_config.checkpoint_engine.engine_kwargs.get(backend, {})
self.checkpoint_engine = CheckpointEngineRegistry.new(backend, bucket_size=bucket_size, **engine_kwargs)
@register(dispatch_mode=Dispatch.ONE_TO_ALL, blocking=False)
async def update_weights(self):
weights = self.checkpoint_engine.receive_weights()
await self.server_adapter.update_weights(weights)
@register(dispatch_mode=Dispatch.DP_COMPUTE, blocking=False)
def execute_checkpoint_engine(self, method: str, *args, **kwargs):
return getattr(self.checkpoint_engine, method)(*args, **kwargs)
_worker_cls = ray.remote(CheckpointEngineWorker)
class CheckpointEngineManager:
"""Checkpoint engine manager to coordinate weight synchronization between trainer and rollout replicas.
- ME: model engine, FSDP, MCore, VeOmni, export full tensor generator `get_per_tensor_param`
- CE: checkpoint engine, NCCL, NIXL, etc
In trainer, model engine and checkpoint engine are in same process.
In rollout, checkpoint engine and rollout worker are in separate process, update weights via cuda ipc.
```
┌────────┬────────┬─────┬────────┐ ┌───────────────────┬───────────────────┐
│ ┌────┐ │ ┌────┐ │ │ ┌────┐ │ │ Replica 0 │ Replica 1 │
│ │ ME0│ │ │ ME1│ │ │ │ MEn│ │ ├────┬────┬────┬────┼────┬────┬────┬────┤
│ └──┬─┘ │ └────┘ │ ... │ └────┘ │ │ 0 │ 1 │ 2 │ 3 │ 0 │ 1 │ 2 │ 3 │
│ v | | | | └──┬─┴──┬─┴──┬─┴──┬─┴──┬─┴──┬─┴──┬─┴──┬─┘
| ┌──┴─┐ │ ┌────┐ │ │ ┌────┐ │ ^ ^ ^ cuda ipc ^ ^ ^
│ │ CE │ │ │ CE │ │ │ │ CE │ │ ┌──┴─┬──┴─┬──┴─┬──┴─┬──┴─┬──┴─┬──┴─┬──┴─┐
│ └──┬─┘ │ └────┘ │ │ └────┘ │ │ CE │ CE │ CE │ CE │ CE │ CE │ CE │ CE |
└────┼───┴────────┴─────┴────────┘ └──┬─┴──┬─┴──┬─┴──┬─┴──┬─┴──┬─┴──┬─┴──┬─┘
v | | | | | | | |
└─────────────(nccl/nixl/..)─────────────┴────┴────┴────┴────┴────┴────┴────┘
```
Args:
backend: The checkpoint engine backend.
trainer: The trainer worker group.
replicas: The list of rollout replicas.
"""
def __init__(
self,
backend: str,
trainer: RayWorkerGroup,
replicas: list[RolloutReplica],
) -> None:
self.backend = backend
self.backend_cls = CheckpointEngineRegistry.get(backend)
self.trainer = trainer
self.replicas = replicas
def build_process_group(self, rollout: RayWorkerGroup):
"""Build process group for trainer and rollout replicas."""
trainer = self.trainer
# 1. prepare all workers
metadata = ray.get(
trainer.execute_checkpoint_engine(["prepare"] * trainer.world_size)
+ rollout.execute_checkpoint_engine(["prepare"] * rollout.world_size)
)
# 2. build communication topology between all workers
trainer_kwargs, rollout_kwargs = self.backend_cls.build_topology(
trainer.world_size, rollout.world_size, metadata
)
for k, v in trainer_kwargs.items():
assert len(v) == trainer.world_size, f"trainer_kwargs[{k}] must have length of {trainer.world_size}"
for k, v in rollout_kwargs.items():
assert len(v) == rollout.world_size, f"rollout_kwargs[{k}] must have length of {rollout.world_size}"
trainer_kwargs["method"] = ["init_process_group"] * trainer.world_size
rollout_kwargs["method"] = ["init_process_group"] * rollout.world_size
# 3. init process group between all workers
ray.get(
trainer.execute_checkpoint_engine(**trainer_kwargs) + rollout.execute_checkpoint_engine(**rollout_kwargs)
)
def add_replicas(self, replicas: list[RolloutReplica]):
"""Add rollout replicas to the manager for elastic scale up, will rebuild process group.
Args:
replicas: The list of rollout replicas to add.
"""
self.replicas.extend(replicas)
def remove_replicas(self, replicas: list[RolloutReplica]):
"""Remove rollout replicas from the manager for elastic scale down, will rebuild process group.
Args:
replicas: The list of rollout replicas to remove.
"""
replicas_set = set(replicas)
self.replicas = [r for r in self.replicas if r not in replicas_set]
@auto_await
async def sleep_replicas(self):
"""Sleep all rollout replicas: free weight and kv_cache device memory."""
# skip sleep replicas for disaggregated rollout
if self.backend != "naive":
return
await asyncio.gather(*[r.sleep() for r in self.replicas])
@auto_await
async def update_weights(self):
"""Update weights from trainer to rollout replicas."""
# 0. update weights for sync training with colocated trainer and rollout
if self.backend == "naive":
ray.get(self.trainer.update_weights())
return
# 1. abort and save all unfinished requests for partial rollout
await asyncio.gather(*[r.abort_all_requests() for r in self.replicas])
# 2. create a temporary worker group for all replicas
workers = []
for replica in self.replicas:
workers.extend(replica.workers)
rollout = RayWorkerGroup(worker_handles=workers, ray_cls_with_init=RayClassWithInitArgs(cls=_worker_cls))
trainer = self.trainer
# 3. build process group
self.build_process_group(rollout)
# 4. update weights of all workers
ray.get(trainer.update_weights() + rollout.update_weights())
# 5. finalize all workers
ray.get(
trainer.execute_checkpoint_engine(["finalize"] * trainer.world_size)
+ rollout.execute_checkpoint_engine(["finalize"] * rollout.world_size)
)
# 6. resume all unfinished requests for partial rollout
await asyncio.gather(*[r.resume_all_requests() for r in self.replicas])
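# --- Illustrative driver-side sketch (not part of the original module) ---
# Outlines the per-step call sequence a trainer driver would make, assuming a
# trainer RayWorkerGroup and rollout replicas were built elsewhere. The argument
# names `trainer_wg` and `replicas` are placeholders; nothing runs at import time.
def _example_weight_sync_step(trainer_wg: RayWorkerGroup, replicas: list[RolloutReplica]):
    manager = CheckpointEngineManager(backend="nixl", trainer=trainer_wg, replicas=replicas)
    # Push the latest trainer weights to every rollout replica. With a
    # disaggregated backend this aborts in-flight requests, builds a temporary
    # worker group and process group, streams the weights, then resumes requests.
    manager.update_weights()
    # With the colocated "naive" backend, sleep_replicas() would additionally free
    # rollout weight/kv-cache memory; for other backends it returns immediately.
    manager.sleep_replicas()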
|
verl__checkpoint_engine__nixl_checkpoint_engine.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import logging
import os
import time
import uuid
from collections import defaultdict, deque
from dataclasses import dataclass
from typing import AsyncGenerator, Generator
from unittest.mock import patch
with patch("importlib.metadata.distributions", return_value=[]):
import cupy as cp
import nixl._api as nixl_api
import nixl._bindings as nixl_bindings
import ray
import torch
import zmq
import zmq.asyncio
from verl.checkpoint_engine.base import CheckpointEngine, CheckpointEngineRegistry, TensorMeta
from verl.utils.net_utils import get_free_port, is_valid_ipv6_address
logger = logging.getLogger(__name__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
@dataclass
class NixlAgentMetadata:
agent_name: str
agent_metadata: bytes
zmq_ip: str
zmq_port: int
class NixlAgent:
"""This is a wrapper class for nixl_agent, the main purpose is to use ZeroMQ instead of
`nixl_agent.send_notif` to send bucket tensor metadata.
"""
def __init__(self):
self.agent_name = str(uuid.uuid4())
self.agent = nixl_api.nixl_agent(self.agent_name)
self.notifications: dict[str, deque[bytes]] = defaultdict(deque)
self.start_zmq_server()
self.zmq_clients: dict[str, zmq.Socket] = {}
self.messages: dict[str, deque[bytes]] = defaultdict(deque)
def __getattr__(self, name):
attr = getattr(self.agent, name)
if callable(attr):
def wrapper(*args, **kwargs):
return attr(*args, **kwargs)
return wrapper
else:
return attr
def get_agent_metadata(self) -> NixlAgentMetadata:
return NixlAgentMetadata(
agent_name=self.agent_name,
agent_metadata=self.agent.get_agent_metadata(),
zmq_ip=self.ip,
zmq_port=self.listen_port,
)
def start_zmq_server(self):
self.ip = ray.util.get_node_ip_address().strip("[]")
self.listen_port, self.listen_sock = get_free_port(self.ip)
context = zmq.asyncio.Context()
self.socket = context.socket(zmq.PULL)
if is_valid_ipv6_address(self.ip):
address = f"tcp://[{self.ip}]:{self.listen_port}"
self.socket.setsockopt(zmq.IPV6, 1)
else:
address = f"tcp://{self.ip}:{self.listen_port}"
self.socket.bind(address)
def add_remote_agent(self, metadata: NixlAgentMetadata) -> str:
agent_name = self.agent.add_remote_agent(metadata.agent_metadata).decode("utf-8")
assert agent_name == metadata.agent_name, f"Agent name {agent_name} not equal to {metadata.agent_name}"
context = zmq.Context()
socket = context.socket(zmq.PUSH)
if is_valid_ipv6_address(metadata.zmq_ip):
address = f"tcp://[{metadata.zmq_ip}]:{metadata.zmq_port}"
socket.setsockopt(zmq.IPV6, 1)
else:
address = f"tcp://{metadata.zmq_ip}:{metadata.zmq_port}"
socket.connect(address)
self.zmq_clients[agent_name] = socket
return agent_name
def remove_remote_agent(self, agent_name: str):
self.agent.remove_remote_agent(agent_name)
socket = self.zmq_clients.pop(agent_name)
socket.close()
def send_message(self, agent_name, message: dict):
socket = self.zmq_clients[agent_name]
socket.send_pyobj((self.agent_name, message), zmq.DONTWAIT)
async def read_message(self, agent_name: str) -> dict:
while len(self.messages[agent_name]) == 0:
recv_agent_name, message = await self.socket.recv_pyobj()
self.messages[recv_agent_name].append(message)
return self.messages[agent_name].popleft()
async def get_notification(self, remote_name: str) -> bytes:
while len(self.notifications[remote_name]) == 0:
notifs = self.agent.get_new_notifs()
# Use a separate loop variable so the requested `remote_name` is not shadowed.
for notif_agent_name, notif in notifs.items():
    self.notifications[notif_agent_name].extend(notif)
await asyncio.sleep(0)
return self.notifications[remote_name].popleft()
class ReadableOperation:
"""Encapsulates a readable operation to remote agent.
1. send metadata to remote agent
2. wait until remote agent read complete.
Args:
agent (NixlAgent): The Nixl agent.
remote_agent (str): The name of the remote agent.
local_descs (nixl_bindings.nixlXferDList): The local transfer descriptors.
metadata (dict): Metadata for the read operation.
bucket_size (int): The size of the bucket in bytes.
"""
def __init__(
self,
agent: NixlAgent,
remote_agent: str,
local_descs: nixl_bindings.nixlXferDList,
metadata: dict,
):
self.agent = agent
self.remote_agent = remote_agent
self.local_descs = local_descs
self.notify_key = uuid.uuid4().bytes
message = {"notify_key": self.notify_key, "remote_descs": self.local_descs, **metadata}
self.agent.send_message(self.remote_agent, message)
async def wait_for_complete(self):
"""Block until remote agent read complete."""
notification = await self.agent.get_notification(self.remote_agent)
assert self.notify_key == notification, f"Notify key {self.notify_key} not equal to {notification}"
logger.debug(f"ReadableOperation to {self.remote_agent} complete")
class ReadOperation:
"""Encapsulates a read operation from remote agent.
1. Read metadata from the remote agent.
2. Start the read transfer operation.
3. Wait until the read completes.
Args:
agent (NixlAgent): The Nixl agent.
remote_agent (str): The name of the remote agent.
local_descs (nixl_bindings.nixlXferDList): The local transfer descriptors.
bucket_size (int): The size of the bucket in bytes.
"""
def __init__(self, agent: NixlAgent, remote_agent: str, local_descs: nixl_bindings.nixlXferDList, bucket_size: int):
self.agent = agent
self.remote_agent = remote_agent
self.local_descs = local_descs
self.remote_descs = None
self.xfer_handle = None
self.notify_key = None
self.bucket_size = bucket_size
self.start_time = None
async def read_metadata(self) -> dict:
"""Block until the remote agent sends the metadata.
Returns:
dict: Metadata from the remote agent.
"""
metadata = await self.agent.read_message(self.remote_agent)
self.remote_descs = metadata.pop("remote_descs")
self.notify_key = metadata.pop("notify_key")
return metadata
def begin_read(self):
"""Start the read operation."""
assert self.remote_descs is not None and self.notify_key is not None
self.xfer_handle = self.agent.initialize_xfer(
"READ", self.local_descs, self.remote_descs, self.remote_agent, self.notify_key
)
state = self.agent.transfer(self.xfer_handle)
assert state != "ERR", f"Read from {self.remote_agent} got to {state} state."
self.start_time = time.time()
async def wait_for_complete(self):
"""Block until the read operation complete."""
while True:
state = self.agent.check_xfer_state(self.xfer_handle)
if state == "ERR":
logger.error(f"Read from {self.remote_agent} got to {state} state.")
exit(-1)
elif state == "DONE":
break
else:
await asyncio.sleep(0)
self.agent.release_xfer_handle(self.xfer_handle)
end_time = time.time()
bandwidth = self.bucket_size / (end_time - self.start_time) / (1024 * 1024 * 1024)
logger.debug(f"ReadOperation read data from {self.remote_agent} complete, bandwidth: {bandwidth:.2f} GB/s")
@CheckpointEngineRegistry.register("nixl")
class NIXLCheckpointEngine(CheckpointEngine):
"""NIXL checkpoint engine with p2p communication, support various backends: ucx, uccl, mooncacke, etc.
For UCX backend, some environment variables need to be set: UCX_TLS, UCX_IB_GID_INDEX, UCX_IB_DEVICES, etc.
Please refer to: https://openucx.readthedocs.io/en/master/faq.html
Args:
bucket_size (int): Bucket size in bytes used to transfer multiple weights at one time. Note that we use
two buffers to send and recv weights at the same time, so the device memory overhead is 2 * bucket_size.
device (str): The device to use for the checkpoint engine, "cpu" or "cuda".
rollout_dtype (torch.dtype): The dtype of the weights received from rollout workers. Defaults to torch.bfloat16.
"""
def __init__(
self,
bucket_size: int,
device: str = "cuda",
rollout_dtype: torch.dtype = torch.bfloat16,
is_master: bool = False,
):
self.bucket_size = bucket_size
self.device = device
self.rollout_dtype = rollout_dtype
self.agent = NixlAgent()
self.is_master = is_master
def prepare(self) -> NixlAgentMetadata:
"""Prepare send and recv bucket.
Returns:
NixlAgentMetadata: The metadata of the current nixl agent.
"""
# For master process, use cupy instead of torch to avoid memory register error
# when `PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True`.
if self.device == "cuda":
send_buf = cp.zeros(self.bucket_size, dtype=cp.uint8)
recv_buf = cp.zeros(self.bucket_size, dtype=cp.uint8)
self.send_buf = torch.as_tensor(send_buf, dtype=torch.uint8)
self.recv_buf = torch.as_tensor(recv_buf, dtype=torch.uint8)
else:
self.send_buf = torch.zeros(self.bucket_size, dtype=torch.uint8, device=self.device, pin_memory=True)
self.recv_buf = torch.zeros(self.bucket_size, dtype=torch.uint8, device=self.device, pin_memory=True)
self.send_reg_descs = self.agent.register_memory(self.send_buf)
self.recv_reg_descs = self.agent.register_memory(self.recv_buf)
self.send_descs = self.agent.get_xfer_descs(self.send_buf)
self.recv_descs = self.agent.get_xfer_descs(self.recv_buf)
return self.agent.get_agent_metadata()
@classmethod
def build_topology(cls, trainer_world_size: int, rollout_world_size: int, metadata: list[dict]):
trainer_kwargs = {
"method": ["init_process_group"] * trainer_world_size,
"rank": [0] + [-1] * (trainer_world_size - 1),
"world_size": [rollout_world_size + 1] * trainer_world_size,
"prev_agent_metadata": [None] * trainer_world_size,
"next_agent_metadata": [metadata[-rollout_world_size]] + [None] * (trainer_world_size - 1),
}
rollout_kwargs = {
"method": ["init_process_group"] * rollout_world_size,
"rank": list(range(1, rollout_world_size + 1)),
"world_size": [rollout_world_size + 1] * rollout_world_size,
"prev_agent_metadata": [metadata[0]] + metadata[-rollout_world_size:-1],
"next_agent_metadata": metadata[-rollout_world_size + 1 :] + [None],
}
return trainer_kwargs, rollout_kwargs
def init_process_group(
self, rank: int, world_size: int, prev_agent_metadata: NixlAgentMetadata, next_agent_metadata: NixlAgentMetadata
):
"""Setup the communication with the previous and next agent.
Args:
rank (int): The rank of the current process.
world_size (int): The total number of processes.
prev_agent_metadata (NixlAgentMetadata): The metadata of the previous nixl agent.
next_agent_metadata (NixlAgentMetadata): The metadata of the next nixl agent.
"""
if rank < 0:
assert not prev_agent_metadata and not next_agent_metadata, (
f"rank {rank} should not have prev_agent_metadata or next_agent_metadata"
)
elif rank == 0:
assert not prev_agent_metadata and next_agent_metadata, f"rank {rank} should have next_agent_metadata"
elif 0 < rank < world_size - 1:
assert prev_agent_metadata and next_agent_metadata, (
f"rank {rank} should have prev_agent_metadata and next_agent_metadata"
)
elif rank == world_size - 1:
assert prev_agent_metadata and not next_agent_metadata, (
f"rank {rank} should have prev_agent_metadata and not next_agent_metadata"
)
self.rank = rank
self.world_size = world_size
self.prev_agent = None
self.next_agent = None
if prev_agent_metadata is not None:
self.prev_agent = self.agent.add_remote_agent(prev_agent_metadata)
if next_agent_metadata is not None:
self.next_agent = self.agent.add_remote_agent(next_agent_metadata)
logger.info(
f"init_process_group rank: {self.rank}, world_size: {self.world_size}, "
f"prev_agent: {self.prev_agent}, next_agent: {self.next_agent}"
)
def finalize(self):
"""Cleanup communication with the previous and next agent, and deregister the memory."""
if self.prev_agent:
self.agent.remove_remote_agent(self.prev_agent)
if self.next_agent:
self.agent.remove_remote_agent(self.next_agent)
self.agent.deregister_memory(self.send_reg_descs)
self.agent.deregister_memory(self.recv_reg_descs)
self.send_buf = None
self.recv_buf = None
self.send_reg_descs = None
self.recv_reg_descs = None
self.send_descs = None
self.recv_descs = None
self.rank = None
self.world_size = None
self.prev_agent = None
self.next_agent = None
@torch.no_grad()
async def send_weights(self, weights: Generator[tuple[str, torch.Tensor], None, None]):
"""Send the weights of the model.
Args:
weights: A generator that yields the name of the weight tensor and the tensor itself.
"""
assert self.rank <= 0, "Trainer workers other than rank 0 should not send weights."
# For trainer workers other than rank 0, just consume weights and do nothing.
if self.rank < 0:
for name, weight in weights:
pass
return
assert self.next_agent is not None, "Next agent is not set."
send_buf, recv_buf = self.send_buf, self.recv_buf
send_descs, recv_descs = self.send_descs, self.recv_descs
readable_op = None
start_time = time.time()
bucket_meta: dict[str, TensorMeta] = {}
offset = 0
for name, weight in weights:
# fill the tensor bucket
if offset + weight.nbytes > self.bucket_size:
torch.cuda.synchronize()
# wait previous bucket to be received
if readable_op is not None:
await readable_op.wait_for_complete()
# send bucket meta to next agent
readable_op = ReadableOperation(
self.agent,
self.next_agent,
send_descs,
{"bucket_meta": bucket_meta, "is_last": False},
)
# swap send and recv buf
send_buf, recv_buf = recv_buf, send_buf
send_descs, recv_descs = recv_descs, send_descs
bucket_meta = {}
offset = 0
assert offset + weight.nbytes <= self.bucket_size, (
f"Weight {name}({weight.shape}, {weight.dtype}) is too large to fit in the bucket."
)
bucket_meta[name] = {
"name": name,
"shape": weight.shape,
"dtype": weight.dtype,
"offset": offset,
}
send_buf[offset : offset + weight.nbytes].copy_(weight.view(-1).view(torch.uint8), non_blocking=True)
offset += weight.nbytes
# send last bucket meta to next agent
torch.cuda.synchronize()
if readable_op is not None:
await readable_op.wait_for_complete()
readable_op = ReadableOperation(
self.agent, self.next_agent, send_descs, {"bucket_meta": bucket_meta, "is_last": True}
)
await readable_op.wait_for_complete()
logger.info(f"Rank {self.rank} send weights done, time cost: {time.time() - start_time:.2f}s")
@torch.no_grad()
async def receive_weights(self) -> AsyncGenerator[tuple[str, torch.Tensor], None]:
"""Receive the weights of the model.
Yields:
A tuple of the name of the weight tensor and the tensor itself.
"""
assert self.prev_agent is not None, "Previous agent is not set."
send_buf, recv_buf = self.send_buf, self.recv_buf
send_descs, recv_descs = self.send_descs, self.recv_descs
total_bytes, total_params = 0, 0
# receive first bucket from previous agent
start_time = time.time()
read_op = ReadOperation(self.agent, self.prev_agent, recv_descs, self.bucket_size)
metadata = await read_op.read_metadata()
read_op.begin_read()
await read_op.wait_for_complete()
total_bytes += self.bucket_size
total_params += len(metadata["bucket_meta"])
# swap send and recv buf
send_buf, recv_buf = recv_buf, send_buf
send_descs, recv_descs = recv_descs, send_descs
while not metadata["is_last"]:
# 1. send bucket to next agent
readable_op = None
if self.next_agent is not None:
readable_op = ReadableOperation(
self.agent,
self.next_agent,
send_descs,
metadata,
)
# 2. receive bucket from previous agent
read_op = ReadOperation(self.agent, self.prev_agent, recv_descs, self.bucket_size)
next_metadata = await read_op.read_metadata()
read_op.begin_read()
# 3. yield tensor from send_buf
for name, meta in metadata["bucket_meta"].items():
dtype, shape = meta["dtype"], meta["shape"]
size = dtype.itemsize * shape.numel()
tensor = send_buf[meta["offset"] : meta["offset"] + size].view(dtype=dtype).view(shape)
yield name, tensor
# 4. wait for next agent read complete and read from previous agent complete
if readable_op is not None:
await readable_op.wait_for_complete()
await read_op.wait_for_complete()
total_bytes += self.bucket_size
total_params += len(next_metadata["bucket_meta"])
# 5. swap send and recv buf
torch.cuda.synchronize() # sync non-blocking copy
metadata = next_metadata
send_buf, recv_buf = recv_buf, send_buf
send_descs, recv_descs = recv_descs, send_descs
# send last bucket to next agent
readable_op = None
if self.next_agent is not None:
readable_op = ReadableOperation(
self.agent,
self.next_agent,
send_descs,
metadata,
)
# yield tensor from send_buf
for name, meta in metadata["bucket_meta"].items():
dtype, shape = meta["dtype"], meta["shape"]
size = dtype.itemsize * shape.numel()
tensor = send_buf[meta["offset"] : meta["offset"] + size].view(dtype=dtype).view(shape)
yield name, tensor
# wait for next agent read complete
if readable_op is not None:
await readable_op.wait_for_complete()
time_cost = time.time() - start_time
bandwidth = total_bytes / time_cost / (1024 * 1024 * 1024)
logger.info(
f"Rank {self.rank} receive weights done, total_params: {total_params}, "
f"time cost: {time_cost:.2f}s, bandwidth: {bandwidth:.2f} GB/s"
)
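# --- Illustrative topology sketch (not part of the original module) ---
# Spells out the chain that build_topology produces for a small hypothetical
# setup: 2 trainer workers and 2 rollout workers. `metadata` would normally hold
# the NixlAgentMetadata returned by each worker's prepare(); plain strings stand
# in here because build_topology only routes the entries.
def _example_nixl_topology():
    metadata = ["trainer0", "trainer1", "rollout0", "rollout1"]
    trainer_kwargs, rollout_kwargs = NIXLCheckpointEngine.build_topology(
        trainer_world_size=2, rollout_world_size=2, metadata=metadata
    )
    # Trainer rank 0 heads the chain and points at the first rollout worker;
    # the remaining trainer ranks get rank -1 and stay out of the transfer path.
    assert trainer_kwargs["rank"] == [0, -1]
    assert trainer_kwargs["next_agent_metadata"] == ["rollout0", None]
    # Rollout workers form the rest of the chain: each reads from its predecessor
    # and forwards to its successor, and the last worker has no successor.
    assert rollout_kwargs["rank"] == [1, 2]
    assert rollout_kwargs["prev_agent_metadata"] == ["trainer0", "rollout0"]
    assert rollout_kwargs["next_agent_metadata"] == ["rollout1", None]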
|
verl__interactions__gsm8k_interaction.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023-2024 SGLang Team
# Copyright 2025 ModelBest Inc. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from typing import Any, Optional
from uuid import uuid4
from verl.utils.reward_score import gsm8k
from .base import BaseInteraction
logger = logging.getLogger(__name__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
class Gsm8kInteraction(BaseInteraction):
"""A demo interaction for calculating the reward of gsm8k.
- `start_interaction`: start an interaction instance for a trajectory.
- `generate_response`: generate the response of the assistant.
- `calculate_score`: calculate the score of the interaction.
- `finalize_interaction`: finalize the interaction instance.
"""
def __init__(self, config: dict):
super().__init__(config)
self._instance_dict = {}
async def start_interaction(
self, instance_id: Optional[str] = None, ground_truth: Optional[str] = None, **kwargs
) -> str:
if instance_id is None:
instance_id = str(uuid4())
self._instance_dict[instance_id] = {
"response": "",
"ground_truth": ground_truth,
"reward": 0.0,
}
return instance_id
async def generate_response(
self, instance_id: str, messages: list[dict[str, Any]], **kwargs
) -> tuple[bool, str, float, dict]:
content = ""
for i in range(len(messages) - 1, -1, -1):
item = messages[i]
if item.get("role") == "assistant":
content = item.get("content")
break
self._instance_dict[instance_id]["response"] = content
reward = await self.calculate_score(instance_id)
if reward == 1.0:
response = "Your response is correct!"
should_terminate_sequence = True
else:
response = "Your response is incorrect! You need to reflect on your answer and try again."
should_terminate_sequence = False
return should_terminate_sequence, response, reward, {}
async def calculate_score(self, instance_id: str, **kwargs) -> float:
return gsm8k.compute_score(
self._instance_dict[instance_id]["response"],
self._instance_dict[instance_id]["ground_truth"],
method="strict",
format_score=0.0,
score=1.0,
)
async def finalize_interaction(self, instance_id: str, **kwargs) -> None:
del self._instance_dict[instance_id]
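# --- Illustrative usage sketch (not part of the original module) ---
# Walks one turn of the interaction loop. The config dict, ground truth, and
# messages are hypothetical; with method="strict" the gsm8k scorer looks for the
# final answer after "####". Run with e.g. asyncio.run(_example_gsm8k_round()).
async def _example_gsm8k_round():
    interaction = Gsm8kInteraction(config={"name": "gsm8k"})
    instance_id = await interaction.start_interaction(ground_truth="42")
    messages = [
        {"role": "user", "content": "What is 6 times 7?"},
        {"role": "assistant", "content": "6 * 7 = 42. #### 42"},
    ]
    done, reply, reward, _ = await interaction.generate_response(instance_id, messages)
    assert done and reward == 1.0, reply
    await interaction.finalize_interaction(instance_id)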
|
verl__interactions__utils__interaction_registry.py
|
# Copyright 2023-2024 SGLang Team
# Copyright 2025 ModelBest Inc. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib.util
import logging
import os
import sys
from omegaconf import OmegaConf
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
def get_interaction_class(cls_name):
"""Dynamically import and return the interaction class."""
module_name, class_name = cls_name.rsplit(".", 1)
if module_name not in sys.modules:
spec = importlib.util.find_spec(module_name)
module = importlib.util.module_from_spec(spec)
sys.modules[module_name] = module
spec.loader.exec_module(module)
else:
module = sys.modules[module_name]
interaction_cls = getattr(module, class_name)
return interaction_cls
def initialize_interactions_from_config(interaction_config_file):
"""Initialize interactions from configuration file.
Args:
interaction_config_file: Path to the interaction configuration file.
Returns:
dict: A dictionary mapping interaction names to BaseInteraction instances.
"""
interaction_config = OmegaConf.load(interaction_config_file)
interaction_map = {}
for interaction_item in interaction_config.interaction:
cls_name = interaction_item.class_name
interaction_cls = get_interaction_class(cls_name)
# Extract config and name
config = OmegaConf.to_container(interaction_item.config, resolve=True)
# Get the interaction name - either from config or derive from class name
name = interaction_item.get("name", None)
if name is None:
# If no name is specified, use the class name as default
class_simple_name = cls_name.split(".")[-1]
# Remove "Interaction" suffix if present, otherwise use full class name
if class_simple_name.endswith("Interaction"):
name = class_simple_name[:-11].lower() # Remove "Interaction" (11 chars)
else:
name = class_simple_name.lower()
# Check for duplicate names
if name in interaction_map:
raise ValueError(f"Duplicate interaction name '{name}' found. Each interaction must have a unique name.")
# Inject the name into the config
config["name"] = name
# Create the interaction instance
interaction = interaction_cls(config=config)
interaction_map[name] = interaction
logger.info(f"Initialized interaction '{name}' with class '{cls_name}'")
return interaction_map
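# --- Illustrative usage sketch (not part of the original module) ---
# Shows the configuration layout initialize_interactions_from_config expects,
# written inline with OmegaConf instead of a hand-written YAML file. The class
# path and file location below are examples, not requirements.
def _example_build_interaction_map(config_path: str = "/tmp/interaction_config.yaml"):
    example_cfg = OmegaConf.create(
        {
            "interaction": [
                {
                    "name": "gsm8k",
                    "class_name": "verl.interactions.gsm8k_interaction.Gsm8kInteraction",
                    "config": {},
                }
            ]
        }
    )
    OmegaConf.save(example_cfg, config_path)
    interaction_map = initialize_interactions_from_config(config_path)
    # The "name" key is injected into each interaction's config and used as the map key.
    assert "gsm8k" in interaction_map
    return interaction_map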
|
verl__interactions__weather_interaction.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from typing import Any, Optional
from uuid import uuid4
from .base import BaseInteraction
logger = logging.getLogger(__name__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
class WeatherInteraction(BaseInteraction):
"""A demo interaction for handling weather-related queries.
- `start_interaction`: start an interaction instance for a trajectory.
- `generate_response`: generate the response of the assistant.
- `calculate_score`: calculate the score of the interaction.
- `finalize_interaction`: finalize the interaction instance.
"""
def __init__(self, config: dict):
super().__init__(config)
self._instance_dict = {}
async def start_interaction(
self, instance_id: Optional[str] = None, ground_truth: Optional[str] = None, **kwargs
) -> str:
if instance_id is None:
instance_id = str(uuid4())
self._instance_dict[instance_id] = {
"response": "",
"ground_truth": ground_truth,
"reward": 0.0,
}
return instance_id
async def generate_response(
self, instance_id: str, messages: list[dict[str, Any]], **kwargs
) -> tuple[bool, str, float, dict]:
content = "no tool call"
for i in range(len(messages) - 1, -1, -1):
item = messages[i]
if item.get("role") == "tool":
content = item.get("content")
break
self._instance_dict[instance_id]["response"] = content
reward = await self.calculate_score(instance_id)
if reward == 1.0:
response = "Thank you for your weather query!"
should_terminate_sequence = True
else:
response = "Please use the weather tool to get the weather information."
should_terminate_sequence = True
return should_terminate_sequence, response, reward, {}
async def calculate_score(self, instance_id: str, **kwargs) -> float:
# For the weather interaction, a more complex scoring logic could be implemented.
# For now, return 1.0 if a tool was called and 0.0 otherwise.
if self._instance_dict[instance_id]["response"] == "no tool call":
return 0.0
return 1.0
async def finalize_interaction(self, instance_id: str, **kwargs) -> None:
del self._instance_dict[instance_id]
|
verl__model_merger__base_model_merger.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from typing import Optional
import torch
from accelerate import init_empty_weights
from transformers import (
AutoConfig,
AutoModelForCausalLM,
AutoModelForTokenClassification,
GenerationConfig,
)
from verl.utils import hf_processor, hf_tokenizer
def parse_args():
parser = argparse.ArgumentParser(description="verl model merger")
subparsers = parser.add_subparsers(dest="operation", required=True, help="Specify 'merge' or 'test' operation.")
base_op_parser = argparse.ArgumentParser(add_help=False)
base_op_parser.add_argument(
"--backend", type=str, required=True, choices=["fsdp", "megatron"], help="The backend of the model"
)
base_op_parser.add_argument("--local_dir", type=str, default=None, help="Path to the saved model checkpoints.")
base_op_parser.add_argument(
"--tie-word-embedding",
action="store_true",
help="Whether to tie word embedding weights (currently only Megatron supported)",
)
base_op_parser.add_argument("--trust-remote-code", action="store_true", help="Whether to trust remote code")
base_op_parser.add_argument(
"--is-value-model",
action="store_true",
help="Whether the model is a value model (currently only Megatron supported)",
)
base_op_parser.add_argument(
"--use_cpu_initialization",
action="store_true",
help="Whether to use CPU initialization for the model. This is useful for large models that cannot "
"fit into GPU memory during initialization.",
)
merge_parser = subparsers.add_parser("merge", parents=[base_op_parser], help="Merge model checkpoints and save.")
merge_parser.add_argument(
"--target_dir", default="tmp", type=str, help="Directory to save the merged huggingface model"
)
merge_parser.add_argument(
"--hf_upload_path", default=None, type=str, help="Hugging Face repository ID to upload the model"
)
merge_parser.add_argument(
"--private", action="store_true", help="Whether to upload the model to a private Hugging Face repository"
)
test_parser = subparsers.add_parser(
"test", parents=[base_op_parser], help="Test merged model against a reference Hugging Face model"
)
test_parser.add_argument(
"--test_hf_dir", type=str, required=True, help="Path to the reference Hugging Face model directory for testing"
)
args = parser.parse_args()
return args
@dataclass
class ModelMergerConfig:
"""Configuration for model merger operations.
Args:
operation (str): Operation type - 'merge' or 'test'.
backend (str): Backend type for the model ('fsdp' or 'megatron').
target_dir (Optional[str]): Directory to save the merged huggingface model. Defaults to "tmp".
hf_upload_path (Optional[str]): Hugging Face repository ID to upload the model. Defaults to None.
private (bool): Whether to upload the model to a private Hugging Face repository. Defaults to False.
test_hf_dir (Optional[str]): Path to the reference Hugging Face model directory for testing. Defaults to None.
tie_word_embedding (bool): Whether to tie word embedding weights (currently only Megatron
supported). Defaults to False.
trust_remote_code (bool): Whether to trust remote code. Defaults to False.
is_value_model (bool): Whether the model is a value model (currently only Megatron
supported). Defaults to False.
local_dir (Optional[str]): Path to the saved model checkpoints. Defaults to None.
hf_model_config_path (Optional[str]): Path to HuggingFace model configuration files. Defaults to None.
hf_upload (bool): Whether to upload to HuggingFace (computed automatically). Not for initialization.
use_cpu_initialization (bool): Whether to use CPU initialization for large models. Defaults to False.
"""
operation: str # 'merge' or 'test'
backend: str
target_dir: Optional[str] = "tmp"
hf_upload_path: Optional[str] = None
private: bool = False
test_hf_dir: Optional[str] = None
tie_word_embedding: bool = False
trust_remote_code: bool = False
is_value_model: bool = False
local_dir: Optional[str] = None
hf_model_config_path: Optional[str] = None
hf_upload: bool = field(init=False)
use_cpu_initialization: bool = False
def __post_init__(self):
self.hf_upload = self.operation == "merge" and bool(self.hf_upload_path)
if self.operation == "test":
self.target_dir = None
self.hf_upload_path = None
self.private = False
def generate_config_from_args(args: argparse.Namespace) -> ModelMergerConfig:
common_config_args = {
"operation": args.operation,
"backend": args.backend,
"tie_word_embedding": args.tie_word_embedding,
"trust_remote_code": args.trust_remote_code,
"is_value_model": args.is_value_model,
"local_dir": args.local_dir,
"hf_model_config_path": os.path.join(args.local_dir, "huggingface"),
"use_cpu_initialization": args.use_cpu_initialization,
}
if args.operation == "merge":
config = ModelMergerConfig(
**common_config_args,
target_dir=args.target_dir,
hf_upload_path=args.hf_upload_path,
private=args.private,
test_hf_dir=None,
)
os.makedirs(config.target_dir, exist_ok=True)
elif args.operation == "test":
config = ModelMergerConfig(
**common_config_args,
test_hf_dir=args.test_hf_dir,
# the following args are not used by test operation
target_dir=None,
hf_upload_path=None,
private=False,
)
else:
raise NotImplementedError(f"Unknown operation: {args.operation}")
return config
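# --- Illustrative usage sketch (not part of the original module) ---
# Builds a merge config the same way the CLI path above would, using a Namespace
# in place of parse_args(). The checkpoint and output paths are placeholders;
# note that the "merge" branch creates target_dir on disk.
def _example_merge_config() -> ModelMergerConfig:
    args = argparse.Namespace(
        operation="merge",
        backend="fsdp",
        tie_word_embedding=False,
        trust_remote_code=False,
        is_value_model=False,
        local_dir="checkpoints/global_step_100/actor",
        use_cpu_initialization=False,
        target_dir="checkpoints/global_step_100/actor/huggingface",
        hf_upload_path=None,
        private=False,
    )
    config = generate_config_from_args(args)
    # hf_upload stays False because no hf_upload_path was given, and the
    # HuggingFace config is expected under <local_dir>/huggingface.
    assert config.hf_upload is False
    assert config.hf_model_config_path == os.path.join(args.local_dir, "huggingface")
    return config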
class BaseModelMerger(ABC):
"""
Abstract base class for merging distributed model checkpoints into HuggingFace format.
This class provides common functionality for converting model checkpoints from different
distributed training backends (FSDP, Megatron) into standard HuggingFace format that
can be easily loaded and used for inference or further training.
The merger supports two main operations:
- merge: Convert and save checkpoints to HuggingFace format
- test: Validate merged checkpoints against a reference model
Args:
config (ModelMergerConfig): Configuration object containing paths, backend type,
and operation parameters.
Attributes:
config (ModelMergerConfig): The configuration object passed during initialization.
hf_model_config_path (str): Path to the HuggingFace model configuration files.
model_config (PretrainedConfig): Loaded HuggingFace model configuration.
"""
def __init__(self, config: ModelMergerConfig):
self.config = config
self.hf_model_config_path = config.hf_model_config_path
self.model_config = AutoConfig.from_pretrained(
self.hf_model_config_path, trust_remote_code=self.config.trust_remote_code
)
def get_transformers_auto_model_class(self):
has_remote_code = hasattr(self.model_config, "auto_map") and any(
self.model_config.architectures[0] in val for val in self.model_config.auto_map.values()
)
if has_remote_code:
auto_class = next(
k for k, v in self.model_config.auto_map.items() if self.model_config.architectures[0] in v
)
match auto_class:
case "AutoModelForCausalLM":
return AutoModelForCausalLM
case "AutoModelForTokenClassification":
return AutoModelForTokenClassification
case "AutoModelForVision2Seq":
# Handle different transformers versions for Vision2Seq models
import transformers
from packaging import version
if version.parse(transformers.__version__) >= version.parse("4.54.0"):
# transformers >= 4.54.0 uses AutoModelForImageTextToText
from transformers import AutoModelForImageTextToText
return AutoModelForImageTextToText
else:
# transformers < 4.54.0 uses AutoModelForVision2Seq
from transformers import AutoModelForVision2Seq
return AutoModelForVision2Seq
case _:
raise NotImplementedError(f"Unknown auto class {auto_class}")
else:
if "ForTokenClassification" in self.model_config.architectures[0]:
return AutoModelForTokenClassification
elif "ForCausalLM" in self.model_config.architectures[0]:
return AutoModelForCausalLM
elif "ForConditionalGeneration" in self.model_config.architectures[0]:
return AutoModelForVision2Seq
raise NotImplementedError(f"Unknown architecture {self.model_config.architectures}")
def patch_model_generation_config(self, model):
"""
The generation_config created from the model config may differ from that of the pretrained model,
which can lead to errors when generating: https://github.com/volcengine/verl/issues/1246
This function replaces the generation_config created from the model config with the one from the pretrained model.
"""
if model.can_generate():
try:
model.generation_config = GenerationConfig.from_pretrained(self.hf_model_config_path)
except OSError:
print(
f"Warning: Generation config file not found in {self.hf_model_config_path}, using a "
f"generation config created from the model config."
)
return model
def save_lora_adapter(self, state_dict: dict[str, torch.Tensor]):
"""
Save lora adapter to safetensors.
Returns:
lora_path: str, the path to the lora adapter. None if no lora adapter found.
Note:
This function changes the 'state_dict' in place.
"""
lora_params_names = [name for name in state_dict.keys() if "lora_" in name]
if len(lora_params_names) == 0:
return None
import json
from typing import OrderedDict
import peft
from safetensors.torch import save_file
lora_params = OrderedDict()
target_modules = set()
lora_key = None
for name in lora_params_names:
lora_key = name.replace(".default.weight", ".weight")
target_modules.add(lora_key.split(".")[-3])
lora_params[lora_key] = state_dict.pop(name)
lora_rank = min(lora_params[lora_key].shape[0], lora_params[lora_key].shape[1])
peft_dict = {
"r": lora_rank,
"lora_alpha": 0, # lora_alpha is not set. An error should be raised to inform the user to set it manually.
"target_modules": list(target_modules),
}
peft_config = peft.LoraConfig(**peft_dict).to_dict()
peft_config["task_type"] = peft_config["task_type"].value if peft_config["task_type"] else None
peft_config["peft_type"] = peft_config["peft_type"].value if peft_config["peft_type"] else None
peft_config["target_modules"] = list(peft_config["target_modules"])
lora_path = os.path.join(self.config.target_dir, "lora_adapter")
os.makedirs(lora_path, exist_ok=True)
with open(os.path.join(lora_path, "adapter_config.json"), "w", encoding="utf-8") as f:
json.dump(peft_config, f, ensure_ascii=False, indent=4)
save_file(lora_params, os.path.join(lora_path, "adapter_model.safetensors"))
for name in list(state_dict.keys()):
key = (
name.replace("base_model.model.", "")
.replace(".base_layer.weight", ".weight")
.replace(".base_layer.bias", ".bias")
)
state_dict[key] = state_dict.pop(name)
return lora_path
def save_hf_model_and_tokenizer(self, state_dict: dict[str, torch.Tensor]):
auto_model_class = self.get_transformers_auto_model_class()
with init_empty_weights():
model = auto_model_class.from_config(
self.model_config, torch_dtype=torch.bfloat16, trust_remote_code=self.config.trust_remote_code
)
model.to_empty(device="cpu")
model = self.patch_model_generation_config(model)
lora_path = self.save_lora_adapter(state_dict)
if lora_path:
print(f"Saving lora adapter to {lora_path}")
print(f"Saving model to {self.config.target_dir}")
model.save_pretrained(self.config.target_dir, state_dict=state_dict)
del state_dict
del model
processor = hf_processor(self.hf_model_config_path, trust_remote_code=self.config.trust_remote_code)
tokenizer = hf_tokenizer(self.hf_model_config_path, trust_remote_code=self.config.trust_remote_code)
if processor is not None:
print(f"Saving processor to {self.config.target_dir}")
processor.save_pretrained(self.config.target_dir)
if tokenizer is not None:
print(f"Saving tokenizer to {self.config.target_dir}")
tokenizer.save_pretrained(self.config.target_dir)
def upload_to_huggingface(self):
import requests
from huggingface_hub import HfApi
from huggingface_hub.utils import HfHubHTTPError, RepositoryNotFoundError
api = HfApi()
try:
# Attempt to create repository
api.create_repo(repo_id=self.config.hf_upload_path, private=self.config.private, exist_ok=True)
except HfHubHTTPError as e:
# Handle authentication/API errors
if e.response.status_code == 401:
raise PermissionError(
"Hugging Face authentication failed. Verify your token is valid and has write permissions."
) from e
elif e.response.status_code == 404:
raise RepositoryNotFoundError(f"Repository path not found: {self.config.hf_upload_path}") from e
else:
raise ConnectionError(f"Failed to create repository ({e.response.status_code}): {e}") from e
except requests.exceptions.ConnectionError as e:
raise ConnectionError("Network connection failed. Check your internet connection.") from e
try:
# Attempt folder upload
api.upload_folder(folder_path=self.config.target_dir, repo_id=self.config.hf_upload_path, repo_type="model")
except HfHubHTTPError as e:
if e.response.status_code == 401:
raise PermissionError("Authentication failed during upload. Token may have expired.") from e
else:
raise RuntimeError(f"Upload failed ({e.response.status_code}): {e}") from e
except requests.exceptions.ConnectionError as e:
raise ConnectionError("Network interruption during upload. Try again with stable connection.") from e
except OSError as e:
raise FileNotFoundError(f"Local folder error: {self.config.target_dir} - {str(e)}") from e
except Exception as e:
raise RuntimeError(f"Unexpected error during upload: {str(e)}") from e
@abstractmethod
def merge_and_save(self):
raise NotImplementedError("Subclasses should implement this method")
@abstractmethod
def cleanup(self):
raise NotImplementedError("Subclasses should implement this method to clean up resources if needed")
|
verl__model_merger__fsdp_model_merger.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
import numpy as np
import torch
from torch.distributed._tensor import Placement, Shard
try:
# for torch 2.5+
from torch.distributed.tensor import DTensor
except ImportError:
from torch.distributed._tensor import DTensor
from tqdm import tqdm
from .base_model_merger import BaseModelMerger
class FSDPModelMerger(BaseModelMerger):
"""
Model merger for FSDP (Fully Sharded Data Parallel) checkpoints.
This class handles the conversion of FSDP distributed checkpoints into HuggingFace format.
FSDP shards model parameters across multiple processes, and this merger reconstructs
the full model by loading and concatenating the sharded parameters from all ranks.
The merger supports various FSDP configurations including:
- Pure FSDP (single dimension sharding)
- FSDP + DDP (data parallel + fully sharded data parallel)
- DTensor-based sharding with custom device meshes
Key features:
- Automatic detection of world size from checkpoint filenames
- Support for DTensor and non-DTensor checkpoints
- Parallel loading of checkpoint shards for efficiency
- Validation against reference HuggingFace models
Example:
To merge FSDP checkpoints:
```python
config = ModelMergerConfig(
operation="merge",
backend="fsdp",
local_dir="path/to/fsdp/checkpoints",
target_dir="path/to/output"
)
merger = FSDPModelMerger(config)
merger.merge_and_save()
```
"""
def _get_world_size(self) -> int:
"""_summary_
From FSDP json config file, extract the world size.
Returns:
int: world size
"""
config_path = Path(self.config.local_dir) / "fsdp_config.json"
if not config_path.exists():
raise FileNotFoundError(f"Config file {config_path} does not exist.")
with open(config_path) as f:
config = json.load(f)
# Extract world size from the config
world_size = config.get("world_size", None)
if world_size is None:
raise ValueError("World size not found in the config file.")
return world_size
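    # Illustrative example (not from the source): a minimal fsdp_config.json that satisfies
    # _get_world_size only needs the "world_size" key, e.g.
    #     {"world_size": 8}
    # Any additional keys in the file are ignored by this method.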
def _load_rank_zero_state_dict(self, world_size: int) -> dict:
return torch.load(
Path(self.config.local_dir) / f"model_world_size_{world_size}_rank_0.pt",
map_location="cpu",
weights_only=False,
)
def _extract_device_mesh_info(self, state_dict: dict, world_size: int) -> tuple[np.ndarray, tuple[str, ...]]:
"""
Retrieves sharding information (device_mesh, mesh_dim_names) from a DTensor in the state_dict.
If no DTensor is found, infers a simple FSDP mesh based on world_size.
"""
pivot_key = sorted(list(state_dict.keys()))[0]
weight = state_dict[pivot_key]
if isinstance(weight, DTensor):
# get sharding info
device_mesh = weight.device_mesh
mesh = device_mesh.mesh
mesh_dim_names = device_mesh.mesh_dim_names
else:
# for non-DTensor
mesh = np.array([world_size], dtype=np.int64)
mesh_dim_names = ("fsdp",)
return mesh, mesh_dim_names
def _calculate_shard_configuration(
self, mesh: np.ndarray, mesh_dim_names: tuple[str, ...]
) -> tuple[int, tuple[int, ...]]:
"""Calculates the total number of shards and the shape of the device mesh."""
assert mesh_dim_names in (("fsdp",), ("ddp", "fsdp")), f"Unsupported mesh_dim_names {mesh_dim_names}"
if "tp" in mesh_dim_names:
# TODO: "tp" is not supported yet due to the above assert
total_shards = mesh.shape[-1] * mesh.shape[-2]
mesh_shape = (mesh.shape[-2], mesh.shape[-1])
else:
total_shards = mesh.shape[-1]
mesh_shape = (mesh.shape[-1],)
return total_shards, mesh_shape
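    # Illustrative examples (assumed mesh layouts, not from the source):
    #   mesh = [8],                   mesh_dim_names = ("fsdp",)       -> total_shards=8, mesh_shape=(8,)
    #   mesh of shape (2, 4),         mesh_dim_names = ("ddp", "fsdp") -> total_shards=4, mesh_shape=(4,)
    # In the DDP + FSDP case only the last (FSDP) mesh dimension contributes shards,
    # since DDP replicas hold identical parameters.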
def _merge_by_placement(self, tensors: list[torch.Tensor], placement: Placement) -> torch.Tensor:
"""Merges a list of tensors based on their DTensor placement"""
if placement.is_replicate():
return tensors[0]
elif placement.is_partial():
raise NotImplementedError("Partial placement is not supported yet")
elif placement.is_shard():
return torch.cat(tensors, dim=placement.dim).contiguous()
raise NotImplementedError(f"Unsupported placement: {placement}")
def _load_and_merge_state_dicts(
self, world_size: int, total_shards: int, mesh_shape: tuple[int, ...], mesh_dim_names: tuple[str, ...]
) -> dict[str, torch.Tensor]:
model_state_dict_lst = [None] * total_shards
def process_one_shard(rank: int, model_state_dict_lst: list):
model_path = Path(self.config.local_dir) / f"model_world_size_{world_size}_rank_{rank}.pt"
state_dict = torch.load(model_path, map_location="cpu", weights_only=False)
model_state_dict_lst[rank] = state_dict
return state_dict
with ThreadPoolExecutor(max_workers=min(32, os.cpu_count())) as executor:
futures = [executor.submit(process_one_shard, rank, model_state_dict_lst) for rank in range(total_shards)]
for future in tqdm(futures, desc=f"Loading {total_shards} FSDP shards", total=total_shards):
future.result()
# Merge state dicts from all shards
state_dict = {}
param_placements: dict[str, list] = {}
for key in set(model_state_dict_lst[0].keys()):
state_dict[key] = []
for model_state_shard in model_state_dict_lst:
# add tensor shard in order of rank to state_dict[key]
tensor = model_state_shard.pop(key)
if isinstance(tensor, DTensor):
state_dict[key].append(tensor._local_tensor.bfloat16())
placements = tuple(tensor.placements)
# replicated placement at dp dimension can be discarded
if mesh_dim_names[0] in ("dp", "ddp"):
placements = placements[1:]
if key not in param_placements:
param_placements[key] = placements
else:
assert param_placements[key] == placements
else:
state_dict[key].append(tensor.bfloat16())
del model_state_dict_lst
# Merge tensors
for key in sorted(state_dict):
if not isinstance(state_dict[key], list):
print(f"No need to merge key {key}")
continue
if key in param_placements:
# merge shards
placements: tuple[Shard] = param_placements[key]
if len(mesh_shape) == 1:
# 1-D list, FSDP without TP
assert len(placements) == 1
shards = state_dict[key]
state_dict[key] = self._merge_by_placement(shards, placements[0])
else:
# 2-D list, FSDP + TP
raise NotImplementedError("FSDP + TP is not supported yet")
else:
state_dict[key] = torch.cat(state_dict[key], dim=0)
return state_dict
def merge_and_save(self):
world_size = self._get_world_size()
rank_zero_state_dict = self._load_rank_zero_state_dict(world_size)
mesh, mesh_dim_names = self._extract_device_mesh_info(rank_zero_state_dict, world_size)
print(f"Got device mesh {mesh}, mesh_dim_names {mesh_dim_names}")
total_shards, mesh_shape = self._calculate_shard_configuration(mesh, mesh_dim_names)
print(f"Processing model shards with {total_shards} {mesh_shape} in total")
merged_state_dict = self._load_and_merge_state_dicts(world_size, total_shards, mesh_shape, mesh_dim_names)
if self.config.operation == "test":
if not self.config.test_hf_dir:
raise ValueError("test_hf_dir must be provided for test operation")
self._validate_state_dict(merged_state_dict)
elif self.config.operation == "merge":
self.save_hf_model_and_tokenizer(merged_state_dict)
if self.config.hf_upload:
self.upload_to_huggingface()
else:
raise ValueError(f"Unknown operation: {self.config.operation}")
def _validate_state_dict(self, state_dict: dict[str, torch.Tensor]):
auto_model_class = self.get_transformers_auto_model_class()
hf_model = auto_model_class.from_pretrained(self.config.test_hf_dir, torch_dtype=torch.bfloat16)
hf_state_dict = hf_model.state_dict()
del hf_model
hf_model_keys = set(hf_state_dict.keys())
collected_keys = set(state_dict.keys())
missing_keys = hf_model_keys - collected_keys
assert len(missing_keys) == 0, f"Missing keys in collected state dict: {list(sorted(missing_keys))}"
extra_keys = collected_keys - hf_model_keys
assert len(extra_keys) == 0, f"Extra keys in collected state dict: {list(sorted(extra_keys))}"
for key in hf_model_keys:
hf_shape = hf_state_dict[key].shape
collected_shape = state_dict[key].shape
assert hf_shape == collected_shape, (
f"Shape mismatch for key '{key}': original {hf_shape} vs collected {collected_shape}"
)
hf_dtype = hf_state_dict[key].dtype
collected_dtype = state_dict[key].dtype
assert hf_dtype == collected_dtype, (
f"Dtype mismatch for key '{key}': original {hf_dtype} vs collected {collected_dtype}"
)
torch.testing.assert_close(hf_state_dict[key], state_dict[key], atol=1e-6, rtol=1e-6)
print("FSDP checks passed: The merged state_dict matches the hf model saved by FSDPCheckpointManager.")
def cleanup(self):
"""Cleanup temporary files if needed."""
# FSDP merger does not create temporary files, so no cleanup is needed.
pass
|
verl__model_merger__megatron_model_merger.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import warnings
from contextlib import contextmanager
from pathlib import Path
from typing import Any, Callable, ContextManager
import numpy as np
import torch
import torch.distributed as dist
try:
# NPU patch
import mindspeed.megatron_adaptor # noqa: F401
except ImportError:
pass
from accelerate import init_empty_weights
from megatron.core import mpu
from megatron.core.models.gpt.gpt_model import ModelType
from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed
from safetensors.torch import load_file
from transformers import (
AutoConfig,
PretrainedConfig,
)
from verl.models.mcore import hf_to_mcore_config
from verl.utils.device import get_device_name, get_nccl_backend, get_torch_device
from verl.utils.distributed import set_numa_affinity
from verl.utils.megatron.dist_checkpointing import load_dist_checkpointing
from verl.utils.megatron_utils import get_model
from verl.utils.tokenizer import hf_processor, hf_tokenizer
from .base_model_merger import BaseModelMerger, ModelMergerConfig
@contextmanager
def noop_context() -> Any:
yield
def get_dynamic_pipeline_shards(layer_num: int, pp_size: int) -> list[int]:
"""Calculate the pipeline sharding configuration for Megatron-LM.
Args:
layer_num: Total number of layers in the model.
pp_size: Number of pipeline parallel ranks.
Returns:
        The number of layers assigned to each pipeline-parallel rank, chosen so that
        the pipeline sharding is as uniform as possible.
"""
if layer_num < pp_size:
raise ValueError(f"layer_num {layer_num} must be greater than pp_size {pp_size}.")
if pp_size < 1:
raise ValueError(f"pp_size must be at least 1, got {pp_size}.")
if pp_size == 1:
return [layer_num]
if pp_size == 2:
return [
layer_num // 2,
layer_num - layer_num // 2,
]
middle_size = pp_size - 2
shards_strategy = []
for middle_layer_num in range(layer_num):
first_last_layer_num = layer_num - middle_layer_num * middle_size
first_layer_num = first_last_layer_num // 2
last_layer_num = first_last_layer_num - first_last_layer_num // 2
if 0 < first_layer_num <= middle_layer_num and 0 < last_layer_num <= middle_layer_num:
shards_strategy.append(
(
[first_layer_num] + [middle_layer_num] * middle_size + [last_layer_num],
abs(first_layer_num - middle_layer_num),
)
)
# sort by diff of layer_num, to make it as uniform as possible
res = sorted(shards_strategy, key=lambda x: x[1])[0][0]
assert sum(res) == layer_num, f"sum(res)={sum(res)} != layer_num={layer_num}, pp_size={pp_size}"
return res
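# Illustrative example (computed from the algorithm above, not part of the source):
#   get_dynamic_pipeline_shards(layer_num=30, pp_size=4) -> [7, 8, 8, 7]
# The first and last pipeline stages receive slightly fewer layers so that the
# per-stage layer counts stay as uniform as possible.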
class MegatronModelMerger(BaseModelMerger):
"""
Model merger for Megatron-LM distributed checkpoints.
This class handles the conversion of Megatron-LM distributed checkpoints into HuggingFace format.
Megatron-LM uses tensor parallelism, pipeline parallelism, and data parallelism to distribute
large language models across multiple GPUs. This merger reconstructs the full model by
loading distributed checkpoints and applying the necessary transformations.
Key features:
- Support for tensor parallel, pipeline parallel, and data parallel configurations
- Automatic parameter name mapping from Megatron to HuggingFace conventions
- Handling of QKV and gate-up tensor splitting/merging
- Support for tied word embeddings and value models
- Integration with Megatron's distributed checkpointing system
The merger handles various model architectures and configurations:
- Standard transformer models (GPT-style)
- Models with tied word embeddings
- Value models for reinforcement learning
- Multi-layer attention (MLA) architectures
- Mixture of Experts (MoE) models
Args:
config (ModelMergerConfig): Configuration object with Megatron-specific settings
including tie_word_embedding and is_value_model flags.
Example:
To merge Megatron checkpoints:
```python
config = ModelMergerConfig(
operation="merge",
backend="megatron",
local_dir="path/to/megatron/checkpoints",
target_dir="path/to/output",
tie_word_embedding=True
)
merger = MegatronModelMerger(config)
merger.merge_and_save()
```
"""
def __init__(self, config: ModelMergerConfig):
super().__init__(config)
        # Currently we use only 1 rank to merge the dist_ckpt; we will move to multi-process saving shortly.
if "WORLD_SIZE" not in os.environ:
os.environ["RANK"] = "0"
os.environ["LOCAL_RANK"] = "0"
os.environ["WORLD_SIZE"] = "1"
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "12355"
set_numa_affinity()
torch.distributed.init_process_group(get_nccl_backend())
self.rank = torch.distributed.get_rank()
self.world_size = torch.distributed.get_world_size()
local_rank = os.environ.get("LOCAL_RANK", 0)
get_torch_device().set_device(f"{get_device_name()}:{local_rank}")
mpu.initialize_model_parallel(
tensor_model_parallel_size=1,
pipeline_model_parallel_size=self.world_size,
virtual_pipeline_model_parallel_size=None,
context_parallel_size=1,
expert_model_parallel_size=1,
)
model_parallel_cuda_manual_seed(0)
self.hf_config = AutoConfig.from_pretrained(
self.config.hf_model_config_path, trust_remote_code=self.config.trust_remote_code
)
print(self.hf_config, flush=True)
self.params_mapping = {
# megatron core gpt model name, huggingface model name
            # NOTE: when two keys share a prefix, the longer (more specific) key must be
            # listed first so it is matched before the shorter one.
"embedding.word_embeddings": "model.embed_tokens",
# input layer norm for dpskv3
"input_layernorm.weight": "input_layernorm.weight",
"input_layernorm.bias": "input_layernorm.bias",
# attn
"self_attention.linear_qkv.layer_norm_weight": "input_layernorm.weight",
"self_attention.linear_qkv.layer_norm_bias": "input_layernorm.bias",
"self_attention.linear_qkv": "self_attn.qkv_proj",
"self_attention.q_layernorm": "self_attn.q_norm",
"self_attention.k_layernorm": "self_attn.k_norm",
"self_attention.linear_proj": "self_attn.o_proj",
# mla
"self_attention.linear_q_proj": "self_attn.q_proj",
"self_attention.linear_q_down_proj": "self_attn.q_a_proj",
"self_attention.linear_q_up_proj.layer_norm_weight": "self_attn.q_a_layernorm.weight",
"self_attention.linear_q_up_proj": "self_attn.q_b_proj",
"self_attention.linear_kv_down_proj": "self_attn.kv_a_proj_with_mqa",
"self_attention.linear_kv_up_proj.layer_norm_weight": "self_attn.kv_a_layernorm.weight",
"self_attention.linear_kv_up_proj": "self_attn.kv_b_proj",
# mlp
"pre_mlp_layernorm": "post_attention_layernorm",
"mlp.linear_fc1.layer_norm_weight": "post_attention_layernorm.weight",
"mlp.linear_fc1.layer_norm_bias": "post_attention_layernorm.bias",
"mlp.linear_fc1": "mlp.gate_up_proj",
"mlp.linear_fc2": "mlp.down_proj",
# moe
"mlp.router.expert_bias": "mlp.gate.e_score_correction_bias",
"mlp.router": "mlp.gate",
"mlp.shared_experts.linear_fc1": "mlp.shared_experts.gate_up_proj",
"mlp.shared_experts.linear_fc2": "mlp.shared_experts.down_proj",
"linear_fc1": "gate_up_proj",
"linear_fc2": "down_proj",
# output
"final_layernorm": "norm",
"output_layer": "lm_head",
}
if "Qwen2MoeForCausalLM" in self.hf_config.architectures:
self.params_mapping["mlp.shared_experts.linear_fc1"] = "mlp.shared_expert.gate_up_proj"
self.params_mapping["mlp.shared_experts.linear_fc2"] = "mlp.shared_expert.down_proj"
self.params_mapping["mlp.shared_experts.gate_weight"] = "mlp.shared_expert_gate.weight"
def _load_state_dicts(self, model_ckpt_path: str) -> dict[str, Any]:
"""_summary_
Use Megatron dist_checkpointing to load the model state dicts from the checkpoint directory.
Args:
model_ckpt_path (str): Path to the model checkpoint directory.
Returns:
State dict containing the model parameters.
"""
# init hf config
self.pipeline_shards = get_dynamic_pipeline_shards(self.hf_config.num_hidden_layers, self.world_size)
print(f"Pipeline shards: {self.pipeline_shards}, total layers: {sum(self.pipeline_shards)}")
tf_config = hf_to_mcore_config(
self.hf_config,
torch.bfloat16,
num_layers_in_first_pipeline_stage=self.pipeline_shards[0] if len(self.pipeline_shards) > 1 else None,
num_layers_in_last_pipeline_stage=self.pipeline_shards[-1] if len(self.pipeline_shards) > 2 else None,
)
tf_config.use_cpu_initialization = self.config.use_cpu_initialization
tie_word_embeddings = getattr(self.hf_config, "tie_word_embeddings", False)
# init megatron model
def megatron_model_provider(pre_process, post_process):
from verl.models.mcore import init_mcore_model
parallel_model = init_mcore_model(
tf_config,
self.hf_config,
pre_process,
post_process,
share_embeddings_and_output_weights=tie_word_embeddings,
value=False,
)
return parallel_model
context: Callable[..., ContextManager] = (
init_empty_weights if self.config.use_cpu_initialization else noop_context
)
with context():
whole_model = get_model(
model_provider_func=megatron_model_provider,
model_type=ModelType.encoder_or_decoder,
wrap_with_ddp=False,
transformer_config=tf_config,
)
if self.config.use_cpu_initialization:
# convert meta device to empty tensor so it can use `copy_` function
whole_model[0].module = whole_model[0].module.to_empty(device="cpu")
# load state dicts
sharded_state_dict = {}
for vpp_rank, model in enumerate(whole_model):
key = f"model{vpp_rank}" if len(whole_model) > 1 else "model"
mpu.set_virtual_pipeline_model_parallel_rank(vpp_rank)
sharded_state_dict[key] = model.sharded_state_dict()
model_state_dict = load_dist_checkpointing(sharded_state_dict, model_ckpt_path)
model_state_dict_list = []
for vpp_rank, model in enumerate(whole_model):
key = f"model{vpp_rank}" if len(whole_model) > 1 else "model"
mpu.set_virtual_pipeline_model_parallel_rank(vpp_rank)
model_state_dict_list.append(model_state_dict[key])
return model_state_dict_list
    def _check_megatron_state_key(self, key: str) -> None:
        """
        Check that the key is a valid Megatron state key.

        The model merger currently only supports keys that start with "decoder",
        "embedding", or "output_layer"; keys starting with "model." are rejected.
        """
if key.startswith("model."):
raise ValueError(
f"Invalid key {key} in Megatron state_dict. Expected keys to start with "
f"'decoder/embedding/output_layer' in TransformerLayer."
)
skip_checking_keys = ["embedding.word_embeddings", "output_layer"]
for skip_key in skip_checking_keys:
if skip_key in key:
print(f"skip checking key {key}")
return
        # All remaining keys must come from the decoder
if not key.startswith("decoder"):
raise ValueError(
f"Invalid key {key} in Megatron state_dict. Expected keys to start with 'decoder' in TransformerLayer."
)
def _split_tensors(
self, key: str, tensor: torch.Tensor, config: PretrainedConfig, is_value_model: bool = False
) -> list[torch.Tensor]:
"""
Splits a tensor into multiple tensors based on the name.
This is used to handle qkv and gate_up tensors.
"""
if "linear_fc1.weight" in key:
# if the tensor is gate and proj
gate_lst = []
up_lst = []
gate, up = tensor.chunk(2)
gate_lst.append(gate)
up_lst.append(up)
gate = torch.cat(gate_lst, dim=0)
up = torch.cat(up_lst, dim=0)
return [gate, up]
elif "self_attention.linear_qkv." in key and "layer_norm" not in key:
# if the tensor is qkv, for each param on tp, split into q, k, v
# concat q, k, v separately.
q_lst, k_lst, v_lst = [], [], []
assert config.num_attention_heads % config.num_key_value_heads == 0
num_q_per_kv = config.num_attention_heads // config.num_key_value_heads
assert tensor.shape[0] % (num_q_per_kv + 2) == 0, (
f"Tensor shape {tensor.shape} is not divisible by {num_q_per_kv + 2}"
)
kv_size = tensor.shape[0] // (num_q_per_kv + 2)
split_size = [kv_size * num_q_per_kv, kv_size, kv_size]
num_query_groups_per_partition = config.num_key_value_heads
for chunk in tensor.chunk(num_query_groups_per_partition):
split_size = [
kv_size * num_q_per_kv // num_query_groups_per_partition,
kv_size // num_query_groups_per_partition,
kv_size // num_query_groups_per_partition,
]
q, k, v = chunk.split(split_size)
q_lst.append(q)
k_lst.append(k)
v_lst.append(v)
return [torch.cat(q_lst, dim=0), torch.cat(k_lst, dim=0), torch.cat(v_lst, dim=0)]
else:
return [tensor]
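    # Illustrative shapes (assumed config, not from the source): with hidden_size=4096,
    # num_attention_heads=32, num_key_value_heads=8 and head_dim=128, a fused linear_qkv
    # weight has 32*128 + 2*8*128 = 6144 rows and is split back into q (4096 rows),
    # k (1024 rows) and v (1024 rows); a fused linear_fc1 weight is chunked in half
    # into the gate and up projections.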
def _merge_state_dicts(self, model_state_dict_list: list[dict[str, Any]]) -> dict[str, torch.Tensor]:
state_dict = {}
layers_cum = 0
if self.world_size > 1:
pipeline_cumsum = np.cumsum(self.pipeline_shards)
layers_cum = 0 if self.rank == 0 else pipeline_cumsum[self.rank - 1]
print(f"{layers_cum=}")
for model_state_dict in model_state_dict_list:
layers_handled = 0
keys = model_state_dict.keys()
for key in keys:
if "extra_state" in key:
continue
if self.config.tie_word_embedding and ("output_layer" in key):
print("skip lm_head and reward_head loading because of tie_word_embeddings")
continue
self._check_megatron_state_key(key)
hf_name = self._replace_name(key, self.params_mapping)
assert hf_name is not None, f"Failed to convert layer name [{key}] from megatron to huggingface."
if "model.layers." in hf_name:
local_layer_no = int(hf_name.split(".")[2])
layers_handled = max(local_layer_no, layers_handled)
global_layer_no = local_layer_no + layers_cum
new_key_list = hf_name.split(".")
new_key_list[2] = str(global_layer_no)
hf_name = ".".join(new_key_list)
else:
warnings.warn(f"hf_name {hf_name} will not be fixed with layer number", stacklevel=2)
if "mlp.experts." in hf_name and ".weight" in hf_name:
name_prefix, expert_id = hf_name.split(".weight")
for proj in ["gate_up", "down"]:
if f"{proj}_proj" in hf_name:
hf_name = hf_name.replace(
f"mlp.experts.{proj}_proj.weight{expert_id}",
f"mlp.experts.{expert_id}.{proj}_proj.weight",
)
tensor = model_state_dict[key]
split_tensor = self._split_tensors(
key, tensor, self.hf_config, is_value_model=self.config.is_value_model
)
if len(split_tensor) == 1:
state_dict[hf_name] = split_tensor[0]
elif len(split_tensor) == 3:
# split qkv
for n, d in zip(["q", "k", "v"], split_tensor, strict=True):
state_dict[hf_name.replace("qkv", n)] = d
elif len(split_tensor) == 2:
# split gate up
state_dict[hf_name.replace("gate_up", "gate")] = split_tensor[0]
state_dict[hf_name.replace("gate_up", "up")] = split_tensor[1]
shape_info = (
split_tensor.shape if isinstance(split_tensor, torch.Tensor) else [t.shape for t in split_tensor]
)
print(f"converted {key} to {hf_name} with shape {shape_info}")
layers_cum += layers_handled + 1 # zero based
return state_dict
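    # Illustrative expert-key rewrite performed above (assumed MoE naming): a Megatron key
    # such as "decoder.layers.0.mlp.experts.linear_fc1.weight3" first maps to
    # "model.layers.0.mlp.experts.gate_up_proj.weight3", is then reindexed to
    # "model.layers.0.mlp.experts.3.gate_up_proj.weight", and the fused tensor is finally
    # split into the corresponding gate_proj / up_proj weights.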
def save_hf_model_and_tokenizer(self, merged_state_dict):
if self.world_size == 1:
return super().save_hf_model_and_tokenizer(merged_state_dict)
from safetensors.torch import save_file
layer_num = self.hf_config.num_hidden_layers
# FIXME: make configurable
saves_per_layer = 1 if layer_num < 30 else 2
saves_total = saves_per_layer * layer_num
saves_indexes = {}
# calculate the layer start index and key chunks
layer_this_rank = self.pipeline_shards[self.rank]
pipeline_cumsum = np.cumsum(self.pipeline_shards)
layer_start = 0 if self.rank == 0 else pipeline_cumsum[self.rank - 1]
keys = list(merged_state_dict.keys())
keys_chunk = np.array_split(np.array(keys), layer_this_rank * saves_per_layer)
numel = 0
assert len(keys_chunk) == layer_this_rank * saves_per_layer, (
f"Expected {len(keys_chunk)} chunks, but got {layer_this_rank * saves_per_layer} for rank {self.rank}."
)
# save to model shards manually
target_dir = Path(self.config.target_dir)
for i, keys in enumerate(keys_chunk):
sd_to_save = {k: merged_state_dict[k] for k in keys}
numel += sum([sd_to_save[i].numel() for i in sd_to_save])
save_idx = layer_start * saves_per_layer + i
save_path = target_dir / f"model-{save_idx + 1:05d}-of-{saves_total:05d}.safetensors"
save_file(sd_to_save, save_path)
for k in keys:
saves_indexes[k] = str(save_path.name)
tensor = torch.tensor([numel]).to(get_device_name())
dist.all_reduce(tensor, op=dist.ReduceOp.SUM)
numel = tensor.cpu().item()
all_save_indexes = [{} for _ in range(self.world_size)]
dist.all_gather_object(all_save_indexes, saves_indexes)
saves_indexes = {k: v for i in all_save_indexes for k, v in i.items()}
if self.rank == 0:
with open(target_dir / "model.safetensors.index.json", "w") as f:
json.dump(
{
"metadata": {
"total_size": numel,
},
"weight_map": saves_indexes,
},
f,
indent=4,
)
print(f"model saved to {target_dir} with {numel=}")
self.model_config.save_pretrained(self.config.target_dir)
processor = hf_processor(self.hf_model_config_path, trust_remote_code=self.config.trust_remote_code)
tokenizer = hf_tokenizer(self.hf_model_config_path, trust_remote_code=self.config.trust_remote_code)
if processor is not None:
print(f"Saving processor to {self.config.target_dir}")
processor.save_pretrained(self.config.target_dir)
if tokenizer is not None:
print(f"Saving tokenizer to {self.config.target_dir}")
tokenizer.save_pretrained(self.config.target_dir)
def merge_and_save(self):
from verl.utils.megatron_utils import get_dist_checkpoint_path
model_ckpt_path = get_dist_checkpoint_path(self.config.local_dir)
model_state_dict = self._load_state_dicts(model_ckpt_path)
merged_state_dict = self._merge_state_dicts(model_state_dict)
del model_state_dict
if self.config.operation == "test":
if not self.config.test_hf_dir:
raise ValueError("test_hf_dir must be provided for test operation")
self._validate_state_dict(merged_state_dict)
elif self.config.operation == "merge":
self.save_hf_model_and_tokenizer(merged_state_dict)
if self.config.hf_upload:
self.upload_to_huggingface()
else:
raise ValueError(f"Unknown operation: {self.config.operation}")
def _validate_state_dict(self, state_dict: dict[str, torch.Tensor]):
"""
Compares the merged Megatron state_dict against a reference safetensors model.
Applies necessary name mappings from Megatron to Hugging Face conventions using _replace_name.
"""
ref_state_dict = load_file(Path(self.config.test_hf_dir) / "model.safetensors")
for name, loaded_weight in state_dict.items():
# name = self._replace_name(original_name, self.params_mapping)
            if not name or (name.endswith(".bias") and name not in ref_state_dict):
continue
if "rotary_emb.inv_freq" in name:
continue
if "lm_head.weight" in name:
if self.config.is_value_model or self.config.tie_word_embedding:
continue
if name not in ref_state_dict:
raise RuntimeError(f"key: {name} not exist in state_dict")
param = ref_state_dict[name]
assert loaded_weight.dtype == param.dtype
torch.testing.assert_close(loaded_weight.to("cpu"), param, atol=1e-2, rtol=5e-2)
def _replace_name(self, megatron_name: str, name_mapping: dict[str, str]) -> str:
for m_name, v_name in name_mapping.items():
if m_name not in megatron_name:
continue
megatron_name = megatron_name.replace("decoder", "model")
param_name = megatron_name.replace(m_name, v_name)
return param_name
return None # Return None if no mapping found
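    # Illustrative mapping (derived from params_mapping above):
    #   "decoder.layers.0.self_attention.linear_qkv.weight" -> "model.layers.0.self_attn.qkv_proj.weight"
    #   "decoder.final_layernorm.weight"                    -> "model.norm.weight"
    # The "decoder" prefix is rewritten to "model" before the per-module substitution is applied.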
def cleanup(self):
torch.distributed.destroy_process_group()
|
verl__models__llama__megatron__checkpoint_utils__llama_loader.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import torch
import torch.distributed as dist
from verl.utils.device import get_device_id, get_torch_device
def _megatron_calc_layer_map(config):
"""Calculate the mapping of global layer_idx to local layer_idx
Returns:
layer_map (Dict: int -> tuple(int, int, int)):
mapping from the global layer index to
a tuple of (pp_rank, virtual_pp_rank, layer_idx inside model)
"""
from megatron.core import mpu
print(f"get megatron data parallel size: {mpu.get_data_parallel_world_size()}")
pp_size = mpu.get_pipeline_model_parallel_world_size()
virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1
layer_map = dict()
num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size
assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers
for pp_rank_idx in range(pp_size):
for virtual_pp_rank_idx in range(virtual_pp_size):
layer_offset = (
virtual_pp_rank_idx * (config.num_hidden_layers // virtual_pp_size) + pp_rank_idx * num_layers_per_model
)
for layer_idx in range(num_layers_per_model):
layer_map[layer_offset + layer_idx] = (
pp_rank_idx,
virtual_pp_rank_idx,
layer_idx,
)
return layer_map
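# Illustrative mapping (assumed sizes): with num_hidden_layers=4, pp_size=2 and no virtual
# pipeline, the layer map is
#   {0: (0, 0, 0), 1: (0, 0, 1), 2: (1, 0, 0), 3: (1, 0, 1)}
# i.e. global layers 0-1 live on pp_rank 0 and layers 2-3 on pp_rank 1.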
def load_state_dict_to_megatron_llama(
state_dict, wrapped_models, config, params_dtype, is_value_model=False, tie_word_embeddings=False
):
"""Load merged state_dict to sharded Megatron module in training."""
from megatron.core import DistributedDataParallel as LocalDDP
from megatron.core import mpu
from megatron.core.transformer.module import Float16Module
from torch.nn.parallel import DistributedDataParallel as torchDDP
from verl.utils.logger import print_rank_0
from verl.utils.megatron_utils import unwrap_model
start_time = time.time()
def _get_gpt_model(model):
return model
    def fetch_params(module):
        # broadcast parameters from the data-parallel source rank so all DP replicas match
        for param in module.parameters():
            torch.distributed.broadcast(
                param.data, src=mpu.get_data_parallel_src_rank(), group=mpu.get_data_parallel_group()
            )
dp_rank = mpu.get_data_parallel_rank()
pp_rank = mpu.get_pipeline_model_parallel_rank()
pp_size = mpu.get_pipeline_model_parallel_world_size()
virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1
mp_group = mpu.get_model_parallel_group()
if torch.distributed.get_rank() == 0:
        assert mp_group.rank() == 0, f"mp_rank:[{mp_group.rank()}] != 0 on rank #0"
assert pp_rank == 0, f"pp_rank:[{pp_rank}] != 0 on rank #0"
assert dp_rank == 0, f"dp_rank:[{dp_rank}] != 0 on rank #0"
if not isinstance(wrapped_models, list | tuple):
        wrapped_models = [wrapped_models]
assert len(wrapped_models) == virtual_pp_size
num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size
assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers, (
f"num_layers_per_model: {num_layers_per_model} * pp_size: {pp_size} * virtual_pp_size "
f"{virtual_pp_size} != config.num_hidden_layers: {config.num_hidden_layers}"
)
models = [None] * len(wrapped_models)
for i, wrapped_model in enumerate(wrapped_models):
models[i] = unwrap_model(wrapped_model, (torchDDP, LocalDDP, Float16Module))
gpt_model_module = _get_gpt_model(models[i])
assert len(gpt_model_module.model.layers) == num_layers_per_model
def _fetch_tensor(tensor, name) -> torch.Tensor:
"""fetch tensor"""
nonlocal state_dict
if tensor is not None:
tensor.data.copy_(state_dict[name])
def _fetch_tp_shard_tensor_vocab(tensor, name, chunk_dim=0, mutate_func=None) -> torch.Tensor:
"""fetch tensor in tp shards"""
nonlocal state_dict
tp_rank = mpu.get_tensor_model_parallel_rank()
tp_size = mpu.get_tensor_model_parallel_world_size()
if name in state_dict:
full_weight = state_dict[name]
if mutate_func is not None:
full_weight = mutate_func(full_weight)
tensor_chunk = torch.chunk(full_weight, tp_size, dim=chunk_dim)
if tensor is not None:
tensor.data.copy_(tensor_chunk[tp_rank])
else:
print(f"tp_shard tensor:[{name}] not in state_dict, skip loading")
def _fetch_tp_shard_tensor(tensor, name, chunk_dim=0, mutate_func=None) -> torch.Tensor:
"""fetch tensor in tp shards"""
nonlocal state_dict
tp_rank = mpu.get_tensor_model_parallel_rank()
tp_size = mpu.get_tensor_model_parallel_world_size()
if name in state_dict:
full_weight = state_dict[name]
if mutate_func is not None:
full_weight = mutate_func(full_weight)
tensor_chunk = torch.chunk(full_weight, tp_size, dim=chunk_dim)
if tensor is not None:
tensor.data.copy_(tensor_chunk[tp_rank])
else:
print(f"tp_shard tensor:[{name}] not in state_dict, skip loading")
def _fetch_tp_shard_tensor_gate_up(tensor, gate_name, up_name) -> torch.Tensor:
"""fetch gate_up tensor in tp shards"""
nonlocal state_dict
nonlocal mp_group
tp_rank = mpu.get_tensor_model_parallel_rank()
tp_size = mpu.get_tensor_model_parallel_world_size()
if gate_name in state_dict and up_name in state_dict:
gate_weight = state_dict[gate_name]
up_weight = state_dict[up_name]
new_gate_up_weight = torch.empty(
config.intermediate_size * 2, config.hidden_size, dtype=params_dtype, device=get_device_id()
)
for i in range(tp_size):
intermediate_size_tp = config.intermediate_size // tp_size
gate_weight_tp = gate_weight[i * intermediate_size_tp : (i + 1) * intermediate_size_tp]
up_weight_tp = up_weight[i * intermediate_size_tp : (i + 1) * intermediate_size_tp]
new_gate_up_weight[intermediate_size_tp * 2 * i : intermediate_size_tp * 2 * (i + 1)].copy_(
torch.cat([gate_weight_tp, up_weight_tp], dim=0)
)
tensor_chunk = torch.chunk(new_gate_up_weight, tp_size, dim=0)
if tensor is not None:
tensor.data.copy_(tensor_chunk[tp_rank])
else:
print(f"tp_shard tensor:[{gate_name}, {up_name}] not in state_dict, skip loading")
def _fetch_tp_shard_tensor_qkv(tensor, q_name, k_name, v_name) -> torch.Tensor:
"""fetch tensor in tp shards across mp_group"""
nonlocal state_dict
nonlocal mp_group
tp_rank = mpu.get_tensor_model_parallel_rank()
tp_size = mpu.get_tensor_model_parallel_world_size()
assert q_name in state_dict and k_name in state_dict and v_name in state_dict
full_weight_q = state_dict[q_name]
full_weight_k = state_dict[k_name]
full_weight_v = state_dict[v_name]
hidden_size_per_head = config.hidden_size // config.num_attention_heads
if config.num_key_value_heads >= tp_size:
q_size_tp = config.hidden_size // tp_size
kv_size_tp = hidden_size_per_head * config.num_key_value_heads // tp_size
total_size = q_size_tp + 2 * kv_size_tp
new_weight_qkv = torch.empty(
total_size * tp_size, config.hidden_size, dtype=params_dtype, device=get_device_id()
)
for i in range(tp_size):
q_part = full_weight_q[i * q_size_tp : (i + 1) * q_size_tp]
k_part = full_weight_k[i * kv_size_tp : (i + 1) * kv_size_tp]
v_part = full_weight_v[i * kv_size_tp : (i + 1) * kv_size_tp]
new_weight_qkv[i * total_size : (i + 1) * total_size].copy_(torch.cat([q_part, k_part, v_part], dim=0))
else:
q_size_tp = config.hidden_size // tp_size
kv_size_tp = hidden_size_per_head
total_size = q_size_tp + 2 * kv_size_tp
new_weight_qkv = torch.empty(
total_size * tp_size, config.hidden_size, dtype=params_dtype, device=get_device_id()
)
for i in range(tp_size):
q_part = full_weight_q[i * q_size_tp : (i + 1) * q_size_tp]
start_idx = i * config.num_key_value_heads // tp_size * hidden_size_per_head
end_idx = (i * config.num_key_value_heads // tp_size + 1) * hidden_size_per_head
k_part = full_weight_k[start_idx:end_idx]
v_part = full_weight_v[start_idx:end_idx]
new_weight_qkv[i * total_size : (i + 1) * total_size].copy_(torch.cat([q_part, k_part, v_part], dim=0))
tensor_chunk = torch.chunk(new_weight_qkv, tp_size, dim=0)
if tensor is not None:
tensor.data.copy_(tensor_chunk[tp_rank])
# Embeddings
# -------------------
print_rank_0("loading embeddings...")
gpt_model_module = _get_gpt_model(models[0])
embed_tokens_weight = None
if pp_rank == 0:
embed_tokens_weight = gpt_model_module.model.embed_tokens.weight
_fetch_tp_shard_tensor_vocab(embed_tokens_weight, "model.embed_tokens.weight")
# Transformer layers
# -------------------
layer_map = _megatron_calc_layer_map(config)
pp_rank = mpu.get_pipeline_model_parallel_rank()
pp_size = mpu.get_pipeline_model_parallel_world_size()
num_layer_per_pp = config.num_hidden_layers // pp_size
vpp_size = mpu.get_virtual_pipeline_model_parallel_world_size()
layer_list = []
if vpp_size is not None:
for vpp_rank in range(vpp_size):
num_layer_vpp_chunk = num_layer_per_pp // vpp_size
num_layer_this_model = num_layer_vpp_chunk
offset = vpp_rank * (config.num_hidden_layers // mpu.get_virtual_pipeline_model_parallel_world_size()) + (
mpu.get_pipeline_model_parallel_rank() * num_layer_vpp_chunk
)
layer_list.extend(list(range(offset, offset + num_layer_this_model)))
else:
num_layer_this_model = num_layer_per_pp
offset = pp_rank * num_layer_per_pp
layer_list.extend(list(range(offset, offset + num_layer_this_model)))
for layer in layer_list:
print_rank_0(f"loading layer #{layer}...")
layer_name = f"model.layers.{layer}"
dst_pp_rank, dst_virtual_pp_rank, dst_layer_idx = layer_map[layer]
gpt_model_module = _get_gpt_model(models[dst_virtual_pp_rank])
sync_layer = gpt_model_module.model.layers[dst_layer_idx]
_fetch_tensor(
sync_layer.input_layernorm.weight if dst_pp_rank == pp_rank else None,
f"{layer_name}.input_layernorm.weight",
)
_fetch_tp_shard_tensor_qkv(
sync_layer.self_attn.qkv_proj.weight if dst_pp_rank == pp_rank else None,
f"{layer_name}.self_attn.q_proj.weight",
f"{layer_name}.self_attn.k_proj.weight",
f"{layer_name}.self_attn.v_proj.weight",
)
_fetch_tp_shard_tensor(
sync_layer.self_attn.o_proj.weight if dst_pp_rank == pp_rank else None,
f"{layer_name}.self_attn.o_proj.weight",
chunk_dim=1,
)
_fetch_tensor(
sync_layer.post_attention_layernorm.weight if dst_pp_rank == pp_rank else None,
f"{layer_name}.post_attention_layernorm.weight",
)
_fetch_tp_shard_tensor_gate_up(
sync_layer.mlp.gate_up_proj.weight if dst_pp_rank == pp_rank else None,
f"{layer_name}.mlp.gate_proj.weight",
f"{layer_name}.mlp.up_proj.weight",
)
_fetch_tp_shard_tensor(
sync_layer.mlp.down_proj.weight if dst_pp_rank == pp_rank else None,
f"{layer_name}.mlp.down_proj.weight",
chunk_dim=1,
)
# Final Layernorm
# -------------------
print_rank_0("loading final layernorm...")
gpt_model_module = _get_gpt_model(models[-1])
_fetch_tensor(
getattr(gpt_model_module.model.norm, "weight", None),
"model.norm.weight",
)
print_rank_0("loading lm_head...")
if pp_rank + 1 == pp_size:
lm_head_weight = gpt_model_module.lm_head.weight
if is_value_model:
if "lm_head.weight" in state_dict and state_dict["lm_head.weight"].shape[0] == 1:
_fetch_tensor(lm_head_weight, "lm_head.weight")
print_rank_0("load lm_head weight")
elif "reward_head.weight" in state_dict and state_dict["reward_head.weight"].shape[0] == 1:
_fetch_tensor(lm_head_weight, "reward_head.weight")
print_rank_0("load lm_head from value_head weight")
else:
_fetch_tensor(None, "lm_head.weight")
print_rank_0("fail to match lm_head in value_model")
else:
_fetch_tp_shard_tensor(lm_head_weight, "lm_head.weight")
dist.barrier()
get_torch_device().empty_cache()
print_rank_0(f"loading megatron ckpt done, time elapsed {time.time() - start_time}s")
|
verl__models__llama__megatron__checkpoint_utils__llama_saver.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import torch
import torch.distributed as dist
from megatron.core import mpu
from megatron.core.distributed import DistributedDataParallel as LocalDDP
from megatron.core.transformer.module import Float16Module
from torch.nn.parallel import DistributedDataParallel as torchDDP
from verl.utils.device import get_device_id, get_torch_device
from verl.utils.logger import print_rank_0
from verl.utils.megatron_utils import unwrap_model
def _megatron_calc_global_rank(tp_rank: int = 0, dp_rank: int = 0, pp_rank: int = 0):
"""given TP,DP,PP rank to get the global rank."""
tp_size = mpu.get_tensor_model_parallel_world_size()
dp_size = mpu.get_data_parallel_world_size()
pp_size = mpu.get_pipeline_model_parallel_world_size()
assert tp_size * dp_size * pp_size == torch.distributed.get_world_size(), (
f"{tp_size} x {dp_size} x {pp_size} != {torch.distributed.get_world_size()}"
)
# We only support TP-DP-PP grouping, for correctness when resharding
return (pp_rank * dp_size + dp_rank) * tp_size + tp_rank
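# Illustrative example (assumed 2x2x2 parallelism): with tp_size=2, dp_size=2 and pp_size=2,
# the TP-DP-PP grouping gives
#   _megatron_calc_global_rank(tp_rank=1, dp_rank=0, pp_rank=1) == (1 * 2 + 0) * 2 + 1 == 5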
def _megatron_calc_layer_map(config):
"""Calculate the mapping of global layer_idx to local layer_idx
Returns:
layer_map (Dict: int -> tuple(int, int, int)):
mapping from the global layer index to
a tuple of (pp_rank, virtual_pp_rank, layer_idx inside model)
"""
from megatron.core import mpu
pp_size = mpu.get_pipeline_model_parallel_world_size()
virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1
layer_map = dict()
num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size
assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers
for pp_rank_idx in range(pp_size):
for virtual_pp_rank_idx in range(virtual_pp_size):
layer_offset = (
virtual_pp_rank_idx * (config.num_hidden_layers // virtual_pp_size) + pp_rank_idx * num_layers_per_model
)
for layer_idx in range(num_layers_per_model):
layer_map[layer_offset + layer_idx] = (
pp_rank_idx,
virtual_pp_rank_idx,
layer_idx,
)
return layer_map
def merge_megatron_ckpt_llama(wrapped_models, config, dtype, is_value_model=False, tie_word_embeddings=False):
"""Merge sharded parameters of a Megatron module into a merged checkpoint.
Args:
wrapped_models (list of megatron.core.distributed.DistributedDataParallel):
The local DDP wrapped megatron modules.
config (str or None):
            HF config for the model
        dtype: dtype of the model parameters
        is_value_model: whether the model is a value model
        tie_word_embeddings: not used for llama; kept only to match the qwen2 interface
Returns:
state_dict (dict):
The merged state_dict in rank 0, and an empty dictionary in other ranks.
"""
start_time = time.time()
def _get_gpt_model(model):
return model
dp_rank = mpu.get_data_parallel_rank()
pp_size = mpu.get_pipeline_model_parallel_world_size()
pp_rank = mpu.get_pipeline_model_parallel_rank()
virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1
mp_group = mpu.get_model_parallel_group()
if dist.get_rank() == 0:
        assert mp_group.rank() == 0, f"mp_rank:[{mp_group.rank()}] != 0 on rank #0"
assert pp_rank == 0, f"pp_rank:[{pp_rank}] != 0 on rank #0"
assert dp_rank == 0, f"dp_rank:[{dp_rank}] != 0 on rank #0"
if not isinstance(wrapped_models, list | tuple):
        wrapped_models = [wrapped_models]
assert len(wrapped_models) == virtual_pp_size
num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size
assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers
models = [None] * len(wrapped_models)
for i, wrapped_model in enumerate(wrapped_models):
models[i] = unwrap_model(wrapped_model, (torchDDP, LocalDDP, Float16Module))
assert len(models[i].model.layers) == num_layers_per_model, (
"len model layers {} not equal to num_layers_per_model {}".format(
len(models[i].model.layers), num_layers_per_model
)
)
state_dict = dict()
def _get_cpu_tensor(tensor: torch.Tensor):
if tensor is None:
return None
if tensor.device == torch.device("cpu"):
return tensor.detach().clone()
return tensor.detach().cpu()
def _broadcast_tensor(tensor, name, src_pp_rank) -> torch.Tensor:
"""broadcast tensor across mp_group"""
nonlocal state_dict
nonlocal mp_group
src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=src_pp_rank)
if torch.distributed.get_rank() == src_rank:
if tensor is None:
weight = None
tensor_shape = None
else:
weight = tensor
tensor_shape = weight.shape
else:
weight = None
tensor_shape = None
obj_list = [tensor_shape]
dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group)
tensor_shape = obj_list[0]
if tensor_shape is None:
# all or none ranks in the mp_group should reach here
print_rank_0(f"tensor:[{name}] not exist, skip collect")
return
if weight is None:
weight = torch.empty(
tensor_shape,
dtype=dtype,
device=get_device_id(),
requires_grad=False,
)
dist.broadcast(weight, src=src_rank, group=mp_group)
if torch.distributed.get_rank() == 0:
state_dict[name] = _get_cpu_tensor(weight)
def _broadcast_tp_shard_tensor(tensor, name, src_pp_rank, concat_dim=0, mutate_func=None) -> torch.Tensor:
"""broadcast tensor in tp shards across mp_group"""
nonlocal state_dict
nonlocal mp_group
tp_size = mpu.get_tensor_model_parallel_world_size()
src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=src_pp_rank)
chunk_shape = tensor.shape if torch.distributed.get_rank() == src_rank else None
obj_list = [chunk_shape]
dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group)
chunk_shape = obj_list[0]
if chunk_shape is None:
# all or none ranks in the mp_group should reach here
print_rank_0(f"tp_shard tensor:[{name}] not exist, skip collecting")
return
buffer_tensor = torch.empty(
chunk_shape,
dtype=dtype,
device=get_device_id(),
requires_grad=False,
)
chunk_tensors = [None] * tp_size
for i in range(tp_size):
cur_src_rank = _megatron_calc_global_rank(tp_rank=i, dp_rank=0, pp_rank=src_pp_rank)
sync_tensor = tensor if torch.distributed.get_rank() == cur_src_rank else buffer_tensor
dist.broadcast(sync_tensor, src=cur_src_rank, group=mp_group)
if torch.distributed.get_rank() == 0:
chunk_tensors[i] = _get_cpu_tensor(sync_tensor)
if torch.distributed.get_rank() == 0:
full_tensor = torch.concat(chunk_tensors, dim=concat_dim)
if mutate_func is not None:
full_tensor = mutate_func(full_tensor)
state_dict[name] = full_tensor
def _broadcast_tp_shard_tensor_gate_up(tensor, gate_name, up_name, src_pp_rank) -> torch.Tensor:
"""broadcast tensor in tp shards across mp_group"""
nonlocal state_dict
nonlocal mp_group
tp_size = mpu.get_tensor_model_parallel_world_size()
src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=src_pp_rank)
chunk_shape = tensor.shape if torch.distributed.get_rank() == src_rank else None
obj_list = [chunk_shape]
dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group)
chunk_shape = obj_list[0]
if chunk_shape is None:
# all or none ranks in the mp_group should reach here
print_rank_0(f"tp_shard tensor:[{gate_name, up_name}] not exist, skip collecting")
return
buffer_tensor = torch.empty(
chunk_shape,
dtype=dtype,
device=get_device_id(),
requires_grad=False,
)
chunk_tensors = [None] * tp_size
for i in range(tp_size):
cur_src_rank = _megatron_calc_global_rank(tp_rank=i, dp_rank=0, pp_rank=src_pp_rank)
sync_tensor = tensor if torch.distributed.get_rank() == cur_src_rank else buffer_tensor
dist.broadcast(sync_tensor, src=cur_src_rank, group=mp_group)
if torch.distributed.get_rank() == 0:
chunk_tensors[i] = _get_cpu_tensor(sync_tensor)
if torch.distributed.get_rank() == 0:
full_tensor = torch.concat(chunk_tensors, dim=0)
intermediate_size_tp = config.intermediate_size // tp_size
gate_weight_list = []
up_weight_list = []
for i in range(tp_size):
gate_up_weight_tp = full_tensor[intermediate_size_tp * 2 * i : intermediate_size_tp * 2 * (i + 1)]
gate_weight_tp = gate_up_weight_tp[:intermediate_size_tp]
up_weight_tp = gate_up_weight_tp[intermediate_size_tp:]
gate_weight_list.append(gate_weight_tp)
up_weight_list.append(up_weight_tp)
state_dict[gate_name] = torch.cat(gate_weight_list, dim=0)
state_dict[up_name] = torch.cat(up_weight_list, dim=0)
def _broadcast_tp_shard_tensor_qkv(tensor, q_name, k_name, v_name, src_pp_rank):
"""broadcast tensor in tp shards across mp_group"""
nonlocal state_dict
nonlocal mp_group
tp_size = mpu.get_tensor_model_parallel_world_size()
src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=src_pp_rank)
chunk_shape = tensor.shape if torch.distributed.get_rank() == src_rank else None
obj_list = [chunk_shape]
dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group)
chunk_shape = obj_list[0]
if chunk_shape is None:
# all or none ranks in the mp_group should reach here
print_rank_0(f"tp_shard tensor:[{q_name}] not exist, skip collecting")
return
buffer_tensor = torch.empty(
chunk_shape,
dtype=dtype,
device=get_device_id(),
requires_grad=False,
)
chunk_tensors = [None] * tp_size
for i in range(tp_size):
cur_src_rank = _megatron_calc_global_rank(tp_rank=i, dp_rank=0, pp_rank=src_pp_rank)
sync_tensor = tensor if torch.distributed.get_rank() == cur_src_rank else buffer_tensor
dist.broadcast(sync_tensor, src=cur_src_rank, group=mp_group)
if torch.distributed.get_rank() == 0:
chunk_tensors[i] = _get_cpu_tensor(sync_tensor)
if torch.distributed.get_rank() == 0:
full_tensor = torch.concat(chunk_tensors, dim=0)
q_weight_list = []
k_weight_list = []
v_weight_list = []
hidden_size_per_head = config.hidden_size // config.num_attention_heads
if config.num_key_value_heads >= tp_size:
q_size_tp = config.hidden_size // tp_size
kv_size_tp = hidden_size_per_head * config.num_key_value_heads // tp_size
total_size = q_size_tp + 2 * kv_size_tp
for i in range(tp_size):
qkv_part = full_tensor[i * total_size : (i + 1) * total_size]
q_part = qkv_part[:q_size_tp]
k_part = qkv_part[q_size_tp : q_size_tp + kv_size_tp]
v_part = qkv_part[q_size_tp + kv_size_tp : total_size]
q_weight_list.append(q_part)
k_weight_list.append(k_part)
v_weight_list.append(v_part)
else:
q_size_tp = config.hidden_size // tp_size
kv_size_tp = hidden_size_per_head
total_size = q_size_tp + 2 * kv_size_tp
for i in range(tp_size):
qkv_part = full_tensor[i * total_size : (i + 1) * total_size]
q_part = qkv_part[:q_size_tp]
k_part = qkv_part[q_size_tp : q_size_tp + kv_size_tp]
v_part = qkv_part[q_size_tp + kv_size_tp : total_size]
q_weight_list.append(q_part)
if i * config.num_key_value_heads % tp_size == 0:
k_weight_list.append(k_part)
v_weight_list.append(v_part)
state_dict[q_name] = torch.cat(q_weight_list, dim=0)
state_dict[k_name] = torch.cat(k_weight_list, dim=0)
state_dict[v_name] = torch.cat(v_weight_list, dim=0)
# empty cache before collecting weights
get_torch_device().empty_cache()
# Embeddings
# -------------------
if dp_rank == 0:
# Embeddings
# -------------------
print_rank_0("collecting embeddings...")
gpt_model_module = _get_gpt_model(models[0])
_broadcast_tp_shard_tensor(
gpt_model_module.model.embed_tokens.weight if pp_rank == 0 else None,
"model.embed_tokens.weight",
src_pp_rank=0,
)
# Transformer layers
# -------------------
layer_map = _megatron_calc_layer_map(config)
for layer in range(config.num_hidden_layers):
print_rank_0(f"collecting layer #{layer}...")
layer_name = f"model.layers.{layer}"
src_pp_rank, src_virtual_pp_rank, src_layer_idx = layer_map[layer]
gpt_model_module = _get_gpt_model(models[src_virtual_pp_rank])
sync_layer = gpt_model_module.model.layers[src_layer_idx]
_broadcast_tensor(
sync_layer.input_layernorm.weight,
f"{layer_name}.input_layernorm.weight",
src_pp_rank=src_pp_rank,
)
_broadcast_tp_shard_tensor_qkv(
sync_layer.self_attn.qkv_proj.weight,
f"{layer_name}.self_attn.q_proj.weight",
f"{layer_name}.self_attn.k_proj.weight",
f"{layer_name}.self_attn.v_proj.weight",
src_pp_rank=src_pp_rank,
)
_broadcast_tp_shard_tensor(
sync_layer.self_attn.o_proj.weight,
f"{layer_name}.self_attn.o_proj.weight",
concat_dim=1,
src_pp_rank=src_pp_rank,
)
_broadcast_tensor(
sync_layer.post_attention_layernorm.weight,
f"{layer_name}.post_attention_layernorm.weight",
src_pp_rank=src_pp_rank,
)
_broadcast_tp_shard_tensor_gate_up(
sync_layer.mlp.gate_up_proj.weight,
f"{layer_name}.mlp.gate_proj.weight",
f"{layer_name}.mlp.up_proj.weight",
src_pp_rank=src_pp_rank,
)
_broadcast_tp_shard_tensor(
sync_layer.mlp.down_proj.weight,
f"{layer_name}.mlp.down_proj.weight",
concat_dim=1,
src_pp_rank=src_pp_rank,
)
# Final Layernorm
# -------------------
print_rank_0("collecting final layernorm...")
gpt_model_module = _get_gpt_model(models[-1])
_broadcast_tensor(
getattr(gpt_model_module.model.norm, "weight", None),
"model.norm.weight",
src_pp_rank=pp_size - 1,
)
print_rank_0("collecting lm_head...")
if is_value_model:
if pp_rank == pp_size - 1:
print(f"gpt_model_module.lm_head.weight: {gpt_model_module.lm_head.weight.shape}")
_broadcast_tensor(
gpt_model_module.lm_head.weight if pp_rank == pp_size - 1 else None,
"lm_head.weight",
src_pp_rank=pp_size - 1,
)
_broadcast_tensor(
gpt_model_module.reward_head.weight
if pp_rank == pp_size - 1 and getattr(gpt_model_module, "reward_weight", None) is not None
else None,
"reward_head.weight",
src_pp_rank=pp_size - 1,
)
else:
_broadcast_tp_shard_tensor(
getattr(gpt_model_module.lm_head, "weight", None) if pp_rank == pp_size - 1 else None,
"lm_head.weight",
src_pp_rank=pp_size - 1,
)
dist.barrier()
get_torch_device().empty_cache()
if torch.distributed.get_rank() == 0:
if dtype not in [torch.float16, torch.bfloat16, torch.float32]:
            print(f"Unknown/unsupported dtype to save: {dtype}")
exit(1)
for k, v in state_dict.items():
if dtype != v.dtype:
state_dict[k] = v.to(dtype)
print_rank_0(f"merge megatron ckpt done, time elapsed {time.time() - start_time}s")
return state_dict
|
verl__models__llama__megatron__layers__parallel_decoder.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
from megatron.core import ModelParallelConfig
from torch import nn
from transformers import LlamaConfig
from verl.utils.megatron_utils import TransformerConfig, convert_config
from .parallel_attention import ParallelLlamaAttention, ParallelLlamaAttentionRmPad
from .parallel_mlp import ParallelLlamaMLP
from .parallel_rmsnorm import ParallelLlamaRMSNorm
class ParallelLlamaDecoderLayer(nn.Module):
def __init__(self, config: LlamaConfig, megatron_config: ModelParallelConfig, layer_idx: int):
super().__init__()
self.config: TransformerConfig = convert_config(config, megatron_config)
self.layer_idx = layer_idx
self.hidden_size = config.hidden_size
self.self_attn = ParallelLlamaAttention(config=config, megatron_config=megatron_config)
self.mlp = ParallelLlamaMLP(config, megatron_config=megatron_config)
self.input_layernorm = ParallelLlamaRMSNorm(config, megatron_config)
self.post_attention_layernorm = ParallelLlamaRMSNorm(config, megatron_config)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
"""
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Note: sequence parallel is hidden inside ColumnParallelLinear
# reduce scatter is hidden inside RowParallelLinear
# Self Attention
hidden_states = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
)
# TODO: add sequence parallel operator reduce_scatter here
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
# TODO: add sequence parallel operator all_gather here
hidden_states = self.mlp(hidden_states)
# TODO: add sequence parallel operator reduce_scatter here
hidden_states = residual + hidden_states
outputs = hidden_states
return outputs
class ParallelLlamaDecoderLayerRmPad(nn.Module):
def __init__(self, config: LlamaConfig, megatron_config: ModelParallelConfig, layer_idx: int):
super().__init__()
self.config: TransformerConfig = convert_config(config, megatron_config)
self.layer_idx = layer_idx
self.hidden_size = config.hidden_size
self.self_attn = ParallelLlamaAttentionRmPad(config=config, megatron_config=megatron_config)
self.mlp = ParallelLlamaMLP(config, megatron_config=megatron_config)
self.input_layernorm = ParallelLlamaRMSNorm(config, megatron_config)
self.post_attention_layernorm = ParallelLlamaRMSNorm(config, megatron_config)
def forward(
self,
hidden_states: torch.Tensor,
position_ids: Optional[torch.LongTensor] = None,
sequence_length: int = None,
indices: torch.Tensor = None,
cu_seqlens: int = None,
max_seqlen_in_batch: int = None,
) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
residual = hidden_states # (total_nnz // sp, 1, hidden_size)
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
# (total_nnz // sp, 1, hidden_size) -> all-gather (total_nnz, 1, hidden_size)
# -> col + row -> reduce-scatter -> (total_nnz // sp, 1, hidden_size)
hidden_states = self.self_attn(
hidden_states=hidden_states,
position_ids=position_ids,
sequence_length=sequence_length,
indices=indices,
cu_seqlens=cu_seqlens,
max_seqlen_in_batch=max_seqlen_in_batch,
)
hidden_states = residual + hidden_states
# Fully Connected
# shape changes same as attn
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
outputs = hidden_states
return outputs
|
verl__models__llama__megatron__layers__parallel_linear.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/blob/main/vllm/model_executor/layers/linear.py
import torch
from megatron.core import tensor_parallel
class QKVParallelLinear(tensor_parallel.ColumnParallelLinear):
def __init__(
self,
input_size,
num_heads,
num_key_value_heads,
head_dim,
*,
bias=True,
gather_output=True,
skip_bias_add=False,
**kwargs,
):
        # Keep the input parameters; the fused q/k/v output size is derived from the head counts below
self.input_size = input_size
self.q_output_size = num_heads * head_dim
self.kv_output_size = num_key_value_heads * head_dim
self.head_dim = head_dim
self.gather_output = gather_output
self.skip_bias_add = skip_bias_add
input_size = self.input_size
output_size = (num_heads + 2 * num_key_value_heads) * self.head_dim
super().__init__(
input_size=input_size,
output_size=output_size,
bias=bias,
gather_output=gather_output,
skip_bias_add=skip_bias_add,
**kwargs,
)
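# Illustrative sketch (added for clarity; the sizes 32/8/128 and tp=4 are hypothetical examples, not values
# from this repo): how the fused q/k/v output size of QKVParallelLinear is derived and how it splits per
# tensor-parallel rank under grouped-query attention.
def _qkv_split_sizes_sketch(num_heads: int = 32, num_key_value_heads: int = 8, head_dim: int = 128, tp: int = 4):
    """Return (fused_output_size, per_rank_q_size, per_rank_kv_size) for the fused QKV projection."""
    fused_output_size = (num_heads + 2 * num_key_value_heads) * head_dim  # same formula as QKVParallelLinear
    per_rank_q_size = num_heads * head_dim // tp
    per_rank_kv_size = num_key_value_heads * head_dim // tp
    # each tensor-parallel rank owns an equal slice of the fused projection: q shard + k shard + v shard
    assert fused_output_size // tp == per_rank_q_size + 2 * per_rank_kv_size
    return fused_output_size, per_rank_q_size, per_rank_kv_size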
class MergedColumnParallelLinear(tensor_parallel.ColumnParallelLinear):
def __init__(
self,
input_size,
gate_ouput_size,
up_output_size,
*,
bias=True,
gather_output=True,
skip_bias_add=False,
**kwargs,
):
        # Keep the input parameters; the gate and up projections are fused into a single output
self.input_size = input_size
self.output_size = gate_ouput_size + up_output_size
self.gather_output = gather_output
self.skip_bias_add = skip_bias_add
super().__init__(
input_size=self.input_size,
output_size=self.output_size,
bias=bias,
gather_output=gather_output,
skip_bias_add=skip_bias_add,
**kwargs,
)
class LinearForLastLayer(torch.nn.Linear):
def __init__(
self,
input_size,
output_size,
*,
config,
bias=True,
):
super().__init__(in_features=input_size, out_features=output_size, bias=bias)
self.sequence_parallel = config.sequence_parallel
if self.sequence_parallel:
self.weight.sequence_parallel = True
def forward(
self,
input_,
weight=None,
runtime_gather_output=None,
):
logits = super().forward(input_)
logits = logits.float()
if self.sequence_parallel:
logits = tensor_parallel.gather_from_sequence_parallel_region(logits, tensor_parallel_output_grad=False)
return logits, None
|
verl__models__llama__megatron__layers__parallel_mlp.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from megatron.core import ModelParallelConfig, tensor_parallel
from megatron.core import parallel_state as mpu
from torch import nn
from transformers.activations import ACT2FN
from verl.models.llama.megatron.layers.parallel_linear import MergedColumnParallelLinear
from verl.utils.megatron import tensor_parallel as tp_utils
class ParallelLlamaMLP(nn.Module):
def __init__(self, config, megatron_config: ModelParallelConfig = None) -> None:
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size
# The weight is only [hidden_size, intermediate_size // model_parallel_world_size]
column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear()
row_kwargs = tp_utils.get_default_kwargs_for_row_parallel_linear()
if megatron_config is not None:
assert column_kwargs.get("config", False), "must have ModelParallelConfig"
assert row_kwargs.get("config", False), "must have ModelParallelConfig"
tp_utils.update_kwargs_with_config(row_kwargs, megatron_config)
tp_utils.update_kwargs_with_config(column_kwargs, megatron_config)
tp_size = mpu.get_tensor_model_parallel_world_size()
self.gate_up_proj = MergedColumnParallelLinear(
input_size=self.hidden_size,
gate_ouput_size=self.intermediate_size,
up_output_size=self.intermediate_size,
bias=False,
gather_output=False,
skip_bias_add=False,
**column_kwargs,
)
self.gate_size = self.intermediate_size // tp_size
self.down_proj = tensor_parallel.RowParallelLinear(
input_size=self.intermediate_size,
output_size=self.hidden_size,
bias=False,
input_is_parallel=True,
skip_bias_add=False,
**row_kwargs,
)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
gate_up = self.gate_up_proj(x)[0]
gate, up = gate_up.split(self.gate_size, dim=-1)
return self.down_proj(self.act_fn(gate) * up)[0]
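# Illustrative single-device reference (an added sketch, not used by ParallelLlamaMLP): the same SwiGLU
# computation, down(silu(gate(x)) * up(x)), without tensor parallelism. As in the module above, the gate
# and up projections are fused into one linear layer and split along the last dimension.
class _SwiGLUReference(nn.Module):
    def __init__(self, hidden_size: int, intermediate_size: int):
        super().__init__()
        self.intermediate_size = intermediate_size
        self.gate_up = nn.Linear(hidden_size, 2 * intermediate_size, bias=False)
        self.down = nn.Linear(intermediate_size, hidden_size, bias=False)
    def forward(self, x):
        gate, up = self.gate_up(x).split(self.intermediate_size, dim=-1)
        return self.down(nn.functional.silu(gate) * up)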
|
verl__models__llama__megatron__layers__parallel_rmsnorm.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numbers
import torch
from megatron.core import ModelParallelConfig
from torch import nn
from transformers import LlamaConfig
from verl.utils.megatron import sequence_parallel as sp_utils
class ParallelLlamaRMSNorm(nn.Module):
def __init__(self, config: LlamaConfig, megatron_config: ModelParallelConfig):
"""
LlamaRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
if isinstance(config.hidden_size, numbers.Integral):
normalized_shape = (config.hidden_size,)
self.normalized_shape = torch.Size(normalized_shape)
self.weight = nn.Parameter(torch.ones(self.normalized_shape))
self.variance_epsilon = config.rms_norm_eps
if megatron_config.sequence_parallel:
sp_utils.mark_parameter_as_sequence_parallel(self.weight)
def forward(self, hidden_states):
from apex.normalization.fused_layer_norm import fused_rms_norm_affine
return fused_rms_norm_affine(
input=hidden_states,
weight=self.weight,
normalized_shape=self.normalized_shape,
eps=self.variance_epsilon,
memory_efficient=True,
)
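# Reference sketch (added for clarity; not called by the module above): what the fused Apex kernel computes.
# RMSNorm scales by the reciprocal root-mean-square of the last dimension and applies a learned weight,
# with no mean subtraction and no bias.
def _rms_norm_reference(hidden_states: torch.Tensor, weight: torch.Tensor, eps: float) -> torch.Tensor:
    variance = hidden_states.float().pow(2).mean(-1, keepdim=True)
    normed = hidden_states.float() * torch.rsqrt(variance + eps)
    return (weight * normed).to(hidden_states.dtype)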
|
verl__models__llama__megatron__modeling_llama_megatron.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch LLaMA model with Megatron-style acceleration."""
from typing import Optional
import torch
import torch.utils.checkpoint
from megatron.core import ModelParallelConfig, mpu, tensor_parallel
from torch import nn
from transformers.modeling_outputs import BaseModelOutputWithPast
from transformers.models.llama.configuration_llama import LlamaConfig
from transformers.models.llama.modeling_llama import CausalLMOutputWithPast
from verl.utils.megatron import sequence_parallel as sp_utils
from verl.utils.megatron import tensor_parallel as tp_utils
from verl.utils.megatron_utils import TransformerConfig, convert_config
from .layers import ParallelLlamaDecoderLayer, ParallelLlamaDecoderLayerRmPad, ParallelLlamaRMSNorm
"""
TODO:
1. Add weight initialization. Here we need to be careful about TP weight init.
2. Add sequence parallel
3. Load checkpoint from the Meta Llama pretrained checkpoint
"""
# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device):
"""
Make causal mask used for bi-directional self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
mask_cond = torch.arange(mask.size(-1), device=device)
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len)
# Copied from transformers.models.bart.modeling_bart._expand_mask
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
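# Small usage sketch (added for clarity; the toy batch below is hypothetical): how the two helpers above
# combine into the additive mask built by _prepare_decoder_attention_mask. Allowed positions stay at 0,
# masked positions (future tokens and padding) end up at a very large negative value.
def _combined_mask_example() -> torch.Tensor:
    attention_mask = torch.tensor([[1, 1, 1, 1], [1, 1, 1, 0]])  # second sequence has one padding token
    dtype, device = torch.float32, attention_mask.device
    causal = _make_causal_mask((2, 4), dtype, device)  # (2, 1, 4, 4)
    padding = _expand_mask(attention_mask, dtype, tgt_len=4)  # (2, 1, 4, 4)
    return causal + padding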
class ParallelLlamaModel(nn.Module):
"""
Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`LlamaDecoderLayer`]
Args:
config: LlamaConfig
"""
def __init__(self, config: LlamaConfig, megatron_config: ModelParallelConfig):
super().__init__()
self.config: TransformerConfig = convert_config(config, megatron_config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
embedding_kwargs = tp_utils.get_default_kwargs_for_parallel_embedding()
        self.megatron_config = megatron_config
        if megatron_config is not None:
            assert embedding_kwargs.get("config", False), "must have ModelParallelConfig"
            tp_utils.update_kwargs_with_config(embedding_kwargs, self.megatron_config)
self.embed_tokens = tensor_parallel.VocabParallelEmbedding(
num_embeddings=config.vocab_size, embedding_dim=config.hidden_size, **embedding_kwargs
)
        self.layers = nn.ModuleList(
            [
                ParallelLlamaDecoderLayer(config, megatron_config, layer_idx=i)
                for i in range(config.num_hidden_layers)
            ]
        )
self.norm = ParallelLlamaRMSNorm(config, megatron_config)
# Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds):
# create causal mask
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
combined_attention_mask = None
if input_shape[-1] > 1:
combined_attention_mask = _make_causal_mask(
input_shape,
inputs_embeds.dtype,
device=inputs_embeds.device,
)
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
inputs_embeds.device
)
combined_attention_mask = (
expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
)
return combined_attention_mask
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
) -> tuple | BaseModelOutputWithPast:
"""
Args:
input_ids: input ids. shape (batch_size, seq_length)
attention_mask: attention_mask. shape (batch_size, seq_length)
position_ids: position ids. shape (batch_size, seq_length)
Returns:
"""
batch_size, seq_length = input_ids.shape
inputs_embeds = self.embed_tokens(input_ids)
# embed positions
attention_mask = self._prepare_decoder_attention_mask(attention_mask, (batch_size, seq_length), inputs_embeds)
hidden_states = inputs_embeds
for idx, decoder_layer in enumerate(self.layers):
layer_outputs = decoder_layer(
hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
)
hidden_states = layer_outputs
hidden_states = self.norm(hidden_states)
return hidden_states
class ParallelLlamaForCausalLM(nn.Module):
def __init__(self, config: LlamaConfig, megatron_config: ModelParallelConfig):
super().__init__()
self.config: TransformerConfig = convert_config(config, megatron_config)
self.model = ParallelLlamaModel(config, megatron_config=megatron_config)
self.vocab_size = config.vocab_size
        self.megatron_config = megatron_config
        column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear()
        if megatron_config is not None:
            assert column_kwargs.get("config", False), "must have ModelParallelConfig"
            tp_utils.update_kwargs_with_config(column_kwargs, self.megatron_config)
self.lm_head = tensor_parallel.ColumnParallelLinear(
input_size=config.hidden_size,
output_size=config.vocab_size,
bias=False,
gather_output=False,
skip_bias_add=False,
**column_kwargs,
)
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
) -> tuple | CausalLMOutputWithPast:
r"""
Args:
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Returns:
```"""
        # run the decoder; the output is the final hidden states
outputs = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
)
hidden_states = outputs
logits = self.lm_head(hidden_states)[0]
logits = tensor_parallel.gather_from_tensor_model_parallel_region(logits)
logits = logits.float()
return CausalLMOutputWithPast(
loss=None,
logits=logits,
past_key_values=None,
hidden_states=None,
attentions=None,
)
from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa: F401, E402
class ParallelLlamaModelRmPad(nn.Module):
"""
Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`LlamaDecoderLayer`]
Args:
config: LlamaConfig
"""
def __init__(self, config: LlamaConfig, megatron_config: ModelParallelConfig):
super().__init__()
self.config: TransformerConfig = convert_config(config, megatron_config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
embedding_kwargs = tp_utils.get_default_kwargs_for_parallel_embedding()
self.megatron_config = megatron_config
if megatron_config is not None:
assert embedding_kwargs.get("config", False), "must have ModelParallelConfig"
tp_utils.update_kwargs_with_config(embedding_kwargs, self.megatron_config)
self.embed_tokens = tensor_parallel.VocabParallelEmbedding(
num_embeddings=config.vocab_size, embedding_dim=config.hidden_size, **embedding_kwargs
)
        self.layers = nn.ModuleList(
            [
                ParallelLlamaDecoderLayerRmPad(config, megatron_config, layer_idx=i)
                for i in range(config.num_hidden_layers)
            ]
        )
self.norm = ParallelLlamaRMSNorm(config, megatron_config)
def forward(
self,
input_ids: torch.Tensor,
position_ids: Optional[torch.LongTensor] = None,
sequence_length: int = None,
indices: torch.Tensor = None,
cu_seqlens: int = None,
max_seqlen_in_batch: int = None,
) -> tuple | BaseModelOutputWithPast:
"""
Args:
            input_ids: input ids. shape (1, total_nnz)
position_ids: position ids. shape (batch_size, seq_length)
Returns:
"""
inputs_embeds = self.embed_tokens(input_ids) # (1, total_nnz) -> (1, total_nnz, hidden_size)
# (1, total_nnz, hidden_size) -> (total_nnz, 1, hidden_size) -> (total_nnz // sp, 1, hidden_size)
inputs_embeds = inputs_embeds.transpose(0, 1)
if self.megatron_config.sequence_parallel:
inputs_embeds = tensor_parallel.scatter_to_sequence_parallel_region(inputs_embeds)
hidden_states = inputs_embeds
for idx, decoder_layer in enumerate(self.layers):
layer_outputs = decoder_layer(
hidden_states,
position_ids=position_ids,
sequence_length=sequence_length,
indices=indices,
cu_seqlens=cu_seqlens,
max_seqlen_in_batch=max_seqlen_in_batch,
)
hidden_states = layer_outputs
hidden_states = self.norm(hidden_states)
return hidden_states
class ParallelLlamaForCausalLMRmPad(nn.Module):
def __init__(self, config: LlamaConfig, megatron_config: ModelParallelConfig):
super().__init__()
self.config: TransformerConfig = convert_config(config, megatron_config)
self.megatron_config = megatron_config
self.model = ParallelLlamaModelRmPad(config, megatron_config=megatron_config)
self.vocab_size = config.vocab_size
self._init_head(config)
def _init_head(self, config):
column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear()
if self.megatron_config is not None:
assert column_kwargs.get("config", False), "must have ModelParallelConfig"
tp_utils.update_kwargs_with_config(column_kwargs, self.megatron_config)
self.lm_head = tensor_parallel.ColumnParallelLinear(
input_size=config.hidden_size,
output_size=config.vocab_size,
bias=False,
gather_output=False,
skip_bias_add=False,
**column_kwargs,
)
def _forward_head(self, hidden_states):
# all_gather from sequence parallel region is performed inside lm_head
logits = self.lm_head(hidden_states)[0]
logits = logits.float() # (total_nnz_padded, 1, vocab_size // tp)
logits = tensor_parallel.gather_from_tensor_model_parallel_region(logits) # (total_nnz_padded, 1, vocab_size)
return logits
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
) -> tuple | CausalLMOutputWithPast:
r"""
Args:
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Returns:
```"""
batch_size, sequence_length = input_ids.shape
# remove padding here
input_ids, indices, cu_seqlens, max_seqlen_in_batch, *_ = unpad_input(
input_ids.unsqueeze(dim=-1), attention_mask
) # (total_nnz, 1)
# pad input_ids to multiple of tp for all tp ranks
        # TODO: for better performance, the sp padding could be removed at each layer; the performance gap has not been measured
if self.megatron_config.sequence_parallel:
input_ids = sp_utils.pad_to_sequence_parallel(input_ids)
input_ids = input_ids.transpose(0, 1) # (1, total_nnz+pad)
outputs = self.model(
input_ids=input_ids,
position_ids=position_ids,
sequence_length=sequence_length,
indices=indices,
cu_seqlens=cu_seqlens,
max_seqlen_in_batch=max_seqlen_in_batch,
)
hidden_states = outputs
logits = self._forward_head(hidden_states)
# remove padding from sequence parallel
if self.megatron_config.sequence_parallel:
totol_nnz = cu_seqlens[-1]
logits = logits[:totol_nnz] # (total_nnz_padded)
logits = torch.squeeze(logits, dim=1) # remove the artificial batch dimension
# add removed padding back
logits = pad_input(
logits, indices, batch_size, seqlen=sequence_length
) # (batch_size, sequence_length, vocab_size)
return CausalLMOutputWithPast(
loss=None,
logits=logits,
past_key_values=None,
hidden_states=None,
attentions=None,
)
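# Illustrative sketch of the remove-padding round trip used in the forward above (added for clarity; plain
# torch only, no flash_attn; all names here are local example names). unpad_input packs only the non-padding
# tokens into a (total_nnz, ...) tensor plus their flat indices, and pad_input scatters the per-token
# outputs back into a dense (batch_size, seqlen, ...) tensor.
def _rmpad_roundtrip_sketch(input_ids: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    batch_size, seqlen = input_ids.shape
    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
    flat_ids = input_ids.flatten()[indices]  # (total_nnz,) -- padding removed
    token_outputs = flat_ids.float().unsqueeze(-1)  # stand-in for per-token model outputs
    padded = torch.zeros(batch_size * seqlen, token_outputs.shape[-1], dtype=token_outputs.dtype)
    padded[indices] = token_outputs  # scatter back; padding slots stay zero
    return padded.view(batch_size, seqlen, -1)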
class ParallelLlamaForValueRmPad(ParallelLlamaForCausalLMRmPad):
def _init_head(self, config):
column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear()
if self.megatron_config is not None:
assert column_kwargs.get("config", False), "must have ModelParallelConfig"
tp_utils.update_kwargs_with_config(column_kwargs, self.megatron_config)
self.lm_head = nn.Linear(in_features=config.hidden_size, out_features=1, bias=False)
        # the value head consumes sequence-parallel activations, so mark its weight as sequence parallel
        # so that its gradient is all-reduced across TP ranks
sp_utils.mark_parameter_as_sequence_parallel(self.lm_head.weight)
def _forward_head(self, hidden_states):
logits = self.lm_head(hidden_states) # (total_nnz_padded // tp, 1, 1)
logits = logits.float()
if self.megatron_config.sequence_parallel:
logits = tensor_parallel.gather_from_sequence_parallel_region(logits, tensor_parallel_output_grad=False)
return logits
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
) -> tuple | CausalLMOutputWithPast:
output = super().forward(input_ids, attention_mask, position_ids)
output.logits = torch.squeeze(output.logits, dim=-1)
return output
"""
Support pipeline parallelism
"""
class ParallelLlamaModelRmPadPP(nn.Module):
"""
Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`LlamaDecoderLayer`]
This model definition supports pipeline parallelism. To support pp and vpp,
- This model only contains layer in this pp stage and vpp chunk
- When calling get_model in Megatron, this rank will instantiate all the vpp chunks in this pp.
Args:
config: LlamaConfig
"""
def __init__(self, config: LlamaConfig, megatron_config: ModelParallelConfig, pre_process, post_process):
super().__init__()
self.config: TransformerConfig = convert_config(config, megatron_config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.pre_process = pre_process
self.post_process = post_process
self.megatron_config = megatron_config
embedding_kwargs = tp_utils.get_default_kwargs_for_parallel_embedding()
if megatron_config is not None:
assert embedding_kwargs.get("config", False), "must have ModelParallelConfig"
tp_utils.update_kwargs_with_config(embedding_kwargs, self.megatron_config)
if pre_process:
self.embed_tokens = tensor_parallel.VocabParallelEmbedding(
num_embeddings=config.vocab_size, embedding_dim=config.hidden_size, **embedding_kwargs
)
else:
self.embed_tokens = None
pp_rank = mpu.get_pipeline_model_parallel_rank()
pp_size = megatron_config.pipeline_model_parallel_size
self.num_layer_per_pp = config.num_hidden_layers // pp_size
vpp_size = megatron_config.virtual_pipeline_model_parallel_size
vpp_rank = mpu.get_virtual_pipeline_model_parallel_rank()
if vpp_size is not None:
self.layers = nn.ModuleList()
self.num_layer_vpp_chunk = self.num_layer_per_pp // vpp_size
self.num_layer_this_model = self.num_layer_vpp_chunk
offset = vpp_rank * (config.num_hidden_layers // vpp_size) + (pp_rank * self.num_layer_vpp_chunk)
else:
self.num_layer_this_model = self.num_layer_per_pp
offset = pp_rank * self.num_layer_per_pp
self.layers = nn.ModuleList()
for i in range(self.num_layer_this_model):
layer = ParallelLlamaDecoderLayerRmPad(config, megatron_config, layer_idx=offset + i)
self.layers.add_module(f"{i}", layer)
if post_process:
self.norm = ParallelLlamaRMSNorm(config, megatron_config)
else:
self.norm = None
def set_input_tensor(self, input_tensor):
"""Set input tensor to be used instead of forward()'s input.
When doing pipeline parallelism the input from the previous
stage comes from communication, not from the input, so the
model's forward_step_func won't have it. This function is thus
used by internal code to bypass the input provided by the
forward_step_func"""
self.input_tensor = input_tensor
def forward(
self,
input_ids: torch.Tensor,
position_ids: Optional[torch.LongTensor] = None,
sequence_length: int = None,
indices: torch.Tensor = None,
cu_seqlens: int = None,
max_seqlen_in_batch: int = None,
) -> tuple | BaseModelOutputWithPast:
"""
Args:
            input_ids: input ids. shape (1, total_nnz)
position_ids: position ids. shape (batch_size, seq_length)
Returns:
"""
if self.pre_process:
inputs_embeds = self.embed_tokens(input_ids) # (1, total_nnz) -> (1, total_nnz, hidden_size)
                # vocab parallel embedding will not do sequence parallel reduce-scatter in open source megatron
                # so we handle it explicitly here:
# (1, total_nnz, hidden_size) -> (total_nnz, 1, hidden_size) -> (total_nnz // sp, 1, hidden_size)
inputs_embeds = inputs_embeds.transpose(0, 1)
if self.megatron_config.sequence_parallel:
inputs_embeds = tensor_parallel.scatter_to_sequence_parallel_region(inputs_embeds)
hidden_states = inputs_embeds
else:
# self.hidden_states should be passed by Megatron
hidden_states = self.input_tensor
for idx, decoder_layer in enumerate(self.layers):
layer_outputs = decoder_layer(
hidden_states,
position_ids=position_ids,
sequence_length=sequence_length,
indices=indices,
cu_seqlens=cu_seqlens,
max_seqlen_in_batch=max_seqlen_in_batch,
)
hidden_states = layer_outputs
if self.post_process:
hidden_states = self.norm(hidden_states)
return hidden_states
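# Illustrative sketch of the layer-offset arithmetic in ParallelLlamaModelRmPadPP.__init__ (added for
# clarity; 32 layers, pp=4, vpp=2 are hypothetical example sizes and no parallel state is consulted).
# With virtual pipeline parallelism each pp rank owns several non-contiguous chunks of layers; without it,
# each pp rank owns one contiguous slice.
def _pp_layer_offset_sketch(num_hidden_layers: int = 32, pp_size: int = 4, vpp_size: int = 2):
    """Return {(pp_rank, vpp_rank): [global layer indices]} for the interleaved schedule."""
    num_layer_per_pp = num_hidden_layers // pp_size
    num_layer_vpp_chunk = num_layer_per_pp // vpp_size
    mapping = {}
    for pp_rank in range(pp_size):
        for vpp_rank in range(vpp_size):
            offset = vpp_rank * (num_hidden_layers // vpp_size) + pp_rank * num_layer_vpp_chunk
            mapping[(pp_rank, vpp_rank)] = list(range(offset, offset + num_layer_vpp_chunk))
    return mapping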
class ParallelLlamaForCausalLMRmPadPP(nn.Module):
def __init__(
self,
config: LlamaConfig,
megatron_config: ModelParallelConfig,
pre_process,
post_process,
share_embeddings_and_output_weights=False,
):
super().__init__()
self.config: TransformerConfig = convert_config(config, megatron_config)
self.megatron_config = megatron_config
self.model = ParallelLlamaModelRmPadPP(
config, megatron_config=megatron_config, pre_process=pre_process, post_process=post_process
)
        assert share_embeddings_and_output_weights is False, (
            "the Llama model does not support sharing embedding and output weights"
        )
self.share_embeddings_and_output_weights = share_embeddings_and_output_weights
self.vocab_size = config.vocab_size
self.pre_process = pre_process
self.post_process = post_process
if post_process:
self._init_head(config)
def set_input_tensor(self, input_tensor):
"""Set input tensor to be used instead of forward()'s input.
When doing pipeline parallelism the input from the previous
stage comes from communication, not from the input, so the
model's forward_step_func won't have it. This function is thus
used by internal code to bypass the input provided by the
forward_step_func"""
assert len(input_tensor) == 1
self.model.set_input_tensor(input_tensor[0])
def _init_head(self, config):
column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear()
if self.megatron_config is not None:
assert column_kwargs.get("config", False), "must have ModelParallelConfig"
tp_utils.update_kwargs_with_config(column_kwargs, self.megatron_config)
self.lm_head = tensor_parallel.ColumnParallelLinear(
input_size=config.hidden_size,
output_size=config.vocab_size,
bias=False,
gather_output=False,
skip_bias_add=False,
**column_kwargs,
)
def _forward_head(self, hidden_states):
        # all_gather from sequence parallel region is performed inside lm_head
        # hidden_states: (total_nnz_padded // sp, 1, hidden_size) when sequence parallel is enabled
        logits = self.lm_head(hidden_states)[0]  # (total_nnz_padded, 1, vocab_size // tp)
        logits = logits.float()
return logits
def forward(
self,
# original input
*,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
) -> tuple | CausalLMOutputWithPast:
r"""
Args:
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Returns:
```"""
# Note that input_ids, attention_mask and position_ids should be passed to every pp layer.
# In the first pp, input_ids will be used, in other pp layers hidden_states will be used inside self.model
batch_size, sequence_length = input_ids.shape
# remove padding here
input_ids_rmpad, indices, cu_seqlens, max_seqlen_in_batch, *_ = unpad_input(
input_ids.unsqueeze(dim=-1), attention_mask
) # (total_nnz, 1)
# pad input_ids to multiple of tp for all tp ranks
        # TODO: for better performance, the sp padding could be removed at each layer; the performance gap has not been measured
if self.megatron_config.sequence_parallel:
input_ids_rmpad = sp_utils.pad_to_sequence_parallel(input_ids_rmpad)
input_ids_rmpad = input_ids_rmpad.transpose(0, 1) # (1, total_nnz+pad)
outputs = self.model(
input_ids=input_ids_rmpad,
position_ids=position_ids,
sequence_length=sequence_length,
indices=indices,
cu_seqlens=cu_seqlens,
max_seqlen_in_batch=max_seqlen_in_batch,
)
if self.post_process:
hidden_states = outputs
            logits = self._forward_head(hidden_states)
            logits = torch.squeeze(logits, dim=1)  # remove the artificial batch dimension
# remove padding from sequence parallel
if self.megatron_config.sequence_parallel:
totol_nnz = cu_seqlens[-1]
logits = logits[:totol_nnz] # (total_nnz_padded)
# add removed padding back. If input is already rmpad, we let the caller pad_input
logits = pad_input(
logits, indices, batch_size, seqlen=sequence_length
) # (batch_size, sequence_length, vocab_size)
return CausalLMOutputWithPast(
loss=None,
logits=logits,
past_key_values=None,
hidden_states=None,
attentions=None,
)
else:
return outputs
class ParallelLlamaForValueRmPadPP(ParallelLlamaForCausalLMRmPadPP):
def _init_head(self, config):
column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear()
if self.megatron_config is not None:
assert column_kwargs.get("config", False), "must have ModelParallelConfig"
tp_utils.update_kwargs_with_config(column_kwargs, self.megatron_config)
self.lm_head = nn.Linear(in_features=config.hidden_size, out_features=1, bias=False)
        # the value head consumes sequence-parallel activations, so mark its weight as sequence parallel
        # so that its gradient is all-reduced across TP ranks
sp_utils.mark_parameter_as_sequence_parallel(self.lm_head.weight)
def _forward_head(self, hidden_states):
logits = self.lm_head(hidden_states) # (total_nnz_padded // tp, 1, 1)
logits = logits.float()
if self.megatron_config.sequence_parallel:
logits = tensor_parallel.gather_from_sequence_parallel_region(logits, tensor_parallel_output_grad=False)
return logits
def forward(
self,
*,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
) -> tuple | CausalLMOutputWithPast:
output = super().forward(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids)
if self.post_process:
output.logits = torch.squeeze(output.logits, dim=-1)
return output
else:
return output
|
verl__models__mcore__config_converter.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# convert huggingface config to mcore transformer config
import warnings
from typing import TypeVar
import torch
import torch.nn.functional as F
from megatron.core import parallel_state as mpu
from megatron.core.transformer import MLATransformerConfig, TransformerConfig
from transformers import PretrainedConfig
T = TypeVar("T", bound=TransformerConfig)
def _get_base_transformer_config(
hf_config: PretrainedConfig, dtype: torch.dtype, **override_transformer_config_kwargs
) -> dict:
"""
Create a base TransformerConfig with common parameters across different model architectures.
TODO: (ycl) use dataclass or converter config?
Args:
hf_config: HuggingFace model configuration
dtype: Data type for the model
override_transformer_config_kwargs: Additional parameters to override defaults
Returns:
TransformerConfig with common parameters
"""
# Common parallel state parameters
overlap_p2p_comm = (
mpu.get_virtual_pipeline_model_parallel_world_size() is not None
and mpu.get_virtual_pipeline_model_parallel_world_size() > 1
)
batch_p2p_comm = False
# Base configuration with common parameters
base_config = {
# Model architecture parameters
"num_layers": hf_config.num_hidden_layers,
"hidden_size": hf_config.hidden_size,
"num_attention_heads": hf_config.num_attention_heads,
"num_query_groups": hf_config.num_key_value_heads,
"ffn_hidden_size": hf_config.intermediate_size,
"attention_dropout": hf_config.attention_dropout,
"hidden_dropout": getattr(hf_config, "hidden_dropout", 0.0),
"kv_channels": getattr(hf_config, "head_dim", None),
"layernorm_epsilon": hf_config.rms_norm_eps,
"add_bias_linear": True,
# Activation and normalization
"activation_func": F.silu,
"normalization": "RMSNorm",
"gated_linear_unit": True,
# Data types
"pipeline_dtype": dtype,
"params_dtype": dtype,
"bf16": dtype is torch.bfloat16,
# Parallel configuration
"tensor_model_parallel_size": mpu.get_tensor_model_parallel_world_size(),
"pipeline_model_parallel_size": mpu.get_pipeline_model_parallel_world_size(),
"expert_model_parallel_size": mpu.get_expert_model_parallel_world_size(),
"expert_tensor_parallel_size": mpu.get_expert_tensor_parallel_world_size(),
"virtual_pipeline_model_parallel_size": mpu.get_virtual_pipeline_model_parallel_world_size(),
"context_parallel_size": mpu.get_context_parallel_world_size(),
"overlap_p2p_comm": overlap_p2p_comm,
"batch_p2p_comm": batch_p2p_comm,
"sequence_parallel": mpu.get_tensor_model_parallel_world_size() > 1,
# Common settings
"variable_seq_lengths": True,
"masked_softmax_fusion": True,
"moe_token_dispatcher_type": "alltoall",
}
# Update with any provided overrides
# override_transformer_config_kwargs as kwargs shall never be none
base_config.update(override_transformer_config_kwargs)
return base_config
def _get_mla_transformer_config(
hf_config: PretrainedConfig, mla_rope_config: dict, dtype: torch.dtype, **override_transformer_config_kwargs
) -> dict:
"""
Create a MLATransformerConfig with common parameters across different model architectures.
This is specifically for MLA models like DeepseekV3.
Args:
hf_config: HuggingFace model configuration
mla_rope_config: MLA specific RoPE configuration
dtype: Data type for the model
override_transformer_config_kwargs: Additional parameters to override defaults
Returns:
MLATransformerConfig with common parameters
"""
base_config = _get_base_transformer_config(hf_config=hf_config, dtype=dtype, **override_transformer_config_kwargs)
mla_config = {
# MLA specific parameters
"q_lora_rank": hf_config.q_lora_rank,
"kv_lora_rank": hf_config.kv_lora_rank,
"qk_head_dim": hf_config.qk_nope_head_dim,
"qk_pos_emb_head_dim": hf_config.qk_rope_head_dim,
"v_head_dim": hf_config.v_head_dim,
"rotary_base": hf_config.rope_theta,
"rotary_scaling_factor": mla_rope_config["factor"],
"rope_type": mla_rope_config["type"],
"max_position_embeddings": mla_rope_config["original_max_position_embeddings"],
"beta_fast": mla_rope_config["beta_fast"],
"beta_slow": mla_rope_config["beta_slow"],
"mscale": mla_rope_config["mscale"],
"mscale_all_dim": mla_rope_config["mscale_all_dim"],
}
base_config.update(mla_config)
return base_config
def check_and_construct_configs(original_config: dict, cls: type[T]) -> T:
"""
Check and disable incompatible configurations for older Megatron version.
Args:
original_config (dict): The original model configuration.
Returns:
dict: The updated model configuration with incompatible settings disabled.
"""
removed_keys = []
for key in original_config.keys():
if not hasattr(cls, key):
removed_keys.append(key)
if removed_keys:
warnings.warn(
f"The following keys are not supported in the current Megatron version and will be removed: {removed_keys}",
stacklevel=2,
)
for key in removed_keys:
original_config.pop(key)
original_config = mapping_string_to_attn_backend(original_config)
if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:
print(f"Overridden {cls.__name__} init config: {original_config}")
return cls(**original_config)
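# Toy usage sketch (added for clarity; _ToyConfig is a hypothetical stand-in, not a Megatron class): keys
# that the target config class does not define are dropped with a warning before construction, which is
# how options from newer Megatron versions are tolerated on older ones.
def _check_and_construct_configs_example():
    from dataclasses import dataclass
    @dataclass
    class _ToyConfig:
        num_layers: int = 1
        hidden_size: int = 16
    cfg = {"num_layers": 2, "hidden_size": 32, "not_a_real_field": True}
    # "not_a_real_field" is removed (with a warning) because _ToyConfig has no such attribute
    return check_and_construct_configs(cfg, _ToyConfig)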
def hf_to_mcore_config_dense(
hf_config: PretrainedConfig, dtype: torch.dtype, **override_transformer_config_kwargs
) -> TransformerConfig:
# for LlamaForCausalLM or Qwen2ForCausalLM
qkv_bias = True if "Qwen2" in hf_config.architectures[0] else getattr(hf_config, "attention_bias", False)
qk_layernorm = True if "Qwen3" in hf_config.architectures[0] else False
args: dict = _get_base_transformer_config(
hf_config=hf_config,
dtype=dtype,
use_cpu_initialization=False,
add_bias_linear=False,
add_qkv_bias=qkv_bias,
qk_layernorm=qk_layernorm,
)
# override_transformer_config_kwargs as kwargs shall never be none
args.update(override_transformer_config_kwargs)
return check_and_construct_configs(args, TransformerConfig)
def hf_to_mcore_config_qwen2moe(
hf_config: PretrainedConfig, dtype: torch.dtype, **override_transformer_config_kwargs
) -> TransformerConfig:
args: dict = _get_base_transformer_config(
hf_config=hf_config,
dtype=dtype,
use_cpu_initialization=False,
add_bias_linear=False,
layernorm_epsilon=hf_config.rms_norm_eps,
# MoE specific
moe_ffn_hidden_size=hf_config.moe_intermediate_size,
moe_router_bias_update_rate=0.001,
moe_router_topk=hf_config.num_experts_per_tok,
num_moe_experts=hf_config.num_experts,
moe_shared_expert_intermediate_size=hf_config.shared_expert_intermediate_size,
moe_aux_loss_coeff=hf_config.router_aux_loss_coef,
# moe_aux_loss_coeff=0.0,
moe_router_load_balancing_type="none", # turn off aux_loss as it hurts perf in RL
moe_shared_expert_overlap=True,
moe_grouped_gemm=True,
moe_router_score_function="softmax",
# Other optimizations
persist_layer_norm=True,
bias_activation_fusion=True,
bias_dropout_fusion=True,
# Qwen specific
moe_router_pre_softmax=True,
add_qkv_bias=True,
)
# override_transformer_config_kwargs as kwargs shall never be none
args.update(override_transformer_config_kwargs)
return check_and_construct_configs(args, TransformerConfig)
def hf_to_mcore_config_mixtral(
hf_config: PretrainedConfig, dtype: torch.dtype, **override_transformer_config_kwargs
) -> TransformerConfig:
args: dict = _get_base_transformer_config(
hf_config=hf_config,
dtype=dtype,
use_cpu_initialization=False,
add_bias_linear=False,
layernorm_epsilon=hf_config.rms_norm_eps,
# MoE specific
num_moe_experts=hf_config.num_local_experts,
moe_aux_loss_coeff=hf_config.router_aux_loss_coef,
moe_router_topk=hf_config.num_experts_per_tok,
moe_router_pre_softmax=True,
moe_router_load_balancing_type="none", # turn off aux_loss as it hurts perf in RL
moe_router_score_function="softmax",
moe_shared_expert_intermediate_size=None, # mixtral has no shared expert
moe_shared_expert_overlap=False, # mixtral has no shared expert
moe_ffn_hidden_size=hf_config.intermediate_size,
moe_router_bias_update_rate=0.001,
# moe_permute_fusion=True, # need TE 2.1+
moe_grouped_gemm=True,
# Other optimizations
persist_layer_norm=True,
apply_rope_fusion=True,
bias_activation_fusion=True,
bias_dropout_fusion=True,
)
# override_transformer_config_kwargs as kwargs shall never be none
args.update(override_transformer_config_kwargs)
return check_and_construct_configs(args, TransformerConfig)
def hf_to_mcore_config_qwen3moe(
hf_config: PretrainedConfig, dtype: torch.dtype, **override_transformer_config_kwargs
) -> TransformerConfig:
args: dict = _get_base_transformer_config(
hf_config=hf_config,
dtype=dtype,
use_cpu_initialization=False,
add_bias_linear=False,
layernorm_epsilon=hf_config.rms_norm_eps,
# MoE specific
moe_ffn_hidden_size=hf_config.moe_intermediate_size,
moe_router_bias_update_rate=0.001,
moe_router_topk=hf_config.num_experts_per_tok,
num_moe_experts=hf_config.num_experts,
moe_aux_loss_coeff=hf_config.router_aux_loss_coef,
# moe_aux_loss_coeff=0.0,
moe_router_load_balancing_type="none", # turn off aux_loss as it hurts perf in RL
moe_grouped_gemm=True,
moe_router_score_function="softmax",
# Other optimizations
persist_layer_norm=True,
bias_activation_fusion=True,
bias_dropout_fusion=True,
# Qwen specific
moe_router_pre_softmax=False,
qk_layernorm=True,
)
# override_transformer_config_kwargs as kwargs shall never be none
args.update(override_transformer_config_kwargs)
return check_and_construct_configs(args, TransformerConfig)
def hf_to_mcore_config_dpskv3(
hf_config: PretrainedConfig, dtype: torch.dtype, **override_transformer_config_kwargs
) -> MLATransformerConfig:
# DeepseekV3ForCausalLM
from megatron.core.config import set_experimental_flag
from megatron.core.transformer.enums import AttnBackend
set_experimental_flag(True)
from .patch import apply_patch
apply_patch()
mla_rope_config = {
"beta_fast": 32,
"beta_slow": 1,
"factor": 1,
"mscale": 1.0,
"mscale_all_dim": 1.0,
"original_max_position_embeddings": 4096,
"type": "rope",
}
if "rope_scaling" in hf_config and hf_config.rope_scaling is not None:
mla_rope_config.update(hf_config.rope_scaling)
moe_layer_freq = [1] * hf_config.num_hidden_layers
for i in range(min(hf_config.first_k_dense_replace, hf_config.num_hidden_layers)):
moe_layer_freq[i] = 0
# disable MTP and quantization for now
if "num_nextn_predict_layers" in hf_config:
assert hf_config.num_nextn_predict_layers == 0, (
"MTP is not supported for now, please modify the config.json to set num_nextn_predict_layers to 0"
)
assert "quantization_config" not in hf_config or not hf_config.quantization_config, (
"quantization is not supported for now, please modify the config.json to remove quantization_config"
)
args: dict = _get_mla_transformer_config(
hf_config=hf_config,
mla_rope_config=mla_rope_config,
dtype=dtype,
# Additional parameters
use_cpu_initialization=False,
add_bias_linear=False,
attention_backend=AttnBackend.fused,
qk_layernorm=True,
# Standard MoE parameters
moe_ffn_hidden_size=hf_config.moe_intermediate_size,
moe_token_dispatcher_type="alltoall",
moe_router_bias_update_rate=0.001,
moe_router_enable_expert_bias=True,
moe_router_topk=hf_config.num_experts_per_tok,
num_moe_experts=hf_config.n_routed_experts,
moe_shared_expert_intermediate_size=hf_config.moe_intermediate_size * hf_config.n_shared_experts,
moe_aux_loss_coeff=getattr(hf_config, "aux_loss_alpha", 0.001),
moe_router_load_balancing_type="seq_aux_loss",
moe_shared_expert_overlap=True,
# moe_permute_fusion=True, # need TE 2.1+
moe_grouped_gemm=True,
moe_router_score_function="sigmoid",
moe_router_pre_softmax=True,
moe_router_topk_scaling_factor=hf_config.routed_scaling_factor,
moe_layer_freq=moe_layer_freq,
# mcore 0.12 moe
moe_router_dtype="fp64",
disable_bf16_reduced_precision_matmul=True,
# Other optimizations
# deallocate_pipeline_outputs=True,
# gradient_accumulation_fusion=True,
persist_layer_norm=True,
bias_activation_fusion=True,
bias_dropout_fusion=True,
)
# override_transformer_config_kwargs as kwargs shall never be none
args.update(override_transformer_config_kwargs)
transformer_config = check_and_construct_configs(args, MLATransformerConfig)
# MTP
if "num_nextn_predict_layers" in hf_config:
transformer_config.mtp_num_layers = hf_config.num_nextn_predict_layers
transformer_config.mtp_loss_scaling_factor = 0.1
return transformer_config
def hf_to_mcore_config_qwen2_5_vl(
hf_config: PretrainedConfig, dtype: torch.dtype, **override_transformer_config_kwargs
) -> TransformerConfig:
# Qwen2_5_VLForConditionalGeneration
args = _get_base_transformer_config(
hf_config=hf_config,
dtype=dtype,
add_bias_linear=False,
# qwen specific
add_qkv_bias=True,
mrope_section=hf_config.rope_scaling["mrope_section"],
)
# override_transformer_config_kwargs as kwargs shall never be none
args.update(override_transformer_config_kwargs)
args = mapping_string_to_attn_backend(args)
return TransformerConfig(**args)
def hf_to_mcore_config_llama4(
hf_config: PretrainedConfig, dtype: torch.dtype, **override_transformer_config_kwargs
) -> TransformerConfig:
# Llama4ForConditionalGeneration
raise NotImplementedError("Llama4ForConditionalGeneration is not supported yet")
def mapping_string_to_attn_backend(args: dict) -> dict:
if "attention_backend" in args and isinstance(args["attention_backend"], str):
from megatron.core.transformer.enums import AttnBackend
args["attention_backend"] = AttnBackend[args["attention_backend"]]
return args
|
verl__models__mcore__loader.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import torch
import torch.distributed as dist
from verl.utils.device import get_device_id, get_torch_device
from .saver import _megatron_calc_global_rank
def _megatron_calc_layer_map(config):
"""Calculate the mapping of global layer_idx to local layer_idx
Returns:
layer_map (Dict: int -> tuple(int, int, int)):
mapping from the global layer index to
a tuple of (pp_rank, virtual_pp_rank, layer_idx inside model)
"""
from megatron.core import mpu
pp_size = mpu.get_pipeline_model_parallel_world_size()
virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1
layer_map = dict()
num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size
assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers
for pp_rank_idx in range(pp_size):
for virtual_pp_rank_idx in range(virtual_pp_size):
layer_offset = (
virtual_pp_rank_idx * (config.num_hidden_layers // virtual_pp_size) + pp_rank_idx * num_layers_per_model
)
for layer_idx in range(num_layers_per_model):
layer_map[layer_offset + layer_idx] = (
pp_rank_idx,
virtual_pp_rank_idx,
layer_idx,
)
return layer_map
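# Illustrative sketch of the mapping computed above (added for clarity; 8 layers, pp=2, vpp=2 are
# hypothetical example sizes and no parallel state is consulted). For instance, global layer 5 lands on
# pp_rank 0, virtual chunk 1, local index 1.
def _layer_map_sketch(num_hidden_layers: int = 8, pp_size: int = 2, virtual_pp_size: int = 2):
    num_layers_per_model = num_hidden_layers // pp_size // virtual_pp_size
    layer_map = {}
    for pp_rank in range(pp_size):
        for vpp_rank in range(virtual_pp_size):
            layer_offset = vpp_rank * (num_hidden_layers // virtual_pp_size) + pp_rank * num_layers_per_model
            for layer_idx in range(num_layers_per_model):
                layer_map[layer_offset + layer_idx] = (pp_rank, vpp_rank, layer_idx)
    return layer_map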
def load_state_dict_to_megatron_gptmodel(state_dict, wrapped_models, config, params_dtype, is_value_model=False):
"""Load merged state_dict to sharded Megatron module in training."""
from megatron.core import DistributedDataParallel as LocalDDP
from megatron.core import mpu
from megatron.core.transformer.module import Float16Module
from torch.nn.parallel import DistributedDataParallel as torchDDP
from verl.utils.logger import print_rank_0
from verl.utils.megatron_utils import unwrap_model
start_time = time.time()
def _get_gpt_model(model):
return model
def broadcast_params(module):
for param in module.parameters():
torch.distributed.broadcast(
param.data, src=mpu.get_data_parallel_src_rank(), group=mpu.get_data_parallel_group()
)
dp_rank = mpu.get_data_parallel_rank()
pp_rank = mpu.get_pipeline_model_parallel_rank()
cp_rank = mpu.get_context_parallel_rank()
src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=0, cp_rank=cp_rank)
pp_size = mpu.get_pipeline_model_parallel_world_size()
virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1
mp_group = mpu.get_model_parallel_group()
if torch.distributed.get_rank() == src_rank:
        assert mp_group.rank() == 0, f"mp_rank:[{mp_group.rank()}] != 0 on rank #0"
assert pp_rank == 0, f"pp_rank:[{pp_rank}] != 0 on rank #0"
assert dp_rank == 0, f"dp_rank:[{dp_rank}] != 0 on rank #0"
    if not isinstance(wrapped_models, list | tuple):
        wrapped_models = [wrapped_models]
assert len(wrapped_models) == virtual_pp_size
num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size
assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers
models = [None] * len(wrapped_models)
for i, wrapped_model in enumerate(wrapped_models):
models[i] = unwrap_model(wrapped_model, (torchDDP, LocalDDP, Float16Module))
gpt_model_module = _get_gpt_model(models[i])
assert len(gpt_model_module.decoder.layers) == num_layers_per_model
def _broadcast_tensor(tensor, name) -> torch.Tensor:
"""broadcast tensor from rank0 across mp_group"""
nonlocal state_dict
nonlocal mp_group
if torch.distributed.get_rank() == src_rank:
if name in state_dict:
weight = state_dict[name]
tensor_shape = weight.shape
else:
tensor_shape = None
else:
weight = None
tensor_shape = None
obj_list = [tensor_shape]
dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group)
tensor_shape = obj_list[0]
if tensor_shape is None:
# all or none ranks in the mp_group should reach here
print_rank_0(f"tensor:[{name}] not in state_dict, skip load")
return
if tensor is None:
tensor = torch.empty(
tensor_shape,
dtype=params_dtype,
device=get_device_id(),
requires_grad=False,
)
if torch.distributed.get_rank() == src_rank:
tensor.data.copy_(weight)
dist.broadcast(tensor, src=src_rank, group=mp_group)
def _broadcast_tp_shard_tensor_vocab(tensor, name, chunk_dim=0, mutate_func=None) -> torch.Tensor:
"""broadcast tensor in tp shards across mp_group"""
nonlocal state_dict
nonlocal mp_group
tp_rank = mpu.get_tensor_model_parallel_rank()
tp_size = mpu.get_tensor_model_parallel_world_size()
if torch.distributed.get_rank() == src_rank:
if name in state_dict:
full_weight = state_dict[name]
if mutate_func is not None:
full_weight = mutate_func(full_weight)
tensor_chunk = torch.chunk(full_weight, tp_size, dim=chunk_dim)
chunk_shape = tensor_chunk[0].shape
else:
chunk_shape = None
else:
chunk_shape = None
obj_list = [chunk_shape]
dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group)
chunk_shape = obj_list[0]
if chunk_shape is None:
# all or none ranks in the mp_group should reach here
print_rank_0(f"tp_shard tensor:[{name}] not in state_dict, skip loading")
return
if tensor is None:
sync_tensor = torch.empty(
chunk_shape,
dtype=params_dtype,
device=get_device_id(),
requires_grad=False,
)
else:
assert tensor.shape == chunk_shape, (
f"rank #{torch.distributed.get_rank()} tensor {name} shape {tensor.shape} != {chunk_shape}"
)
sync_tensor = torch.empty_like(tensor, device=get_device_id(), requires_grad=False)
for i in range(tp_size):
if torch.distributed.get_rank() == src_rank:
sync_tensor.data.copy_(tensor_chunk[i])
dist.broadcast(sync_tensor, src=src_rank, group=mp_group)
if (i == tp_rank) and (tensor is not None):
tensor.data.copy_(sync_tensor)
def _broadcast_tp_shard_tensor(tensor, name, chunk_dim=0, mutate_func=None) -> torch.Tensor:
"""broadcast tensor in tp shards across mp_group"""
nonlocal state_dict
nonlocal mp_group
tp_rank = mpu.get_tensor_model_parallel_rank()
tp_size = mpu.get_tensor_model_parallel_world_size()
if torch.distributed.get_rank() == src_rank:
if name in state_dict:
full_weight = state_dict[name]
if mutate_func is not None:
full_weight = mutate_func(full_weight)
tensor_chunk = torch.chunk(full_weight, tp_size, dim=chunk_dim)
chunk_shape = tensor_chunk[0].shape
else:
chunk_shape = None
else:
chunk_shape = None
obj_list = [chunk_shape]
dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group)
chunk_shape = obj_list[0]
if chunk_shape is None:
# all or none ranks in the mp_group should reach here
print_rank_0(f"tp_shard tensor:[{name}] not in state_dict, skip loading")
return
if tensor is None:
sync_tensor = torch.empty(
chunk_shape,
dtype=params_dtype,
device=get_device_id(),
requires_grad=False,
)
else:
assert tensor.shape == chunk_shape, (
f"rank #{torch.distributed.get_rank()} tensor {name} shape {tensor.shape} != {chunk_shape}"
)
sync_tensor = torch.empty_like(tensor, device=get_device_id(), requires_grad=False)
for i in range(tp_size):
if torch.distributed.get_rank() == src_rank:
sync_tensor.data.copy_(tensor_chunk[i])
dist.broadcast(sync_tensor, src=src_rank, group=mp_group)
if (i == tp_rank) and (tensor is not None):
tensor.data.copy_(sync_tensor)
def _broadcast_tp_shard_tensor_gate_up(tensor, gate_name, up_name) -> torch.Tensor:
"""broadcast tensor in tp shards across mp_group"""
nonlocal state_dict
nonlocal mp_group
tp_rank = mpu.get_tensor_model_parallel_rank()
tp_size = mpu.get_tensor_model_parallel_world_size()
if torch.distributed.get_rank() == src_rank:
gate_weight = state_dict[gate_name]
up_weight = state_dict[up_name]
new_gate_up_weight = torch.empty(
config.intermediate_size * 2, config.hidden_size, dtype=params_dtype, device=get_device_id()
)
for i in range(tp_size):
intermediate_size_tp = config.intermediate_size // tp_size
gate_weight_tp = gate_weight[i * intermediate_size_tp : (i + 1) * intermediate_size_tp]
up_weight_tp = up_weight[i * intermediate_size_tp : (i + 1) * intermediate_size_tp]
new_gate_up_weight[intermediate_size_tp * 2 * i : intermediate_size_tp * 2 * (i + 1)].copy_(
torch.cat([gate_weight_tp, up_weight_tp], dim=0)
)
tensor_chunk = torch.chunk(new_gate_up_weight, tp_size, dim=0)
chunk_shape = tensor_chunk[0].shape
else:
chunk_shape = None
obj_list = [chunk_shape]
dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group)
chunk_shape = obj_list[0]
if chunk_shape is None:
# all or none ranks in the mp_group should reach here
print_rank_0(f"tp_shard tensor:[{gate_name, up_name}] not in state_dict, skip loading")
return
if tensor is None:
sync_tensor = torch.empty(
chunk_shape,
dtype=params_dtype,
device=get_device_id(),
requires_grad=False,
)
else:
            assert tensor.shape == chunk_shape, (
                f"rank #{torch.distributed.get_rank()} tensor {gate_name, up_name} shape "
                f"{tensor.shape} != {chunk_shape}"
            )
sync_tensor = torch.empty_like(tensor, device=get_device_id(), requires_grad=False)
for i in range(tp_size):
if torch.distributed.get_rank() == src_rank:
sync_tensor.data.copy_(tensor_chunk[i])
dist.broadcast(sync_tensor, src=src_rank, group=mp_group)
if (i == tp_rank) and (tensor is not None):
tensor.data.copy_(sync_tensor)
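    # Illustrative sketch of the gate/up interleaving performed above (added for clarity; tiny toy sizes,
    # plain torch, no broadcasting). The merged tensor is laid out so that chunk i along dim 0 is exactly
    # [gate shard i ; up shard i], which is the layout MergedColumnParallelLinear expects on TP rank i.
    def _gate_up_interleave_sketch(intermediate_size: int = 4, hidden_size: int = 2, tp_size: int = 2):
        gate = torch.arange(intermediate_size * hidden_size, dtype=torch.float32).view(intermediate_size, hidden_size)
        up = -torch.arange(intermediate_size * hidden_size, dtype=torch.float32).view(intermediate_size, hidden_size)
        merged = torch.empty(intermediate_size * 2, hidden_size)
        shard = intermediate_size // tp_size
        for i in range(tp_size):
            merged[2 * shard * i : 2 * shard * (i + 1)].copy_(
                torch.cat([gate[shard * i : shard * (i + 1)], up[shard * i : shard * (i + 1)]], dim=0)
            )
        # chunk i now equals [gate shard i ; up shard i]
        return torch.chunk(merged, tp_size, dim=0)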
def _broadcast_tp_shard_tensor_qkv(tensor, q_name, k_name, v_name, bias=False) -> torch.Tensor:
"""broadcast tensor in tp shards across mp_group"""
nonlocal state_dict
nonlocal mp_group
tp_rank = mpu.get_tensor_model_parallel_rank()
tp_size = mpu.get_tensor_model_parallel_world_size()
if torch.distributed.get_rank() == src_rank:
assert q_name in state_dict and k_name in state_dict and v_name in state_dict
full_weight_q = state_dict[q_name]
full_weight_k = state_dict[k_name]
full_weight_v = state_dict[v_name]
hidden_size_per_head = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
if config.num_key_value_heads >= tp_size:
q_size_tp = hidden_size_per_head * config.num_attention_heads // tp_size
kv_size_tp = hidden_size_per_head * config.num_key_value_heads // tp_size
total_size = q_size_tp + 2 * kv_size_tp
sizes = [total_size * tp_size]
if not bias:
sizes.append(config.hidden_size)
new_weight_qkv = torch.empty(*sizes, dtype=params_dtype, device=get_device_id())
for i in range(tp_size):
q_part = full_weight_q[i * q_size_tp : (i + 1) * q_size_tp]
k_part = full_weight_k[i * kv_size_tp : (i + 1) * kv_size_tp]
v_part = full_weight_v[i * kv_size_tp : (i + 1) * kv_size_tp]
num_query_groups_per_partition = models[0].config.num_query_groups // tp_size
new_weight_qkv_this_tp = new_weight_qkv[i * total_size : (i + 1) * total_size]
q_part_per_head = torch.chunk(q_part, num_query_groups_per_partition, dim=0)
k_part_per_head = torch.chunk(k_part, num_query_groups_per_partition, dim=0)
v_part_per_head = torch.chunk(v_part, num_query_groups_per_partition, dim=0)
total_size_per_head = total_size // num_query_groups_per_partition
for j in range(num_query_groups_per_partition):
new_weight_qkv_this_tp[j * total_size_per_head : (j + 1) * total_size_per_head].copy_(
torch.cat([q_part_per_head[j], k_part_per_head[j], v_part_per_head[j]], dim=0)
)
else:
q_size_tp = hidden_size_per_head * config.num_attention_heads // tp_size
kv_size_tp = hidden_size_per_head
total_size = q_size_tp + 2 * kv_size_tp
sizes = [total_size * tp_size]
if not bias:
sizes.append(config.hidden_size)
new_weight_qkv = torch.empty(*sizes, dtype=params_dtype, device=get_device_id())
for i in range(tp_size):
q_part = full_weight_q[i * q_size_tp : (i + 1) * q_size_tp]
start_idx = i * config.num_key_value_heads // tp_size * hidden_size_per_head
end_idx = (i * config.num_key_value_heads // tp_size + 1) * hidden_size_per_head
k_part = full_weight_k[start_idx:end_idx]
v_part = full_weight_v[start_idx:end_idx]
new_weight_qkv_this_tp = new_weight_qkv[i * total_size : (i + 1) * total_size]
q_part_per_head = torch.chunk(q_part, config.num_attention_heads, dim=0)
k_part_per_head = torch.chunk(k_part, config.num_attention_heads, dim=0)
v_part_per_head = torch.chunk(v_part, config.num_attention_heads, dim=0)
total_size_per_head = total_size // config.num_attention_heads
for j in range(config.num_attention_heads):
new_weight_qkv_this_tp[j * total_size_per_head : (j + 1) * total_size_per_head].copy_(
torch.cat([q_part_per_head[j], k_part_per_head[j], v_part_per_head[j]], dim=0)
)
tensor_chunk = torch.chunk(new_weight_qkv, tp_size, dim=0)
chunk_shape = tensor_chunk[0].shape
else:
chunk_shape = None
obj_list = [chunk_shape]
dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group)
chunk_shape = obj_list[0]
if chunk_shape is None:
# either all ranks in the mp_group reach here, or none of them do
print_rank_0(f"tp_shard tensor:[{q_name, k_name, v_name}] not in state_dict, skip loading")
return
if tensor is None:
sync_tensor = torch.empty(
chunk_shape,
dtype=params_dtype,
device=get_device_id(),
requires_grad=False,
)
else:
assert tensor.shape == chunk_shape, (
f"rank #{torch.distributed.get_rank()} tensor {q_name} shape {tensor.shape} != {chunk_shape}"
)
sync_tensor = torch.empty_like(tensor, device=get_device_id(), requires_grad=False)
for i in range(tp_size):
if torch.distributed.get_rank() == src_rank:
sync_tensor.data.copy_(tensor_chunk[i])
dist.broadcast(sync_tensor, src=src_rank, group=mp_group)
if (i == tp_rank) and (tensor is not None):
tensor.data.copy_(sync_tensor)
if dp_rank == 0:
# Embeddings
# -------------------
print_rank_0("loading embeddings...")
gpt_model_module = _get_gpt_model(models[0])
embed_tokens_weight = None
if pp_rank == 0:
embed_tokens_weight = gpt_model_module.embedding.word_embeddings.weight
_broadcast_tp_shard_tensor_vocab(embed_tokens_weight, "model.embed_tokens.weight")
# Transformer layers
# -------------------
layer_map = _megatron_calc_layer_map(config)
for layer in range(config.num_hidden_layers):
layer_name = f"model.layers.{layer}"
print_rank_0(f"loading layer #{layer}, with layer_name model.layers.{layer}...")
dst_pp_rank, dst_virtual_pp_rank, dst_layer_idx = layer_map[layer]
gpt_model_module = _get_gpt_model(models[dst_virtual_pp_rank])
sync_layer = gpt_model_module.decoder.layers[dst_layer_idx]
_broadcast_tensor(
sync_layer.self_attention.linear_qkv.layer_norm_weight if dst_pp_rank == pp_rank else None,
f"{layer_name}.input_layernorm.weight",
)
if f"{layer_name}.self_attn.q_norm.weight" in state_dict:
_broadcast_tensor(
sync_layer.self_attention.q_layernorm.weight if dst_pp_rank == pp_rank else None,
f"{layer_name}.self_attn.q_norm.weight",
)
_broadcast_tensor(
sync_layer.self_attention.k_layernorm.weight if dst_pp_rank == pp_rank else None,
f"{layer_name}.self_attn.k_norm.weight",
)
_broadcast_tp_shard_tensor_qkv(
sync_layer.self_attention.linear_qkv.weight if dst_pp_rank == pp_rank else None,
f"{layer_name}.self_attn.q_proj.weight",
f"{layer_name}.self_attn.k_proj.weight",
f"{layer_name}.self_attn.v_proj.weight",
)
if f"{layer_name}.self_attn.q_proj.bias" in state_dict:
_broadcast_tp_shard_tensor_qkv(
sync_layer.self_attention.linear_qkv.bias if dst_pp_rank == pp_rank else None,
f"{layer_name}.self_attn.q_proj.bias",
f"{layer_name}.self_attn.k_proj.bias",
f"{layer_name}.self_attn.v_proj.bias",
bias=True,
)
_broadcast_tp_shard_tensor(
sync_layer.self_attention.linear_proj.weight if dst_pp_rank == pp_rank else None,
f"{layer_name}.self_attn.o_proj.weight",
chunk_dim=1,
)
_broadcast_tensor(
sync_layer.mlp.linear_fc1.layer_norm_weight if dst_pp_rank == pp_rank else None,
f"{layer_name}.post_attention_layernorm.weight",
)
_broadcast_tp_shard_tensor_gate_up(
sync_layer.mlp.linear_fc1.weight if dst_pp_rank == pp_rank else None,
f"{layer_name}.mlp.gate_proj.weight",
f"{layer_name}.mlp.up_proj.weight",
)
_broadcast_tp_shard_tensor(
sync_layer.mlp.linear_fc2.weight if dst_pp_rank == pp_rank else None,
f"{layer_name}.mlp.down_proj.weight",
chunk_dim=1,
)
# Final Layernorm
# -------------------
print_rank_0("loading final layernorm...")
gpt_model_module = _get_gpt_model(models[-1])
_broadcast_tensor(
getattr(gpt_model_module.decoder.final_layernorm, "weight", None),
"model.norm.weight",
)
print_rank_0("loading lm_head...")
lm_head_weight = None
if pp_rank + 1 == pp_size:
lm_head_weight = gpt_model_module.output_layer.weight
if is_value_model:
# if torch.distributed.get_rank() == src_rank:
if "lm_head.weight" in state_dict and state_dict["lm_head.weight"].shape[0] == 1:
_broadcast_tensor(lm_head_weight, "lm_head.weight")
elif "reward_head.weight" in state_dict and state_dict["reward_head.weight"].shape[0] == 1:
_broadcast_tensor(lm_head_weight, "reward_head.weight")
print_rank_0("load lm_head from value_head weight")
elif "score.weight" in state_dict and state_dict["score.weight"].shape[0] == 1:
_broadcast_tensor(lm_head_weight, "score.weight")
print_rank_0("load lm_head from score weight")
else:
_broadcast_tensor(None, "lm_head.weight")
print_rank_0("fail to match lm_head in value_model")
# else:
# _broadcast_tensor(lm_head_weight, "lm_head.weight")
else:
_broadcast_tp_shard_tensor(lm_head_weight, "lm_head.weight")
dist.barrier()
# Broadcast weights inside data parallel groups
for wrapped_model in wrapped_models:
broadcast_params(wrapped_model)
get_torch_device().empty_cache()
print_rank_0(f"loading megatron ckpt done, time elapsed {time.time() - start_time}s")
|
verl__models__mcore__model_forward.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from verl.utils.megatron_utils import unwrap_model
from verl.workers.config import MtpConfig
from .util import (
postprocess_bshd,
postprocess_bshd_no_padding,
postprocess_packed_seqs,
postprocess_thd_no_padding,
preprocess_bshd,
preprocess_bshd_no_padding,
preprocess_packed_seqs,
preprocess_thd_no_padding,
)
def model_forward_gen(vision_model: bool = False):
def model_forward(
model,
input_ids,
attention_mask,
position_ids,
multi_modal_inputs: dict,
logits_processor=None,
logits_processor_args: dict = None,
value_model=False,
data_format: str = "thd",
mtp_config: MtpConfig = None,
):
"""Forward pass for models with sequence packing."""
assert data_format in ["thd", "bshd"], "data_format must be 'thd' or 'bshd'"
pre_process = (
unwrap_model(model).pre_process if not vision_model else False
) # vision model does not need pre_process, because we pack the input_ids to thd in the forward function
post_process = unwrap_model(model).post_process
sp = unwrap_model(model).config.sequence_parallel
fp8 = unwrap_model(model).config.fp8
use_fp8_padding = fp8 in ["e4m3", "hybrid"]
model_kwargs = {}
if "pixel_values" in multi_modal_inputs:
model_kwargs["pixel_values"] = multi_modal_inputs["pixel_values"].to(input_ids.device)
if "image_grid_thw" in multi_modal_inputs:
model_kwargs["image_grid_thw"] = multi_modal_inputs["image_grid_thw"].to(input_ids.device)
if "pixel_values_videos" in multi_modal_inputs:
model_kwargs["pixel_values_videos"] = multi_modal_inputs["pixel_values_videos"].to(input_ids.device)
if "video_grid_thw" in multi_modal_inputs:
model_kwargs["video_grid_thw"] = multi_modal_inputs["video_grid_thw"].to(input_ids.device)
batch_size, seq_len = attention_mask.shape[:2]
if data_format == "thd":
input_ids_rmpad, packed_seq_params = preprocess_packed_seqs(
input_ids, attention_mask, pre_process=pre_process or post_process, use_fp8_padding=use_fp8_padding
)
input_ids_rmpad = input_ids_rmpad.contiguous()
# when MTP training is enabled and this is the last pipeline stage, pass the labels and loss_mask to the model
if mtp_config and mtp_config.enable_train and post_process:
args = {
k: preprocess_packed_seqs(v, attention_mask, pre_process=True, use_fp8_padding=use_fp8_padding)[0]
for k, v in logits_processor_args.items()
}
model_kwargs["labels"] = args["label"].contiguous()
model_kwargs["loss_mask"] = args["label_mask"].contiguous()
input_args = dict(
input_ids=input_ids_rmpad,
attention_mask=None,
position_ids=position_ids if not vision_model else None, # vision models will calculate position_ids
packed_seq_params=packed_seq_params,
**model_kwargs,
)
if vision_model:
# workaround for supporting sequence packing with context parallelism
# cp split with sequence packing would make the model lose vision token information, so we keep
# the original input_ids and pack them after the vision embeddings are calculated,
# in cooperation with mbridge
input_args["input_ids"] = input_ids
input_args["attention_mask"] = attention_mask
output_orig = model(**input_args)
if post_process and logits_processor is not None:
args = {
k: preprocess_packed_seqs(v, attention_mask, pre_process=True, use_fp8_padding=use_fp8_padding)[0]
for k, v in logits_processor_args.items()
}
output_dict = logits_processor(output_orig, **args)
output = {
k: postprocess_packed_seqs(
v, packed_seq_params, attention_mask, batch_size, seq_len, post_process=post_process
)
for k, v in output_dict.items()
}
else:
output = postprocess_packed_seqs(
output_orig, packed_seq_params, attention_mask, batch_size, seq_len, post_process=post_process
)
elif data_format == "bshd":
"""
data_format: "thd" or "bshd", default is "thd",
why we need this?
for some new models, GPT-OSS, the thd format is not supported, so we need to use the bshd format.
When using the bshd format, we have to add paddings to the input_ids to meet the longest sequence length,
so it is recommended to disable dynamic batch size and set batch size to 1
"""
assert not vision_model, "vision model does not support bshd format"
assert fp8 is None, "fp8 is not supported for bshd format yet"
batch_size, sequence_length = attention_mask.shape[:2]
new_input_ids, new_attention_mask, new_position_ids = preprocess_bshd(
input_ids, attention_mask, position_ids, sequence_parallel=sp, pre_process=pre_process
)
output_orig = model(
input_ids=new_input_ids,
position_ids=new_position_ids,
attention_mask=new_attention_mask,
**model_kwargs,
)
if post_process and logits_processor is not None:
args = {
k: preprocess_bshd(v, attention_mask, position_ids, sequence_parallel=sp, pre_process=True)[0]
for k, v in logits_processor_args.items()
}
output_dict = logits_processor(output_orig, **args)
output = {
k: postprocess_bshd(
v, new_attention_mask, attention_mask, sequence_length, post_process=post_process
)
for k, v in output_dict.items()
}
else:
output = postprocess_bshd(
output_orig, new_attention_mask, attention_mask, sequence_length, post_process=post_process
)
if value_model and post_process:
output = output[..., 0]
return output
return model_forward
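# Illustrative sketch (not part of the original module): the core idea behind the "thd"
# (packed / remove-padding) path above. Padded [batch, seq] token ids are flattened into a
# single packed sequence using the attention mask, together with cumulative sequence lengths;
# per-token outputs are later scattered back to the padded layout. The real logic lives in
# preprocess_packed_seqs / postprocess_packed_seqs; this toy version ignores TP/CP/fp8 padding.
def _example_pack_and_unpack():
    import torch

    input_ids = torch.tensor([[11, 12, 13, 0], [21, 22, 0, 0]])
    attention_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]], dtype=torch.bool)

    seqlens = attention_mask.sum(dim=1)  # tensor([3, 2])
    cu_seqlens = torch.cumsum(torch.cat([seqlens.new_zeros(1), seqlens]), dim=0)  # tensor([0, 3, 5])
    packed = input_ids[attention_mask]  # tensor([11, 12, 13, 21, 22])

    # "postprocess": scatter a packed per-token output back to the padded [batch, seq] layout
    packed_out = packed.float() * 2
    out = torch.zeros_like(input_ids, dtype=packed_out.dtype)
    out[attention_mask] = packed_out
    return packed, cu_seqlens, out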
def gptmodel_forward_no_padding(
model,
input_ids,
multi_modal_inputs: dict,
logits_processor=None,
logits_processor_args: dict = None,
value_model=False,
vision_model=False,
pad_token_id=None,
data_format: str = "thd",
enable_mtp: bool = False,
):
"""Default forward pass for GPT models with optional sequence packing."""
assert data_format in ["thd", "bshd"], "data_format must be 'thd' or 'bshd'"
pre_process = unwrap_model(model).pre_process
post_process = unwrap_model(model).post_process
model_kwargs = {}
if "pixel_values" in multi_modal_inputs:
model_kwargs["pixel_values"] = multi_modal_inputs["pixel_values"].to(input_ids.device)
if "image_grid_thw" in multi_modal_inputs:
model_kwargs["image_grid_thw"] = multi_modal_inputs["image_grid_thw"].to(input_ids.device)
if "pixel_values_videos" in multi_modal_inputs:
model_kwargs["pixel_values_videos"] = multi_modal_inputs["pixel_values_videos"].to(input_ids.device)
if "video_grid_thw" in multi_modal_inputs:
model_kwargs["video_grid_thw"] = multi_modal_inputs["video_grid_thw"].to(input_ids.device)
batch_size = input_ids.shape[0]
if data_format == "thd":
input_ids_rmpad, packed_seq_params = preprocess_thd_no_padding(input_ids, pre_process=pre_process)
input_ids_rmpad = input_ids_rmpad.contiguous()
if enable_mtp and post_process:
args = {
k: preprocess_thd_no_padding(v, pre_process=True, need_roll=(k == "label" or k == "loss_mask"))[0]
for k, v in logits_processor_args.items()
}
model_kwargs["labels"] = args["label"].contiguous()
model_kwargs["loss_mask"] = args["loss_mask"].contiguous()
if logits_processor_args and "loss_mask" in logits_processor_args:
logits_processor_args.pop("loss_mask")
# For VLM model, need to pass bshd format `input_ids` and `attention_mask`.
attention_mask = None
if vision_model:
input_ids_rmpad = input_ids.to_padded_tensor(pad_token_id)
seqlens_in_batch = input_ids.offsets().diff()
attention_mask = torch.zeros_like(input_ids_rmpad, dtype=torch.bool)
for i, seqlen in enumerate(seqlens_in_batch):
attention_mask[i, :seqlen] = True
output_orig = model(
input_ids=input_ids_rmpad,
attention_mask=attention_mask,
position_ids=None,
packed_seq_params=packed_seq_params,
**model_kwargs,
)
if post_process and logits_processor is not None:
args = {
k: preprocess_thd_no_padding(v, pre_process=True, need_roll=(k == "label"))[0]
for k, v in logits_processor_args.items()
}
output_dict = logits_processor(output_orig, **args)
output = {
k: postprocess_thd_no_padding(v, packed_seq_params, input_ids, batch_size, post_process=post_process)
for k, v in output_dict.items()
}
else:
output = postprocess_thd_no_padding(
output_orig, packed_seq_params, input_ids, batch_size, post_process=post_process
)
else:
"""
data_format: "thd" or "bshd", default is "thd",
why we need this?
for some new models, GPT-OSS, the thd format is not supported, so we need to use the bshd format.
When using the bshd format, we have to add paddings to the input_ids to meet the longest sequence length,
so it is recommended to disable dynamic batch size and set batch size to 1
"""
input_ids_bshd, attention_mask_bshd, position_ids_bshd = preprocess_bshd_no_padding(
input_ids, pre_process=pre_process
)
if enable_mtp and post_process:
args = {
k: preprocess_bshd_no_padding(v, pre_process=True, need_roll=(k == "label" or k == "loss_mask"))[0]
for k, v in logits_processor_args.items()
}
model_kwargs["labels"] = args["label"].contiguous()
model_kwargs["loss_mask"] = args["loss_mask"].contiguous()
if logits_processor_args and "loss_mask" in logits_processor_args:
logits_processor_args.pop("loss_mask")
output_orig = model(
input_ids=input_ids_bshd,
attention_mask=attention_mask_bshd,
position_ids=position_ids_bshd,
**model_kwargs,
)
if post_process and logits_processor is not None:
args = {
k: preprocess_bshd_no_padding(v, pre_process=True, need_roll=(k == "label"))[0]
for k, v in logits_processor_args.items()
}
output_dict = logits_processor(output_orig, **args)
output = {
k: postprocess_bshd_no_padding(v, attention_mask_bshd, post_process=post_process)
for k, v in output_dict.items()
}
else:
output = postprocess_bshd_no_padding(output_orig, attention_mask_bshd, post_process=post_process)
if value_model and post_process:
# output = output[..., 0]
# when using nested tensors, the advanced indexing operation above raises an error in backward, i.e.
# ValueError: NestedTensor _nested_select_backward_default(grad_output: t, self: jt_all, dim: any, index: any)
# so we use `squeeze` to remove the last dimension
output = output.squeeze(-1)
return output
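# Illustrative sketch (not part of the original module): building a boolean attention mask
# from per-sample sequence lengths with broadcasting, the same trick used above when a
# nested-tensor batch is padded for the VLM path. Lengths below are toy values.
def _example_mask_from_seqlens():
    import torch

    seqlens_in_batch = torch.tensor([3, 1, 2])
    max_seq_len = int(seqlens_in_batch.max())
    attention_mask = torch.arange(max_seq_len).unsqueeze(0) < seqlens_in_batch.unsqueeze(1)
    # attention_mask:
    # [[ True,  True,  True],
    #  [ True, False, False],
    #  [ True,  True, False]]
    return attention_mask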
|
verl__models__mcore__model_forward_1f1b_overlap.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, Optional
import torch
from megatron.core.models.common.model_chunk_schedule_plan import TransformerModelChunkSchedulePlan
from megatron.core.models.gpt.gpt_model import GPTModel
from megatron.core.utils import make_viewless_tensor
from torch import Tensor
from verl.models.mcore.util import preprocess_packed_seqs
from verl.utils.kernel.linear_cross_entropy import linear_cross_entropy
from verl.utils.megatron_utils import unwrap_model
from verl.utils.model import CausalLMOutputForPPO
from .util import postprocess_packed_seqs, postprocess_packed_seqs_for_dict_output
def gptmodel_forward_1f1b_overlap(
model: GPTModel,
input_ids: Tensor,
position_ids: Tensor,
attention_mask: Tensor,
labels: Tensor = None,
labels_mask: Tensor = None,
multi_modal_inputs: Optional[dict] = None,
logits_processor: Optional[Callable] = None,
logits_processor_args: Optional[dict] = None,
temperature: float = 1.0,
) -> TransformerModelChunkSchedulePlan:
pre_process: bool = unwrap_model(model).pre_process
post_process: bool = unwrap_model(model).post_process
assert logits_processor is None, "only support fused kernel"
batch_size, seq_len = attention_mask.shape[:2]
input_ids_rmpad, packed_seq_params = preprocess_packed_seqs(input_ids, attention_mask, pre_process=pre_process)
input_ids_rmpad = input_ids_rmpad.contiguous()
schedule_plan = model.build_schedule_plan(
input_ids=input_ids_rmpad,
attention_mask=attention_mask,
labels=labels,
position_ids=position_ids,
packed_seq_params=packed_seq_params,
)
if post_process:
attention_mask_out = attention_mask
def _postprocess(
self,
hidden_states,
input_ids,
position_ids,
labels,
rotary_pos_emb,
rotary_pos_cos,
rotary_pos_sin,
mtp_in_postprocess=None,
loss_mask=None,
decoder_input=None,
attention_mask=None,
inference_params=None,
packed_seq_params=None,
sequence_len_offset=None,
runtime_gather_output=None,
extra_block_kwargs=None,
inference_context=None,
):
"""patched from https://github.com/NVIDIA/Megatron-LM/blob/core_r0.14.0/megatron/core/models/gpt/gpt_model.py#L412"""
"""Postprocesses decoder hidden states to generate logits or compute loss.
Applies Multi-Token Prediction if enabled, generates output logits through
the output layer, and computes language model loss when labels are provided.
"""
from megatron.core import parallel_state
from megatron.core.tensor_parallel import gather_from_sequence_parallel_region
in_inference_mode = inference_context is not None and not self.training
if in_inference_mode:
assert runtime_gather_output, "Inference must always gather TP logits"
# logits and loss
output_weight = None
if self.share_embeddings_and_output_weights:
output_weight = self.shared_embedding_or_output_weight()
if mtp_in_postprocess:
hidden_states = self.mtp(
input_ids=input_ids,
position_ids=position_ids,
hidden_states=hidden_states,
attention_mask=attention_mask,
inference_params=inference_params,
rotary_pos_emb=rotary_pos_emb,
rotary_pos_cos=rotary_pos_cos,
rotary_pos_sin=rotary_pos_sin,
packed_seq_params=packed_seq_params,
sequence_len_offset=sequence_len_offset,
embedding=self.embedding,
**(extra_block_kwargs or {}),
)
if not self.post_process:
return hidden_states
if self.mtp_process:
from megatron.core.transformer.multi_token_prediction import (
MTPLossAutoScaler,
MTPLossLoggingHelper,
roll_tensor,
)
mtp_labels = labels.clone()
hidden_states_list = torch.chunk(hidden_states, 1 + self.config.mtp_num_layers, dim=0)
hidden_states = hidden_states_list[0]
if loss_mask is None:
# if loss_mask is not provided, use all ones as loss_mask
loss_mask = torch.ones_like(mtp_labels)
for mtp_layer_number in range(self.config.mtp_num_layers):
# output
mtp_logits, _ = self.output_layer(
hidden_states_list[mtp_layer_number + 1],
weight=output_weight,
runtime_gather_output=runtime_gather_output,
)
# Calc loss for the current Multi-Token Prediction (MTP) layers.
mtp_labels, _ = roll_tensor(mtp_labels, shifts=-1, dims=-1, cp_group=self.cp_group)
loss_mask, num_tokens = roll_tensor(loss_mask, shifts=-1, dims=-1, cp_group=self.cp_group)
mtp_loss = self.compute_language_model_loss(mtp_labels, mtp_logits)
mtp_loss = loss_mask * mtp_loss
if self.training:
# TODO(shifangx): remove the use of parallel_state here
# after moving loss logging to loss_func in pretrain_gpt.py
MTPLossLoggingHelper.save_loss_to_tracker(
torch.sum(mtp_loss) / num_tokens,
mtp_layer_number,
self.config.mtp_num_layers,
avg_group=parallel_state.get_data_parallel_group(with_context_parallel=True),
)
mtp_loss_scale = self.config.mtp_loss_scaling_factor / self.config.mtp_num_layers
if self.config.calculate_per_token_loss:
hidden_states = MTPLossAutoScaler.apply(hidden_states, mtp_loss_scale * mtp_loss)
else:
hidden_states = MTPLossAutoScaler.apply(hidden_states, mtp_loss_scale * mtp_loss / num_tokens)
if logits_processor is not None:
logits, _ = self.output_layer(
hidden_states, weight=output_weight, runtime_gather_output=runtime_gather_output
)
output_orig = logits.transpose(0, 1).contiguous()
args = {
k: preprocess_packed_seqs(v, attention_mask_out, pre_process=True)[0]
for k, v in logits_processor_args.items()
}
output_dict = logits_processor(output_orig, **args)
output = {
k: postprocess_packed_seqs(
v, packed_seq_params, attention_mask_out, batch_size, seq_len, post_process=post_process
)
for k, v in output_dict.items()
}
else:
# fused kernel
labels_rmpad, _ = preprocess_packed_seqs(labels, attention_mask, pre_process=True)
labels_mask_rmpad, _ = preprocess_packed_seqs(labels_mask, attention_mask, pre_process=True)
labels_rmpad = labels_rmpad.contiguous()
labels_mask_rmpad = labels_mask_rmpad.contiguous()
output = CausalLMOutputForPPO(
loss=None,
logits=None,
past_key_values=None,
hidden_states=hidden_states,
attentions=None,
)
if self.config.sequence_parallel:
hidden_states = gather_from_sequence_parallel_region(hidden_states)
logprobs, entropy = linear_cross_entropy(
hidden_states,
self.output_layer.weight,
labels_rmpad,
temperature,
"none",
parallel_state.get_tensor_model_parallel_group(),
)
output.entropy = entropy
output.log_probs = logprobs
output = postprocess_packed_seqs_for_dict_output(
labels_mask_rmpad,
output,
packed_seq_params,
attention_mask,
batch_size,
seq_len,
post_process=post_process,
)
output_ = [output["log_probs"]]
# TODO: 1f1b overlap currently only supports one tensor output
# if "entropy" in output:
# output_.append(output["entropy"])
output_ = tuple(output_)
return output_
def _custom_post_process_node_forward_impl(self, hidden_states):
if self.gpt_model.decoder.final_layernorm and not self.gpt_model.mtp_process:
hidden_states = self.gpt_model.decoder.final_layernorm(hidden_states)
# TENorm produces a "viewed" tensor. This will result in schedule.py's
# deallocate_output_tensor() throwing an error, so a viewless tensor is
# created to prevent this.
hidden_states = make_viewless_tensor(inp=hidden_states, requires_grad=True, keep_graph=True)
# Run GPTModel._postprocess
output = self.gpt_model._postprocess(
hidden_states=hidden_states,
input_ids=self.chunk_state.input_ids,
position_ids=self.chunk_state.position_ids,
labels=self.chunk_state.labels,
decoder_input=self.chunk_state.decoder_input,
rotary_pos_emb=self.chunk_state.rotary_pos_emb,
rotary_pos_cos=self.chunk_state.rotary_pos_cos,
rotary_pos_sin=self.chunk_state.rotary_pos_sin,
mtp_in_postprocess=False,
loss_mask=self.chunk_state.loss_mask,
attention_mask=self.chunk_state.attention_mask,
packed_seq_params=self.chunk_state.packed_seq_params,
sequence_len_offset=self.chunk_state.sequence_len_offset,
runtime_gather_output=self.chunk_state.runtime_gather_output,
extra_block_kwargs=self.chunk_state.extra_block_kwargs,
)
return output
schedule_plan.post_process.forward_impl = _custom_post_process_node_forward_impl.__get__(
schedule_plan.post_process, schedule_plan.post_process.__class__
)
unwrap_model(model)._postprocess = _postprocess.__get__(unwrap_model(model), unwrap_model(model).__class__)
return schedule_plan
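# Illustrative sketch (not part of the original module): the descriptor-protocol trick used
# above to monkey-patch a bound method on a single instance. `func.__get__(obj, cls)` binds a
# plain function as a method of that one object, which is how _postprocess and the schedule
# plan's forward_impl are overridden here. Toy class only.
def _example_bind_method_to_instance():
    class Toy:
        def greet(self):
            return "original"

    def patched_greet(self):
        return "patched"

    obj = Toy()
    obj.greet = patched_greet.__get__(obj, Toy)  # bind to this instance only
    assert obj.greet() == "patched"
    assert Toy().greet() == "original"  # other instances keep the class method
    return obj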
|
verl__models__mcore__model_forward_fused.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from typing import Optional
import megatron.core as mcore
import torch
from megatron.core import parallel_state
from megatron.core.config_logger import has_config_logger_enabled, log_config_to_disk
from megatron.core.inference.contexts import BaseInferenceContext
from megatron.core.models.gpt.gpt_model import GPTModel
from megatron.core.packed_seq_params import PackedSeqParams
from megatron.core.tensor_parallel.mappings import gather_from_sequence_parallel_region
from megatron.core.utils import deprecate_inference_params
from packaging import version
from torch import Tensor
from verl.models.mcore.util import preprocess_packed_seqs, preprocess_thd_no_padding
from verl.utils.kernel.linear_cross_entropy import linear_cross_entropy
from verl.utils.megatron_utils import unwrap_model
from verl.utils.model import CausalLMOutputForPPO
from .util import postprocess_packed_seqs_for_dict_output, postprocess_thd_no_padding
def _get_patching_model(model: torch.nn.Module):
model = unwrap_model(model)
if isinstance(model, GPTModel):
return model
if not (hasattr(model, "language_model") and isinstance(model.language_model, GPTModel)):
print(f"Model {model.__class__.__name__} is not a supported for fused forward")
return None
return model.language_model
def patch_fused_forward(model: torch.nn.Module):
assert version.parse(mcore.__version__) >= version.parse("0.13.0"), (
"Fused forward patching requires mecore >= 0.13.0"
)
model = _get_patching_model(model)
if model is not None:
model.forward_backup = model.forward
model.forward = _fused_GPTModel_forward.__get__(model, model.__class__)
def unpatch_fused_forward(model: torch.nn.Module):
model = _get_patching_model(model)
if model is not None:
model.forward = model.forward_backup
def fused_forward_model_gen(vision_model: bool = False):
def fused_forward_model(
model,
input_ids: Tensor,
position_ids: Tensor,
attention_mask: Tensor,
labels: Tensor,
labels_mask: Tensor,
temperature: float,
multi_modal_inputs: dict,
):
pre_process: bool = (
unwrap_model(model).pre_process if not vision_model else False
) # vision model does not need pre_process, because we pack the input_ids to thd in the forward function
post_process: bool = unwrap_model(model).post_process
model_kwargs = {}
if "pixel_values" in multi_modal_inputs:
model_kwargs["pixel_values"] = multi_modal_inputs["pixel_values"].to(input_ids.device)
if "image_grid_thw" in multi_modal_inputs:
model_kwargs["image_grid_thw"] = multi_modal_inputs["image_grid_thw"].to(input_ids.device)
if "pixel_values_videos" in multi_modal_inputs:
model_kwargs["pixel_values_videos"] = multi_modal_inputs["pixel_values_videos"].to(input_ids.device)
if "video_grid_thw" in multi_modal_inputs:
model_kwargs["video_grid_thw"] = multi_modal_inputs["video_grid_thw"].to(input_ids.device)
batch_size, seq_len = attention_mask.shape[:2]
input_ids_rmpad, packed_seq_params = preprocess_packed_seqs(input_ids, attention_mask, pre_process=pre_process)
input_ids_rmpad = input_ids_rmpad.contiguous()
labels_rmpad, _ = preprocess_packed_seqs(labels, attention_mask, pre_process=True)
labels_mask_rmpad, _ = preprocess_packed_seqs(labels_mask, attention_mask, pre_process=True)
labels_rmpad = labels_rmpad.contiguous()
labels_mask_rmpad = labels_mask_rmpad.contiguous()
input_args = dict(
input_ids=input_ids_rmpad,
attention_mask=None,
position_ids=position_ids if not vision_model else None, # vision models will calculate position_ids
packed_seq_params=packed_seq_params,
labels=labels_rmpad,
temperature=temperature,
**model_kwargs,
)
if vision_model:
# workaround for supporting sequence packing with context parallelism
# cp split with sequence packing would make the model lose vision token information, so we keep
# the original input_ids and pack them after the vision embeddings are calculated,
# in cooperation with mbridge
input_args["input_ids"] = input_ids
input_args["attention_mask"] = attention_mask
output_orig: CausalLMOutputForPPO = model(**input_args)
if post_process:
# output_orig is in type of CausalLMOutputForPPO
output = postprocess_packed_seqs_for_dict_output(
labels_mask_rmpad,
output_orig,
packed_seq_params,
attention_mask,
batch_size,
seq_len,
post_process=post_process,
)
else:
output = output_orig
return output
return fused_forward_model
def fused_forward_no_padding_gen(vision_model: bool = False):
def fused_forward_no_padding(
model,
input_ids: Tensor,
labels: Tensor,
multi_modal_inputs: dict,
temperature: float,
calculate_entropy: bool,
pad_token_id: int,
):
pre_process = unwrap_model(model).pre_process
post_process = unwrap_model(model).post_process
input_ids_rmpad, packed_seq_params = preprocess_thd_no_padding(input_ids, pre_process=pre_process)
input_ids_rmpad = input_ids_rmpad.contiguous()
model_kwargs = {}
if "pixel_values" in multi_modal_inputs:
model_kwargs["pixel_values"] = multi_modal_inputs["pixel_values"].to(input_ids.device)
if "image_grid_thw" in multi_modal_inputs:
model_kwargs["image_grid_thw"] = multi_modal_inputs["image_grid_thw"].to(input_ids.device)
if "pixel_values_videos" in multi_modal_inputs:
model_kwargs["pixel_values_videos"] = multi_modal_inputs["pixel_values_videos"].to(input_ids.device)
if "video_grid_thw" in multi_modal_inputs:
model_kwargs["video_grid_thw"] = multi_modal_inputs["video_grid_thw"].to(input_ids.device)
attention_mask = None
if vision_model:
input_ids_rmpad = input_ids.to_padded_tensor(pad_token_id)
seqlens_in_batch = input_ids.offsets().diff().to(input_ids.device)
max_seq_len = input_ids_rmpad.shape[1]
attention_mask = torch.arange(max_seq_len, device=input_ids.device).unsqueeze(
0
) < seqlens_in_batch.unsqueeze(1)
labels_rmpad, _ = preprocess_thd_no_padding(labels, pre_process=True, need_roll=True)
labels_rmpad = labels_rmpad.contiguous()
output_orig: CausalLMOutputForPPO = model(
input_ids=input_ids_rmpad,
attention_mask=attention_mask,
position_ids=None,
packed_seq_params=packed_seq_params,
labels=labels_rmpad,
temperature=temperature,
**model_kwargs,
)
if not post_process:
return output_orig
log_probs = output_orig.log_probs
if log_probs.dim() == 1:
log_probs = log_probs.unsqueeze(0)
log_probs = postprocess_thd_no_padding(
log_probs, packed_seq_params, input_ids, input_ids.shape[0], post_process=post_process
)
output = {"log_probs": log_probs}
if calculate_entropy:
entropy = output_orig.entropy
if entropy.dim() == 1:
entropy = entropy.unsqueeze(0)
entropy = postprocess_thd_no_padding(
entropy, packed_seq_params, input_ids, input_ids.shape[0], post_process=post_process
)
output["entropy"] = entropy
return output
return fused_forward_no_padding
def _fused_GPTModel_forward(
model,
input_ids: Tensor,
position_ids: Tensor,
attention_mask: Tensor,
decoder_input: Tensor = None,
labels: Tensor = None,
inference_context: BaseInferenceContext = None,
packed_seq_params: PackedSeqParams = None,
extra_block_kwargs: dict = None,
runtime_gather_output: Optional[bool] = None,
*,
inference_params: Optional[BaseInferenceContext] = None,
loss_mask: Optional[Tensor] = None,
temperature: float = 1.0,
**kwargs,
) -> CausalLMOutputForPPO:
"""
Patch self._postprocess in forward for GPT models to enable fused kernel support.
https://github.com/NVIDIA/Megatron-LM/blob/core_v0.13.0/megatron/core/models/gpt/gpt_model.py
TODO: Currently we still need to patch `forward` because we need to pass `temperature`
explicitly to `self._postprocess` when calling it; perhaps there is a better way to handle this.
"""
inference_context = deprecate_inference_params(inference_context, inference_params)
preproc_output = model._preprocess(
input_ids=input_ids,
position_ids=position_ids,
decoder_input=decoder_input,
inference_context=inference_context,
packed_seq_params=packed_seq_params,
)
(decoder_input, rotary_pos_emb, rotary_pos_cos, rotary_pos_sin, sequence_len_offset) = preproc_output[:5]
# Run decoder.
hidden_states = model.decoder(
hidden_states=decoder_input,
attention_mask=attention_mask,
inference_context=inference_context,
rotary_pos_emb=rotary_pos_emb,
rotary_pos_cos=rotary_pos_cos,
rotary_pos_sin=rotary_pos_sin,
packed_seq_params=packed_seq_params,
sequence_len_offset=sequence_len_offset,
**(extra_block_kwargs or {}),
**kwargs,
)
if not model.post_process:
return hidden_states
output = CausalLMOutputForPPO(
loss=None,
logits=None,
past_key_values=None,
hidden_states=hidden_states,
attentions=None,
)
if model.config.sequence_parallel:
hidden_states = gather_from_sequence_parallel_region(hidden_states)
# Get the output weight - use embedding weight if output_layer is None or weight is shared
if hasattr(model, "output_layer") and model.output_layer is not None and model.output_layer.weight is not None:
output_weight = model.output_layer.weight
else:
# When embeddings are tied, use the embedding weight
output_weight = model.embedding.word_embeddings.weight
logprobs, entropy = linear_cross_entropy(
hidden_states,
output_weight,
labels,
temperature,
"none",
parallel_state.get_tensor_model_parallel_group(),
)
if has_config_logger_enabled(model.config):
payload = OrderedDict(
{
"input_ids": input_ids,
"position_ids": position_ids,
"attention_mask": attention_mask,
"decoder_input": decoder_input,
"logprobs": logprobs,
"entropy": entropy,
}
)
log_config_to_disk(model.config, payload, prefix="input_and_logits")
output.entropy = entropy
output.log_probs = logprobs
return output
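# Illustrative sketch (not part of the original module): an unfused, single-device reference
# for what the fused linear_cross_entropy kernel computes above, i.e. per-token log-probs of
# the labels and the entropy of the temperature-scaled distribution. Shapes and values are
# toy; no tensor-parallel group or sbh layout is involved here.
def _example_unfused_logprobs_and_entropy():
    import torch

    num_tokens, hidden, vocab, temperature = 5, 8, 16, 1.0
    hidden_states = torch.randn(num_tokens, hidden)
    output_weight = torch.randn(vocab, hidden)
    labels = torch.randint(0, vocab, (num_tokens,))

    logits = hidden_states @ output_weight.t() / temperature
    log_softmax = torch.log_softmax(logits, dim=-1)
    log_probs = log_softmax.gather(-1, labels.unsqueeze(-1)).squeeze(-1)  # [num_tokens]
    entropy = -(log_softmax.exp() * log_softmax).sum(dim=-1)              # [num_tokens]
    return log_probs, entropy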
|
verl__models__mcore__model_initializer.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# use mcore transformer config to initialize the model
import inspect
from abc import ABC, abstractmethod
from megatron.core.models.gpt.gpt_layer_specs import get_gpt_decoder_block_spec, get_gpt_mtp_block_spec
from megatron.core.models.gpt.gpt_model import GPTModel
from .config_converter import PretrainedConfig, TransformerConfig
class BaseModelInitializer(ABC):
"""Base class for model initializers."""
def __init__(self, tfconfig: TransformerConfig, hf_config: PretrainedConfig):
self.tfconfig = tfconfig
self.hf_config = hf_config
self.has_vp_stage = inspect.signature(get_gpt_decoder_block_spec).parameters.get("vp_stage", None) is not None
@abstractmethod
def get_transformer_layer_spec(self, vp_stage=None):
"""Get the transformer layer specification.
https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/core/models/gpt/gpt_layer_specs.py"""
pass
def get_rope_scaling_args(self) -> dict:
"""Get rope scaling args."""
rope_scaling_args = {}
if "rope_scaling" in self.hf_config:
if self.hf_config.rope_scaling is not None:
# assert self.hf_config.rope_scaling["type"] == "linear", "only linear scaling is supported for now"
rope_scaling_args["seq_len_interpolation_factor"] = self.hf_config.rope_scaling["factor"]
return rope_scaling_args
def initialize(
self,
pre_process: bool = True,
post_process: bool = True,
share_embeddings_and_output_weights: bool = False,
value: bool = False,
**extra_kwargs,
) -> GPTModel:
"""Initialize a GPT model with the given configuration.
https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/core/models/gpt/gpt_model.py
Args:
pre_process (bool): include embedding layer.
post_process (bool): include an output layer.
share_embeddings_and_output_weights (bool): input embeddings and output logit weights are shared.
value (bool): add an extra linear layer for classification or regression.
Returns:
GPTModel: An initialized GPT model instance
"""
vp_stage = extra_kwargs.get("vp_stage", None)
transformer_layer_spec = self.get_transformer_layer_spec(vp_stage=vp_stage)
rope_scaling_args = self.get_rope_scaling_args()
mtp_block_spec = extra_kwargs.get("mtp_block_spec", None)
model = GPTModel(
config=self.tfconfig,
transformer_layer_spec=transformer_layer_spec,
vocab_size=self.hf_config.vocab_size,
max_sequence_length=self.hf_config.max_position_embeddings,
pre_process=pre_process,
post_process=post_process,
share_embeddings_and_output_weights=share_embeddings_and_output_weights,
position_embedding_type="rope",
rotary_base=self.hf_config.rope_theta,
**rope_scaling_args,
mtp_block_spec=mtp_block_spec,
**({} if not self.has_vp_stage else {"vp_stage": vp_stage}),
)
if post_process and value:
from verl.models.llama.megatron.layers.parallel_linear import LinearForLastLayer
model.output_layer = LinearForLastLayer(
input_size=self.tfconfig.hidden_size, output_size=1, config=self.tfconfig
)
return model
class DenseModel(BaseModelInitializer):
"""Initializer for dense models like Llama and Qwen2."""
def get_transformer_layer_spec(self, vp_stage=None):
assert self.tfconfig.normalization == "RMSNorm", "only RMSNorm is supported for now"
extra_kwargs = {} if not self.has_vp_stage else {"vp_stage": vp_stage}
return get_gpt_decoder_block_spec(self.tfconfig, use_transformer_engine=True, **extra_kwargs)
class Qwen2MoEModel(BaseModelInitializer):
"""Initializer for Qwen2 MoE models."""
def get_transformer_layer_spec(self, vp_stage=None):
assert self.tfconfig.normalization == "RMSNorm", "only RMSNorm is supported for now"
extra_kwargs = {} if not self.has_vp_stage else {"vp_stage": vp_stage}
transformer_layer_spec = get_gpt_decoder_block_spec(self.tfconfig, use_transformer_engine=True, **extra_kwargs)
# Patch layer spec for shared experts
for i in range(len(transformer_layer_spec.layer_specs)):
transformer_layer_spec.layer_specs[i].submodules.mlp.submodules.shared_experts.params["gate"] = True
return transformer_layer_spec
def initialize(self, **kwargs):
# Qwen default freeze_moe_router: true
model = super().initialize(**kwargs)
freeze_moe_router = kwargs.get("freeze_moe_router", True)
if freeze_moe_router:
for layer in model.decoder.layers:
layer.mlp.router.weight.requires_grad = False
return model
class MixtralModel(BaseModelInitializer):
"""Initializer for Mixtral models."""
def get_transformer_layer_spec(self, vp_stage=None):
assert self.tfconfig.normalization == "RMSNorm", "only RMSNorm is supported for now"
extra_kwargs = {} if not self.has_vp_stage else {"vp_stage": vp_stage}
transformer_layer_spec = get_gpt_decoder_block_spec(self.tfconfig, use_transformer_engine=True, **extra_kwargs)
return transformer_layer_spec
def initialize(self, **kwargs):
model = super().initialize(**kwargs)
freeze_moe_router = kwargs.get("freeze_moe_router", False)
if freeze_moe_router:
for layer in model.decoder.layers:
layer.mlp.router.weight.requires_grad = False
return model
class Qwen3MoEModel(BaseModelInitializer):
"""Initializer for Qwen3 MoE models."""
def get_transformer_layer_spec(self, vp_stage=None):
assert self.tfconfig.normalization == "RMSNorm", "only RMSNorm is supported for now"
extra_kwargs = {} if not self.has_vp_stage else {"vp_stage": vp_stage}
transformer_layer_spec = get_gpt_decoder_block_spec(self.tfconfig, use_transformer_engine=True, **extra_kwargs)
return transformer_layer_spec
def initialize(self, **kwargs):
# Qwen default freeze_moe_router: true
model = super().initialize(**kwargs)
freeze_moe_router = kwargs.get("freeze_moe_router", True)
if freeze_moe_router:
for layer in model.decoder.layers:
layer.mlp.router.weight.requires_grad = False
return model
class DeepseekV3Model(BaseModelInitializer):
"""Initializer for DeepseekV3 models."""
def get_transformer_layer_spec(self, vp_stage=None):
extra_kwargs = {} if not self.has_vp_stage else {"vp_stage": vp_stage}
transformer_layer_spec = get_gpt_decoder_block_spec(self.tfconfig, use_transformer_engine=True, **extra_kwargs)
return transformer_layer_spec
def get_rope_scaling_args(self) -> dict:
"""Get rope scaling args."""
rope_scaling_args = {}
return rope_scaling_args
def initialize(
self,
**kwargs,
):
vp_stage = kwargs.get("vp_stage", None)
freeze_moe_router = kwargs.get("freeze_moe_router", True)
if freeze_moe_router:
self.tfconfig.moe_router_load_balancing_type = "none"
# MTP
if self.tfconfig.mtp_num_layers is not None and self.tfconfig.mtp_num_layers > 0:
transformer_layer_spec = self.get_transformer_layer_spec(vp_stage=vp_stage)
mtp_block_spec = get_gpt_mtp_block_spec(
self.tfconfig, transformer_layer_spec, use_transformer_engine=True, vp_stage=vp_stage
)
kwargs["mtp_block_spec"] = mtp_block_spec
model = super().initialize(**kwargs)
if freeze_moe_router:
for layer in model.decoder.layers:
if hasattr(layer.mlp, "router"):
layer.mlp.router.weight.requires_grad = False
return model
class Qwen25VLModel(BaseModelInitializer):
"""Initializer for Qwen2.5 VL models."""
def get_transformer_layer_spec(self, vp_stage=None):
extra_kwargs = {} if not self.has_vp_stage else {"vp_stage": vp_stage}
transformer_layer_spec = get_gpt_decoder_block_spec(self.tfconfig, use_transformer_engine=True, **extra_kwargs)
return transformer_layer_spec
def initialize(
self,
pre_process=None,
post_process=None,
share_embeddings_and_output_weights=False,
value=False,
**extra_kwargs,
):
tfconfig = self.tfconfig
hf_config = self.hf_config
# Qwen2_5_VLForConditionalGeneration
from copy import deepcopy
transformer_layer_spec = self.get_transformer_layer_spec()
from megatron.core.extensions.transformer_engine import TEColumnParallelLinear, TERowParallelLinear
from megatron.core.models.gpt.moe_module_specs import MLPSubmodules
from megatron.core.models.vision.vit_layer_specs import get_vit_layer_with_transformer_engine_spec
from .qwen2_5_vl import Qwen2_5VLModel, get_vision_model_config, get_vision_projection_config
vision_transformer_config = get_vision_model_config(deepcopy(tfconfig))
vision_transformer_config.pipeline_model_parallel_size = 1
vision_transformer_config.first_pipeline_num_layers = None
vision_projection_config = get_vision_projection_config(
deepcopy(tfconfig),
vision_transformer_config.hidden_size,
spatial_merge_size=hf_config.vision_config.spatial_merge_size,
)
vision_projection_layer_spec = MLPSubmodules(
linear_fc1=TEColumnParallelLinear,
linear_fc2=TERowParallelLinear,
)
vision_transformer_layer_spec = get_vit_layer_with_transformer_engine_spec()
qwen25_vl_model = Qwen2_5VLModel(
language_transformer_config=tfconfig,
language_transformer_layer_spec=transformer_layer_spec,
language_vocab_size=hf_config.vocab_size,
language_max_sequence_length=hf_config.max_position_embeddings,
vision_transformer_config=vision_transformer_config,
vision_transformer_layer_spec=vision_transformer_layer_spec,
vision_projection_config=vision_projection_config,
vision_projection_layer_spec=vision_projection_layer_spec,
vision_projection_type="mlp",
language_rotary_base=hf_config.rope_theta,
pre_process=pre_process,
post_process=post_process,
add_decoder=True,
add_encoder=True,
parallel_output=True,
language_share_embeddings_and_output_weights=share_embeddings_and_output_weights,
)
if post_process and value:
from verl.models.llama.megatron.layers.parallel_linear import LinearForLastLayer
qwen25_vl_model.language_model.output_layer = LinearForLastLayer(
input_size=tfconfig.hidden_size, output_size=1, config=tfconfig
)
return qwen25_vl_model
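# Illustrative sketch (not part of the original module): the version-compatibility check
# behind BaseModelInitializer.has_vp_stage, i.e. probing whether a callable accepts a keyword
# argument before forwarding it. The two toy functions stand in for older and newer
# Megatron-LM signatures of get_gpt_decoder_block_spec.
def _example_detect_optional_kwarg():
    import inspect

    def old_api(config, use_transformer_engine=True):
        return "old"

    def new_api(config, use_transformer_engine=True, vp_stage=None):
        return "new"

    def supports_vp_stage(fn):
        return inspect.signature(fn).parameters.get("vp_stage", None) is not None

    assert not supports_vp_stage(old_api)
    assert supports_vp_stage(new_api)
    extra_kwargs = {} if not supports_vp_stage(new_api) else {"vp_stage": 0}
    return new_api(None, **extra_kwargs)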
|
verl__models__mcore__mtp_patch.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Copyright 2025 Meituan Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable
import torch
from megatron.core import parallel_state
from megatron.core.models.gpt.gpt_model import GPTModel
from megatron.core.transformer.multi_token_prediction import (
MTPLossAutoScaler,
MTPLossLoggingHelper,
roll_tensor,
)
try:
from megatron.core.utils import unwrap_model
except ImportError:
from verl.utils.megatron_utils import unwrap_model
def _get_patching_model(model: torch.nn.Module):
model = unwrap_model(model)
if isinstance(model, GPTModel):
return model
if not (hasattr(model, "language_model") and isinstance(model.language_model, GPTModel)):
print(f"Model {model.__class__.__name__} is not a supported for fused forward")
return None
return model.language_model
def patch_postprocess(model: torch.nn.Module):
model = _get_patching_model(model)
if model is not None:
model._postprocess_backup = model._postprocess
model._postprocess = _megatron_gptmodel_postprocess.__get__(model, model.__class__)
def unpatch_postprocess(model: torch.nn.Module):
model = _get_patching_model(model)
if model is not None:
model._postprocess = model._postprocess_backup
# copy from https://github.com/NVIDIA/Megatron-LM/blob/23e092f41ec8bc659020e401ddac9576c1cfed7e/megatron/core/models/gpt/gpt_model.py
# patch the postprocess method of GPTModel to support advanced features like MTP, 1f1b overlap, etc.
def _megatron_gptmodel_postprocess(
self,
hidden_states,
input_ids,
position_ids,
labels,
rotary_pos_emb,
rotary_pos_cos,
rotary_pos_sin,
mtp_in_postprocess=None,
loss_mask=None,
decoder_input=None,
attention_mask=None,
inference_params=None,
packed_seq_params=None,
sequence_len_offset=None,
runtime_gather_output=None,
extra_block_kwargs=None,
inference_context=None,
):
"""Postprocesses decoder hidden states to generate logits or compute loss.
Applies Multi-Token Prediction if enabled, generates output logits through
the output layer, and computes language model loss when labels are provided.
"""
# logits and loss
output_weight = None
if self.share_embeddings_and_output_weights:
output_weight = self.shared_embedding_or_output_weight()
if mtp_in_postprocess and labels is not None:
hidden_states = self.mtp(
input_ids=input_ids,
position_ids=position_ids,
hidden_states=hidden_states,
attention_mask=attention_mask,
inference_params=inference_params,
rotary_pos_emb=rotary_pos_emb,
rotary_pos_cos=rotary_pos_cos,
rotary_pos_sin=rotary_pos_sin,
packed_seq_params=packed_seq_params,
sequence_len_offset=sequence_len_offset,
embedding=self.embedding,
**(extra_block_kwargs or {}),
)
if not self.post_process:
return hidden_states
# Skip when mtp_num_layers is None or 0
if self.config.mtp_num_layers and labels is not None:
mtp_labels = labels.clone()
hidden_states_list = torch.chunk(hidden_states, 1 + self.config.mtp_num_layers, dim=0)
hidden_states = hidden_states_list[0]
if loss_mask is None:
# if loss_mask is not provided, use all ones as loss_mask
loss_mask = torch.ones_like(mtp_labels)
for mtp_layer_number in range(self.config.mtp_num_layers):
# Calc loss for the current Multi-Token Prediction (MTP) layers.
mtp_labels, _ = roll_tensor(
mtp_labels,
shifts=-1,
dims=-1,
cp_group=self.cp_group,
packed_seq_params=packed_seq_params,
)
loss_mask, num_tokens = roll_tensor(
loss_mask,
shifts=-1,
dims=-1,
cp_group=self.cp_group,
packed_seq_params=packed_seq_params,
)
# Compute mtp loss without storing logits to save memory.
mtp_loss = self.compute_output_layer_and_language_model_loss(
hidden_states_list[mtp_layer_number + 1],
labels=mtp_labels,
weight=self.shared_embedding_or_output_weight(),
sequence_parallel_enabled=self.output_layer.sequence_parallel,
column_parallel_linear=self.output_layer,
col_linear_kwargs={
"weight": output_weight,
"runtime_gather_output": runtime_gather_output,
},
)
mtp_loss = loss_mask * mtp_loss
if self.training:
# TODO(shifangx): remove the use of parallel_state here
# after moving loss logging to loss_func in pretrain_gpt.py
MTPLossLoggingHelper.save_loss_to_tracker(
torch.sum(mtp_loss) / num_tokens,
mtp_layer_number,
self.config.mtp_num_layers,
avg_group=parallel_state.get_data_parallel_group(with_context_parallel=True),
)
mtp_loss_scale = self.config.mtp_loss_scaling_factor / self.config.mtp_num_layers
if self.config.calculate_per_token_loss:
hidden_states = MTPLossAutoScaler.apply(hidden_states, mtp_loss_scale * mtp_loss)
else:
hidden_states = MTPLossAutoScaler.apply(hidden_states, mtp_loss_scale * mtp_loss / num_tokens)
logits, _ = self.output_layer(hidden_states, weight=output_weight, runtime_gather_output=runtime_gather_output)
# [s b h] => [b s h]
return logits.transpose(0, 1).contiguous()
def patch_mtp_layer_get_embeddings(model: torch.nn.Module):
"""Patch the _get_embeddings method of MultiTokenPredictionLayer"""
from megatron.core.models.gpt.gpt_model import GPTModel
from megatron.core.transformer.multi_token_prediction import MultiTokenPredictionLayer
# Unwrap each model in the actor_module to get the actual GPTModel
model = _get_patching_model(model)
# Collect all MultiTokenPredictionLayer instances
target_layers = []
if isinstance(model, GPTModel):
# Check if GPTModel has MTP and find the layers
if hasattr(model, "mtp") and hasattr(model.mtp, "layers"):
for layer in model.mtp.layers:
if isinstance(layer, MultiTokenPredictionLayer):
target_layers.append(layer)
elif hasattr(model, "layers"):
# Check if any layer in the model is MultiTokenPredictionLayer
for layer in model.layers:
if isinstance(layer, MultiTokenPredictionLayer):
target_layers.append(layer)
if target_layers:
for layer in target_layers:
layer._get_embeddings_backup = layer._get_embeddings
layer._get_embeddings = _patched_get_embeddings_for_detach.__get__(layer, layer.__class__)
print(f"Found and patched {len(target_layers)} MTP layer(s) in any of the actor modules")
return True
else:
print("No MTP layers found to patch in any of the actor modules")
return False
def unpatch_mtp_layer_get_embeddings(model: torch.nn.Module):
"""Unpatch the _get_embeddings method of MultiTokenPredictionLayer"""
from megatron.core.models.gpt.gpt_model import GPTModel
from megatron.core.transformer.multi_token_prediction import MultiTokenPredictionLayer
# Unwrap each model in the actor_module to get the actual GPTModel
model = _get_patching_model(model)
# Collect all MultiTokenPredictionLayer instances
target_layers = []
if isinstance(model, GPTModel):
# Check if GPTModel has MTP and find the layers
if hasattr(model, "mtp") and hasattr(model.mtp, "layers"):
for layer in model.mtp.layers:
if isinstance(layer, MultiTokenPredictionLayer):
target_layers.append(layer)
elif hasattr(model, "layers"):
# Check if any layer in the model is MultiTokenPredictionLayer
for layer in model.layers:
if isinstance(layer, MultiTokenPredictionLayer):
target_layers.append(layer)
unpatched_count = 0
for layer in target_layers:
if hasattr(layer, "_get_embeddings_backup"):
layer._get_embeddings = layer._get_embeddings_backup
delattr(layer, "_get_embeddings_backup")
unpatched_count += 1
if unpatched_count > 0:
print(f"Unpatched {unpatched_count} MTP layer(s)")
return True
return False
def _patched_get_embeddings_for_detach(
self,
input_ids: torch.Tensor,
position_ids: torch.Tensor,
embedding: Callable,
hidden_states: torch.Tensor,
packed_seq_params=None,
):
"""
Patched version of _get_embeddings method for MultiTokenPredictionLayer.
This is a modified version that you can customize according to your needs.
The original implementation is preserved below with modifications.
"""
# You can modify the logic here as needed
# For example, you could:
# - Change the shift amount in roll_tensor
# - Apply custom transformations to input_ids or position_ids
# - Add debugging information
# - Modify the embedding computation
# Original logic with custom modifications
from megatron.core.transformer.multi_token_prediction import roll_tensor
from megatron.core.utils import make_viewless_tensor
# Calc logits for the current Multi-Token Prediction (MTP) layers.
input_ids, _ = roll_tensor(
input_ids,
shifts=-1, # You can modify this shift value
dims=-1,
cp_group=self.cp_group,
packed_seq_params=packed_seq_params,
)
position_ids, _ = roll_tensor(
position_ids,
shifts=-1, # You can modify this shift value
dims=-1,
cp_group=self.cp_group,
packed_seq_params=packed_seq_params,
)
# embedding computation - you can modify this part
decoder_input = embedding(input_ids=input_ids, position_ids=position_ids)
# Apply custom transformations if needed
# For example: decoder_input = some_custom_function(decoder_input)
hidden_states = make_viewless_tensor(inp=hidden_states, requires_grad=True, keep_graph=True)
# detach decoder_input and hidden_states
decoder_input = decoder_input.detach()
hidden_states = hidden_states.detach()
return input_ids, position_ids, decoder_input, hidden_states
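# Illustrative sketch (not part of the original module): the label/mask shifting that
# roll_tensor performs for each Multi-Token Prediction depth. Rolling by -1 turns the targets
# of depth d into the targets of depth d+1 (predicting one more token ahead), and the
# wrapped-around position is masked out. This toy version ignores context parallelism and
# packed sequences, which the real roll_tensor handles via cp_group / packed_seq_params.
def _example_roll_labels_for_mtp():
    import torch

    labels = torch.tensor([[10, 11, 12, 13]])
    loss_mask = torch.tensor([[1, 1, 1, 1]])

    rolled_labels = torch.roll(labels, shifts=-1, dims=-1)
    rolled_mask = torch.roll(loss_mask, shifts=-1, dims=-1)
    rolled_mask[..., -1] = 0  # the wrapped-around position has no valid target
    # rolled_labels: [[11, 12, 13, 10]], rolled_mask: [[1, 1, 1, 0]]
    return rolled_labels, rolled_mask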
|
verl__models__mcore__patch.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# there are some bugs in mcore 0.12 that we need to patch:
# 1. `get_query_key_value_tensors` in `multi_latent_attention.py` behaves incorrectly when packed_seq_params is not None
def apply_patch():
import megatron.core
import torch
import torch.nn.functional as F
from megatron.core import parallel_state, tensor_parallel
from megatron.core.transformer.multi_latent_attention import (
MLASelfAttention,
MultiLatentAttention,
apply_rotary_pos_emb,
deprecate_inference_params,
gather_from_sequence_parallel_region,
gather_from_tensor_model_parallel_region,
scatter_to_sequence_parallel_region,
)
from packaging import version
mcore_ge_013 = version.parse(megatron.core.__version__) >= version.parse("0.13.0")
def patch_get_query_key_value_tensors(
self,
hidden_states,
key_value_states=None,
position_ids=None,
packed_seq_params=None,
inference_context=None,
*,
inference_params=None,
):
"""
Derives `query`, `key` and `value` tensors from `hidden_states`.
"""
# s = sequence length, b = batch size, h = hidden size, n = num attention heads
# Attention heads [s, b, n*h]
assert hidden_states.ndim == 3, f"hidden_states should be 3D, [s, b, n*h], got {hidden_states.ndim}D"
inference_context = deprecate_inference_params(inference_context, inference_params)
# =========================================
# Prepare RoPE and seqlen related params
# =========================================
rotary_seq_len = self.rotary_pos_emb.get_rotary_seq_len(
inference_context, None, hidden_states, self.config, packed_seq_params
)
# rotary_pos_emb:[s, b, 1, 64]
mscale = 1.0
if self.config.rope_type == "rope":
packed_seq = packed_seq_params is not None and packed_seq_params.qkv_format == "thd"
try:
# Older mcore versions raise TypeError: RotaryEmbedding.forward() got an unexpected keyword argument 'packed_seq'
rotary_pos_emb = self.rotary_pos_emb(rotary_seq_len, packed_seq=packed_seq)
except TypeError:
rotary_pos_emb = self.rotary_pos_emb(rotary_seq_len)
else:
rotary_pos_emb, mscale = self.rotary_pos_emb(rotary_seq_len)
# =========================================
# QKV down projection and layernorm
# =========================================
if self.config.q_lora_rank is not None:
# if linear_q_down_proj is ColumnParallelLinear:
# q_compressed: [s, b, q_lora_rank / TP]
# elif linear_q_down_proj is Linear:
# q_compressed: [s / TP, b, q_lora_rank]
q_compressed, _ = self.linear_q_down_proj(hidden_states)
# When the output is sharded (ColumnParallelLinear), two steps are needed to make the
# result identical to that of a normal Linear:
# 1. Manually gather output to restore output dim q_lora_rank;
# 2. Scatter sequence back to s / TP if sequence-parallel since it was
# gathered by ColumnParallelLinear.
if q_compressed.size(-1) != self.config.q_lora_rank:
q_compressed = gather_from_tensor_model_parallel_region(q_compressed)
if self.config.sequence_parallel:
q_compressed = scatter_to_sequence_parallel_region(q_compressed)
q_compressed = self.q_layernorm(q_compressed)
else:
q_compressed = hidden_states
# if linear_kv_down_proj is ColumnParallelLinear:
# kv_combined: [s, b, (kv_lora_rank + qk_pos_emb_head_dim) / TP]
# elif linear_kv_down_proj is Linear:
# kv_combined: [s / TP, b, (kv_lora_rank + qk_pos_emb_head_dim)]
kv_combined, _ = self.linear_kv_down_proj(hidden_states)
if kv_combined.size(-1) != self.config.kv_lora_rank + self.config.qk_pos_emb_head_dim:
# kv_combined: [s, b, (kv_lora_rank + qk_pos_emb_head_dim)]
kv_combined = gather_from_tensor_model_parallel_region(kv_combined)
# kv_compressed:[s, b, kv_lora_rank], k_pos_emb: [s, b, qk_pos_emb_head_dim]
kv_compressed, k_pos_emb = torch.split(
kv_combined, [self.config.kv_lora_rank, self.config.qk_pos_emb_head_dim], dim=-1
)
if self.config.sequence_parallel:
# kv_compressed:[s / TP, b, kv_lora_rank]
kv_compressed = scatter_to_sequence_parallel_region(kv_compressed)
else:
# kv_compressed:[s / TP, b, kv_lora_rank], k_pos_emb: [s / TP, b, qk_pos_emb_head_dim]
kv_compressed, k_pos_emb = torch.split(
kv_combined, [self.config.kv_lora_rank, self.config.qk_pos_emb_head_dim], dim=-1
)
if parallel_state.get_tensor_model_parallel_world_size() > 1:
# k_pos_emb: [s, b, qk_pos_emb_head_dim]
k_pos_emb = gather_from_sequence_parallel_region(k_pos_emb)
kv_compressed = self.kv_layernorm(kv_compressed)
# =========================================
# QKV up projection and RoPE apply
# =========================================
def qkv_up_proj_and_rope_apply(q_compressed, kv_compressed, k_pos_emb, rotary_pos_emb):
if self.config.q_lora_rank is not None:
q, _ = self.linear_q_up_proj(q_compressed)
else:
# hidden_states:[s, b, 2048], q: [s, b, n * 192]
q, _ = self.linear_q_proj(q_compressed)
q_len, bsz, _ = q.size()
# q: [s, b, n, 192]
q = q.view(q_len, bsz, self.num_attention_heads_per_partition, self.q_head_dim)
# kv: [s, b, 2048]
kv, _ = self.linear_kv_up_proj(kv_compressed)
# kv: [s, b, n, 256]
kv = kv.view(
q_len,
bsz,
self.num_attention_heads_per_partition,
self.config.qk_head_dim + self.config.v_head_dim,
)
cp_size = parallel_state.get_context_parallel_world_size()
if inference_context is not None:
# add offset to the sequence start for inference
sequence_start = inference_context.sequence_len_offset
sequence_end = sequence_start + q_len
rotary_pos_emb = rotary_pos_emb[sequence_start:sequence_end]
elif packed_seq_params is None or cp_size == 1:
# Shorten rotary_pos_emb to the sequence length when inference_params
# is not provided. This makes sure we can run forward directly with
# any sequence length. During training, the sequence length is always
# the full rotary_pos_emb length, except for sequence packing + CP.
# When sequence packing and context parallel are both enabled, the
# position embedding will not split rotary_pos_emb, so it may exceed
# the sequence length on this CP rank, but we need the full rotary_pos_emb
# to cover the full sequence, so we do not shorten it here.
rotary_pos_emb = rotary_pos_emb[0:q_len]
# [s, b, 64] -> [s, b, 1, 64]
k_pos_emb = torch.unsqueeze(k_pos_emb, 2)
# q: [s, b, n, 128], q_pos_emb: [s, b, n, 64]
q_no_pe, q_pos_emb = torch.split(q, [self.config.qk_head_dim, self.config.qk_pos_emb_head_dim], dim=-1)
# k_no_pe: [s, b, n, 128], value: [s, b, n, 128]
k_no_pe, value = torch.split(kv, [self.config.qk_head_dim, self.config.v_head_dim], dim=-1)
if packed_seq_params is not None:
cu_seqlens_q = packed_seq_params.cu_seqlens_q
cu_seqlens_kv = packed_seq_params.cu_seqlens_kv
q_pos_emb = q_pos_emb.squeeze(1)
k_pos_emb = k_pos_emb.squeeze(1)
q_no_pe = q_no_pe.squeeze(1)
k_no_pe = k_no_pe.squeeze(1)
value = value.squeeze(1)
else:
cu_seqlens_q = cu_seqlens_kv = None
# q_pos_emb: [s, b, n, 64], k_pos_emb:[s, b, 1, 64]
q_pos_emb = apply_rotary_pos_emb(
q_pos_emb,
rotary_pos_emb,
config=self.config,
cu_seqlens=cu_seqlens_q,
mscale=mscale,
)
k_pos_emb = apply_rotary_pos_emb(
k_pos_emb,
rotary_pos_emb,
config=self.config,
cu_seqlens=cu_seqlens_kv,
mscale=mscale,
)
# query: [s, b, n, 192]
query = torch.cat([q_no_pe, q_pos_emb], dim=-1)
if packed_seq_params is not None:
k_pos_emb = k_pos_emb.expand(-1, self.num_attention_heads_per_partition, -1)
key = torch.cat([k_no_pe, k_pos_emb], dim=-1)
else:
# key: [s, b, n, 192]
k_pos_emb = k_pos_emb.expand(-1, -1, self.num_attention_heads_per_partition, -1)
key = torch.cat([k_no_pe, k_pos_emb], dim=-1)
query = query.contiguous()
key = key.contiguous()
value = value.contiguous()
return query, key, value
if self.recompute_up_proj:
self.qkv_up_checkpoint = tensor_parallel.CheckpointWithoutOutput()
query, key, value = self.qkv_up_checkpoint.checkpoint(
qkv_up_proj_and_rope_apply, q_compressed, kv_compressed, k_pos_emb, rotary_pos_emb
)
else:
query, key, value = qkv_up_proj_and_rope_apply(q_compressed, kv_compressed, k_pos_emb, rotary_pos_emb)
return query, key, value
def patch_forward(
self,
hidden_states,
attention_mask,
key_value_states=None,
inference_context=None,
rotary_pos_emb=None,
rotary_pos_cos=None,
rotary_pos_sin=None,
attention_bias=None,
packed_seq_params=None,
position_ids=None,
sequence_len_offset=None,
*,
inference_params=None,
**kwargs,
):
"""Forward pass for multi-latent attention"""
assert attention_bias is None, "Attention bias should not be passed into MLA."
assert rotary_pos_cos is None and rotary_pos_sin is None, "MLA does not support Flash Decoding"
# hidden_states: [sq, b, h]
inference_context = deprecate_inference_params(inference_context, inference_params)
# =====================
# Query, Key, and Value
# =====================
# Get the query, key and value tensors based on the type of attention -
# self or cross attn.
# query: [96, 1, 16, 128], key:[96, 1, 16, 128], value:[96, 1, 16, 128]
query, key, value = self.get_query_key_value_tensors(
hidden_states,
key_value_states,
position_ids,
packed_seq_params,
inference_context=inference_context,
)
# ===================================================
# Adjust key, value for inference
# ===================================================
# rotary_pos_emb = None
if mcore_ge_013:
query, key, value, _, attn_mask_type, _ = self._adjust_key_value_for_inference(
inference_context, query, key, value, rotary_pos_emb=None
)
else:
query, key, value, _, attn_mask_type = self._adjust_key_value_for_inference(
inference_context, query, key, value, rotary_pos_emb=None
)
# TODO: Currently, TE can only accept contiguous tensors for MLA
query = query.contiguous()
key = key.contiguous()
value = value.contiguous()
# ==================================
# core attention computation
# ==================================
# Need corresponding TE change
thd_qkv_format = packed_seq_params and packed_seq_params.qkv_format == "thd"
v_dim = value.shape[-1]
if thd_qkv_format and query.shape[-1] != v_dim:
value = F.pad(value, [0, query.shape[-1] - v_dim])
self.core_attention.hidden_size_per_attention_head_v = value.shape[-1]
if self.checkpoint_core_attention and self.training:
core_attn_out = self._checkpointed_attention_forward(
query, key, value, attention_mask, packed_seq_params=packed_seq_params
)
else:
core_attn_out = self.core_attention(
query,
key,
value,
attention_mask,
packed_seq_params=packed_seq_params,
attn_mask_type=attn_mask_type,
)
if thd_qkv_format:
if core_attn_out.ndim == 2:
core_attn_out = core_attn_out.reshape(*core_attn_out.shape[:-1], -1, value.shape[-1])
if query.shape[-1] != v_dim:
core_attn_out = core_attn_out[..., :v_dim]
# reshape to same output shape as unpacked case
# (t, np, hn) -> (t, b=1, h=np*hn)
# t is the pack size = sum (sq_i)
# note that batch is a dummy dimension in the packed case
core_attn_out = core_attn_out.reshape(core_attn_out.size(0), 1, -1)
if self.recompute_up_proj:
assert self.qkv_up_checkpoint is not None
self.qkv_up_checkpoint.discard_output_and_register_recompute(core_attn_out)
self.qkv_up_checkpoint = None
# =================
# Output. [sq, b, h]
# =================
output, bias = self.linear_proj(core_attn_out)
return output, bias
MLASelfAttention.get_query_key_value_tensors = patch_get_query_key_value_tensors
MultiLatentAttention.forward = patch_forward
def apply_patch_mbridge():
try:
from megatron.core.utils import get_tensor_model_parallel_group_if_none
except ImportError:
import warnings
import megatron.core.utils
import torch
from megatron.core import parallel_state
def get_tensor_model_parallel_group_if_none(tp_group, is_expert=False, check_initialized=True):
"""Issue a deprecation warning if tp_group is None and return the default tp group."""
if not torch.distributed.is_initialized():
return None
if tp_group is None:
if torch.distributed.is_initialized() and torch.distributed.get_rank() == 0:
warnings.warn(
"Warning: tp_group is None, using default tp group. Passing tp_group will be mandatory soon",
DeprecationWarning,
stacklevel=2,
)
if is_expert:
tp_group = parallel_state.get_expert_tensor_parallel_group(check_initialized=check_initialized)
else:
tp_group = parallel_state.get_tensor_model_parallel_group(check_initialized=check_initialized)
return tp_group
megatron.core.utils.get_tensor_model_parallel_group_if_none = get_tensor_model_parallel_group_if_none
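# --- Hedged usage sketch (added, not part of the original module) ---
# The two functions above monkey-patch megatron-core in place and are meant to be called once at
# startup by the trainer. The entry point below is an illustrative assumption, not verl's actual
# call site.
if __name__ == "__main__":
    try:
        apply_patch()
        apply_patch_mbridge()
        print("Applied mcore MLA patch and mbridge tp_group fallback patch")
    except ImportError as exc:
        print(f"megatron-core not available, patches skipped: {exc}")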
|
verl__models__mcore__qwen2_5_vl__model.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2024 Alibaba PAI Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import torch
from megatron.core import InferenceParams, mpu, tensor_parallel
from megatron.core.models.gpt.gpt_model import GPTModel
# from .transformer_config import Qwen2VLTransformerConfig
from megatron.core.packed_seq_params import PackedSeqParams
from megatron.core.transformer import MegatronModule
from megatron.core.transformer.spec_utils import ModuleSpec
from megatron.core.transformer.transformer_config import TransformerConfig
from verl.models.mcore.util import preprocess_packed_seqs
from .attention import Qwen2_5VLSelfAttention
from .vision_model import Qwen2_5VisionModel
# Note: This is under development and may be missing features.
class Qwen2_5VLModel(MegatronModule):
"""Qwen2.5VL multi-modal model.
Args:
language_transformer_config (TransformerConfig): Transformer config for the language model.
language_transformer_layer_spec (ModuleSpec): Specifies module to use for transformer layers of the
language model.
language_vocab_size (int): Language model vocabulary size.
language_max_sequence_length (int): Language model maximum sequence length. This is used for
positional embedding.
vision_transformer_config (TransformerConfig): Transformer config for the vision model.
vision_transformer_layer_spec (ModuleSpec): Specifies module to use for transformer layers of the
vision model.
vision_projection_config (TransformerConfig): Config for the projection from vision model outputs to
language model inputs.
vision_projection_layer_spec (ModuleSpec): Specifies the module to use for the vision
projection.
vision_projection_type (str): Type of the vision projection to use. Default is a 2-layer MLP.
parallel_output (bool): Do not gather the outputs, keep them split across tensor parallel ranks. This
is typically True for training and False for inference.
language_rotary_percent (float): Percent of rotary dimension to use for rotary position embeddings
in the language model. Defaults to 1.0.
pre_process (bool): Include the embedding layer in the gpt decoder (used with pipeline parallelism).
Defaults to True.
post_process (bool): Include an output layer and a layernorm in the gpt decoder (used with pipeline
parallelism). Defaults to True.
add_encoder (bool): Construct the encoder module (used with pipeline parallelism). Defaults to True.
When we use pipelining, the encoder
will live on only a subset of the pipeline stages (specifically, only the first stage).
add_decoder (bool): Construct the decoder module (used with pipeline parallelism). Defaults to True.
When we use pipelining, the decoder
will live on only a subset of the pipeline stages (specifically, every stage after the first one).
language_rotary_base (int): Base period for rotary position embeddings in the language model.
Defaults to 10000.
fp16_lm_cross_entropy (bool): Compute the language-model cross-entropy loss in fp16. Defaults to False.
language_share_embeddings_and_output_weights (bool): Tie the language model input embeddings and
output weights. Defaults to False.
image_token_id (int): Token id used as a placeholder for image features. Defaults to 151655.
video_token_id (int): Token id used as a placeholder for video features. Defaults to 151656.
"""
def __init__(
self,
language_transformer_config: TransformerConfig,
language_transformer_layer_spec: ModuleSpec,
language_vocab_size: int,
language_max_sequence_length: int,
vision_transformer_config: TransformerConfig,
vision_transformer_layer_spec: ModuleSpec,
vision_projection_config: TransformerConfig,
vision_projection_layer_spec: ModuleSpec,
vision_projection_type: str = "mlp",
parallel_output: bool = True,
language_rotary_percent: float = 1.0,
pre_process: bool = True,
post_process: bool = True,
add_encoder: bool = True,
add_decoder: bool = True,
language_rotary_base: int = 10000,
fp16_lm_cross_entropy: bool = False,
language_share_embeddings_and_output_weights: bool = False,
image_token_id: int = 151655,
video_token_id: int = 151656,
) -> None:
super().__init__(config=language_transformer_config)
# patch self_attention to use qwen2_5_vl attention
vision_transformer_layer_spec.submodules.self_attention.module = Qwen2_5VLSelfAttention
for layer_spec in language_transformer_layer_spec.layer_specs:
layer_spec.submodules.self_attention.module = Qwen2_5VLSelfAttention
logging.getLogger(__name__).warning("Qwen2VL model is under development and may be missing features.")
self.pre_process = pre_process
self.post_process = post_process
self.add_encoder = add_encoder
self.add_decoder = add_decoder
self.encoder_hidden_state = None
self.vision_model = None
self.vision_projection = None
self.language_model = None
self.image_token_id = image_token_id
self.video_token_id = video_token_id
self.square_merge_size = vision_projection_config.ffn_hidden_size // vision_transformer_config.hidden_size
# This attribute is needed to check if an all-reduce is required
# on the word embeddings inside `finalize_model_grads._allreduce_word_embedding_grads`.
self.share_embeddings_and_output_weights = False
if self.pre_process:
self.vision_model = Qwen2_5VisionModel(
vision_transformer_config,
vision_transformer_layer_spec,
vision_projection_config,
vision_projection_layer_spec,
projection_type=vision_projection_type,
pre_process=True,
post_process=True,
)
self.language_model = GPTModel(
config=language_transformer_config,
transformer_layer_spec=language_transformer_layer_spec,
vocab_size=language_vocab_size,
max_sequence_length=language_max_sequence_length,
parallel_output=parallel_output,
position_embedding_type="mrope",
rotary_percent=language_rotary_percent,
pre_process=self.pre_process,
post_process=self.post_process,
rotary_base=language_rotary_base,
fp16_lm_cross_entropy=fp16_lm_cross_entropy,
share_embeddings_and_output_weights=language_share_embeddings_and_output_weights,
scatter_embedding_sequence_parallel=False,
)
assert mpu.get_context_parallel_world_size() <= 1, "please use mbridge for qwen2_5_vl with context parallelism"
self.share_embeddings_and_output_weights = self.language_model.share_embeddings_and_output_weights
def shared_embedding_or_output_weight(self):
"""This is a convenience method to surface the language model's word embeddings, which is
necessary for `finalize_model_grads._allreduce_word_embedding_grads`."""
if self.add_decoder:
return self.language_model.shared_embedding_or_output_weight()
return None
def set_input_tensor(self, input_tensor) -> None:
# This is usually handled in schedules.py but some inference code still
# gives us non-lists or None
if not isinstance(input_tensor, list):
input_tensor = [input_tensor]
assert len(input_tensor) == 1, "input_tensor should only be length 1 for Qwen2VL"
if self.pre_process:
self.encoder_hidden_state = input_tensor[0]
else:
self.language_model.set_input_tensor(input_tensor[0])
def freeze(self, freeze_language_model: bool, freeze_vision_model: bool, freeze_vision_projection: bool):
"""Freeze model modules.
Make specific modules non-trainable by setting requires_grad to False for the module's parameters.
Args:
freeze_language_model (bool): Freeze the language model module.
freeze_vision_model (bool): Freeze the vision model module.
freeze_vision_projection (bool): Freeze the vision projection module.
"""
modules = []
if freeze_language_model and self.language_model is not None:
modules.append(self.language_model)
if freeze_vision_model and self.vision_model is not None:
modules.append(self.vision_model)
if freeze_vision_projection and self.vision_projection is not None:
modules.append(self.vision_projection)
for module in modules:
for param in module.parameters():
param.requires_grad = False
def forward(
self,
input_ids: torch.Tensor,
position_ids: torch.Tensor,
attention_mask: torch.Tensor = None,
labels: torch.Tensor = None,
inference_params: InferenceParams = None,
packed_seq_params: PackedSeqParams = None,
extra_block_kwargs: dict = None,
pixel_values: torch.Tensor = None,
pixel_values_videos: torch.Tensor = None,
image_grid_thw: torch.Tensor = None,
video_grid_thw: torch.Tensor = None,
**kwargs,
) -> torch.Tensor:
"""Forward function of the Qwen2VL model.
### Workaround for supporting sequence packing with context parallelism:
# A CP split combined with sequence packing would lose the vision token information, so we keep
# the original input_ids and only pack them to thd after the vision embeddings have been computed,
# in cooperation with verl's models/mcore/model_forward.py.
# Whether the combined_embeddings need to be packed to thd here is determined by checking if
# packed_seq_params is None.
# This function expects position_ids and attention_mask in BSHD format, whether or not sequence
# packing is used.
Args:
pixel_values (torch.Tensor): input image patch features of shape [total_thw_size, n_features].
input_ids (torch.Tensor): input text ids [batch, text_seq_len].
position_ids (torch.Tensor): input text position ids [batch, text_seq_len].
attention_mask (torch.Tensor): attention mask for the language model [batch, 1, combined_seq_len,
combined_seq_len].
labels (torch.Tensor): Optional target text labels [batch, combined_seq_len].
inference_params (InferenceParams): Inference-time parameters including KV cache.
video_start_index (computed internally from the image token count):
0 -- all vision tokens are video tokens
len(vision_embeds) -- all vision tokens are image tokens
other values -- a mixture of image and video tokens
*_input_mask: should not be None in the first PP stage
Returns:
output (torch.Tensor): Loss of shape [b, s] if labels are provided, otherwise logits of shape
[b, s, vocab_size].
"""
video_start_index = 0
vision_grid_thw = None
vision_data = None
if image_grid_thw is not None:
image_mask = input_ids == self.image_token_id
vision_grid_thw = image_grid_thw
vision_data = pixel_values
video_start_index = image_mask.sum().item()
if video_grid_thw is not None:
video_mask = input_ids == self.video_token_id
if vision_grid_thw is not None:
vision_grid_thw = torch.cat([vision_grid_thw, video_grid_thw], dim=0)
vision_data = torch.cat([vision_data, pixel_values_videos], dim=0)
else:
vision_grid_thw = video_grid_thw
vision_data = pixel_values_videos
use_inference_kv_cache = (
inference_params is not None and "image_tokens_count" in inference_params.key_value_memory_dict
)
if use_inference_kv_cache:
raise NotImplementedError()
if self.pre_process:
vision_embeds = None
if vision_grid_thw is not None and vision_grid_thw.shape[0] > 0:
vision_embeds = self.vision_model(
vision_data=vision_data, # If None, vision model should use intermediate outputs (EPP > 1)
grid_thw=vision_grid_thw,  # should be provided in each EPP stage
)
# If running inference, the language model KV cache will be updated for image token positions.
# Here we store the image tokens sequence length, which can be used as an offset to the KV cache later.
if inference_params is not None:
raise NotImplementedError()
# inference_params.key_value_memory_dict["image_tokens_count"] = (
# vision_embeddings.shape[0]
# )
# If running inference, we can skip image token computation if they were computed already earlier
# for this sample.
if use_inference_kv_cache:
language_embeddings: torch.Tensor = self.language_model.embedding(
input_ids=input_ids,
position_ids=None, # NOTE: disable
) # [text_seq_len, b, h_language]
# NOTE: why not concatenate here? Are the combined embeddings unnecessary in this case?
combined_embeddings = language_embeddings
elif vision_embeds is not None:
if video_start_index == 0:
image_embeds = None
video_embeds = vision_embeds
elif video_start_index == vision_embeds.shape[0]:
image_embeds = vision_embeds
video_embeds = None
elif 0 < video_start_index < vision_embeds.shape[0]:
image_embeds = vision_embeds[:video_start_index]
video_embeds = vision_embeds[video_start_index:]
else:
raise ValueError(
f"Expect video token start index in range [0, {vision_embeds.shape[0]}], but got "
f"{video_start_index}"
)
combined_embeddings = self.language_model.embedding(
input_ids=input_ids,
position_ids=None, # NOTE: disable
) # [text_seq_len, b, h_language]
if image_embeds is not None or video_embeds is not None:
combined_embeddings = combined_embeddings.transpose(0, 1).contiguous()
if image_embeds is not None:
image_mask = (input_ids == self.image_token_id).contiguous()
if image_mask.sum() > 0:
combined_embeddings = combined_embeddings.clone()
combined_embeddings[image_mask] = image_embeds.to(
dtype=combined_embeddings.dtype, device=combined_embeddings.device
)
if video_embeds is not None:
video_mask = (input_ids == self.video_token_id).contiguous()
if video_mask.sum() > 0:
combined_embeddings = combined_embeddings.clone()
combined_embeddings[video_mask] = video_embeds.to(
dtype=combined_embeddings.dtype, device=combined_embeddings.device
)
combined_embeddings = combined_embeddings.transpose(0, 1).contiguous()
else:
combined_embeddings = self.language_model.embedding(
input_ids=input_ids,
position_ids=None, # NOTE: disable
) # [text_seq_len, b, h_language]
if packed_seq_params is not None:
combined_embeddings = (
preprocess_packed_seqs(
combined_embeddings.transpose(0, 1).contiguous(), attention_mask, pre_process=True
)[0]
.transpose(0, 1)
.contiguous()
)
if self.config.sequence_parallel:
combined_embeddings = tensor_parallel.scatter_to_sequence_parallel_region(combined_embeddings)
combined_embeddings = combined_embeddings.contiguous()
else:
combined_embeddings = None
from .rope_utils import get_rope_index
# BSHD
position_ids, _ = get_rope_index(
input_ids,
image_grid_thw=image_grid_thw,
video_grid_thw=video_grid_thw,
attention_mask=attention_mask,
)
# THD
if packed_seq_params is not None:
position_ids = (
preprocess_packed_seqs(position_ids.permute(1, 2, 0), attention_mask, pre_process=True)[0]
.permute(2, 0, 1)
.contiguous()
)
attention_mask = None
output = self.language_model(
input_ids=None,
position_ids=position_ids, # None in encoder
attention_mask=attention_mask, # None in encoder
decoder_input=combined_embeddings, # only not None in the first decoder PP stage
labels=labels, # only not None in the last decoder PP stage
# inference_params=inference_params, # currently always None
packed_seq_params=packed_seq_params, # currently always None
**(extra_block_kwargs or {}),
**kwargs,
)
return output
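# --- Hedged illustrative sketch (added, not part of the original module) ---
# Shows how `video_start_index` (the number of image placeholder tokens) splits the concatenated
# vision embeddings into image and video parts, mirroring the branch in forward() above. The
# tensor sizes are made-up assumptions.
if __name__ == "__main__":
    vision_embeds = torch.randn(10, 1280)  # 6 image tokens followed by 4 video tokens
    video_start_index = 6
    image_embeds = vision_embeds[:video_start_index]
    video_embeds = vision_embeds[video_start_index:]
    assert image_embeds.shape == (6, 1280) and video_embeds.shape == (4, 1280)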
|
verl__models__mcore__qwen2_5_vl__rope_utils.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2024 Alibaba PAI Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import logging
from typing import Optional
import torch
from megatron.core.models.common.embeddings.rope_utils import *
from megatron.core.models.common.embeddings.rope_utils import _apply_rotary_pos_emb_bshd
from torch import Tensor
logger = logging.getLogger(__name__)
# Slightly modified from Qwen2VLForConditionalGeneration.get_rope_index
def get_rope_index(
input_ids: Optional[torch.LongTensor] = None,
image_grid_thw: Optional[torch.LongTensor] = None,
video_grid_thw: Optional[torch.LongTensor] = None,
second_per_grid_ts: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
):
"""
Calculate the 3D rope index based on image and video's temporal, height and width in LLM.
Explanation:
Each embedding sequence contains vision embedding and text embedding or just contains text embedding.
For pure text embedding sequence, the rotary position embedding has no difference with modern LLMs.
Examples:
input_ids: [T T T T T], here T is for text.
temporal position_ids: [0, 1, 2, 3, 4]
height position_ids: [0, 1, 2, 3, 4]
width position_ids: [0, 1, 2, 3, 4]
For vision and text embedding sequence, we calculate 3D rotary position embedding for vision part
and 1D rotary position embedding for text part.
Examples:
Temporal (Time): 3 patches, representing different segments of the video in time.
Height: 2 patches, dividing each frame vertically.
Width: 2 patches, dividing each frame horizontally.
We also have some important parameters:
fps (Frames Per Second): The video's frame rate, set to 1. This means one frame is processed each
second.
tokens_per_second: This is a crucial parameter. It dictates how many "time-steps" or "temporal
tokens" are conceptually packed into a one-second interval of the video.
In this case, we have 25 tokens per second. So each second of the video will be
represented with 25 separate time points. It essentially defines the temporal
granularity.
temporal_patch_size: The number of frames that compose one temporal patch. Here, it's 2 frames.
interval: The step size for the temporal position IDs, calculated as tokens_per_second *
temporal_patch_size / fps. In this case, 25 * 2 / 1 = 50. This means that each temporal patch will
have a difference of 50 in the temporal position IDs.
input_ids: [V V V V V V V V V V V V T T T T T], here V is for vision.
vision temporal position_ids: [0, 0, 0, 0, 50, 50, 50, 50, 100, 100, 100, 100]
vision height position_ids: [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1]
vision width position_ids: [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
text temporal position_ids: [101, 102, 103, 104, 105]
text height position_ids: [101, 102, 103, 104, 105]
text width position_ids: [101, 102, 103, 104, 105]
Here we calculate the text start position_ids as the max vision position_ids plus 1.
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
The temporal, height and width of feature shape of each image in LLM.
video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
The temporal, height and width of feature shape of each video in LLM.
second_per_grid_ts (`torch.Tensor` of shape `(num_videos)`, *optional*):
The time interval (in seconds) for each grid along the temporal dimension in the 3D position IDs.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
Returns:
position_ids (`torch.LongTensor` of shape `(3, batch_size, sequence_length)`)
mrope_position_deltas (`torch.Tensor` of shape `(batch_size)`)
"""
spatial_merge_size = 2
tokens_per_second = 2
image_token_id = 151655
video_token_id = 151656
vision_start_token_id = 151652
mrope_position_deltas = []
if input_ids is not None and (image_grid_thw is not None or video_grid_thw is not None):
total_input_ids = input_ids
if attention_mask is None:
attention_mask = torch.ones_like(total_input_ids)
position_ids = torch.ones(
3,
input_ids.shape[0],
input_ids.shape[1],
dtype=input_ids.dtype,
device=input_ids.device,
)
image_index, video_index = 0, 0
attention_mask = attention_mask.to(total_input_ids.device)
for i, input_ids in enumerate(total_input_ids):
input_ids = input_ids[attention_mask[i] == 1]
image_nums, video_nums = 0, 0
vision_start_indices = torch.argwhere(input_ids == vision_start_token_id).squeeze(1)
vision_tokens = input_ids[vision_start_indices + 1]
image_nums = (vision_tokens == image_token_id).sum()
video_nums = (vision_tokens == video_token_id).sum()
input_tokens = input_ids.tolist()
llm_pos_ids_list: list = []
st = 0
remain_images, remain_videos = image_nums, video_nums
for _ in range(image_nums + video_nums):
if image_token_id in input_tokens and remain_images > 0:
ed_image = input_tokens.index(image_token_id, st)
else:
ed_image = len(input_tokens) + 1
if video_token_id in input_tokens and remain_videos > 0:
ed_video = input_tokens.index(video_token_id, st)
else:
ed_video = len(input_tokens) + 1
if ed_image < ed_video:
t, h, w = (
image_grid_thw[image_index][0],
image_grid_thw[image_index][1],
image_grid_thw[image_index][2],
)
second_per_grid_t = 0
image_index += 1
remain_images -= 1
ed = ed_image
else:
t, h, w = (
video_grid_thw[video_index][0],
video_grid_thw[video_index][1],
video_grid_thw[video_index][2],
)
if second_per_grid_ts is not None:
second_per_grid_t = second_per_grid_ts[video_index]
else:
second_per_grid_t = 1.0
video_index += 1
remain_videos -= 1
ed = ed_video
llm_grid_t, llm_grid_h, llm_grid_w = (
t.item(),
h.item() // spatial_merge_size,
w.item() // spatial_merge_size,
)
text_len = ed - st
st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx)
range_tensor = torch.arange(llm_grid_t).view(-1, 1)
expanded_range = range_tensor.expand(-1, llm_grid_h * llm_grid_w)
time_tensor = expanded_range * second_per_grid_t * tokens_per_second
time_tensor_long = time_tensor.long()
t_index = time_tensor_long.flatten()
h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(llm_grid_t, -1, llm_grid_w).flatten()
w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(llm_grid_t, llm_grid_h, -1).flatten()
llm_pos_ids_list.append(torch.stack([t_index, h_index, w_index]) + text_len + st_idx)
st = ed + llm_grid_t * llm_grid_h * llm_grid_w
if st < len(input_tokens):
st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
text_len = len(input_tokens) - st
llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx)
llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1)
position_ids[..., i, attention_mask[i] == 1] = llm_positions.to(position_ids.device)
mrope_position_deltas.append(llm_positions.max() + 1 - len(total_input_ids[i]))
mrope_position_deltas = torch.tensor(mrope_position_deltas, device=input_ids.device).unsqueeze(1)
return position_ids, mrope_position_deltas
else:
if attention_mask is not None:
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
position_ids = position_ids.unsqueeze(0).expand(3, -1, -1).to(attention_mask.device)
max_position_ids = position_ids.max(0, keepdim=False)[0].max(-1, keepdim=True)[0]
mrope_position_deltas = max_position_ids + 1 - attention_mask.shape[-1]
else:
position_ids = (
torch.arange(input_ids.shape[1], device=input_ids.device)
.view(1, 1, -1)
.expand(3, input_ids.shape[0], -1)
)
mrope_position_deltas = torch.zeros(
[input_ids.shape[0], 1],
device=input_ids.device,
dtype=input_ids.dtype,
)
return position_ids, mrope_position_deltas
def apply_rotary_pos_emb_thd_absolute(
t: Tensor, cu_seqlens: Tensor, freqs: Tensor, rotary_interleaved: bool = False
) -> Tensor:
"""A baseline implementation of applying RoPE for `thd` format.
Args:
t (Tensor): Input tensor T is of shape [t, h, d]
cu_seqlens(Tensor): Cumulative sum of sequence lengths in a batch for `t`,
with shape [b + 1] and dtype torch.int32.
freqs (Tensor): Rotary Positional embedding tensor freq is of shape [max_s, 1, 1, d]
Returns:
Tensor: Shape [t, h, d]. The input tensor after applying RoPE.
"""
return _apply_rotary_pos_emb_bshd(t[:, None], freqs, rotary_interleaved=rotary_interleaved).squeeze(1)
def apply_rotary_pos_emb_absolute(
t: Tensor,
freqs: Tensor,
config: TransformerConfig,
cu_seqlens: Optional[Tensor] = None,
):
"""
Reroute to the appropriate apply_rotary_pos_emb function depending on
bshd (conventional) / thd (packed seq) format
In Qwen2-VL, the shape of freqs is (seq_length, bs, 1, 2 * dim) instead of [max_seqlen, 1, 1, 2 * dim]
"""
if config.apply_rope_fusion:
if cu_seqlens is None:
# NOTE: TE backends do not support mRoPE in bshd format when bs > 1
if freqs.shape[1] > 1:
return _apply_rotary_pos_emb_bshd(t, freqs, rotary_interleaved=config.rotary_interleaved)
else:
return fused_apply_rotary_pos_emb(t, freqs)
else:
# NOTE: the thd format can reuse the bshd fused kernel by adding a dummy batch dimension
return fused_apply_rotary_pos_emb(t[:, None], freqs).squeeze(1)
else:
if cu_seqlens is None:
return _apply_rotary_pos_emb_bshd(t, freqs, rotary_interleaved=config.rotary_interleaved)
else:
return apply_rotary_pos_emb_thd_absolute(t, cu_seqlens, freqs, rotary_interleaved=config.rotary_interleaved)
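# --- Hedged illustrative sketch (added, not part of the original module) ---
# Exercises the pure-text fallback of get_rope_index (no image/video grids): all three mrope axes
# receive plain 1D positions and the position deltas are zero. Also re-checks the interval
# arithmetic quoted in the docstring (tokens_per_second * temporal_patch_size / fps = 25 * 2 / 1
# = 50; note the code above defaults tokens_per_second to 2). Values are illustrative assumptions.
if __name__ == "__main__":
    ids = torch.tensor([[101, 102, 103, 104, 105]])
    pos, deltas = get_rope_index(input_ids=ids)
    assert pos.shape == (3, 1, 5) and pos[0, 0].tolist() == [0, 1, 2, 3, 4]
    assert deltas.shape == (1, 1) and int(deltas.sum()) == 0
    tokens_per_second, temporal_patch_size, fps = 25, 2, 1
    assert tokens_per_second * temporal_patch_size / fps == 50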
|
verl__models__mcore__qwen2_5_vl__vision_config.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2024 Alibaba PAI Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from megatron.core import parallel_state
from megatron.core.transformer import TransformerConfig
def get_vision_model_config(config: TransformerConfig) -> TransformerConfig:
# Given a Transformer Config from decoder, build vision encoder config
# diff: out_hidden_size & intermediate_size
# mlp: hidden_size -> intermediate_size -> embed_dim, silu
# NOTE: workaround for the incorrect layer count when virtual pipeline parallelism (VPP) is enabled for the decoder
if config.num_layers in [28, 36]:
config.ffn_hidden_size = 3420
else:
config.ffn_hidden_size = 3456
if parallel_state.get_virtual_pipeline_model_parallel_world_size() is not None:
config.num_layers = 32 * parallel_state.get_virtual_pipeline_model_parallel_world_size() # depth
else:
config.num_layers = 32 # depth
config.num_attention_heads = 16 # num_heads
config.add_bias_linear = True # all nn.Linear has bias (MLP, attn)
config.add_qkv_bias = True # qkv_proj in attn has bias
config.hidden_size = 1280 # hidden_size
config.hidden_dropout = 0.0
config.attention_dropout = 0.0
# config.gated_linear_unit = False # no gated
# config.activation_func = quick_gelu # hidden_act
config.kv_channels = config.hidden_size // config.num_attention_heads
config.num_query_groups = config.num_attention_heads # no GQA
config.layernorm_zero_centered_gamma = False # False
config.apply_query_key_layer_scaling = False # factor=math.sqrt(head_dim)
config.bias_activation_fusion = False # no swiglu, set false
config.bias_dropout_fusion = False # no dropout, set false
config.attention_softmax_in_fp32 = True # use True
# config.normalization = 'LayerNorm' # use RMSNorm
config.seq_length = 1
config.tp_comm_overlap = False
config.sequence_parallel = False
config.temporal_patch_size = 2
config.patch_size = 14
config.in_channels = 3
config.spatial_merge_size = 2
config.fullatt_block_indexes = [7, 15, 23, 31]
config._qwen2_5_vl_window_size = 112
return config
def get_vision_projection_config(
config: TransformerConfig, embed_dim: int, spatial_merge_size: int
) -> TransformerConfig:
# merger:
# context_dim = hidden_size * merge_size**2
# out_hidden_size = hidden_size
# context_dim -> context_dim -> out_hidden_size
# MLP:
# input_size -> ffn_hidden_size -> hidden_size
# spec: LN -> Linear(bias=True) -> GELU -> Linear(bias=True)
config.gated_linear_unit = False
config.bias_activation_fusion = False
config.add_bias_linear = True
config.ffn_hidden_size = embed_dim * (spatial_merge_size**2)
config.activation_func = torch.nn.functional.gelu
config.tp_comm_overlap = False
config.sequence_parallel = False
return config
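# --- Hedged illustrative sketch (added, not part of the original module) ---
# Reproduces the merger-width arithmetic used by get_vision_projection_config: the projector input
# width is embed_dim * spatial_merge_size**2, i.e. 1280 * 2**2 = 5120 for the ViT settings
# hard-coded in get_vision_model_config above. Standalone re-derivation, not a call into the
# functions (which require a full TransformerConfig instance).
if __name__ == "__main__":
    embed_dim, spatial_merge_size = 1280, 2
    merger_input_width = embed_dim * (spatial_merge_size**2)
    assert merger_input_width == 5120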
|
verl__models__mcore__qwen2_5_vl__vision_model.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2024 Alibaba PAI Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
from megatron.core import InferenceParams
from megatron.core.models.common.vision_module.vision_module import VisionModule
from megatron.core.models.vision.multimodal_projector import MultimodalProjector
from megatron.core.packed_seq_params import PackedSeqParams
from megatron.core.transformer.enums import ModelType
from megatron.core.transformer.spec_utils import ModuleSpec
from megatron.core.transformer.transformer_config import TransformerConfig
from torch import nn
from torch.nn import functional as F
from .vision_transformer_block import Qwen2_5VisionTransformerBlock as TransformerBlock
# copied from https://github.com/huggingface/transformers/blob/main/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py
class PatchEmbed(nn.Module):
def __init__(
self,
patch_size: int = 14,
temporal_patch_size: int = 2,
in_channels: int = 3,
embed_dim: int = 1152,
) -> None:
super().__init__()
self.patch_size = patch_size
self.temporal_patch_size = temporal_patch_size
self.in_channels = in_channels
self.embed_dim = embed_dim
kernel_size = [temporal_patch_size, patch_size, patch_size]
self.proj = nn.Conv3d(in_channels, embed_dim, kernel_size=kernel_size, stride=kernel_size, bias=False)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
target_dtype = self.proj.weight.dtype
hidden_states = hidden_states.view(
-1, self.in_channels, self.temporal_patch_size, self.patch_size, self.patch_size
)
hidden_states = self.proj(hidden_states.to(dtype=target_dtype)).view(-1, self.embed_dim)
return hidden_states
# copied from https://github.com/huggingface/transformers/blob/main/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py
class VisionRotaryEmbedding(nn.Module):
def __init__(self, dim: int, theta: float = 10000.0) -> None:
super().__init__()
inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=torch.float) / dim))
self.register_buffer("inv_freq", inv_freq, persistent=False)
def forward(self, seqlen: int) -> torch.Tensor:
seq = torch.arange(seqlen, device=self.inv_freq.device, dtype=self.inv_freq.dtype)
freqs = torch.outer(seq, self.inv_freq)
return freqs.float()
class Qwen2_5VisionModel(VisionModule):
"""Qwen2.5 ViT vision model.
Args:
transformer_config (TransformerConfig): Transformer config for the vision encoder.
transformer_layer_spec (ModuleSpec): Specifies module to use for the vision transformer layers.
projection_config (TransformerConfig): Config for the projection from vision outputs to language
model inputs.
projection_layer_spec (ModuleSpec): Specifies the module to use for the vision projection.
projection_type (str): Type of the vision projection. Defaults to "mlp".
pre_process (bool): Whether this is the first pipeline stage of the vision encoder. Defaults to True.
post_process (bool): Whether this is the last pipeline stage; if True the multimodal projector is
built. Defaults to False.
"""
def __init__(
self,
transformer_config: TransformerConfig,
transformer_layer_spec: ModuleSpec,
projection_config: TransformerConfig,
projection_layer_spec: ModuleSpec,
projection_type: str = "mlp",
pre_process: bool = True,
post_process: bool = False,
) -> None:
super().__init__(config=transformer_config)
self.spatial_merge_size = transformer_config.spatial_merge_size
embed_dim = transformer_config.hidden_size
num_heads = transformer_config.num_attention_heads
temporal_patch_size = transformer_config.temporal_patch_size
patch_size = transformer_config.patch_size
in_channels = transformer_config.in_channels
self.patch_size = transformer_config.patch_size
self.fullatt_block_indexes = transformer_config.fullatt_block_indexes
self.window_size = transformer_config._qwen2_5_vl_window_size
self.spatial_merge_unit = self.spatial_merge_size * self.spatial_merge_size
self.max_sequence_length = transformer_config.seq_length
self.patch_embed = PatchEmbed(
patch_size=patch_size,
temporal_patch_size=temporal_patch_size,
in_channels=in_channels,
embed_dim=embed_dim,
)
head_dim = embed_dim // num_heads
self.rotary_pos_emb = VisionRotaryEmbedding(head_dim // 2)
self.model_type = ModelType.encoder_or_decoder
self.pre_process = pre_process
self.post_process = post_process
# Transformer layers.
# TODO: Follow-up changes will make pre and post_process configurable. They are needed for supporting
# pipeline parallelism.
# NOTE: a final layer norm and/or linear layer, present in some implementations, is omitted here.
self.decoder = TransformerBlock(
config=transformer_config,
spec=transformer_layer_spec,
pre_process=self.pre_process,
post_process=self.post_process,
post_layer_norm=True,
)
self.merge_hidden_size = projection_config.ffn_hidden_size
self.square_merge_size = self.merge_hidden_size // embed_dim
if self.post_process:
self.projection = MultimodalProjector(
projection_config, projection_layer_spec, projection_type, projection_config.ffn_hidden_size
)
else:
self.projection = None
self.input_tensor = None
def set_input_tensor(self, input_tensor: torch.Tensor) -> None:
"""Sets input tensor to the model.
Args:
input_tensor (Tensor): Sets the input tensor for the model.
"""
if self.pre_process: # always True
self.input_tensor = input_tensor
else:
raise NotImplementedError()
def rot_pos_emb(self, grid_thw):
pos_ids = []
for t, h, w in grid_thw:
hpos_ids = torch.arange(h).unsqueeze(1).expand(-1, w)
hpos_ids = hpos_ids.reshape(
h // self.spatial_merge_size,
self.spatial_merge_size,
w // self.spatial_merge_size,
self.spatial_merge_size,
)
hpos_ids = hpos_ids.permute(0, 2, 1, 3)
hpos_ids = hpos_ids.flatten()
wpos_ids = torch.arange(w).unsqueeze(0).expand(h, -1)
wpos_ids = wpos_ids.reshape(
h // self.spatial_merge_size,
self.spatial_merge_size,
w // self.spatial_merge_size,
self.spatial_merge_size,
)
wpos_ids = wpos_ids.permute(0, 2, 1, 3)
wpos_ids = wpos_ids.flatten()
pos_ids.append(torch.stack([hpos_ids, wpos_ids], dim=-1).repeat(t, 1))
pos_ids = torch.cat(pos_ids, dim=0).to(grid_thw.device)
max_grid_size = grid_thw[:, 1:].max()
rotary_pos_emb_full = self.rotary_pos_emb(max_grid_size).to(grid_thw.device)
rotary_pos_emb = rotary_pos_emb_full[pos_ids].flatten(1)
return rotary_pos_emb
def get_window_index(self, grid_thw):
window_index: list = []
cu_window_seqlens: list = [0]
window_index_id = 0
vit_merger_window_size = self.window_size // self.spatial_merge_size // self.patch_size
for grid_t, grid_h, grid_w in grid_thw:
llm_grid_h, llm_grid_w = (
grid_h // self.spatial_merge_size,
grid_w // self.spatial_merge_size,
)
index = torch.arange(grid_t * llm_grid_h * llm_grid_w).reshape(grid_t, llm_grid_h, llm_grid_w)
pad_h = vit_merger_window_size - llm_grid_h % vit_merger_window_size
pad_w = vit_merger_window_size - llm_grid_w % vit_merger_window_size
num_windows_h = (llm_grid_h + pad_h) // vit_merger_window_size
num_windows_w = (llm_grid_w + pad_w) // vit_merger_window_size
index_padded = F.pad(index, (0, pad_w, 0, pad_h), "constant", -100)
index_padded = index_padded.reshape(
grid_t,
num_windows_h,
vit_merger_window_size,
num_windows_w,
vit_merger_window_size,
)
index_padded = index_padded.permute(0, 1, 3, 2, 4).reshape(
grid_t,
num_windows_h * num_windows_w,
vit_merger_window_size,
vit_merger_window_size,
)
seqlens = (index_padded != -100).sum([2, 3]).reshape(-1)
index_padded = index_padded.reshape(-1)
index_new = index_padded[index_padded != -100]
window_index.append(index_new + window_index_id)
cu_seqlens_tmp = seqlens.cumsum(0) * self.spatial_merge_unit + cu_window_seqlens[-1]
cu_window_seqlens.extend(cu_seqlens_tmp.tolist())
window_index_id += (grid_t * llm_grid_h * llm_grid_w).item()
window_index = torch.cat(window_index, dim=0)
return window_index, cu_window_seqlens
def forward(
self,
vision_data: Optional[torch.Tensor],
grid_thw: torch.Tensor,
inference_params: Optional[InferenceParams] = None,
extra_block_kwargs: dict = None,
) -> torch.Tensor:
"""Forward function of the Qwen2 Vision Model. This function passes the input tensors
through the embedding layer and then the transformer.
Args:
vision_data (torch.Tensor): input image/video patch data of shape [n_tokens, n_dims]
grid_thw (torch.Tensor): the size tensor indicating the grid size of each image/frame
inference_params (InferenceParams): inference-time parameters; must be None (not supported yet)
extra_block_kwargs (dict): additional kwargs forwarded to the transformer block
Returns:
hidden_states (torch.Tensor): projected vision embeddings after the final transformer block and
the merger, restored to the original (pre-window) token order.
"""
assert grid_thw is not None
assert self.input_tensor is None
assert inference_params is None
# Rotary positional embeddings (embedding is None for PP intermediate devices)
vision_data = self.patch_embed(vision_data)
window_index, cu_window_seqlens = self.get_window_index(grid_thw)
cu_window_seqlens = torch.tensor(
cu_window_seqlens,
device=vision_data.device,
dtype=torch.int32,
)
cu_window_seqlens = torch.unique_consecutive(cu_window_seqlens)
seq_len, _ = vision_data.size()
vision_data = vision_data.reshape(seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1)
vision_data = vision_data[window_index, :, :]
vision_data = vision_data.reshape(seq_len, 1, -1)
rotary_pos_emb = self.rot_pos_emb(grid_thw)
rotary_pos_emb = rotary_pos_emb.reshape(seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1)
rotary_pos_emb = rotary_pos_emb[window_index, :, :]
rotary_pos_emb = rotary_pos_emb.reshape(seq_len, 1, 1, -1).repeat(1, 1, 1, 2)
hidden_states = self.decoder(
hidden_states=vision_data,
attention_mask=None,
inference_params=inference_params,
rotary_pos_emb=rotary_pos_emb,
packed_seq_params=self.build_packed_seq_params(None, cu_window_seqlens),
packed_seq_params_full=self.build_packed_seq_params(grid_thw),
fullatt_block_indexes=self.fullatt_block_indexes,
**(extra_block_kwargs or {}),
)
hidden_states = self.projection(hidden_states.view(-1, self.merge_hidden_size))
reverse_indices = torch.argsort(window_index)
return hidden_states[reverse_indices, :]
def build_packed_seq_params(
self,
grid_thw: Optional[torch.Tensor],
cu_seqlens: Optional[torch.Tensor] = None,
) -> PackedSeqParams:
# NOTE: each frame is treated as one packed sequence (rather than each whole image/video grid)
if grid_thw is not None:
seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0])
cu_seqlens = seqlens.cumsum(dim=0)
cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0).int()
else:
seqlens = cu_seqlens[1:] - cu_seqlens[:-1]
max_seqlen_q = seqlens.max()
return PackedSeqParams(
cu_seqlens_q=cu_seqlens,
cu_seqlens_kv=cu_seqlens,
qkv_format="thd",
max_seqlen_q=max_seqlen_q,
max_seqlen_kv=max_seqlen_q,
)
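# --- Hedged illustrative sketch (added, not part of the original module) ---
# Re-derives the cu_seqlens computed by build_packed_seq_params for one video with
# grid_thw = (t=2, h=4, w=4): each frame contributes h*w = 16 patch tokens, giving
# cu_seqlens = [0, 16, 32]. Standalone arithmetic, not an instantiation of the class
# (which needs a full megatron TransformerConfig).
if __name__ == "__main__":
    grid_thw = torch.tensor([[2, 4, 4]])
    seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0])
    cu_seqlens = F.pad(seqlens.cumsum(dim=0), (1, 0), value=0).int()
    assert cu_seqlens.tolist() == [0, 16, 32]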
|
verl__models__mcore__qwen2_5_vl__vision_transformer_block.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2024 Alibaba PAI Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from megatron.core.transformer.transformer_block import *
class Qwen2_5VisionTransformerBlock(TransformerBlock):
def _checkpointed_forward(
self,
hidden_states: Tensor,
attention_mask: Tensor,
context: Tensor,
context_mask: Tensor,
rotary_pos_emb: Tensor,
attention_bias: Tensor,
packed_seq_params: PackedSeqParams,
packed_seq_params_full: PackedSeqParams,
fullatt_block_indexes,
):
"""Forward method with activation checkpointing."""
def custom(start: int, end: int):
def custom_forward(hidden_states, attention_mask, context, context_mask, rotary_pos_emb):
for index in range(start, end):
if index in fullatt_block_indexes:
packed_seq_params_now = packed_seq_params_full
else:
packed_seq_params_now = packed_seq_params
layer = self._get_layer(index)
hidden_states, context = layer(
hidden_states=hidden_states,
attention_mask=attention_mask,
context=context,
context_mask=context_mask,
rotary_pos_emb=rotary_pos_emb,
attention_bias=attention_bias,
inference_context=None,
packed_seq_params=packed_seq_params_now,
)
return hidden_states, context
return custom_forward
def checkpoint_handler(forward_func):
"""Determines whether to use the `te_checkpoint` or `tensor_parallel.checkpoint`"""
if self.config.fp8:
return te_checkpoint(
forward_func,
self.config.distribute_saved_activations,
tensor_parallel.random.get_cuda_rng_tracker,
parallel_state.get_tensor_model_parallel_group(),
hidden_states,
attention_mask,
context,
context_mask,
rotary_pos_emb,
)
else:
return tensor_parallel.checkpoint(
forward_func,
self.config.distribute_saved_activations,
hidden_states,
attention_mask,
context,
context_mask,
rotary_pos_emb,
)
if self.config.recompute_method == "uniform":
# Uniformly divide the total number of Transformer layers and checkpoint
# the input activation of each divided chunk.
# A method to further reduce memory usage by reducing the number of checkpointed activations.
layer_idx = 0
while layer_idx < self.num_layers_per_pipeline_rank:
hidden_states, context = checkpoint_handler(
custom(layer_idx, layer_idx + self.config.recompute_num_layers)
)
layer_idx += self.config.recompute_num_layers
elif self.config.recompute_method == "block":
# Checkpoint the input activation of only a set number of individual
# Transformer layers and skip the rest.
# A method that makes fuller use of device memory by removing redundant re-computation.
recompute_skip_num_layers = 0
for layer_idx in range(self.num_layers_per_pipeline_rank):
# Skip recomputation when input grad computation is not needed.
# Need to have at least one input tensor with gradient computation
# for the re-entrant autograd engine.
if self.config.fp8 and not hidden_states.requires_grad:
recompute_skip_num_layers += 1
if (
layer_idx >= recompute_skip_num_layers
and layer_idx < self.config.recompute_num_layers + recompute_skip_num_layers
):
hidden_states, context = checkpoint_handler(custom(layer_idx, layer_idx + 1))
else:
hidden_states, context = custom(layer_idx, layer_idx + 1)(
hidden_states, attention_mask, context, context_mask, rotary_pos_emb
)
else:
raise ValueError("Invalid activation recompute method.")
return hidden_states
def forward(
self,
hidden_states: Union[Tensor, WrappedTensor],
attention_mask: Optional[Tensor],
context: Optional[Tensor] = None,
context_mask: Optional[Tensor] = None,
rotary_pos_emb: Optional[Tensor] = None,
rotary_pos_cos: Optional[Tensor] = None,
rotary_pos_sin: Optional[Tensor] = None,
attention_bias: Optional[Tensor] = None,
inference_context: Optional[BaseInferenceContext] = None,
packed_seq_params: Optional[PackedSeqParams] = None,
sequence_len_offset: Optional[Tensor] = None,
packed_seq_params_full: PackedSeqParams = None,
fullatt_block_indexes=None,
*,
inference_params: Optional[BaseInferenceContext] = None,
):
"""
Perform the forward pass through the transformer block.
This method handles the core computation of the transformer, including
self-attention, optional cross-attention, and feed-forward operations.
Args:
hidden_states (Union[Tensor, WrappedTensor]): Input tensor of shape [s, b, h]
where s is the sequence length, b is the batch size, and h is the hidden size.
Can be passed as a WrappedTensor during inference to avoid an obsolete
reference in the calling function.
attention_mask (Tensor): Boolean tensor of shape [1, 1, s, s] for masking
self-attention.
context (Tensor, optional): Context tensor for cross-attention.
context_mask (Tensor, optional): Mask for cross-attention context
rotary_pos_emb (Tensor, optional): Rotary positional embeddings.
attention_bias (Tensor): Bias tensor for Q * K.T, in a shape broadcastable
to [b, num_head, sq, skv], e.g. [1, 1, sq, skv].
Used as an alternative to apply attention mask for TE cuDNN attention.
inference_context (BaseInferenceContext, optional): Parameters for inference-time
optimizations.
packed_seq_params (PackedSeqParams, optional): Parameters for packed sequence
processing.
Returns:
Union[Tensor, Tuple[Tensor, Tensor]]: The output hidden states tensor of shape
[s, b, h], and optionally the updated context tensor if cross-attention is used.
"""
inference_context = deprecate_inference_params(inference_context, inference_params)
# Delete the obsolete reference to the initial input tensor if necessary
if isinstance(hidden_states, WrappedTensor):
hidden_states = hidden_states.unwrap()
if not self.pre_process:
# See set_input_tensor()
hidden_states = self.input_tensor
# Update the inference parameters with the current batch size in case it is variable
if inference_context and not self.training:
inference_context.current_batch_size = hidden_states.size(1)
# Viewless tensor.
# - We only need to create a viewless tensor in the case of micro batch
# size (mbs) == 1, since in this case, 'hidden_states.transpose()'
# above creates a view tensor, and '.contiguous()' is a pass-through.
# For mbs >= 2, '.contiguous()' creates a new tensor, eliminating
# the need to make it viewless.
#
# However, we don't explicitly check mbs == 1 here because
# make_viewless_tensor() has negligible overhead when its input
# is already viewless.
#
# - For the 'else' case above, calling make_viewless_tensor() here is
# likely redundant, since p2p_communication.py (likely originator)
# already creates viewless tensors. That said, make_viewless_tensor()
# is called here to be future-proof and corner-case-proof.
hidden_states = make_viewless_tensor(inp=hidden_states, requires_grad=True, keep_graph=True)
if self.config.sequence_parallel:
rng_context = tensor_parallel.get_cuda_rng_tracker().fork()
else:
rng_context = nullcontext()
        # If fp8_recipe is delayed, wrap the entire pass with get_fp8_context();
        # otherwise nothing extra is needed at the outer level.
        # For other fp8 recipes the context manager's enter/exit are essentially free, so we
        # wrap fp8_context around each layer inside the loop below, giving fine-grained
        # control over which layers run in fp8 and which in bf16.
use_outer_fp8_context = self.config.fp8 and self.config.fp8_recipe == Fp8Recipe.delayed
use_inner_fp8_context = self.config.fp8 and self.config.fp8_recipe != Fp8Recipe.delayed
outer_fp8_context = get_fp8_context(self.config) if use_outer_fp8_context else nullcontext()
with rng_context, outer_fp8_context:
# Forward pass.
if self.config.recompute_granularity == "full" and self.training:
hidden_states = self._checkpointed_forward(
hidden_states=hidden_states,
attention_mask=attention_mask,
context=context,
context_mask=context_mask,
rotary_pos_emb=rotary_pos_emb,
attention_bias=attention_bias,
packed_seq_params=packed_seq_params,
packed_seq_params_full=packed_seq_params_full,
fullatt_block_indexes=fullatt_block_indexes,
)
else:
for l_no, layer in enumerate(self.layers):
inner_fp8_context = (
get_fp8_context(self.config, layer.layer_number - 1) if use_inner_fp8_context else nullcontext()
)
if l_no in fullatt_block_indexes:
packed_seq_params_now = packed_seq_params_full
else:
packed_seq_params_now = packed_seq_params
with self.offload_context, inner_fp8_context:
hidden_states, context = layer(
hidden_states=hidden_states,
attention_mask=attention_mask,
context=context,
context_mask=context_mask,
rotary_pos_emb=rotary_pos_emb,
rotary_pos_cos=rotary_pos_cos,
rotary_pos_sin=rotary_pos_sin,
attention_bias=attention_bias,
inference_context=inference_context,
packed_seq_params=packed_seq_params_now,
sequence_len_offset=sequence_len_offset,
)
if (
torch.is_grad_enabled()
and self.config.cpu_offloading
and self.group_prefetch_offload_commit_async is not None
):
hidden_states = self.group_prefetch_offload_commit_async(hidden_states)
# Final layer norm.
if self.final_layernorm is not None:
hidden_states = self.final_layernorm(hidden_states)
# TENorm produces a "viewed" tensor. This will result in schedule.py's
# deallocate_output_tensor() throwing an error, so a viewless tensor is
# created to prevent this.
hidden_states = make_viewless_tensor(inp=hidden_states, requires_grad=True, keep_graph=True)
return hidden_states
|
verl__models__mcore__saver.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import torch
import torch.distributed as dist
from megatron.core import mpu
from megatron.core.distributed import DistributedDataParallel as LocalDDP
from megatron.core.transformer.module import Float16Module
from torch.nn.parallel import DistributedDataParallel as torchDDP
from verl.utils.device import get_device_id, get_torch_device
from verl.utils.logger import print_rank_0
from verl.utils.megatron_utils import unwrap_model
def _megatron_calc_global_rank(
tp_rank: int = 0, dp_rank: int = 0, pp_rank: int = 0, cp_rank: int = 0, ep_rank: int = 0
):
"""Calculate global rank with support for CP/EP parallelism"""
# Get parallel sizes for each dimension
tp_size = mpu.get_tensor_model_parallel_world_size()
dp_size = mpu.get_data_parallel_world_size()
pp_size = mpu.get_pipeline_model_parallel_world_size()
cp_size = mpu.get_context_parallel_world_size()
# ep_size = mpu.get_expert_model_parallel_world_size()
# Verify total GPU count matches (must be consistent with parallel_state.py)
total_size = tp_size * dp_size * pp_size * cp_size
assert total_size == torch.distributed.get_world_size(), (
f"{tp_size}x{dp_size}x{pp_size}x{cp_size} != {torch.distributed.get_world_size()}"
)
# Core calculation logic (corresponds to RankGenerator order parameter)
# Assumes default order is "tp-cp-ep-dp-pp"
return ((pp_rank * dp_size + dp_rank) * cp_size + cp_rank) * tp_size + tp_rank
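# Worked example (hypothetical sizes, assuming the default "tp-cp-ep-dp-pp" rank order):
# with tp_size=2, cp_size=1, dp_size=2, pp_size=2 (8 GPUs), the coordinates
# (tp_rank=1, dp_rank=0, pp_rank=1, cp_rank=0) map to global rank
# ((1 * 2 + 0) * 1 + 0) * 2 + 1 = 5.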
def _megatron_calc_layer_map(config):
"""Calculate the mapping of global layer_idx to local layer_idx
Returns:
layer_map (Dict: int -> tuple(int, int, int)):
mapping from the global layer index to
a tuple of (pp_rank, virtual_pp_rank, layer_idx inside model)
"""
from megatron.core import mpu
pp_size = mpu.get_pipeline_model_parallel_world_size()
virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1
layer_map = dict()
num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size
assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers
for pp_rank_idx in range(pp_size):
for virtual_pp_rank_idx in range(virtual_pp_size):
layer_offset = (
virtual_pp_rank_idx * (config.num_hidden_layers // virtual_pp_size) + pp_rank_idx * num_layers_per_model
)
for layer_idx in range(num_layers_per_model):
layer_map[layer_offset + layer_idx] = (
pp_rank_idx,
virtual_pp_rank_idx,
layer_idx,
)
return layer_map
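# Worked example (hypothetical sizes): with num_hidden_layers=8, pp_size=2 and
# virtual_pp_size=2, each model chunk holds 2 layers and the mapping is e.g.
#   layer_map[0] == (0, 0, 0), layer_map[2] == (1, 0, 0),
#   layer_map[4] == (0, 1, 0), layer_map[5] == (0, 1, 1).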
def merge_megatron_ckpt_gptmodel(wrapped_models, config, dtype, is_value_model=False, tie_word_embeddings=False):
"""Merge sharded parameters of a Megatron module into a merged checkpoint.
Args:
wrapped_models (list of megatron.core.distributed.DistributedDataParallel):
The local DDP wrapped megatron modules.
        config (PretrainedConfig):
            Hugging Face config of the model (used for layer counts and head sizes).
        dtype (torch.dtype): dtype of the merged parameters.
        is_value_model (bool): whether the model is a value model.
        tie_word_embeddings (bool): whether the word embeddings and lm_head weights are tied;
            if True, collecting lm_head.weight is skipped.
Returns:
state_dict (dict):
The merged state_dict in rank 0, and an empty dictionary in other ranks.
"""
start_time = time.time()
def _get_gpt_model(model):
return model
dp_rank = mpu.get_data_parallel_rank()
pp_size = mpu.get_pipeline_model_parallel_world_size()
pp_rank = mpu.get_pipeline_model_parallel_rank()
cp_rank = mpu.get_context_parallel_rank()
virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1
mp_group = mpu.get_model_parallel_group()
if dist.get_rank() == 0:
        assert mp_group.rank() == 0, f"mp_rank:[{mp_group.rank()}] != 0 on rank #0"
assert pp_rank == 0, f"pp_rank:[{pp_rank}] != 0 on rank #0"
assert dp_rank == 0, f"dp_rank:[{dp_rank}] != 0 on rank #0"
    if not isinstance(wrapped_models, list | tuple):
        wrapped_models = [wrapped_models]
assert len(wrapped_models) == virtual_pp_size
num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size
assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers
models = [None] * len(wrapped_models)
for i, wrapped_model in enumerate(wrapped_models):
models[i] = unwrap_model(wrapped_model, (torchDDP, LocalDDP, Float16Module))
assert len(models[i].decoder.layers) == num_layers_per_model, (
"len model layers {} not equal to num_layers_per_model {}".format(
len(models[i].decoder.layers), num_layers_per_model
)
)
state_dict = dict()
def _get_cpu_tensor(tensor: torch.Tensor):
if tensor is None:
return None
if tensor.device == torch.device("cpu"):
return tensor.detach().clone()
return tensor.detach().cpu()
    def _broadcast_tensor(tensor, name, src_pp_rank) -> None:
"""broadcast tensor across mp_group"""
nonlocal state_dict
nonlocal mp_group
src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=src_pp_rank, cp_rank=cp_rank)
if torch.distributed.get_rank() == src_rank:
if tensor is None:
weight = None
tensor_shape = None
else:
weight = tensor
tensor_shape = weight.shape
else:
weight = None
tensor_shape = None
obj_list = [tensor_shape]
dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group)
tensor_shape = obj_list[0]
if tensor_shape is None:
# all or none ranks in the mp_group should reach here
print_rank_0(f"tensor:[{name}] not exist, skip collect")
return
if weight is None:
weight = torch.empty(
tensor_shape,
dtype=dtype,
device=get_device_id(),
requires_grad=False,
)
dist.broadcast(weight, src=src_rank, group=mp_group)
if torch.distributed.get_rank() == 0:
state_dict[name] = _get_cpu_tensor(weight)
    def _broadcast_tp_shard_tensor(tensor, name, src_pp_rank, concat_dim=0, mutate_func=None) -> None:
"""broadcast tensor in tp shards across mp_group"""
nonlocal state_dict
nonlocal mp_group
# tp_rank = mpu.get_tensor_model_parallel_rank()
tp_size = mpu.get_tensor_model_parallel_world_size()
src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=src_pp_rank, cp_rank=cp_rank)
chunk_shape = tensor.shape if torch.distributed.get_rank() == src_rank else None
obj_list = [chunk_shape]
dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group)
chunk_shape = obj_list[0]
if chunk_shape is None:
# all or none ranks in the mp_group should reach here
print_rank_0(f"tp_shard tensor:[{name}] not exist, skip collecting")
return
buffer_tensor = torch.empty(
chunk_shape,
dtype=dtype,
device=get_device_id(),
requires_grad=False,
)
chunk_tensors = [None] * tp_size
for i in range(tp_size):
cur_src_rank = _megatron_calc_global_rank(tp_rank=i, dp_rank=0, pp_rank=src_pp_rank, cp_rank=cp_rank)
sync_tensor = tensor if torch.distributed.get_rank() == cur_src_rank else buffer_tensor
dist.broadcast(sync_tensor, src=cur_src_rank, group=mp_group)
if torch.distributed.get_rank() == 0:
chunk_tensors[i] = _get_cpu_tensor(sync_tensor)
if torch.distributed.get_rank() == 0:
full_tensor = torch.concat(chunk_tensors, dim=concat_dim)
if mutate_func is not None:
full_tensor = mutate_func(full_tensor)
state_dict[name] = full_tensor
    def _broadcast_tp_shard_tensor_gate_up(tensor, gate_name, up_name, src_pp_rank) -> None:
"""broadcast tensor in tp shards across mp_group"""
nonlocal state_dict
nonlocal mp_group
# tp_rank = mpu.get_tensor_model_parallel_rank()
tp_size = mpu.get_tensor_model_parallel_world_size()
src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=src_pp_rank, cp_rank=cp_rank)
chunk_shape = tensor.shape if torch.distributed.get_rank() == src_rank else None
obj_list = [chunk_shape]
dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group)
chunk_shape = obj_list[0]
if chunk_shape is None:
# all or none ranks in the mp_group should reach here
print_rank_0(f"tp_shard tensor:[{gate_name, up_name}] not exist, skip collecting")
return
buffer_tensor = torch.empty(
chunk_shape,
dtype=dtype,
device=get_device_id(),
requires_grad=False,
)
chunk_tensors = [None] * tp_size
for i in range(tp_size):
cur_src_rank = _megatron_calc_global_rank(tp_rank=i, dp_rank=0, pp_rank=src_pp_rank, cp_rank=cp_rank)
sync_tensor = tensor if torch.distributed.get_rank() == cur_src_rank else buffer_tensor
dist.broadcast(sync_tensor, src=cur_src_rank, group=mp_group)
if torch.distributed.get_rank() == 0:
chunk_tensors[i] = _get_cpu_tensor(sync_tensor)
if torch.distributed.get_rank() == 0:
full_tensor = torch.concat(chunk_tensors, dim=0)
intermediate_size_tp = config.intermediate_size // tp_size
gate_weight_list = []
up_weight_list = []
for i in range(tp_size):
gate_up_weight_tp = full_tensor[intermediate_size_tp * 2 * i : intermediate_size_tp * 2 * (i + 1)]
gate_weight_tp = gate_up_weight_tp[:intermediate_size_tp]
up_weight_tp = gate_up_weight_tp[intermediate_size_tp:]
gate_weight_list.append(gate_weight_tp)
up_weight_list.append(up_weight_tp)
state_dict[gate_name] = torch.cat(gate_weight_list, dim=0)
state_dict[up_name] = torch.cat(up_weight_list, dim=0)
def _broadcast_tp_shard_tensor_qkv(tensor, q_name, k_name, v_name, src_pp_rank):
"""broadcast tensor in tp shards across mp_group"""
nonlocal state_dict
nonlocal mp_group
# tp_rank = mpu.get_tensor_model_parallel_rank()
tp_size = mpu.get_tensor_model_parallel_world_size()
src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=src_pp_rank, cp_rank=cp_rank)
chunk_shape = tensor.shape if torch.distributed.get_rank() == src_rank else None
obj_list = [chunk_shape]
dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group)
chunk_shape = obj_list[0]
if chunk_shape is None:
# all or none ranks in the mp_group should reach here
print_rank_0(f"tp_shard tensor:[{q_name}] not exist, skip collecting")
return
buffer_tensor = torch.empty(
chunk_shape,
dtype=dtype,
device=get_device_id(),
requires_grad=False,
)
chunk_tensors = [None] * tp_size
for i in range(tp_size):
cur_src_rank = _megatron_calc_global_rank(tp_rank=i, dp_rank=0, pp_rank=src_pp_rank, cp_rank=cp_rank)
sync_tensor = tensor if torch.distributed.get_rank() == cur_src_rank else buffer_tensor
dist.broadcast(sync_tensor, src=cur_src_rank, group=mp_group)
if torch.distributed.get_rank() == 0:
chunk_tensors[i] = _get_cpu_tensor(sync_tensor)
if torch.distributed.get_rank() == 0:
full_tensor = torch.concat(chunk_tensors, dim=0)
q_weight_list = []
k_weight_list = []
v_weight_list = []
hidden_size_per_head = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
if config.num_key_value_heads >= tp_size:
q_size_tp = hidden_size_per_head * config.num_attention_heads // tp_size
kv_size_tp = hidden_size_per_head * config.num_key_value_heads // tp_size
total_size = q_size_tp + 2 * kv_size_tp
for i in range(tp_size):
num_query_groups_per_partition = wrapped_models[0].config.num_query_groups // tp_size
qkv_part = full_tensor[i * total_size : (i + 1) * total_size]
q_size_chunk = q_size_tp // num_query_groups_per_partition
kv_size_chunk = kv_size_tp // num_query_groups_per_partition
for qkv_part_chunk in qkv_part.chunk(num_query_groups_per_partition):
q_part = qkv_part_chunk[:q_size_chunk]
k_part = qkv_part_chunk[q_size_chunk : q_size_chunk + kv_size_chunk]
v_part = qkv_part_chunk[q_size_chunk + kv_size_chunk :]
q_weight_list.append(q_part)
k_weight_list.append(k_part)
v_weight_list.append(v_part)
else:
q_size_tp = hidden_size_per_head * config.num_attention_heads // tp_size
kv_size_tp = hidden_size_per_head
total_size = q_size_tp + 2 * kv_size_tp
for i in range(tp_size):
num_query_groups_per_partition = wrapped_models[0].config.num_query_groups // tp_size
qkv_part = full_tensor[i * total_size : (i + 1) * total_size]
q_size_chunk = q_size_tp // num_query_groups_per_partition
kv_size_chunk = kv_size_tp // num_query_groups_per_partition
for qkv_part_chunk in qkv_part.chunk(num_query_groups_per_partition):
q_part = qkv_part_chunk[:q_size_chunk]
k_part = qkv_part_chunk[q_size_chunk : q_size_chunk + kv_size_chunk]
v_part = qkv_part_chunk[q_size_chunk + kv_size_chunk :]
q_weight_list.append(q_part)
if i * config.num_key_value_heads % tp_size == 0:
k_weight_list.append(k_part)
v_weight_list.append(v_part)
state_dict[q_name] = torch.cat(q_weight_list, dim=0)
state_dict[k_name] = torch.cat(k_weight_list, dim=0)
state_dict[v_name] = torch.cat(v_weight_list, dim=0)
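    # Layout note (illustrative numbers): Megatron's fused linear_qkv stores rows per query
    # group as [q rows, k rows, v rows], repeated group by group within each TP shard. For
    # example, with num_attention_heads=8, num_key_value_heads=4, head_dim=16 and tp_size=2,
    # each shard holds 2 groups of 64 rows (32 q + 16 k + 16 v), which the loop above unpacks
    # back into separate q/k/v projection weights.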
# empty cache before collecting weights
get_torch_device().empty_cache()
# Embeddings
# -------------------
if dp_rank == 0 and cp_rank == 0: # models are identical across cp ranks
# Embeddings
# -------------------
print_rank_0("collecting embeddings...")
gpt_model_module = _get_gpt_model(models[0])
_broadcast_tp_shard_tensor(
gpt_model_module.embedding.word_embeddings.weight if pp_rank == 0 else None,
"model.embed_tokens.weight",
src_pp_rank=0,
)
# Transformer layers
# -------------------
layer_map = _megatron_calc_layer_map(config)
for layer in range(config.num_hidden_layers):
print_rank_0(f"collecting layer #{layer}...")
layer_name = f"model.layers.{layer}"
src_pp_rank, src_virtual_pp_rank, src_layer_idx = layer_map[layer]
gpt_model_module = _get_gpt_model(models[src_virtual_pp_rank])
sync_layer = gpt_model_module.decoder.layers[src_layer_idx]
_broadcast_tensor(
sync_layer.self_attention.linear_qkv.layer_norm_weight,
f"{layer_name}.input_layernorm.weight",
src_pp_rank=src_pp_rank,
)
if gpt_model_module.config.qk_layernorm:
_broadcast_tensor(
sync_layer.self_attention.q_layernorm.weight,
f"{layer_name}.self_attn.q_norm.weight",
src_pp_rank=src_pp_rank,
)
_broadcast_tensor(
sync_layer.self_attention.k_layernorm.weight,
f"{layer_name}.self_attn.k_norm.weight",
src_pp_rank=src_pp_rank,
)
_broadcast_tp_shard_tensor_qkv(
sync_layer.self_attention.linear_qkv.weight,
f"{layer_name}.self_attn.q_proj.weight",
f"{layer_name}.self_attn.k_proj.weight",
f"{layer_name}.self_attn.v_proj.weight",
src_pp_rank=src_pp_rank,
)
if gpt_model_module.config.add_qkv_bias:
_broadcast_tp_shard_tensor_qkv(
sync_layer.self_attention.linear_qkv.bias,
f"{layer_name}.self_attn.q_proj.bias",
f"{layer_name}.self_attn.k_proj.bias",
f"{layer_name}.self_attn.v_proj.bias",
src_pp_rank=src_pp_rank,
)
_broadcast_tp_shard_tensor(
sync_layer.self_attention.linear_proj.weight,
f"{layer_name}.self_attn.o_proj.weight",
concat_dim=1,
src_pp_rank=src_pp_rank,
)
_broadcast_tensor(
sync_layer.mlp.linear_fc1.layer_norm_weight,
f"{layer_name}.post_attention_layernorm.weight",
src_pp_rank=src_pp_rank,
)
_broadcast_tp_shard_tensor_gate_up(
sync_layer.mlp.linear_fc1.weight,
f"{layer_name}.mlp.gate_proj.weight",
f"{layer_name}.mlp.up_proj.weight",
src_pp_rank=src_pp_rank,
)
_broadcast_tp_shard_tensor(
sync_layer.mlp.linear_fc2.weight,
f"{layer_name}.mlp.down_proj.weight",
concat_dim=1,
src_pp_rank=src_pp_rank,
)
# Final Layernorm
# -------------------
print_rank_0("collecting final layernorm...")
gpt_model_module = _get_gpt_model(models[-1])
_broadcast_tensor(
getattr(gpt_model_module.decoder.final_layernorm, "weight", None),
"model.norm.weight",
src_pp_rank=pp_size - 1,
)
if tie_word_embeddings:
print_rank_0("tie word embedding skip load lm_head...")
else:
print_rank_0("collecting lm_head...")
if is_value_model:
lm_head_weight = None
if pp_rank == pp_size - 1:
lm_head_weight = getattr(gpt_model_module.output_layer, "weight", None)
_broadcast_tensor(lm_head_weight, "lm_head.weight", src_pp_rank=pp_size - 1)
else:
_broadcast_tp_shard_tensor(
getattr(gpt_model_module.output_layer, "weight", None) if pp_rank == pp_size - 1 else None,
"lm_head.weight",
src_pp_rank=pp_size - 1,
)
dist.barrier()
get_torch_device().empty_cache()
if torch.distributed.get_rank() == 0:
for k, v in state_dict.items():
if dtype != v.dtype:
state_dict[k] = v.to(dtype)
print_rank_0(f"merge megatron ckpt done, time elapsed {time.time() - start_time}s")
return state_dict
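# Minimal usage sketch (hypothetical variable names): every rank of the model-parallel group
# must call this collectively; only global rank 0 receives the merged weights.
#
#   state_dict = merge_megatron_ckpt_gptmodel(
#       wrapped_models,      # list of DDP-wrapped GPTModel chunks, one per virtual pp stage
#       hf_config,           # Hugging Face config of the target model
#       dtype=torch.bfloat16,
#       is_value_model=False,
#       tie_word_embeddings=False,
#   )
#   if torch.distributed.get_rank() == 0:
#       torch.save(state_dict, "merged_hf_state_dict.pt")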
def merge_megatron_ckpt_gptmodel_qwen_moe(
wrapped_models, config, dtype, is_value_model=False, tie_word_embeddings=False
):
raise NotImplementedError("merge_megatron_ckpt_gptmodel_qwen_moe is not implemented")
def merge_megatron_ckpt_gptmodel_qwen2_5_vl(
wrapped_models, config, dtype, is_value_model=False, tie_word_embeddings=False
):
raise NotImplementedError("merge_megatron_ckpt_gptmodel_qwen2_5_vl is not implemented")
def merge_megatron_ckpt_gptmodel_dpskv3(wrapped_models, config, dtype, is_value_model=False, tie_word_embeddings=False):
raise NotImplementedError("merge_megatron_ckpt_gptmodel_dpskv3 is not implemented")
def merge_megatron_ckpt_gptmodel_mixtral(
wrapped_models, config, dtype, is_value_model=False, tie_word_embeddings=False
):
raise NotImplementedError("merge_megatron_ckpt_gptmodel_mixtral is not implemented")
|
verl__models__mcore__util.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
from megatron.core import parallel_state as mpu
from megatron.core.packed_seq_params import PackedSeqParams
from verl.utils.model import CausalLMOutputForPPO
def preprocess_packed_seqs(
input_ids: torch.Tensor, attention_mask: torch.Tensor, pre_process: bool = True, use_fp8_padding=False
) -> tuple[torch.Tensor, PackedSeqParams]:
"""
Preprocess packed sequences
    CP splits the sequence into CP*2 chunks; each GPU keeps 2 of them (GPU0 gets the first and
    last chunk, GPU1 the second and second-to-last, and so on) to balance the causal-attention load.
See https://github.com/NVIDIA/TransformerEngine/issues/1368
"""
batch_size = input_ids.shape[0]
seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
tp_size = mpu.get_tensor_model_parallel_world_size()
cp_size = mpu.get_context_parallel_world_size()
cp_rank = mpu.get_context_parallel_rank()
align_size = tp_size * cp_size * 2 if cp_size > 1 else tp_size
if use_fp8_padding:
# if fp8 is enabled, ensure the sequence is padded to multiples of 16 for better performance
original_align_size = align_size
align_size = math.lcm(16, align_size)
pad_size = (align_size - seqlens_in_batch % align_size) % align_size
seqlens_in_batch_padded = seqlens_in_batch + pad_size
cu_seqlens = torch.zeros(batch_size + 1, dtype=torch.int32, device=input_ids.device)
cu_seqlens[1:] = torch.cumsum(seqlens_in_batch, dim=0)
cu_seqlens_padded = torch.zeros(batch_size + 1, dtype=torch.int32, device=input_ids.device)
cu_seqlens_padded[1:] = torch.cumsum(seqlens_in_batch_padded, dim=0)
if use_fp8_padding:
        # pad the total packed length to a multiple of original_align_size * 128 for TE fp8 compatibility
align_size_last = original_align_size * 128
pad_size_last = (align_size_last - cu_seqlens_padded[-1] % align_size_last) % align_size_last
cu_seqlens_padded[-1] += pad_size_last
seqlens_in_batch_padded[-1] += pad_size_last
# ----------------------------------------------------------------------------
# Move the index information needed in the subsequent loop to the CPU at once,
# to avoid frequent .item() calls in the loop that cause D2H synchronization
# ----------------------------------------------------------------------------
seqlens_in_batch_cpu: list[int] = seqlens_in_batch.tolist() # original valid lengths
seqlens_in_batch_padded_cpu: list[int] = seqlens_in_batch_padded.tolist() # lengths after padding
cu_seqlens_padded_cpu: list[int] = cu_seqlens_padded.tolist() # start positions (after padding)
# Pure Python int calculation to avoid further synchronization
max_seqlen_in_batch = max(seqlens_in_batch_padded_cpu)
shape = list(input_ids.shape[1:])
shape[0] = sum(seqlens_in_batch_padded_cpu) // cp_size
if pre_process:
input_ids_rmpad = torch.zeros(shape, dtype=input_ids.dtype, device=input_ids.device)
for i in range(batch_size):
# Use Python int, so no GPU→CPU sync in the loop
if cp_size <= 1:
seqlen = seqlens_in_batch_cpu[i]
start_idx = cu_seqlens_padded_cpu[i]
input_ids_rmpad[start_idx : start_idx + seqlen] = input_ids[i, attention_mask[i]]
continue
seqlen_padded_i = seqlens_in_batch_padded_cpu[i]
seqlen = seqlen_padded_i // cp_size
half_seqlen = seqlen // 2
start_idx = cu_seqlens_padded_cpu[i] // cp_size
# split to 2 chunks
d = input_ids[i, attention_mask[i]]
input_ids_rmpad[start_idx : start_idx + half_seqlen] = d[
half_seqlen * cp_rank : half_seqlen * (cp_rank + 1)
]
remain_start = seqlen_padded_i - half_seqlen * (cp_rank + 1)
remain_end = seqlen_padded_i - half_seqlen * cp_rank
remain_end = min(remain_end, d.shape[0])
remain_len = remain_end - remain_start
if remain_len > 0:
input_ids_rmpad[start_idx + half_seqlen : start_idx + half_seqlen + remain_len] = d[
remain_start:remain_end
]
packed_seq_params = PackedSeqParams(
qkv_format="thd",
cu_seqlens_q=cu_seqlens_padded,
max_seqlen_q=max_seqlen_in_batch,
cu_seqlens_kv=cu_seqlens_padded,
max_seqlen_kv=max_seqlen_in_batch,
cu_seqlens_q_padded=cu_seqlens_padded,
cu_seqlens_kv_padded=cu_seqlens_padded,
)
if pre_process:
return input_ids_rmpad.unsqueeze(0), packed_seq_params
else:
return input_ids, packed_seq_params
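# Usage sketch (hypothetical tensors; must be called by every rank of the tp/cp group):
#
#   input_ids = torch.tensor([[5, 6, 7, 8, 0, 0]], device="cuda")            # [b, s], right padded
#   attention_mask = torch.tensor([[1, 1, 1, 1, 0, 0]], device="cuda").bool()
#   packed_ids, seq_params = preprocess_packed_seqs(input_ids, attention_mask)
#   # packed_ids: [1, total_padded_len // cp_size] in "thd" layout; seq_params: PackedSeqParams
#
# With cp_size=2, a padded sequence of 8 tokens is split into 4 half-chunks of 2 tokens:
# cp_rank 0 keeps half-chunks (0, 3) and cp_rank 1 keeps (1, 2), balancing the
# causal-attention cost across ranks.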
def postprocess_packed_seqs(
output: torch.Tensor,
packed_seq_params: PackedSeqParams,
attention_mask: torch.Tensor,
batch_size: int,
seq_len: int,
post_process: bool = True,
) -> torch.Tensor:
"""
Postprocess packed sequences
"""
if not post_process:
return output
# -------------------------------------------------------------------------
# Move the lengths and offsets needed for subsequent Python-level indexing to the CPU in advance,
# to avoid a large number of .item() calls in the loop
# -------------------------------------------------------------------------
cu_padded_cpu: list[int] = packed_seq_params.cu_seqlens_q_padded.tolist()
seq_lens_cpu: list[int] = attention_mask.sum(dim=1, dtype=torch.int32).cpu().tolist()
shape = [batch_size, seq_len] + list(output.shape[2:]) # 1,packed, dim -> batch_size, seq_len, dim
output_new = torch.zeros(shape, dtype=output.dtype, device=output.device)
cp_size = mpu.get_context_parallel_world_size()
# all gather output across context parallel group
if cp_size > 1:
# output shape: [1, packed_len, hidden_dim]
# need to gather across cp group and concatenate in sequence dimension
output_list = [torch.empty_like(output, dtype=output.dtype) for _ in range(cp_size)]
torch.distributed.all_gather(output_list, output.detach(), group=mpu.get_context_parallel_group())
output_list[mpu.get_context_parallel_rank()] = output
else:
output_list = [output]
for i in range(batch_size):
if cp_size <= 1:
s = seq_lens_cpu[i]
start_idx = cu_padded_cpu[i]
output_new[i, attention_mask[i]] = output[0][start_idx : start_idx + s]
continue
s_len_padded_chunk = (cu_padded_cpu[i + 1] - cu_padded_cpu[i]) // cp_size
half_seqlen = s_len_padded_chunk // 2
s_len = seq_lens_cpu[i]
s_len_padded = s_len_padded_chunk * cp_size
tmp = torch.empty(s_len_padded, *output.shape[2:], device=output.device, dtype=output.dtype)
for j in range(cp_size):
o = output_list[j][0]
# split to 2 chunks
packed_start_idx = cu_padded_cpu[i] // cp_size
o0, o1 = (
o[packed_start_idx : packed_start_idx + half_seqlen],
o[packed_start_idx + half_seqlen : packed_start_idx + s_len_padded_chunk],
)
tmp[j * half_seqlen : (j + 1) * half_seqlen] = o0
tmp[s_len_padded - (j + 1) * half_seqlen : s_len_padded - j * half_seqlen] = o1
output_new[i, attention_mask[i]] = tmp[:s_len]
return output_new
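# Reassembly note (continuing the hypothetical cp_size=2 example above): each rank's output
# holds half-chunks (0, 3) or (1, 2) of every sequence. After the all_gather, the inner loop
# writes rank j's first half-chunk at [j * half_seqlen, (j + 1) * half_seqlen) and mirrors its
# second half-chunk from the end of the buffer, restoring the original token order before the
# padding tail is dropped via tmp[:s_len].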
def preprocess_bshd(
input_ids: torch.Tensor,
attention_mask: torch.Tensor,
position_ids: torch.Tensor,
sequence_parallel: bool = False,
pre_process: bool = True,
):
"""
Remove left padding from input_ids, attention_mask and position_ids
return new_input_ids, new_attention_mask, new_position_ids
"""
assert attention_mask.ndim == 2
assert position_ids.ndim == 2
cp_size = mpu.get_context_parallel_world_size()
    assert cp_size == 1, "Context parallel size > 1 is not supported without sequence packing"
batch_size = input_ids.shape[0]
shape = list(input_ids.shape) # batch_size, seq_len,...
seq_lens = attention_mask.sum(dim=1)
seq_len = seq_lens.max().item()
if sequence_parallel:
sp_world_size = mpu.get_tensor_model_parallel_world_size()
pad_size = (sp_world_size - seq_len % sp_world_size) % sp_world_size
seq_len = seq_len + pad_size
shape[1] = seq_len
if pre_process:
new_input_ids = torch.zeros(dtype=input_ids.dtype, device=input_ids.device, size=shape)
new_attention_mask = torch.zeros(
dtype=attention_mask.dtype, device=attention_mask.device, size=(batch_size, seq_len)
)
new_position_ids = torch.zeros(dtype=position_ids.dtype, device=position_ids.device, size=(batch_size, seq_len))
for i in range(batch_size):
if pre_process:
new_input_ids[i, : seq_lens[i]] = input_ids[i, attention_mask[i]]
new_attention_mask[i, : seq_lens[i]] = attention_mask[i, attention_mask[i]]
new_position_ids[i, : seq_lens[i]] = position_ids[i, attention_mask[i]]
if pre_process:
return new_input_ids, new_attention_mask, new_position_ids
else:
return input_ids, new_attention_mask, new_position_ids
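# Example (hypothetical tensors): a left-padded batch
#   input_ids      = [[0, 0, 11, 12, 13]]
#   attention_mask = [[0, 0,  1,  1,  1]]
# is compacted to input_ids [[11, 12, 13]] and attention_mask [[1, 1, 1]], with position_ids
# gathered from the same valid positions; the new sequence length is the longest valid length
# in the batch, rounded up to a multiple of tp_size when sequence_parallel=True.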
def postprocess_bshd(
result,
attention_mask: torch.Tensor,
original_attention_mask: torch.Tensor,
origin_seqlen: int,
post_process: bool = True,
):
"""
Recover left padding from result
return result
"""
if not post_process:
return result
shape = list(result.shape)
batch_size = shape[0]
shape[1] = origin_seqlen
new_result = torch.zeros(dtype=result.dtype, device=result.device, size=shape)
for i in range(batch_size):
new_result[i, original_attention_mask[i]] = result[i, attention_mask[i]]
return new_result
def postprocess_packed_seqs_for_dict_output(
labels_mask: torch.Tensor,
output: CausalLMOutputForPPO,
packed_seq_params: PackedSeqParams,
attention_mask: torch.Tensor,
batch_size: int,
seq_len: int,
post_process: bool = True,
) -> dict[str, torch.Tensor]:
"""_summary_
For fused kernels, the output is a dictionary with keys like 'log_probs', 'entropy', etc.
This function post-processes each tensor in the output dictionary.
Args:
output (CausalLMOutputForPPO): _description_
packed_seq_params (PackedSeqParams): _description_
attention_mask (torch.Tensor): _description_
batch_size (int): _description_
seq_len (int): _description_
post_process (bool, optional): _description_. Defaults to True.
Returns:
CausalLMOutputForPPO: _description_
"""
ret = {}
output.entropy = output.entropy.view(1, -1)
output.log_probs = output.log_probs.view(1, -1)
output.log_probs = output.log_probs.masked_fill(~labels_mask, 0.0)
ret["entropy"] = postprocess_packed_seqs(
output.entropy, packed_seq_params, attention_mask, batch_size, seq_len, post_process=post_process
)
ret["log_probs"] = postprocess_packed_seqs(
output.log_probs, packed_seq_params, attention_mask, batch_size, seq_len, post_process=post_process
)
return ret
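# Usage sketch (hypothetical variable names): given a fused-kernel forward output in packed
# "thd" layout, both heads are unpacked back to [batch_size, seq_len]:
#
#   out = postprocess_packed_seqs_for_dict_output(
#       labels_mask, fused_output, packed_seq_params, attention_mask,
#       batch_size=attention_mask.size(0), seq_len=attention_mask.size(1),
#   )
#   log_probs, entropy = out["log_probs"], out["entropy"]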
### No padding versions for model engine
### inputs are nested tensors
def preprocess_thd_no_padding(
input_ids: torch.Tensor, pre_process: bool = True, need_roll: bool = False
) -> tuple[torch.Tensor, PackedSeqParams]:
"""
Preprocess packed sequences
    CP splits the sequence into CP*2 chunks; each GPU keeps 2 of them (GPU0 gets the first and
    last chunk, GPU1 the second and second-to-last, and so on) to balance the causal-attention load.
See https://github.com/NVIDIA/TransformerEngine/issues/1368
"""
batch_size = input_ids.shape[0]
tp_size = mpu.get_tensor_model_parallel_world_size()
cp_size = mpu.get_context_parallel_world_size()
cp_rank = mpu.get_context_parallel_rank()
align_size = tp_size * cp_size * 2 if cp_size > 1 else tp_size
seqlens_in_batch = input_ids.offsets().diff()
pad_size = (align_size - seqlens_in_batch % align_size) % align_size
seqlens_in_batch_padded = seqlens_in_batch + pad_size
cu_seqlens = torch.zeros(batch_size + 1, dtype=torch.int32, device=input_ids.device)
cu_seqlens[1:] = torch.cumsum(seqlens_in_batch, dim=0)
cu_seqlens_padded = torch.zeros(batch_size + 1, dtype=torch.int32, device=input_ids.device)
cu_seqlens_padded[1:] = torch.cumsum(seqlens_in_batch_padded, dim=0)
# ----------------------------------------------------------------------------
# Move the index information needed in the subsequent loop to the CPU at once,
# to avoid frequent .item() calls in the loop that cause D2H synchronization
# ----------------------------------------------------------------------------
seqlens_in_batch_cpu: list[int] = seqlens_in_batch.tolist() # original valid lengths
seqlens_in_batch_padded_cpu: list[int] = seqlens_in_batch_padded.tolist() # lengths after padding
cu_seqlens_padded_cpu: list[int] = cu_seqlens_padded.tolist() # start positions (after padding)
# Pure Python int calculation to avoid further synchronization
max_seqlen_in_batch = max(seqlens_in_batch_padded_cpu)
shape = list(input_ids.shape[1:])
shape[0] = sum(seqlens_in_batch_padded_cpu) // cp_size
if pre_process:
input_ids_rmpad = torch.zeros(shape, dtype=input_ids.dtype, device=input_ids.device)
if need_roll:
saved_roll_dict = {}
for i in range(batch_size):
# Use Python int, so no GPU→CPU sync in the loop
if cp_size <= 1:
seqlen = seqlens_in_batch_cpu[i]
start_idx = cu_seqlens_padded_cpu[i]
input_ids_rmpad[start_idx : start_idx + seqlen] = input_ids[i]
continue
seqlen_padded_i = seqlens_in_batch_padded_cpu[i]
seqlen = seqlen_padded_i // cp_size
half_seqlen = seqlen // 2
start_idx = cu_seqlens_padded_cpu[i] // cp_size
# split to 2 chunks
d = input_ids[i]
input_ids_rmpad[start_idx : start_idx + half_seqlen] = d[
half_seqlen * cp_rank : half_seqlen * (cp_rank + 1)
]
remain_start = seqlen_padded_i - half_seqlen * (cp_rank + 1)
remain_end = seqlen_padded_i - half_seqlen * cp_rank
remain_end = min(remain_end, d.shape[0])
remain_len = remain_end - remain_start
if remain_len > 0:
input_ids_rmpad[start_idx + half_seqlen : start_idx + half_seqlen + remain_len] = d[
remain_start:remain_end
]
if need_roll:
# Handle roll for cp_size > 1 case
saved_roll_dict[start_idx + half_seqlen - 1] = d[(cp_rank + 1) * half_seqlen]
if remain_len > 0:
if remain_end == d.shape[0]:
saved_roll_dict[start_idx + half_seqlen + remain_len - 1] = d[0]
else:
saved_roll_dict[start_idx + half_seqlen + remain_len - 1] = d[remain_end]
if need_roll:
input_ids_rmpad = torch.roll(input_ids_rmpad, shifts=-1, dims=0)
if len(saved_roll_dict) > 0:
for k, v in saved_roll_dict.items():
input_ids_rmpad[k] = v
packed_seq_params = PackedSeqParams(
qkv_format="thd",
cu_seqlens_q=cu_seqlens_padded,
max_seqlen_q=max_seqlen_in_batch,
cu_seqlens_kv=cu_seqlens_padded,
max_seqlen_kv=max_seqlen_in_batch,
cu_seqlens_q_padded=cu_seqlens_padded,
cu_seqlens_kv_padded=cu_seqlens_padded,
)
if pre_process:
return input_ids_rmpad.unsqueeze(0), packed_seq_params
else:
return input_ids, packed_seq_params
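# Usage sketch (hypothetical tensors): the no-padding variant expects a jagged nested tensor
# with one row per sequence:
#
#   input_ids = torch.nested.nested_tensor(
#       [torch.tensor([5, 6, 7]), torch.tensor([8, 9])], layout=torch.jagged, device="cuda"
#   )
#   packed_ids, seq_params = preprocess_thd_no_padding(input_ids, need_roll=True)
#   # need_roll=True shifts the packed ids left by one position, which is typically used to
#   # build next-token labels; boundary positions are expected to be masked by the caller.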
def postprocess_thd_no_padding(
output: torch.Tensor,
packed_seq_params: PackedSeqParams,
input_ids: torch.Tensor,
batch_size: int,
post_process: bool = True,
) -> torch.Tensor:
"""
Postprocess packed sequences
"""
if not post_process:
return output
# -------------------------------------------------------------------------
# Move the lengths and offsets needed for subsequent Python-level indexing to the CPU in advance,
# to avoid a large number of .item() calls in the loop
# -------------------------------------------------------------------------
cu_padded_cpu: list[int] = packed_seq_params.cu_seqlens_q_padded.tolist()
# The reason why we use input_ids.offsets() instead of packed_seq_params.cu_seqlens_q.diff()
# is that the latter one is the padded length, while the former one is the original length.
cu_seqlens = input_ids.offsets()
seq_lens_cpu: list[int] = cu_seqlens.diff().tolist()
output_new = []
cp_size = mpu.get_context_parallel_world_size()
# all gather output across context parallel group
if cp_size > 1:
# output shape: [1, packed_len, hidden_dim]
# need to gather across cp group and concatenate in sequence dimension
output_list = [torch.empty_like(output) for _ in range(cp_size)]
torch.distributed.all_gather(output_list, output.detach(), group=mpu.get_context_parallel_group())
output_list[mpu.get_context_parallel_rank()] = output
else:
output_list = [output]
for i in range(batch_size):
if cp_size <= 1:
s = seq_lens_cpu[i]
start_idx = cu_padded_cpu[i]
output_new.append(output[0][start_idx : start_idx + s])
continue
s_len_padded_chunk = (cu_padded_cpu[i + 1] - cu_padded_cpu[i]) // cp_size
half_seqlen = s_len_padded_chunk // 2
s_len = seq_lens_cpu[i]
s_len_padded = s_len_padded_chunk * cp_size
tmp = torch.empty(s_len_padded, *output.shape[2:], device=output.device)
for j in range(cp_size):
o = output_list[j][0]
# split to 2 chunks
packed_start_idx = cu_padded_cpu[i] // cp_size
o0, o1 = (
o[packed_start_idx : packed_start_idx + half_seqlen],
o[packed_start_idx + half_seqlen : packed_start_idx + s_len_padded_chunk],
)
tmp[j * half_seqlen : (j + 1) * half_seqlen] = o0
tmp[s_len_padded - (j + 1) * half_seqlen : s_len_padded - j * half_seqlen] = o1
output_new.append(tmp[:s_len])
output_new_tensor = torch.nested.as_nested_tensor(output_new, layout=torch.jagged)
return output_new_tensor
def preprocess_bshd_no_padding(input_ids: torch.Tensor, pre_process: bool = True, need_roll: bool = False):
"""
Preprocess bshd sequences
return "input_ids, attention_mask, position_ids"
"""
cp_size = mpu.get_context_parallel_world_size()
# TODO: support context parallel size > 1
    assert cp_size == 1, "Context parallel size > 1 is not supported with the bshd no-padding path yet"
batch_size = input_ids.shape[0]
seqlens_in_batch = input_ids.offsets().diff()
max_seqlen = seqlens_in_batch.max().item()
if mpu.get_tensor_model_parallel_world_size() > 1:
sp_world_size = mpu.get_tensor_model_parallel_world_size()
pad_size = (sp_world_size - max_seqlen % sp_world_size) % sp_world_size
max_seqlen = max_seqlen + pad_size
attention_mask = torch.zeros(batch_size, max_seqlen, dtype=torch.bool, device=input_ids.device)
input_ids_bshd = torch.zeros(batch_size, max_seqlen, dtype=input_ids.dtype, device=input_ids.device)
for i in range(batch_size):
attention_mask[i, : seqlens_in_batch[i]] = True
input_ids_bshd[i, : seqlens_in_batch[i]] = input_ids[i]
position_ids = torch.arange(max_seqlen, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids_bshd)
if need_roll:
input_ids_bshd = torch.roll(input_ids_bshd, shifts=-1, dims=1)
return input_ids_bshd, attention_mask, position_ids
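# Example (hypothetical tensors): two sequences of lengths 3 and 2 passed as a jagged nested
# tensor are right-padded to the batch maximum (rounded up to a multiple of the TP size),
# yielding input_ids_bshd of shape [2, max_seqlen], an attention_mask that is True for the
# first 3 / 2 positions of each row, and position_ids [0, 1, ..., max_seqlen - 1] broadcast
# to every row.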
def postprocess_bshd_no_padding(
output: torch.Tensor,
attention_mask: torch.Tensor,
post_process: bool = True,
) -> torch.Tensor:
"""
Postprocess bshd sequences
"""
if not post_process:
return output
batch_size = output.shape[0]
output_new = []
for i in range(batch_size):
mask = attention_mask[i].bool()
output_new.append(output[i][mask])
output_new_tensor = torch.nested.as_nested_tensor(output_new, layout=torch.jagged)
return output_new_tensor
|
verl__models__mcore__weight_converter.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Online conversion of mcore weights to plain huggingface weights, without any weight fusion.
# This covers format conversion and name mapping; resharding is not handled here.
import torch
from megatron.core.transformer import TransformerConfig
from transformers import PretrainedConfig
class McoreToHFWeightConverterBase:
def __init__(self, hf_config: PretrainedConfig, mcore_config: TransformerConfig):
self.hf_config = hf_config
self.mcore_config = mcore_config
def convert_param(self, name: str, params_one_group: list[torch.Tensor]) -> torch.Tensor:
raise NotImplementedError
class McoreToHFWeightConverterDense(McoreToHFWeightConverterBase):
def _convert_attention_param(self, name: str, params: list[torch.Tensor]) -> tuple[list[str], list[torch.Tensor]]:
# 'decoder.layers.0.self_attention.linear_proj.weight'
# 'decoder.layers.0.self_attention.linear_qkv.layer_norm_weight'
# 'decoder.layers.0.self_attention.linear_qkv.weight'
# 'decoder.layers.0.self_attention.linear_qkv.bias'
layer_number = name.split(".")[2]
convert_names = []
if "self_attention.linear_qkv.bias" in name or "self_attention.linear_qkv.weight" in name:
param_type = name.split(".")[-1]
assert param_type == "bias" or param_type == "weight"
convert_names.append(f"model.layers.{layer_number}.self_attn.q_proj.{param_type}")
convert_names.append(f"model.layers.{layer_number}.self_attn.k_proj.{param_type}")
convert_names.append(f"model.layers.{layer_number}.self_attn.v_proj.{param_type}")
assert len(params) == 3
elif "self_attention.linear_proj.weight" in name:
convert_names.append(f"model.layers.{layer_number}.self_attn.o_proj.weight")
assert len(params) == 1
elif "self_attention.linear_qkv.layer_norm_weight" in name:
convert_names.append(f"model.layers.{layer_number}.input_layernorm.weight")
assert len(params) == 1
elif "self_attention.q_layernorm.weight" in name:
convert_names.append(f"model.layers.{layer_number}.self_attn.q_norm.weight")
assert len(params) == 1
elif "self_attention.k_layernorm.weight" in name:
convert_names.append(f"model.layers.{layer_number}.self_attn.k_norm.weight")
assert len(params) == 1
else:
raise NotImplementedError(f"Unsupported parameter name: {name}")
return convert_names, params
def _convert_mlp_param(self, name: str, params: list[torch.Tensor]) -> tuple[list[str], list[torch.Tensor]]:
# 'decoder.layers.0.mlp.linear_fc1.layer_norm_weight'
# 'decoder.layers.0.mlp.linear_fc1.weight'
# 'decoder.layers.0.mlp.linear_fc2.weight'
layer_number = name.split(".")[2]
convert_names = []
if "mlp.linear_fc1.weight" in name:
# split gate_proj and up_proj
convert_names.append(f"model.layers.{layer_number}.mlp.gate_proj.weight")
convert_names.append(f"model.layers.{layer_number}.mlp.up_proj.weight")
assert len(params) == 2
elif "mlp.linear_fc1.layer_norm_weight" in name:
convert_names.append(f"model.layers.{layer_number}.post_attention_layernorm.weight")
assert len(params) == 1
elif "mlp.linear_fc2.weight" in name:
convert_names.append(f"model.layers.{layer_number}.mlp.down_proj.weight")
assert len(params) == 1
else:
raise NotImplementedError(f"Unsupported parameter name: {name}")
return convert_names, params
def convert_param(self, name: str, params_one_group: list[torch.Tensor]) -> tuple[list[str], list[torch.Tensor]]:
direct_name_mapping = {
"embedding.word_embeddings.weight": "model.embed_tokens.weight",
"decoder.final_layernorm.weight": "model.norm.weight",
"output_layer.weight": "lm_head.weight",
}
if name in direct_name_mapping:
return [direct_name_mapping[name]], [params_one_group[0]]
if "self_attention" in name:
return self._convert_attention_param(name, params_one_group)
elif "mlp" in name:
return self._convert_mlp_param(name, params_one_group)
else:
raise NotImplementedError(f"Unsupported parameter name: {name}")
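# Usage sketch (hypothetical objects): params_one_group holds the already-split tensors that
# belong to one mcore parameter, e.g. the q/k/v slices of a fused linear_qkv weight:
#
#   converter = McoreToHFWeightConverterDense(hf_config, mcore_config)
#   hf_names, hf_tensors = converter.convert_param(
#       "decoder.layers.0.self_attention.linear_qkv.weight", [q_w, k_w, v_w]
#   )
#   # hf_names == ["model.layers.0.self_attn.q_proj.weight",
#   #              "model.layers.0.self_attn.k_proj.weight",
#   #              "model.layers.0.self_attn.v_proj.weight"]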
class McoreToHFWeightConverterQwen2Moe(McoreToHFWeightConverterDense):
def _convert_mlp_param(self, name: str, params: list[torch.Tensor]) -> tuple[list[str], list[torch.Tensor]]:
# 'decoder.layers.0.pre_mlp_layernorm.weight',
# 'decoder.layers.0.mlp.router.weight',
# 'decoder.layers.0.mlp.shared_experts.gate_weight',
# 'decoder.layers.0.mlp.shared_experts.linear_fc1.weight',
# 'decoder.layers.0.mlp.shared_experts.linear_fc2.weight'
# moe1
# 'decoder.layers.0.mlp.experts.linear_fc1.weight0',
# 'decoder.layers.0.mlp.experts.linear_fc1.weight1',
# 'decoder.layers.0.mlp.experts.linear_fc1.weight2',
# 'decoder.layers.0.mlp.experts.linear_fc1.weight3',
# moe2
# 'decoder.layers.0.mlp.experts.linear_fc2.weight0',
# 'decoder.layers.0.mlp.experts.linear_fc2.weight1',
layer_number = name.split(".")[2]
convert_names = []
if "pre_mlp_layernorm" in name:
convert_names.append(f"model.layers.{layer_number}.post_attention_layernorm.weight")
assert len(params) == 1
elif "mlp.router.weight" in name:
convert_names.append(f"model.layers.{layer_number}.mlp.gate.weight")
assert len(params) == 1
elif "shared_experts.gate_weight" in name:
convert_names.append(f"model.layers.{layer_number}.mlp.shared_expert_gate.weight")
assert len(params) == 1
elif "shared_experts.linear_fc1.weight" in name: # split gate_proj and up_proj
convert_names.append(f"model.layers.{layer_number}.mlp.shared_expert.gate_proj.weight")
convert_names.append(f"model.layers.{layer_number}.mlp.shared_expert.up_proj.weight")
assert len(params) == 2
elif "shared_experts.linear_fc2.weight" in name:
convert_names.append(f"model.layers.{layer_number}.mlp.shared_expert.down_proj.weight")
assert len(params) == 1
elif "mlp.experts.linear_fc1" in name: # split gate_proj and up_proj
expert_id = name.split("weight")[-1]
convert_names.append(f"model.layers.{layer_number}.mlp.experts.{expert_id}.gate_proj.weight")
convert_names.append(f"model.layers.{layer_number}.mlp.experts.{expert_id}.up_proj.weight")
assert len(params) == 2
elif "mlp.experts.linear_fc2" in name:
expert_id = name.split("weight")[-1]
convert_names.append(f"model.layers.{layer_number}.mlp.experts.{expert_id}.down_proj.weight")
assert len(params) == 1
else:
raise NotImplementedError(f"Unsupported parameter name: {name}")
return convert_names, params
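# Naming note (illustrative): per-expert mcore weights carry the expert index as a suffix,
# so "decoder.layers.3.mlp.experts.linear_fc1.weight7" maps to
#   model.layers.3.mlp.experts.7.gate_proj.weight
#   model.layers.3.mlp.experts.7.up_proj.weight
# while "decoder.layers.3.mlp.experts.linear_fc2.weight7" maps to
#   model.layers.3.mlp.experts.7.down_proj.weight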
class McoreToHFWeightConverterQwen2_5_VL(McoreToHFWeightConverterDense):
def convert_param(self, name: str, params_one_group: list[torch.Tensor]) -> tuple[list[str], list[torch.Tensor]]:
direct_name_mapping = {
"language_model.embedding.word_embeddings.weight": "model.embed_tokens.weight",
"language_model.decoder.final_layernorm.weight": "model.norm.weight",
"language_model.output_layer.weight": "lm_head.weight",
"vision_model.patch_embed.proj.weight": "visual.patch_embed.proj.weight",
"vision_model.decoder.final_layernorm.weight": "visual.merger.ln_q.weight",
"vision_model.projection.encoder.linear_fc1.weight": "visual.merger.mlp.0.weight",
"vision_model.projection.encoder.linear_fc1.bias": "visual.merger.mlp.0.bias",
"vision_model.projection.encoder.linear_fc2.weight": "visual.merger.mlp.2.weight",
"vision_model.projection.encoder.linear_fc2.bias": "visual.merger.mlp.2.bias",
}
if name in direct_name_mapping:
return [direct_name_mapping[name]], [params_one_group[0]]
if "self_attention" in name:
return self._convert_attention_param(name, params_one_group)
elif "mlp" in name:
return self._convert_mlp_param(name, params_one_group)
else:
raise NotImplementedError(f"Unsupported parameter name: {name}")
def _convert_attention_param(self, name: str, params: list[torch.Tensor]) -> tuple[list[str], list[torch.Tensor]]:
model_type, _, _, layer_number = name.split(".")[:4]
convert_names = []
if model_type == "language_model":
name_map_after_layer = {
"self_attention.linear_qkv.bias": [
"self_attn.q_proj.bias",
"self_attn.k_proj.bias",
"self_attn.v_proj.bias",
],
"self_attention.linear_qkv.weight": [
"self_attn.q_proj.weight",
"self_attn.k_proj.weight",
"self_attn.v_proj.weight",
],
"self_attention.linear_proj.weight": "self_attn.o_proj.weight",
"self_attention.linear_qkv.layer_norm_weight": "input_layernorm.weight",
}
name_after_layer = ".".join(name.split(".")[-3:])
mapped_name = name_map_after_layer.get(name_after_layer)
if isinstance(mapped_name, list):
assert len(params) == len(mapped_name)
for one in mapped_name:
convert_names.append(f"model.layers.{layer_number}.{one}")
else:
assert len(params) == 1
convert_names.append(f"model.layers.{layer_number}.{mapped_name}")
elif model_type == "vision_model":
name_map_after_layer = {
"self_attention.linear_proj.weight": "attn.proj.weight",
"self_attention.linear_proj.bias": "attn.proj.bias",
"self_attention.linear_qkv.layer_norm_weight": "norm1.weight",
}
name_after_layer = ".".join(name.split(".")[-3:])
mapped_name = name_map_after_layer.get(name_after_layer, None)
if mapped_name is None:
assert "linear_qkv" in name_after_layer
assert len(params) == 3
new_param = torch.cat(params, dim=0)
params = [new_param]
if "bias" in name_after_layer:
convert_names.append(f"visual.blocks.{layer_number}.attn.qkv.bias")
else:
convert_names.append(f"visual.blocks.{layer_number}.attn.qkv.weight")
else:
assert len(params) == 1
convert_names.append(f"visual.blocks.{layer_number}.{mapped_name}")
else:
raise NotImplementedError(f"Unsupported model type: {model_type}")
return convert_names, params
def _convert_mlp_param(self, name: str, params: list[torch.Tensor]) -> tuple[list[str], list[torch.Tensor]]:
model_type, _, _, layer_number = name.split(".")[:4]
convert_names = []
if model_type == "language_model":
name_map_after_layer = {
"mlp.linear_fc1.weight": ["mlp.gate_proj.weight", "mlp.up_proj.weight"],
"mlp.linear_fc1.bias": ["mlp.gate_proj.bias", "mlp.up_proj.bias"],
"mlp.linear_fc2.weight": "mlp.down_proj.weight",
"mlp.linear_fc2.bias": "mlp.down_proj.bias",
"mlp.linear_fc1.layer_norm_weight": "post_attention_layernorm.weight",
}
name_after_layer = ".".join(name.split(".")[-3:])
mapped_name = name_map_after_layer.get(name_after_layer)
if isinstance(mapped_name, list):
assert len(params) == len(mapped_name)
for one in mapped_name:
convert_names.append(f"model.layers.{layer_number}.{one}")
else:
assert len(params) == 1
convert_names.append(f"model.layers.{layer_number}.{mapped_name}")
elif model_type == "vision_model":
name_map_after_layer = {
"mlp.linear_fc1.weight": ["mlp.gate_proj.weight", "mlp.up_proj.weight"],
"mlp.linear_fc1.bias": ["mlp.gate_proj.bias", "mlp.up_proj.bias"],
"mlp.linear_fc2.weight": "mlp.down_proj.weight",
"mlp.linear_fc2.bias": "mlp.down_proj.bias",
"mlp.linear_fc1.layer_norm_weight": "norm2.weight",
}
name_after_layer = ".".join(name.split(".")[-3:])
mapped_name = name_map_after_layer.get(name_after_layer)
if isinstance(mapped_name, list):
assert len(params) == len(mapped_name)
for one in mapped_name:
convert_names.append(f"visual.blocks.{layer_number}.{one}")
else:
assert len(params) == 1
convert_names.append(f"visual.blocks.{layer_number}.{mapped_name}")
else:
raise NotImplementedError(f"Unsupported model type: {model_type}")
return convert_names, params
class McoreToHFWeightConverterDpskv3(McoreToHFWeightConverterBase):
def _convert_attention_param(self, name: str, params: list[torch.Tensor]) -> tuple[list[str], list[torch.Tensor]]:
# mcore
# 'decoder.layers.0.input_layernorm.weight'
# 'decoder.layers.0.self_attention.linear_proj.weight'
# 'decoder.layers.0.self_attention.linear_q_proj.weight'
# 'decoder.layers.0.self_attention.linear_kv_down_proj.weight'
# 'decoder.layers.0.self_attention.linear_kv_up_proj.layer_norm_weight'
# 'decoder.layers.0.self_attention.linear_kv_up_proj.weight'
# 'decoder.layers.0.self_attention.linear_q_down_proj.weight'
# 'decoder.layers.0.self_attention.linear_q_up_proj.weight'
# 'decoder.layers.0.self_attention.linear_q_up_proj.layer_norm_weight'
# hf
# 'model.layers.0.input_layernorm.weight'
# 'model.layers.0.self_attn.o_proj.weight'
# 'model.layers.0.self_attn.q_proj.weight'
# 'model.layers.0.self_attn.kv_a_proj_with_mqa.weight'
# 'model.layers.0.self_attn.kv_a_layernorm.weight'
# 'model.layers.0.self_attn.kv_b_proj.weight'
# 'model.layers.0.self_attn.q_a_proj.weight'
# 'model.layers.0.self_attn.q_b_proj.weight'
# 'model.layers.0.self_attn.q_a_layernorm.weight'
name_map_after_layer = {
"input_layernorm.weight": "input_layernorm.weight",
"self_attention.linear_proj.weight": "self_attn.o_proj.weight",
"self_attention.linear_q_proj.weight": "self_attn.q_proj.weight",
"self_attention.linear_kv_down_proj.weight": "self_attn.kv_a_proj_with_mqa.weight",
"self_attention.linear_kv_up_proj.layer_norm_weight": "self_attn.kv_a_layernorm.weight",
"self_attention.linear_kv_up_proj.weight": "self_attn.kv_b_proj.weight",
"self_attention.linear_q_down_proj.weight": "self_attn.q_a_proj.weight",
"self_attention.linear_q_up_proj.weight": "self_attn.q_b_proj.weight",
"self_attention.linear_q_up_proj.layer_norm_weight": "self_attn.q_a_layernorm.weight",
}
assert len(params) == 1
convert_names = []
layer_number = name.split(".")[2]
name_after_layer = name.split(f".{layer_number}.")[1]
convert_names.append(f"model.layers.{layer_number}.{name_map_after_layer[name_after_layer]}")
return convert_names, params
def _convert_mlp_param(self, name: str, params: list[torch.Tensor]) -> tuple[list[str], list[torch.Tensor]]:
# mcore dense
# 'decoder.layers.0.mlp.linear_fc1.layer_norm_weight'
# 'decoder.layers.0.mlp.linear_fc2.weight'
# 'decoder.layers.0.mlp.linear_fc1.weight'
# ---
# 'decoder.layers.1.mlp.shared_experts.linear_fc1.weight'
# ---
# 'decoder.layers.1.mlp.shared_experts.linear_fc2.weight'
# hf dense
# 'model.layers.0.post_attention_layernorm.weight'
# 'model.layers.0.mlp.down_proj.weight'
# 'model.layers.0.mlp.gate_proj.weight'
# 'model.layers.0.mlp.up_proj.weight'
# 'model.layers.1.mlp.shared_experts.gate_proj.weight'
# 'model.layers.1.mlp.shared_experts.up_proj.weight'
# 'model.layers.1.mlp.shared_experts.down_proj.weight'
# mcore moe
# 'decoder.layers.1.pre_mlp_layernorm.weight'
# 'decoder.layers.1.mlp.router.weight'
# 'decoder.layers.1.mlp.router.expert_bias'
# 'decoder.layers.1.mlp.experts.linear_fc1.weight0'
# ---
# 'decoder.layers.1.mlp.experts.linear_fc2.weight0'
# hf moe
# 'model.layers.1.post_attention_layernorm.weight'
# 'model.layers.1.mlp.gate.weight'
# 'model.layers.1.mlp.gate.e_score_correction_bias'
# 'model.layers.1.mlp.experts.0.gate_proj.weight'
# 'model.layers.1.mlp.experts.0.up_proj.weight'
# 'model.layers.1.mlp.experts.0.down_proj.weight'
name_map_after_layer = {
"mlp.linear_fc1.layer_norm_weight": "post_attention_layernorm.weight",
"mlp.linear_fc2.weight": "mlp.down_proj.weight",
"mlp.shared_experts.linear_fc2.weight": "mlp.shared_experts.down_proj.weight",
"mlp.linear_fc1.weight": ["mlp.gate_proj.weight", "mlp.up_proj.weight"],
"mlp.shared_experts.linear_fc1.weight": [
"mlp.shared_experts.gate_proj.weight",
"mlp.shared_experts.up_proj.weight",
],
"pre_mlp_layernorm.weight": "post_attention_layernorm.weight",
"mlp.router.weight": "mlp.gate.weight",
"mlp.router.expert_bias": "mlp.gate.e_score_correction_bias",
}
convert_names = []
layer_number = name.split(".")[2]
name_after_layer = name.split(f".{layer_number}.")[1]
if name_after_layer in name_map_after_layer:
mapped_name = name_map_after_layer[name_after_layer]
if isinstance(mapped_name, list):
assert len(params) == len(mapped_name)
for one in mapped_name:
convert_names.append(f"model.layers.{layer_number}.{one}")
else:
assert len(params) == 1
convert_names.append(f"model.layers.{layer_number}.{mapped_name}")
else:
if "mlp.experts.linear_fc1.weight" in name:
expert_id = name.split("weight")[-1]
convert_names.append(f"model.layers.{layer_number}.mlp.experts.{expert_id}.gate_proj.weight")
convert_names.append(f"model.layers.{layer_number}.mlp.experts.{expert_id}.up_proj.weight")
assert len(params) == 2
elif "mlp.experts.linear_fc2.weight" in name:
expert_id = name.split("weight")[-1]
convert_names.append(f"model.layers.{layer_number}.mlp.experts.{expert_id}.down_proj.weight")
assert len(params) == 1
else:
raise NotImplementedError(f"Unsupported parameter name: {name}")
return convert_names, params
def _convert_mtp_param(self, name: str, params: list[torch.Tensor]) -> tuple[list[str], list[torch.Tensor]]:
assert self.mcore_config.mtp_num_layers == 1, "only support one mtp layer for now"
assert self.mcore_config.num_layers == 61, "only support 61 layers for now"
direct_name_mapping = {
"mtp.layers.0.enorm.weight": "model.layers.61.enorm.weight",
"mtp.layers.0.hnorm.weight": "model.layers.61.hnorm.weight",
"mtp.layers.0.eh_proj.weight": "model.layers.61.eh_proj.weight",
"mtp.layers.0.final_layernorm.weight": "model.layers.61.shared_head.norm.weight",
}
if name in direct_name_mapping:
return [direct_name_mapping[name]], [params[0]]
assert "mtp.layers.0.transformer_layer" in name, "only support transformer layer for now"
# use proxy name to convert
proxy_name = name.replace("mtp.layers.0.transformer_layer", "decoder.layers.61")
if "self_attention" in proxy_name or "input_layernorm.weight" in proxy_name:
convert_names, params = self._convert_attention_param(proxy_name, params)
elif "mlp" in proxy_name:
convert_names, params = self._convert_mlp_param(proxy_name, params)
else:
raise NotImplementedError(f"Unsupported parameter name: {name}")
return convert_names, params
def convert_param(self, name: str, params_one_group: list[torch.Tensor]) -> tuple[list[str], list[torch.Tensor]]:
direct_name_mapping = {
"embedding.word_embeddings.weight": "model.embed_tokens.weight",
"decoder.final_layernorm.weight": "model.norm.weight",
"output_layer.weight": "lm_head.weight",
}
if name in direct_name_mapping:
return [direct_name_mapping[name]], [params_one_group[0]]
if "mtp" in name:
return self._convert_mtp_param(name, params_one_group)
elif "self_attention" in name or "input_layernorm.weight" in name:
return self._convert_attention_param(name, params_one_group)
elif "mlp" in name:
return self._convert_mlp_param(name, params_one_group)
else:
raise NotImplementedError(f"Unsupported parameter name: {name}")
class McoreToHFWeightConverterMixtral(McoreToHFWeightConverterDense):
def _convert_mlp_param(self, name: str, params: list[torch.Tensor]) -> tuple[list[str], list[torch.Tensor]]:
# decoder.layers.0.mlp.router.weight
# decoder.layers.0.mlp.experts.linear_fc1.weight0 - weight7
# decoder.layers.0.mlp.experts.linear_fc2.weight0 - weight7
layer_number = name.split(".")[2]
convert_names = []
if "pre_mlp_layernorm" in name:
convert_names.append(f"model.layers.{layer_number}.post_attention_layernorm.weight")
elif "mlp.router.weight" in name:
convert_names.append(f"model.layers.{layer_number}.block_sparse_moe.gate.weight")
elif "mlp.experts.linear_fc1.weight" in name:
expert_id = name.split("weight")[-1]
convert_names.append(f"model.layers.{layer_number}.block_sparse_moe.experts.{expert_id}.w1.weight")
convert_names.append(f"model.layers.{layer_number}.block_sparse_moe.experts.{expert_id}.w3.weight")
elif "mlp.experts.linear_fc2.weight" in name:
expert_id = name.split("weight")[-1]
convert_names.append(f"model.layers.{layer_number}.block_sparse_moe.experts.{expert_id}.w2.weight")
else:
raise NotImplementedError(f"Unsupported parameter name: {name}")
return convert_names, params
class McoreToHFWeightConverterQwen3Moe(McoreToHFWeightConverterDense):
def _convert_mlp_param(self, name: str, params: list[torch.Tensor]) -> tuple[list[str], list[torch.Tensor]]:
        # qwen3 moe has no shared expert
# 'decoder.layers.0.pre_mlp_layernorm.weight',
# 'decoder.layers.0.mlp.router.weight',
# moe1
# 'decoder.layers.0.mlp.experts.linear_fc1.weight0',
# 'decoder.layers.0.mlp.experts.linear_fc1.weight1',
# 'decoder.layers.0.mlp.experts.linear_fc1.weight2',
# 'decoder.layers.0.mlp.experts.linear_fc1.weight3',
# moe2
# 'decoder.layers.0.mlp.experts.linear_fc2.weight0',
# 'decoder.layers.0.mlp.experts.linear_fc2.weight1',
layer_number = name.split(".")[2]
convert_names = []
if "pre_mlp_layernorm" in name:
convert_names.append(f"model.layers.{layer_number}.post_attention_layernorm.weight")
assert len(params) == 1
elif "mlp.router.weight" in name:
convert_names.append(f"model.layers.{layer_number}.mlp.gate.weight")
assert len(params) == 1
elif "mlp.experts.linear_fc1" in name: # split gate_proj and up_proj
expert_id = name.split("weight")[-1]
convert_names.append(f"model.layers.{layer_number}.mlp.experts.{expert_id}.gate_proj.weight")
convert_names.append(f"model.layers.{layer_number}.mlp.experts.{expert_id}.up_proj.weight")
assert len(params) == 2
elif "mlp.experts.linear_fc2" in name:
expert_id = name.split("weight")[-1]
convert_names.append(f"model.layers.{layer_number}.mlp.experts.{expert_id}.down_proj.weight")
assert len(params) == 1
else:
raise NotImplementedError(f"Unsupported parameter name: {name}")
return convert_names, params
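# Editorial sketch (not part of the original module): it assumes tensor parallel size 1 and purely
# hypothetical sizes, and only illustrates how a fused Megatron expert `linear_fc1` weight relates to
# the separate HF `gate_proj` / `up_proj` weights whose names the converters above emit.
if __name__ == "__main__":
    import torch

    hidden_size, moe_intermediate_size = 8, 16
    fused_fc1 = torch.randn(2 * moe_intermediate_size, hidden_size)  # Megatron expert linear_fc1 (tp=1)
    gate_proj, up_proj = fused_fc1.chunk(2, dim=0)  # gate half stacked on top of the up half
    assert gate_proj.shape == up_proj.shape == (moe_intermediate_size, hidden_size)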
|
verl__models__qwen2__megatron__checkpoint_utils__qwen2_loader.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import torch
import torch.distributed as dist
from verl.utils.device import get_device_id, get_torch_device
def _megatron_calc_layer_map(config):
"""Calculate the mapping of global layer_idx to local layer_idx
Returns:
layer_map (Dict: int -> tuple(int, int, int)):
mapping from the global layer index to
a tuple of (pp_rank, virtual_pp_rank, layer_idx inside model)
"""
from megatron.core import mpu
pp_size = mpu.get_pipeline_model_parallel_world_size()
virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1
layer_map = dict()
num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size
assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers
for pp_rank_idx in range(pp_size):
for virtual_pp_rank_idx in range(virtual_pp_size):
layer_offset = (
virtual_pp_rank_idx * (config.num_hidden_layers // virtual_pp_size) + pp_rank_idx * num_layers_per_model
)
for layer_idx in range(num_layers_per_model):
layer_map[layer_offset + layer_idx] = (
pp_rank_idx,
virtual_pp_rank_idx,
layer_idx,
)
return layer_map
def load_state_dict_to_megatron_qwen2(
state_dict, wrapped_models, config, params_dtype, is_value_model=False, tie_word_embeddings=False
):
"""Load merged state_dict to sharded Megatron module in training."""
from megatron.core import DistributedDataParallel as LocalDDP
from megatron.core import mpu
from megatron.core.transformer.module import Float16Module
from torch.nn.parallel import DistributedDataParallel as torchDDP
from verl.utils.logger import print_rank_0
from verl.utils.megatron_utils import unwrap_model
start_time = time.time()
def _get_gpt_model(model):
return model
def fetch_params(module):
for param in module.parameters():
            # broadcast parameters within the data-parallel group
            torch.distributed.broadcast(
param.data, src=mpu.get_data_parallel_src_rank(), group=mpu.get_data_parallel_group()
)
dp_rank = mpu.get_data_parallel_rank()
pp_rank = mpu.get_pipeline_model_parallel_rank()
pp_size = mpu.get_pipeline_model_parallel_world_size()
virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1
mp_group = mpu.get_model_parallel_group()
if torch.distributed.get_rank() == 0:
        assert mp_group.rank() == 0, f"mp_rank:[{mp_group.rank()}] != 0 on rank #0"
assert pp_rank == 0, f"pp_rank:[{pp_rank}] != 0 on rank #0"
assert dp_rank == 0, f"dp_rank:[{dp_rank}] != 0 on rank #0"
if not isinstance(wrapped_models, list | tuple):
        wrapped_models = [wrapped_models]
assert len(wrapped_models) == virtual_pp_size
num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size
assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers, (
f"num_layers_per_model: {num_layers_per_model} * pp_size: {pp_size} * virtual_pp_size: "
f"{virtual_pp_size} != config.num_hidden_layers: {config.num_hidden_layers}"
)
models = [None] * len(wrapped_models)
for i, wrapped_model in enumerate(wrapped_models):
models[i] = unwrap_model(wrapped_model, (torchDDP, LocalDDP, Float16Module))
gpt_model_module = _get_gpt_model(models[i])
assert len(gpt_model_module.model.layers) == num_layers_per_model
def _fetch_tensor(tensor, name) -> torch.Tensor:
"""fetch tensor"""
nonlocal state_dict
if tensor is not None:
tensor = tensor.data.copy_(state_dict[name], non_blocking=True)
def _fetch_tp_shard_tensor_vocab(tensor, name, chunk_dim=0, mutate_func=None) -> torch.Tensor:
"""fetch tensor in tp shards"""
nonlocal state_dict
tp_rank = mpu.get_tensor_model_parallel_rank()
tp_size = mpu.get_tensor_model_parallel_world_size()
if name in state_dict:
full_weight = state_dict[name]
if mutate_func is not None:
full_weight = mutate_func(full_weight)
tensor_chunk = torch.chunk(full_weight, tp_size, dim=chunk_dim)
if tensor is not None:
tensor = tensor.data.copy_(tensor_chunk[tp_rank], non_blocking=True)
else:
print(f"tp_shard tensor:[{name}] not in state_dict, skip loading")
def _fetch_tp_shard_tensor(tensor, name, chunk_dim=0, mutate_func=None) -> torch.Tensor:
"""fetch tensor in tp shards"""
nonlocal state_dict
tp_rank = mpu.get_tensor_model_parallel_rank()
tp_size = mpu.get_tensor_model_parallel_world_size()
if name in state_dict:
full_weight = state_dict[name]
if mutate_func is not None:
full_weight = mutate_func(full_weight)
tensor_chunk = torch.chunk(full_weight, tp_size, dim=chunk_dim)
if tensor is not None:
tensor = tensor.data.copy_(tensor_chunk[tp_rank], non_blocking=True)
else:
print(f"tp_shard tensor:[{name}] not in state_dict, skip loading")
def _fetch_tp_shard_tensor_gate_up(tensor, gate_name, up_name) -> torch.Tensor:
"""fetch gate_up tensor in tp shards"""
nonlocal state_dict
nonlocal mp_group
tp_rank = mpu.get_tensor_model_parallel_rank()
tp_size = mpu.get_tensor_model_parallel_world_size()
if gate_name in state_dict and up_name in state_dict:
gate_weight = state_dict[gate_name]
up_weight = state_dict[up_name]
new_gate_up_weight = torch.empty(
config.intermediate_size * 2, config.hidden_size, dtype=params_dtype, device=get_device_id()
)
for i in range(tp_size):
intermediate_size_tp = config.intermediate_size // tp_size
gate_weight_tp = gate_weight[i * intermediate_size_tp : (i + 1) * intermediate_size_tp]
up_weight_tp = up_weight[i * intermediate_size_tp : (i + 1) * intermediate_size_tp]
new_gate_up_weight[intermediate_size_tp * 2 * i : intermediate_size_tp * 2 * (i + 1)].copy_(
torch.cat([gate_weight_tp, up_weight_tp], dim=0)
)
tensor_chunk = torch.chunk(new_gate_up_weight, tp_size, dim=0)
if tensor is not None:
tensor = tensor.data.copy_(tensor_chunk[tp_rank], non_blocking=True)
else:
print(f"tp_shard tensor:[{gate_name}, {up_name}] not in state_dict, skip loading")
def _fetch_tp_shard_tensor_qkv(tensor, q_name, k_name, v_name, bias=False) -> torch.Tensor:
"""fetch tensor in tp shards across mp_group"""
nonlocal state_dict
nonlocal mp_group
tp_rank = mpu.get_tensor_model_parallel_rank()
tp_size = mpu.get_tensor_model_parallel_world_size()
assert q_name in state_dict and k_name in state_dict and v_name in state_dict
full_weight_q = state_dict[q_name]
full_weight_k = state_dict[k_name]
full_weight_v = state_dict[v_name]
hidden_size_per_head = config.hidden_size // config.num_attention_heads
if config.num_key_value_heads >= tp_size:
q_size_tp = config.hidden_size // tp_size
kv_size_tp = hidden_size_per_head * config.num_key_value_heads // tp_size
total_size = q_size_tp + 2 * kv_size_tp
if not bias:
new_weight_qkv = torch.empty(
total_size * tp_size, config.hidden_size, dtype=params_dtype, device=get_device_id()
)
else:
new_weight_qkv = torch.empty(total_size * tp_size, dtype=params_dtype, device=get_device_id())
for i in range(tp_size):
q_part = full_weight_q[i * q_size_tp : (i + 1) * q_size_tp]
k_part = full_weight_k[i * kv_size_tp : (i + 1) * kv_size_tp]
v_part = full_weight_v[i * kv_size_tp : (i + 1) * kv_size_tp]
new_weight_qkv[i * total_size : (i + 1) * total_size].copy_(torch.cat([q_part, k_part, v_part], dim=0))
else:
q_size_tp = config.hidden_size // tp_size
kv_size_tp = hidden_size_per_head
total_size = q_size_tp + 2 * kv_size_tp
if not bias:
new_weight_qkv = torch.empty(
total_size * tp_size, config.hidden_size, dtype=params_dtype, device=get_device_id()
)
else:
new_weight_qkv = torch.empty(total_size * tp_size, dtype=params_dtype, device=get_device_id())
for i in range(tp_size):
q_part = full_weight_q[i * q_size_tp : (i + 1) * q_size_tp]
start_idx = i * config.num_key_value_heads // tp_size * hidden_size_per_head
end_idx = (i * config.num_key_value_heads // tp_size + 1) * hidden_size_per_head
k_part = full_weight_k[start_idx:end_idx]
v_part = full_weight_v[start_idx:end_idx]
new_weight_qkv[i * total_size : (i + 1) * total_size].copy_(torch.cat([q_part, k_part, v_part], dim=0))
tensor_chunk = torch.chunk(new_weight_qkv, tp_size, dim=0)
if tensor is not None:
tensor = tensor.data.copy_(tensor_chunk[tp_rank], non_blocking=True)
# Embeddings
# -------------------
print_rank_0("loading embeddings...")
gpt_model_module = _get_gpt_model(models[0])
if pp_rank == 0:
embed_tokens_weight = gpt_model_module.model.embed_tokens.weight
_fetch_tp_shard_tensor_vocab(embed_tokens_weight, "model.embed_tokens.weight")
# Transformer layers
# -------------------
layer_map = _megatron_calc_layer_map(config)
pp_rank = mpu.get_pipeline_model_parallel_rank()
pp_size = mpu.get_pipeline_model_parallel_world_size()
num_layer_per_pp = config.num_hidden_layers // pp_size
vpp_size = mpu.get_virtual_pipeline_model_parallel_world_size()
layer_list = []
if vpp_size is not None:
for vpp_rank in range(vpp_size):
num_layer_vpp_chunk = num_layer_per_pp // vpp_size
num_layer_this_model = num_layer_vpp_chunk
offset = vpp_rank * (config.num_hidden_layers // mpu.get_virtual_pipeline_model_parallel_world_size()) + (
mpu.get_pipeline_model_parallel_rank() * num_layer_vpp_chunk
)
layer_list.extend(list(range(offset, offset + num_layer_this_model)))
else:
num_layer_this_model = num_layer_per_pp
offset = pp_rank * num_layer_per_pp
layer_list.extend(list(range(offset, offset + num_layer_this_model)))
for layer in layer_list:
print(f"{torch.distributed.get_rank()} loading layer #{layer}...")
layer_name = f"model.layers.{layer}"
dst_pp_rank, dst_virtual_pp_rank, dst_layer_idx = layer_map[layer]
print(
f"{torch.distributed.get_rank()} offset: {offset}, num_layer_this_model: {num_layer_this_model}, "
f"layer_name: {layer_name}, layer_map[layer]: {layer_map[layer]}"
)
gpt_model_module = _get_gpt_model(models[dst_virtual_pp_rank])
sync_layer = gpt_model_module.model.layers[dst_layer_idx]
_fetch_tensor(
sync_layer.input_layernorm.weight if dst_pp_rank == pp_rank else None,
f"{layer_name}.input_layernorm.weight",
)
_fetch_tp_shard_tensor_qkv(
sync_layer.self_attn.qkv_proj.weight if dst_pp_rank == pp_rank else None,
f"{layer_name}.self_attn.q_proj.weight",
f"{layer_name}.self_attn.k_proj.weight",
f"{layer_name}.self_attn.v_proj.weight",
)
_fetch_tp_shard_tensor_qkv(
sync_layer.self_attn.qkv_proj.bias if dst_pp_rank == pp_rank else None,
f"{layer_name}.self_attn.q_proj.bias",
f"{layer_name}.self_attn.k_proj.bias",
f"{layer_name}.self_attn.v_proj.bias",
bias=True,
)
_fetch_tp_shard_tensor(
sync_layer.self_attn.o_proj.weight if dst_pp_rank == pp_rank else None,
f"{layer_name}.self_attn.o_proj.weight",
chunk_dim=1,
)
_fetch_tensor(
sync_layer.post_attention_layernorm.weight if dst_pp_rank == pp_rank else None,
f"{layer_name}.post_attention_layernorm.weight",
)
_fetch_tp_shard_tensor_gate_up(
sync_layer.mlp.gate_up_proj.weight if dst_pp_rank == pp_rank else None,
f"{layer_name}.mlp.gate_proj.weight",
f"{layer_name}.mlp.up_proj.weight",
)
_fetch_tp_shard_tensor(
sync_layer.mlp.down_proj.weight if dst_pp_rank == pp_rank else None,
f"{layer_name}.mlp.down_proj.weight",
chunk_dim=1,
)
# Final Layernorm
# -------------------
print_rank_0("loading final layernorm...")
gpt_model_module = _get_gpt_model(models[-1])
_fetch_tensor(
getattr(gpt_model_module.model.norm, "weight", None),
"model.norm.weight",
)
if tie_word_embeddings:
print_rank_0("tie_word_embeddings skip load lm_head")
else:
print_rank_0("loading lm_head...")
        lm_head_weight = None
        if pp_rank + 1 == pp_size:
lm_head_weight = gpt_model_module.lm_head.weight
if is_value_model:
if "lm_head.weight" in state_dict and state_dict["lm_head.weight"].shape[0] == 1:
_fetch_tensor(lm_head_weight, "lm_head.weight")
print_rank_0("load lm_head from value_head weight")
elif "reward_head.weight" in state_dict and state_dict["reward_head.weight"].shape[0] == 1:
_fetch_tensor(lm_head_weight, "reward_head.weight")
print_rank_0("load lm_head from value_head weight")
else:
_fetch_tensor(None, "lm_head.weight")
print_rank_0("fail to match lm_head in value_model")
else:
_fetch_tp_shard_tensor(lm_head_weight, "lm_head.weight")
dist.barrier()
get_torch_device().empty_cache()
print_rank_0(f"loading megatron ckpt done, time elapsed {time.time() - start_time}s")
|
verl__models__qwen2__megatron__checkpoint_utils__qwen2_loader_depracated.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import torch
import torch.distributed as dist
from verl.utils.device import get_device_id, get_torch_device
def _megatron_calc_layer_map(config):
"""Calculate the mapping of global layer_idx to local layer_idx
Returns:
layer_map (Dict: int -> tuple(int, int, int)):
mapping from the global layer index to
a tuple of (pp_rank, virtual_pp_rank, layer_idx inside model)
"""
from megatron.core import mpu
pp_size = mpu.get_pipeline_model_parallel_world_size()
virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1
layer_map = dict()
num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size
assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers
for pp_rank_idx in range(pp_size):
for virtual_pp_rank_idx in range(virtual_pp_size):
layer_offset = (
virtual_pp_rank_idx * (config.num_hidden_layers // virtual_pp_size) + pp_rank_idx * num_layers_per_model
)
for layer_idx in range(num_layers_per_model):
layer_map[layer_offset + layer_idx] = (
pp_rank_idx,
virtual_pp_rank_idx,
layer_idx,
)
return layer_map
def load_state_dict_to_megatron_qwen2(
state_dict, wrapped_models, config, params_dtype, is_value_model=False, tie_word_embeddings=False
):
"""Load merged state_dict to sharded Megatron module in training."""
from megatron.core import DistributedDataParallel as LocalDDP
from megatron.core import mpu
from megatron.core.transformer.module import Float16Module
from torch.nn.parallel import DistributedDataParallel as torchDDP
from verl.utils.logger import print_rank_0
from verl.utils.megatron_utils import unwrap_model
start_time = time.time()
def _get_gpt_model(model):
return model
def broadcast_params(module):
for param in module.parameters():
torch.distributed.broadcast(
param.data, src=mpu.get_data_parallel_src_rank(), group=mpu.get_data_parallel_group()
)
dp_rank = mpu.get_data_parallel_rank()
pp_rank = mpu.get_pipeline_model_parallel_rank()
pp_size = mpu.get_pipeline_model_parallel_world_size()
virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1
mp_group = mpu.get_model_parallel_group()
if torch.distributed.get_rank() == 0:
        assert mp_group.rank() == 0, f"mp_rank:[{mp_group.rank()}] != 0 on rank #0"
assert pp_rank == 0, f"pp_rank:[{pp_rank}] != 0 on rank #0"
assert dp_rank == 0, f"dp_rank:[{dp_rank}] != 0 on rank #0"
if not isinstance(wrapped_models, list | tuple):
        wrapped_models = [wrapped_models]
assert len(wrapped_models) == virtual_pp_size
num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size
assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers, (
f"num_layers_per_model: {num_layers_per_model} * pp_size: {pp_size} * virtual_pp_size: "
f"{virtual_pp_size} != config.num_hidden_layers: {config.num_hidden_layers}"
)
models = [None] * len(wrapped_models)
for i, wrapped_model in enumerate(wrapped_models):
models[i] = unwrap_model(wrapped_model, (torchDDP, LocalDDP, Float16Module))
gpt_model_module = _get_gpt_model(models[i])
assert len(gpt_model_module.model.layers) == num_layers_per_model
def _broadcast_tensor(tensor, name) -> torch.Tensor:
"""broadcast tensor from rank0 across mp_group"""
nonlocal state_dict
nonlocal mp_group
if torch.distributed.get_rank() == 0:
if name in state_dict:
weight = state_dict[name]
tensor_shape = weight.shape
else:
tensor_shape = None
else:
weight = None
tensor_shape = None
obj_list = [tensor_shape]
dist.broadcast_object_list(obj_list, src=0, group=mp_group)
tensor_shape = obj_list[0]
if tensor_shape is None:
# all or none ranks in the mp_group should reach here
print_rank_0(f"tensor:[{name}] not in state_dict, skip load")
return
if tensor is None:
tensor = torch.empty(
tensor_shape,
dtype=params_dtype,
device=get_device_id(),
requires_grad=False,
)
if torch.distributed.get_rank() == 0:
tensor.data.copy_(weight)
dist.broadcast(tensor, src=0, group=mp_group)
def _broadcast_tp_shard_tensor_vocab(tensor, name, chunk_dim=0, mutate_func=None) -> torch.Tensor:
"""broadcast tensor in tp shards across mp_group"""
nonlocal state_dict
nonlocal mp_group
tp_rank = mpu.get_tensor_model_parallel_rank()
tp_size = mpu.get_tensor_model_parallel_world_size()
if torch.distributed.get_rank() == 0:
if name in state_dict:
full_weight = state_dict[name]
if mutate_func is not None:
full_weight = mutate_func(full_weight)
tensor_chunk = torch.chunk(full_weight, tp_size, dim=chunk_dim)
chunk_shape = tensor_chunk[0].shape
else:
chunk_shape = None
else:
chunk_shape = None
obj_list = [chunk_shape]
dist.broadcast_object_list(obj_list, src=0, group=mp_group)
chunk_shape = obj_list[0]
if chunk_shape is None:
# all or none ranks in the mp_group should reach here
print_rank_0(f"tp_shard tensor:[{name}] not in state_dict, skip loading")
return
if tensor is None:
sync_tensor = torch.empty(
chunk_shape,
dtype=params_dtype,
device=get_device_id(),
requires_grad=False,
)
else:
assert tensor.shape == chunk_shape, (
f"rank #{torch.distributed.get_rank()} tensor {name} shape {tensor.shape} != {chunk_shape}"
)
sync_tensor = torch.empty_like(tensor, device=get_device_id(), requires_grad=False)
for i in range(tp_size):
if torch.distributed.get_rank() == 0:
sync_tensor.data.copy_(tensor_chunk[i])
dist.broadcast(sync_tensor, src=0, group=mp_group)
if (i == tp_rank) and (tensor is not None):
tensor.data.copy_(sync_tensor)
def _broadcast_tp_shard_tensor(tensor, name, chunk_dim=0, mutate_func=None) -> torch.Tensor:
"""broadcast tensor in tp shards across mp_group"""
nonlocal state_dict
nonlocal mp_group
tp_rank = mpu.get_tensor_model_parallel_rank()
tp_size = mpu.get_tensor_model_parallel_world_size()
if torch.distributed.get_rank() == 0:
if name in state_dict:
full_weight = state_dict[name]
if mutate_func is not None:
full_weight = mutate_func(full_weight)
tensor_chunk = torch.chunk(full_weight, tp_size, dim=chunk_dim)
chunk_shape = tensor_chunk[0].shape
else:
chunk_shape = None
else:
chunk_shape = None
obj_list = [chunk_shape]
dist.broadcast_object_list(obj_list, src=0, group=mp_group)
chunk_shape = obj_list[0]
if chunk_shape is None:
# all or none ranks in the mp_group should reach here
print_rank_0(f"tp_shard tensor:[{name}] not in state_dict, skip loading")
return
if tensor is None:
sync_tensor = torch.empty(
chunk_shape,
dtype=params_dtype,
device=get_device_id(),
requires_grad=False,
)
else:
assert tensor.shape == chunk_shape, (
f"rank #{torch.distributed.get_rank()} tensor {name} shape {tensor.shape} != {chunk_shape}"
)
sync_tensor = torch.empty_like(tensor, device=get_device_id(), requires_grad=False)
for i in range(tp_size):
if torch.distributed.get_rank() == 0:
sync_tensor.data.copy_(tensor_chunk[i])
dist.broadcast(sync_tensor, src=0, group=mp_group)
if (i == tp_rank) and (tensor is not None):
tensor.data.copy_(sync_tensor)
def _broadcast_tp_shard_tensor_gate_up(tensor, gate_name, up_name) -> torch.Tensor:
"""broadcast tensor in tp shards across mp_group"""
nonlocal state_dict
nonlocal mp_group
tp_rank = mpu.get_tensor_model_parallel_rank()
tp_size = mpu.get_tensor_model_parallel_world_size()
if torch.distributed.get_rank() == 0:
gate_weight = state_dict[gate_name]
up_weight = state_dict[up_name]
new_gate_up_weight = torch.empty(
config.intermediate_size * 2, config.hidden_size, dtype=params_dtype, device=get_device_id()
)
for i in range(tp_size):
intermediate_size_tp = config.intermediate_size // tp_size
gate_weight_tp = gate_weight[i * intermediate_size_tp : (i + 1) * intermediate_size_tp]
up_weight_tp = up_weight[i * intermediate_size_tp : (i + 1) * intermediate_size_tp]
new_gate_up_weight[intermediate_size_tp * 2 * i : intermediate_size_tp * 2 * (i + 1)].copy_(
torch.cat([gate_weight_tp, up_weight_tp], dim=0)
)
tensor_chunk = torch.chunk(new_gate_up_weight, tp_size, dim=0)
chunk_shape = tensor_chunk[0].shape
else:
chunk_shape = None
obj_list = [chunk_shape]
dist.broadcast_object_list(obj_list, src=0, group=mp_group)
chunk_shape = obj_list[0]
if chunk_shape is None:
# all or none ranks in the mp_group should reach here
print_rank_0(f"tp_shard tensor:[{gate_name, up_name}] not in state_dict, skip loading")
return
if tensor is None:
sync_tensor = torch.empty(
chunk_shape,
dtype=params_dtype,
device=get_device_id(),
requires_grad=False,
)
else:
assert tensor.shape == chunk_shape, (
f"rank #{torch.distributed.get_rank() == 0:} tensor {gate_name, up_name} shape "
f"{tensor.shape} != {chunk_shape}"
)
sync_tensor = torch.empty_like(tensor, device=get_device_id(), requires_grad=False)
for i in range(tp_size):
if torch.distributed.get_rank() == 0:
sync_tensor.data.copy_(tensor_chunk[i])
dist.broadcast(sync_tensor, src=0, group=mp_group)
if (i == tp_rank) and (tensor is not None):
tensor.data.copy_(sync_tensor)
def _broadcast_tp_shard_tensor_qkv(tensor, q_name, k_name, v_name, bias=False) -> torch.Tensor:
"""broadcast tensor in tp shards across mp_group"""
nonlocal state_dict
nonlocal mp_group
tp_rank = mpu.get_tensor_model_parallel_rank()
tp_size = mpu.get_tensor_model_parallel_world_size()
if torch.distributed.get_rank() == 0:
assert q_name in state_dict and k_name in state_dict and v_name in state_dict
full_weight_q = state_dict[q_name]
full_weight_k = state_dict[k_name]
full_weight_v = state_dict[v_name]
hidden_size_per_head = config.hidden_size // config.num_attention_heads
if config.num_key_value_heads >= tp_size:
q_size_tp = config.hidden_size // tp_size
kv_size_tp = hidden_size_per_head * config.num_key_value_heads // tp_size
total_size = q_size_tp + 2 * kv_size_tp
if not bias:
new_weight_qkv = torch.empty(
total_size * tp_size, config.hidden_size, dtype=params_dtype, device=get_device_id()
)
else:
new_weight_qkv = torch.empty(total_size * tp_size, dtype=params_dtype, device=get_device_id())
for i in range(tp_size):
q_part = full_weight_q[i * q_size_tp : (i + 1) * q_size_tp]
k_part = full_weight_k[i * kv_size_tp : (i + 1) * kv_size_tp]
v_part = full_weight_v[i * kv_size_tp : (i + 1) * kv_size_tp]
new_weight_qkv[i * total_size : (i + 1) * total_size].copy_(
torch.cat([q_part, k_part, v_part], dim=0)
)
else:
q_size_tp = config.hidden_size // tp_size
kv_size_tp = hidden_size_per_head
total_size = q_size_tp + 2 * kv_size_tp
if not bias:
new_weight_qkv = torch.empty(
total_size * tp_size, config.hidden_size, dtype=params_dtype, device=get_device_id()
)
else:
new_weight_qkv = torch.empty(total_size * tp_size, dtype=params_dtype, device=get_device_id())
for i in range(tp_size):
q_part = full_weight_q[i * q_size_tp : (i + 1) * q_size_tp]
start_idx = i * config.num_key_value_heads // tp_size * hidden_size_per_head
end_idx = (i * config.num_key_value_heads // tp_size + 1) * hidden_size_per_head
k_part = full_weight_k[start_idx:end_idx]
v_part = full_weight_v[start_idx:end_idx]
new_weight_qkv[i * total_size : (i + 1) * total_size].copy_(
torch.cat([q_part, k_part, v_part], dim=0)
)
tensor_chunk = torch.chunk(new_weight_qkv, tp_size, dim=0)
chunk_shape = tensor_chunk[0].shape
else:
chunk_shape = None
obj_list = [chunk_shape]
dist.broadcast_object_list(obj_list, src=0, group=mp_group)
chunk_shape = obj_list[0]
if chunk_shape is None:
# all or none ranks in the mp_group should reach here
print_rank_0(f"tp_shard tensor:[{q_name, k_name, v_name}] not in state_dict, skip loading")
return
if tensor is None:
sync_tensor = torch.empty(
chunk_shape,
dtype=params_dtype,
device=get_device_id(),
requires_grad=False,
)
else:
assert tensor.shape == chunk_shape, (
f"rank #{torch.distributed.get_rank()} tensor {q_name} shape {tensor.shape} != {chunk_shape}"
)
sync_tensor = torch.empty_like(tensor, device=get_device_id(), requires_grad=False)
for i in range(tp_size):
if torch.distributed.get_rank() == 0:
sync_tensor.data.copy_(tensor_chunk[i])
dist.broadcast(sync_tensor, src=0, group=mp_group)
if (i == tp_rank) and (tensor is not None):
tensor.data.copy_(sync_tensor)
if dp_rank == 0:
# Embeddings
# -------------------
print_rank_0("loading embeddings...")
gpt_model_module = _get_gpt_model(models[0])
embed_tokens_weight = None
if pp_rank == 0:
embed_tokens_weight = gpt_model_module.model.embed_tokens.weight
_broadcast_tp_shard_tensor_vocab(embed_tokens_weight, "model.embed_tokens.weight")
# Transformer layers
# -------------------
layer_map = _megatron_calc_layer_map(config)
for layer in range(config.num_hidden_layers):
print_rank_0(f"loading layer #{layer}...")
layer_name = f"model.layers.{layer}"
dst_pp_rank, dst_virtual_pp_rank, dst_layer_idx = layer_map[layer]
gpt_model_module = _get_gpt_model(models[dst_virtual_pp_rank])
sync_layer = gpt_model_module.model.layers[dst_layer_idx]
_broadcast_tensor(
sync_layer.input_layernorm.weight if dst_pp_rank == pp_rank else None,
f"{layer_name}.input_layernorm.weight",
)
_broadcast_tp_shard_tensor_qkv(
sync_layer.self_attn.qkv_proj.weight if dst_pp_rank == pp_rank else None,
f"{layer_name}.self_attn.q_proj.weight",
f"{layer_name}.self_attn.k_proj.weight",
f"{layer_name}.self_attn.v_proj.weight",
)
_broadcast_tp_shard_tensor_qkv(
sync_layer.self_attn.qkv_proj.bias if dst_pp_rank == pp_rank else None,
f"{layer_name}.self_attn.q_proj.bias",
f"{layer_name}.self_attn.k_proj.bias",
f"{layer_name}.self_attn.v_proj.bias",
bias=True,
)
_broadcast_tp_shard_tensor(
sync_layer.self_attn.o_proj.weight if dst_pp_rank == pp_rank else None,
f"{layer_name}.self_attn.o_proj.weight",
chunk_dim=1,
)
_broadcast_tensor(
sync_layer.post_attention_layernorm.weight if dst_pp_rank == pp_rank else None,
f"{layer_name}.post_attention_layernorm.weight",
)
_broadcast_tp_shard_tensor_gate_up(
sync_layer.mlp.gate_up_proj.weight if dst_pp_rank == pp_rank else None,
f"{layer_name}.mlp.gate_proj.weight",
f"{layer_name}.mlp.up_proj.weight",
)
_broadcast_tp_shard_tensor(
sync_layer.mlp.down_proj.weight if dst_pp_rank == pp_rank else None,
f"{layer_name}.mlp.down_proj.weight",
chunk_dim=1,
)
# Final Layernorm
# -------------------
print_rank_0("loading final layernorm...")
gpt_model_module = _get_gpt_model(models[-1])
_broadcast_tensor(
getattr(gpt_model_module.model.norm, "weight", None),
"model.norm.weight",
)
if tie_word_embeddings:
print_rank_0("tie_word_embeddings skip load lm_head")
else:
print_rank_0("loading lm_head...")
lm_head_weight = None
if pp_rank + 1 == pp_size:
lm_head_weight = gpt_model_module.lm_head.weight
if is_value_model:
if "lm_head.weight" in state_dict and state_dict["lm_head.weight"].shape[0] == 1:
_broadcast_tensor(lm_head_weight, "lm_head.weight")
print_rank_0("load lm_head from value_head weight")
elif "reward_head.weight" in state_dict and state_dict["reward_head.weight"].shape[0] == 1:
_broadcast_tensor(lm_head_weight, "reward_head.weight")
print_rank_0("load lm_head from value_head weight")
else:
_broadcast_tensor(None, "lm_head.weight")
print_rank_0("fail to match lm_head in value_model")
else:
_broadcast_tp_shard_tensor(lm_head_weight, "lm_head.weight")
dist.barrier()
# Broadcast weights inside data parallel groups
for wrapped_model in wrapped_models:
broadcast_params(wrapped_model)
get_torch_device().empty_cache()
print_rank_0(f"loading megatron ckpt done, time elapsed {time.time() - start_time}s")
|
verl__models__qwen2__megatron__layers__parallel_attention.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Optional
import torch.nn.functional as F
from einops import rearrange
from transformers.utils import is_flash_attn_2_available
if is_flash_attn_2_available():
from flash_attn import flash_attn_varlen_func
from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa: F401
import torch
from flash_attn.layers.rotary import apply_rotary_emb
from megatron.core import ModelParallelConfig, tensor_parallel
from megatron.core import parallel_state as mpu
from torch import nn
from transformers import Qwen2Config
from verl.models.qwen2.megatron.layers.parallel_linear import QKVParallelLinear
from verl.utils.megatron import tensor_parallel as tp_utils
class Qwen2RotaryEmbedding(nn.Module):
def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
super().__init__()
self.dim = dim
self.max_position_embeddings = max_position_embeddings
self.base = base
inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
self.register_buffer("inv_freq", inv_freq, persistent=False)
# Build here to make `torch.jit.trace` work.
self._set_cos_sin_cache(
seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
)
def _set_cos_sin_cache(self, seq_len, device, dtype):
self.max_seq_len_cached = seq_len
t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
freqs = torch.einsum("i,j->ij", t, self.inv_freq)
# Different from paper, but it uses a different permutation in order to obtain the same calculation
emb = torch.cat((freqs, freqs), dim=-1)
self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
def forward(self, x, seq_len=None):
# x: [bs, num_attention_heads, seq_len, head_size]
if seq_len > self.max_seq_len_cached:
self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
return (
self.cos_cached[:seq_len].to(dtype=x.dtype),
self.sin_cached[:seq_len].to(dtype=x.dtype),
)
class Qwen2LinearScalingRotaryEmbedding(Qwen2RotaryEmbedding):
"""Qwen2RotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""
def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
self.scaling_factor = scaling_factor
super().__init__(dim, max_position_embeddings, base, device)
def _set_cos_sin_cache(self, seq_len, device, dtype):
self.max_seq_len_cached = seq_len
t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
t = t / self.scaling_factor
freqs = torch.einsum("i,j->ij", t, self.inv_freq)
# Different from paper, but it uses a different permutation in order to obtain the same calculation
emb = torch.cat((freqs, freqs), dim=-1)
self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
class Qwen2DynamicNTKScalingRotaryEmbedding(Qwen2RotaryEmbedding):
"""Qwen2RotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla"""
def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
self.scaling_factor = scaling_factor
super().__init__(dim, max_position_embeddings, base, device)
def _set_cos_sin_cache(self, seq_len, device, dtype):
self.max_seq_len_cached = seq_len
if seq_len > self.max_position_embeddings:
base = self.base * (
(self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)
) ** (self.dim / (self.dim - 2))
inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
self.register_buffer("inv_freq", inv_freq, persistent=False)
t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
freqs = torch.einsum("i,j->ij", t, self.inv_freq)
# Different from paper, but it uses a different permutation in order to obtain the same calculation
emb = torch.cat((freqs, freqs), dim=-1)
self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
cos = cos[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim]
sin = sin[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim]
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
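# Editorial note (not part of the original file): as the docstring above states, for a tensor of shape
# (batch, num_key_value_heads, seqlen, head_dim) the result matches torch.repeat_interleave, e.g.
#   x = torch.randn(2, 4, 5, 8)
#   assert torch.equal(repeat_kv(x, 3), torch.repeat_interleave(x, repeats=3, dim=1))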
class ParallelQwen2Attention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: Qwen2Config, megatron_config: ModelParallelConfig):
super().__init__()
self.config = config
self.megatron_config = megatron_config
self.hidden_size = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.hidden_size // self.num_heads
self.num_key_value_heads = config.num_key_value_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.max_position_embeddings = config.max_position_embeddings
self.rope_theta = config.rope_theta
# assign values after tp
tp_size = mpu.get_tensor_model_parallel_world_size()
assert self.num_heads % tp_size == 0, (
f"num_head must be divisible by tp_size. Got num_head={self.num_heads}, tp_size={tp_size}"
)
assert self.num_key_value_heads % tp_size == 0, (
f"num_key_value_heads must be divisible by tp_size. Got num_key_value_heads="
f"{self.num_key_value_heads}, tp_size={tp_size}"
)
self.num_heads_per_tp = self.num_heads // tp_size
self.num_key_value_heads_per_tp = self.num_key_value_heads // tp_size
self.hidden_size_per_tp = self.hidden_size // tp_size
if (self.head_dim * self.num_heads) != self.hidden_size:
raise ValueError(
f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and "
f"`num_heads`: {self.num_heads})."
)
column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear()
row_kwargs = tp_utils.get_default_kwargs_for_row_parallel_linear()
if megatron_config is not None:
assert column_kwargs.get("config", False), "must have ModelParallelConfig"
assert row_kwargs.get("config", False), "must have ModelParallelConfig"
tp_utils.update_kwargs_with_config(column_kwargs, megatron_config)
tp_utils.update_kwargs_with_config(row_kwargs, megatron_config)
# [self.q_size, self.k_size, self.v_size]
self.qkv_proj = QKVParallelLinear(
input_size=self.hidden_size,
num_heads=self.num_heads,
num_key_value_heads=self.num_key_value_heads,
head_dim=self.head_dim,
# bias=config.attention_bias,
bias=True,
gather_output=False,
skip_bias_add=False,
**column_kwargs,
)
self.q_size = self.num_heads_per_tp * self.head_dim
self.k_size = self.num_key_value_heads_per_tp * self.head_dim
self.v_size = self.num_key_value_heads_per_tp * self.head_dim
self.o_proj = tensor_parallel.RowParallelLinear(
input_size=self.num_heads * self.head_dim,
output_size=self.hidden_size,
# bias=config.attention_bias,
bias=False,
input_is_parallel=True,
skip_bias_add=False,
**row_kwargs,
)
self._init_rope()
def _init_rope(self):
self.rotary_emb = Qwen2RotaryEmbedding(
self.head_dim,
max_position_embeddings=self.max_position_embeddings,
base=self.rope_theta,
)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
bsz, q_len, _ = hidden_states.size()
qkv = self.qkv_proj(hidden_states)[0]
query_states, key_states, value_states = qkv.split([self.q_size, self.k_size, self.v_size], dim=-1)
query_states = query_states.view(bsz, q_len, self.num_heads_per_tp, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, self.num_key_value_heads_per_tp, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, self.num_key_value_heads_per_tp, self.head_dim).transpose(1, 2)
kv_seq_len = key_states.shape[-2]
cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
key_states = repeat_kv(key_states, self.num_key_value_groups)
value_states = repeat_kv(value_states, self.num_key_value_groups)
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
if attn_weights.size() != (bsz, self.num_heads_per_tp, q_len, kv_seq_len):
raise ValueError(
f"Attention weights should be of size {(bsz, self.num_heads_per_tp, q_len, kv_seq_len)}, "
f"but is {attn_weights.size()}"
)
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
raise ValueError(
f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
)
attn_weights = attn_weights + attention_mask
# upcast attention to fp32
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
attn_output = torch.matmul(attn_weights, value_states)
if attn_output.size() != (bsz, self.num_heads_per_tp, q_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz, self.num_heads_per_tp, q_len, self.head_dim)}, "
f"but is {attn_output.size()}"
)
attn_output = attn_output.transpose(1, 2).contiguous()
attn_output = attn_output.reshape(bsz, q_len, self.hidden_size_per_tp)
attn_output = self.o_proj(attn_output)[0]
return attn_output
"""
Remove padding Attention
- Using Flash-attn 2
- Compatible with sequence parallel
"""
def apply_rotary_pos_emb_rmpad(q, k, cos, sin, position_ids, indices, sequence_length):
batch_size = position_ids.shape[0]
q = pad_input(q, indices, batch_size, sequence_length) # (batch_size, seqlen, num_head, head_dim)
k = pad_input(k, indices, batch_size, sequence_length)
cos = cos[position_ids].unsqueeze(2) # [bs, seq_len, 1, dim]
sin = sin[position_ids].unsqueeze(2) # [bs, seq_len, 1, dim]
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
q_embed = index_first_axis(rearrange(q_embed, "b s ... -> (b s) ..."), indices)
k_embed = index_first_axis(rearrange(k_embed, "b s ... -> (b s) ..."), indices)
return q_embed, k_embed
# use flash-attn rotary embeddings with rmpad
# cos/sin should be: (seq_length, rotary_dim / 2)
def apply_rotary_pos_emb_rmpad_flash(q, k, cos, sin, cu_seqlens, max_seqlen):
q_embed = apply_rotary_emb(
q, cos, sin, interleaved=False, inplace=False, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen
)
k_embed = apply_rotary_emb(
k, cos, sin, interleaved=False, inplace=False, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen
)
return q_embed, k_embed
class ParallelQwen2AttentionRmPad(ParallelQwen2Attention):
def forward(
self,
hidden_states: torch.Tensor,
position_ids: Optional[torch.LongTensor] = None,
sequence_length: int = None,
indices: torch.Tensor = None,
cu_seqlens: torch.Tensor = None,
max_seqlen_in_batch: int = None,
):
total_nnz, _, _ = hidden_states.size() # This is the total_nnz padded after sequence parallel
if self.megatron_config.sequence_parallel:
total_nnz = total_nnz * mpu.get_tensor_model_parallel_world_size()
qkv = self.qkv_proj(hidden_states)[0]
query_states, key_states, value_states = qkv.split(
[self.q_size, self.k_size, self.v_size], dim=-1
) # (total_nnz, 1, hidden_size)
if self.megatron_config.sequence_parallel:
sequence_parallel_pad = total_nnz - cu_seqlens[-1]
total_nnz = cu_seqlens[-1] # total_nnz before sp padding
query_states = query_states[:total_nnz]
key_states = key_states[:total_nnz]
value_states = value_states[:total_nnz]
# Flash attention requires the input to have the shape
        # batch_size x seq_length x head_dim x hidden_dim
# therefore we just need to keep the original shape
query_states = query_states.view(total_nnz, self.num_heads_per_tp, self.head_dim)
key_states = key_states.view(total_nnz, self.num_key_value_heads_per_tp, self.head_dim)
value_states = value_states.view(total_nnz, self.num_key_value_heads_per_tp, self.head_dim)
cos, sin = self.rotary_emb(value_states, seq_len=sequence_length)
cos, sin = cos[:, : cos.shape[1] // 2], sin[:, : sin.shape[1] // 2] # flash attn only needs half
query_states, key_states = apply_rotary_pos_emb_rmpad_flash(
query_states, key_states, cos, sin, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen_in_batch
)
# query_states, key_states = apply_rotary_pos_emb_rmpad(query_states, key_states, cos, sin,
# position_ids, indices,
# It is recommended to use dropout with FA according to the docs
# when training.
dropout_rate = 0.0 # if not self.training else self.attn_dropout
# In PEFT, usually we cast the layer norms in float32 for training stability reasons
# therefore the input hidden states gets silently casted in float32. Hence, we need
# cast them back in float16 just to be sure everything works as expected.
        # This might slow down training & inference so it is recommended to not cast the LayerNorms
# in fp32. (Qwen2RMSNorm handles it correctly)
input_dtype = query_states.dtype
if input_dtype == torch.float32:
query_states = query_states.to(torch.float16)
key_states = key_states.to(torch.float16)
value_states = value_states.to(torch.float16)
attn_output_unpad = flash_attn_varlen_func(
query_states,
key_states,
value_states,
cu_seqlens_q=cu_seqlens,
cu_seqlens_k=cu_seqlens,
max_seqlen_q=max_seqlen_in_batch,
max_seqlen_k=max_seqlen_in_batch,
dropout_p=dropout_rate,
softmax_scale=None,
causal=True,
)
attn_output_unpad = attn_output_unpad.to(input_dtype)
attn_output_unpad = attn_output_unpad.reshape(total_nnz, 1, self.hidden_size_per_tp).contiguous()
        # sequence parallel reduce_scatter is performed inside RowParallelLinear if enabled
# Here we need to repad
if self.megatron_config.sequence_parallel:
attn_output_unpad = F.pad(attn_output_unpad, pad=(0, 0, 0, 0, 0, sequence_parallel_pad))
attn_output_unpad = self.o_proj(attn_output_unpad)[0]
return attn_output_unpad
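# Editorial sketch (not part of the original module): a small eager-mode exercise of the rotary
# embedding and GQA helpers defined above, with hypothetical sizes. It needs no Megatron process
# groups, since Qwen2RotaryEmbedding, apply_rotary_pos_emb and repeat_kv are plain torch code.
if __name__ == "__main__":
    bsz, num_heads, num_kv_heads, seqlen, head_dim = 2, 8, 2, 16, 32
    rope = Qwen2RotaryEmbedding(head_dim, max_position_embeddings=seqlen)
    q = torch.randn(bsz, num_heads, seqlen, head_dim)
    k = torch.randn(bsz, num_kv_heads, seqlen, head_dim)
    position_ids = torch.arange(seqlen).unsqueeze(0).expand(bsz, -1)
    cos, sin = rope(q, seq_len=seqlen)
    q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin, position_ids)
    k_full = repeat_kv(k_rot, num_heads // num_kv_heads)  # (bsz, num_heads, seqlen, head_dim)
    assert q_rot.shape == q.shape and k_full.shape == q.shape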
|
verl__models__qwen2__megatron__layers__parallel_mlp.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from megatron.core import ModelParallelConfig, tensor_parallel
from megatron.core import parallel_state as mpu
from torch import nn
from transformers.activations import ACT2FN
from verl.models.qwen2.megatron.layers.parallel_linear import MergedColumnParallelLinear
from verl.utils.megatron import tensor_parallel as tp_utils
class ParallelQwen2MLP(nn.Module):
def __init__(self, config, megatron_config: ModelParallelConfig = None) -> None:
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size
# The weight is only [hidden_size, intermediate_size // model_parallel_world_size]
column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear()
row_kwargs = tp_utils.get_default_kwargs_for_row_parallel_linear()
if megatron_config is not None:
assert column_kwargs.get("config", False), "must have ModelParallelConfig"
assert row_kwargs.get("config", False), "must have ModelParallelConfig"
tp_utils.update_kwargs_with_config(row_kwargs, megatron_config)
tp_utils.update_kwargs_with_config(column_kwargs, megatron_config)
tp_size = mpu.get_tensor_model_parallel_world_size()
self.gate_up_proj = MergedColumnParallelLinear(
input_size=self.hidden_size,
gate_ouput_size=self.intermediate_size,
up_output_size=self.intermediate_size,
bias=False,
gather_output=False,
skip_bias_add=False,
**column_kwargs,
)
self.gate_size = self.intermediate_size // tp_size
self.down_proj = tensor_parallel.RowParallelLinear(
input_size=self.intermediate_size,
output_size=self.hidden_size,
bias=False,
input_is_parallel=True,
skip_bias_add=False,
**row_kwargs,
)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
gate_up = self.gate_up_proj(x)[0]
gate, up = gate_up.split(self.gate_size, dim=-1)
return self.down_proj(self.act_fn(gate) * up)[0]
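# Editorial sketch (not part of the original module): a plain, non-parallel reference of the
# computation ParallelQwen2MLP performs, i.e. down(act(gate(x)) * up(x)). Names and sizes here are
# hypothetical; gate/up stand in for the merged column-parallel projection and down for the
# row-parallel one, with silu as Qwen2's hidden_act.
if __name__ == "__main__":
    import torch

    hidden_size, intermediate_size = 8, 32
    gate = nn.Linear(hidden_size, intermediate_size, bias=False)
    up = nn.Linear(hidden_size, intermediate_size, bias=False)
    down = nn.Linear(intermediate_size, hidden_size, bias=False)
    x = torch.randn(2, 5, hidden_size)
    y = down(nn.functional.silu(gate(x)) * up(x))
    assert y.shape == x.shape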
|
verl__models__qwen2__megatron__modeling_qwen2_megatron.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Qwen2 model."""
from typing import Optional
import torch
import torch.utils.checkpoint
from megatron.core import ModelParallelConfig, mpu, parallel_state, tensor_parallel
from torch import nn
from transformers.modeling_outputs import BaseModelOutputWithPast
from transformers.models.qwen2.configuration_qwen2 import Qwen2Config
from transformers.models.qwen2.modeling_qwen2 import CausalLMOutputWithPast
from verl.utils.device import get_device_name
from verl.utils.megatron import sequence_parallel as sp_utils
from verl.utils.megatron import tensor_parallel as tp_utils
from verl.utils.megatron_utils import TransformerConfig, convert_config
from .layers import ParallelQwen2DecoderLayer, ParallelQwen2DecoderLayerRmPad, ParallelQwen2RMSNorm
"""
TODO:
1. Add weight initialization. Here we need to be careful on TP weight init.
2. Add sequence parallel
3. Load checkpoint from Qwen2 pretrained checkpoint
"""
# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device):
"""
Make causal mask used for bi-directional self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
mask_cond = torch.arange(mask.size(-1), device=device)
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len)
# Copied from transformers.models.bart.modeling_bart._expand_mask
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
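# Editorial note (not part of the original file): both helpers return an additive bias of shape
# (bsz, 1, tgt_seq_len, src_seq_len); e.g. _make_causal_mask((2, 4), torch.float32, torch.device("cpu"))
# and _expand_mask(torch.ones(2, 4), torch.float32) are both (2, 1, 4, 4), holding 0 at allowed
# positions and torch.finfo(dtype).min at masked ones.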
class ParallelQwen2Model(nn.Module):
"""
Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Qwen2DecoderLayer`]
Args:
config: Qwen2Config
"""
def __init__(self, config: Qwen2Config, megatron_config: ModelParallelConfig):
super().__init__()
self.config: TransformerConfig = convert_config(config, megatron_config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
embedding_kwargs = tp_utils.get_default_kwargs_for_parallel_embedding()
if megatron_config is not None:
assert embedding_kwargs.get("config", False), "must have ModelParallelConfig"
tp_utils.update_kwargs_with_config(embedding_kwargs, megatron_config)
self.embed_tokens = tensor_parallel.VocabParallelEmbedding(
num_embeddings=config.vocab_size, embedding_dim=config.hidden_size, **embedding_kwargs
)
self.layers = nn.ModuleList(
[ParallelQwen2DecoderLayer(config, megatron_config) for _ in range(config.num_hidden_layers)]
)
self.norm = ParallelQwen2RMSNorm(config, megatron_config)
# Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds):
# create causal mask
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
combined_attention_mask = None
if input_shape[-1] > 1:
combined_attention_mask = _make_causal_mask(
input_shape,
inputs_embeds.dtype,
device=inputs_embeds.device,
)
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
inputs_embeds.device
)
combined_attention_mask = (
expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
)
return combined_attention_mask
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
) -> tuple | BaseModelOutputWithPast:
"""
Args:
input_ids: input ids. shape (batch_size, seq_length)
attention_mask: attention_mask. shape (batch_size, seq_length)
position_ids: position ids. shape (batch_size, seq_length)
Returns:
"""
batch_size, seq_length = input_ids.shape
inputs_embeds = self.embed_tokens(input_ids)
# embed positions
attention_mask = self._prepare_decoder_attention_mask(attention_mask, (batch_size, seq_length), inputs_embeds)
hidden_states = inputs_embeds
for idx, decoder_layer in enumerate(self.layers):
layer_outputs = decoder_layer(
hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
)
hidden_states = layer_outputs
hidden_states = self.norm(hidden_states)
return hidden_states
class ParallelQwen2ForCausalLM(nn.Module):
def __init__(self, config: Qwen2Config, megatron_config: ModelParallelConfig):
super().__init__()
self.config: TransformerConfig = convert_config(config, megatron_config)
self.model = ParallelQwen2Model(config, megatron_config=megatron_config)
self.vocab_size = config.vocab_size
column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear()
if megatron_config is not None:
assert column_kwargs.get("config", False), "must have ModelParallelConfig"
            tp_utils.update_kwargs_with_config(column_kwargs, megatron_config)
self.lm_head = tensor_parallel.ColumnParallelLinear(
input_size=config.hidden_size,
output_size=config.vocab_size,
bias=False,
gather_output=False,
skip_bias_add=False,
**column_kwargs,
)
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
) -> tuple | CausalLMOutputWithPast:
r"""
Args:
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Returns:
        """
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
)
hidden_states = outputs
logits = self.lm_head(hidden_states)[0]
logits = tensor_parallel.gather_from_tensor_model_parallel_region(logits)
logits = logits.float()
return CausalLMOutputWithPast(
loss=None,
logits=logits,
past_key_values=None,
hidden_states=None,
attentions=None,
)
from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa: F401, E402
class ParallelQwen2ModelRmPad(nn.Module):
"""
Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Qwen2DecoderLayer`]
Args:
config: Qwen2Config
"""
def __init__(self, config: Qwen2Config, megatron_config: ModelParallelConfig):
super().__init__()
self.config: TransformerConfig = convert_config(config, megatron_config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
embedding_kwargs = tp_utils.get_default_kwargs_for_parallel_embedding()
self.megatron_config = megatron_config
if megatron_config is not None:
assert embedding_kwargs.get("config", False), "must have ModelParallelConfig"
tp_utils.update_kwargs_with_config(embedding_kwargs, self.megatron_config)
self.embed_tokens = tensor_parallel.VocabParallelEmbedding(
num_embeddings=config.vocab_size, embedding_dim=config.hidden_size, **embedding_kwargs
)
self.layers = nn.ModuleList(
[ParallelQwen2DecoderLayerRmPad(config, megatron_config) for _ in range(config.num_hidden_layers)]
)
self.norm = ParallelQwen2RMSNorm(config, megatron_config)
def forward(
self,
input_ids: torch.Tensor,
position_ids: Optional[torch.LongTensor] = None,
sequence_length: int = None,
indices: torch.Tensor = None,
cu_seqlens: int = None,
max_seqlen_in_batch: int = None,
) -> tuple | BaseModelOutputWithPast:
"""
Args:
            input_ids: input ids. shape (1, total_nnz)
            position_ids: position ids. shape (batch_size, seq_length)
        Returns:
            hidden_states after the final RMSNorm. shape (total_nnz, 1, hidden_size)
"""
inputs_embeds = self.embed_tokens(input_ids) # (1, total_nnz) -> (1, total_nnz, hidden_size)
# (1, total_nnz, hidden_size) -> (total_nnz, 1, hidden_size) -> (total_nnz // sp, 1, hidden_size)
inputs_embeds = inputs_embeds.transpose(0, 1)
if self.megatron_config.sequence_parallel:
inputs_embeds = tensor_parallel.scatter_to_sequence_parallel_region(inputs_embeds)
hidden_states = inputs_embeds
for idx, decoder_layer in enumerate(self.layers):
layer_outputs = decoder_layer(
hidden_states,
position_ids=position_ids,
sequence_length=sequence_length,
indices=indices,
cu_seqlens=cu_seqlens,
max_seqlen_in_batch=max_seqlen_in_batch,
)
hidden_states = layer_outputs
hidden_states = self.norm(hidden_states)
return hidden_states
class ParallelQwen2ForCausalLMRmPad(nn.Module):
def __init__(self, config: Qwen2Config, megatron_config: ModelParallelConfig):
super().__init__()
self.config: TransformerConfig = convert_config(config, megatron_config)
self.megatron_config = megatron_config
self.model = ParallelQwen2ModelRmPad(config, megatron_config=megatron_config)
self.vocab_size = config.vocab_size
self._init_head(config)
def _init_head(self, config: Qwen2Config):
column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear()
if self.megatron_config is not None:
assert column_kwargs.get("config", False), "must have ModelParallelConfig"
tp_utils.update_kwargs_with_config(column_kwargs, self.megatron_config)
self.lm_head = tensor_parallel.ColumnParallelLinear(
input_size=config.hidden_size,
output_size=config.vocab_size,
bias=False,
gather_output=False,
skip_bias_add=False,
**column_kwargs,
)
def _forward_head(self, hidden_states):
# all_gather from sequence parallel region is performed inside lm_head
logits = self.lm_head(hidden_states)[0]
logits = logits.float() # (total_nnz_padded, 1, vocab_size // tp)
logits = tensor_parallel.gather_from_tensor_model_parallel_region(logits) # (total_nnz_padded, 1, vocab_size)
return logits
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
) -> tuple | CausalLMOutputWithPast:
r"""
Args:
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Returns:
        """
batch_size, sequence_length = input_ids.shape
# remove padding here
input_ids, indices, cu_seqlens, max_seqlen_in_batch, *_ = unpad_input(
input_ids.unsqueeze(dim=-1), attention_mask
) # (total_nnz, 1)
# pad input_ids to multiple of tp for all tp ranks
# TODO: for better performance, the sp padding should be removed at each layer. Not sure the performance gap
if self.megatron_config.sequence_parallel:
input_ids = sp_utils.pad_to_sequence_parallel(input_ids)
input_ids = input_ids.transpose(0, 1) # (1, total_nnz+pad)
outputs = self.model(
input_ids=input_ids,
position_ids=position_ids,
sequence_length=sequence_length,
indices=indices,
cu_seqlens=cu_seqlens,
max_seqlen_in_batch=max_seqlen_in_batch,
)
hidden_states = outputs
logits = self._forward_head(hidden_states)
# remove padding from sequence parallel
if self.megatron_config.sequence_parallel:
            total_nnz = cu_seqlens[-1]
            logits = logits[:total_nnz]  # (total_nnz, 1, vocab_size)
logits = torch.squeeze(logits, dim=1) # remove the artificial batch dimension
# add removed padding back
logits = pad_input(
logits, indices, batch_size, seqlen=sequence_length
) # (batch_size, sequence_length, vocab_size)
return CausalLMOutputWithPast(
loss=None,
logits=logits,
past_key_values=None,
hidden_states=None,
attentions=None,
)
class ParallelQwen2ForValueRmPad(ParallelQwen2ForCausalLMRmPad):
def _init_head(self, config):
column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear()
if self.megatron_config is not None:
assert column_kwargs.get("config", False), "must have ModelParallelConfig"
tp_utils.update_kwargs_with_config(column_kwargs, self.megatron_config)
self.lm_head = nn.Linear(in_features=config.hidden_size, out_features=1, bias=False)
# lm_head is effectively the same as sequence parallel
sp_utils.mark_parameter_as_sequence_parallel(self.lm_head.weight)
def _forward_head(self, hidden_states):
logits = self.lm_head(hidden_states) # (total_nnz_padded // tp, 1, 1)
logits = logits.float()
if self.megatron_config.sequence_parallel:
logits = tensor_parallel.gather_from_sequence_parallel_region(logits, tensor_parallel_output_grad=False)
return logits
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
) -> tuple | CausalLMOutputWithPast:
output = super().forward(input_ids, attention_mask, position_ids)
output.logits = torch.squeeze(output.logits, dim=-1)
return output
"""
Support pipeline parallelism
"""
class ParallelQwen2ModelRmPadPP(nn.Module):
"""
Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Qwen2DecoderLayer`]
This model definition supports pipeline parallelism. To support pp and vpp,
- This model only contains layer in this pp stage and vpp chunk
- When calling get_model in Megatron, this rank will instantiate all the vpp chunks in this pp.
Args:
config: Qwen2Config
"""
def __init__(self, config: Qwen2Config, megatron_config: ModelParallelConfig, pre_process, post_process):
super().__init__()
self.config: TransformerConfig = convert_config(config, megatron_config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.pre_process = pre_process
self.post_process = post_process
self.megatron_config = megatron_config
embedding_kwargs = tp_utils.get_default_kwargs_for_parallel_embedding()
if megatron_config is not None:
assert embedding_kwargs.get("config", False), "must have ModelParallelConfig"
tp_utils.update_kwargs_with_config(embedding_kwargs, self.megatron_config)
if pre_process:
self.embed_tokens = tensor_parallel.VocabParallelEmbedding(
num_embeddings=config.vocab_size, embedding_dim=config.hidden_size, **embedding_kwargs
)
else:
self.embed_tokens = None
pp_rank = mpu.get_pipeline_model_parallel_rank()
pp_size = megatron_config.pipeline_model_parallel_size
self.num_layer_per_pp = config.num_hidden_layers // pp_size
vpp_size = megatron_config.virtual_pipeline_model_parallel_size
vpp_rank = mpu.get_virtual_pipeline_model_parallel_rank()
if vpp_size is not None:
self.num_layer_vpp_chunk = self.num_layer_per_pp // vpp_size
self.num_layer_this_model = self.num_layer_vpp_chunk
offset = vpp_rank * (config.num_hidden_layers // vpp_size) + (pp_rank * self.num_layer_vpp_chunk)
else:
self.num_layer_this_model = self.num_layer_per_pp
offset = pp_rank * self.num_layer_per_pp
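        # Illustrative mapping (comments only, derived from the arithmetic above): with
        # num_hidden_layers=32, pp_size=4 and vpp_size=2, each pp rank owns num_layer_per_pp = 8
        # layers split into two vpp chunks of 4 layers; the chunk built by (pp_rank=1, vpp_rank=1)
        # starts at offset = 1 * (32 // 2) + 1 * 4 = 20, i.e. global layers 20-23.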
self.layers = nn.ModuleList()
for i in range(self.num_layer_this_model):
layer = ParallelQwen2DecoderLayerRmPad(config, megatron_config, layer_idx=i + offset)
self.layers.add_module(f"{i}", layer)
if post_process:
self.norm = ParallelQwen2RMSNorm(config, megatron_config)
else:
self.norm = None
def set_input_tensor(self, input_tensor):
"""Set input tensor to be used instead of forward()'s input.
When doing pipeline parallelism the input from the previous
stage comes from communication, not from the input, so the
model's forward_step_func won't have it. This function is thus
used by internal code to bypass the input provided by the
forward_step_func"""
self.input_tensor = input_tensor
def forward(
self,
input_ids: torch.Tensor,
position_ids: Optional[torch.LongTensor] = None,
sequence_length: int = None,
indices: torch.Tensor = None,
cu_seqlens: int = None,
max_seqlen_in_batch: int = None,
) -> tuple | BaseModelOutputWithPast:
"""
Args:
            input_ids: input ids. shape (1, total_nnz)
            position_ids: position ids. shape (batch_size, seq_length)
        Returns:
            hidden_states of this pipeline stage (normalized on the last stage). shape (total_nnz, 1, hidden_size)
"""
if self.pre_process:
inputs_embeds = self.embed_tokens(input_ids) # (1, total_nnz) -> (1, total_nnz, hidden_size)
# vocab parallel embedding will not do sequence parallel reduce-scatter in open source megatron
# so need to deal with it by handle here:
# (1, total_nnz, hidden_size) -> (total_nnz, 1, hidden_size) -> (total_nnz // sp, 1, hidden_size)
inputs_embeds = inputs_embeds.transpose(0, 1)
if self.megatron_config.sequence_parallel:
inputs_embeds = tensor_parallel.scatter_to_sequence_parallel_region(inputs_embeds)
hidden_states = inputs_embeds
else:
# self.hidden_states should be passed by Megatron
hidden_states = self.input_tensor
for idx, decoder_layer in enumerate(self.layers):
layer_outputs = decoder_layer(
hidden_states,
position_ids=position_ids,
sequence_length=sequence_length,
indices=indices,
cu_seqlens=cu_seqlens,
max_seqlen_in_batch=max_seqlen_in_batch,
)
hidden_states = layer_outputs
if self.post_process:
hidden_states = self.norm(hidden_states)
return hidden_states
class ParallelQwen2ForCausalLMRmPadPP(nn.Module):
def __init__(
self,
config: Qwen2Config,
megatron_config: ModelParallelConfig,
pre_process,
post_process,
share_embeddings_and_output_weights,
):
super().__init__()
self.config: TransformerConfig = convert_config(config, megatron_config)
self.megatron_config = megatron_config
self.model = ParallelQwen2ModelRmPadPP(
config, megatron_config=megatron_config, pre_process=pre_process, post_process=post_process
)
self.share_embeddings_and_output_weights = share_embeddings_and_output_weights
self.vocab_size = config.vocab_size
self.pre_process = pre_process
self.post_process = post_process
if post_process:
self._init_head(config)
if pre_process or post_process:
self.setup_embeddings_and_output_layer()
def set_input_tensor(self, input_tensor):
"""Set input tensor to be used instead of forward()'s input.
When doing pipeline parallelism the input from the previous
stage comes from communication, not from the input, so the
model's forward_step_func won't have it. This function is thus
used by internal code to bypass the input provided by the
forward_step_func"""
assert len(input_tensor) == 1
self.model.set_input_tensor(input_tensor[0])
def _init_head(self, config):
column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear()
if self.megatron_config is not None:
assert column_kwargs.get("config", False), "must have ModelParallelConfig"
tp_utils.update_kwargs_with_config(column_kwargs, self.megatron_config)
self.lm_head = tensor_parallel.ColumnParallelLinear(
input_size=config.hidden_size,
output_size=config.vocab_size,
bias=False,
gather_output=False,
skip_bias_add=False,
skip_weight_param_allocation=self.pre_process and self.share_embeddings_and_output_weights,
**column_kwargs,
)
def setup_embeddings_and_output_layer(self) -> None:
"""Sets up embedding layer in first stage and output layer in last stage.
This function initializes word embeddings in the final stage when we are
using pipeline parallelism and sharing word embeddings, and sets up param
attributes on the embedding and output layers.
"""
# Set `is_embedding_or_output_parameter` attribute.
if self.pre_process:
self.model.embed_tokens.weight.is_embedding_or_output_parameter = True
if self.post_process and self.lm_head.weight is not None:
self.lm_head.weight.is_embedding_or_output_parameter = True
if not self.share_embeddings_and_output_weights:
return
if parallel_state.get_pipeline_model_parallel_world_size() == 1:
# Zero out wgrad if sharing embeddings between two layers on same
# pipeline stage to make sure grad accumulation into main_grad is
# correct and does not include garbage values (e.g., from torch.empty).
self.shared_embedding_or_output_weight().zero_out_wgrad = True
return
if parallel_state.is_pipeline_first_stage() and self.pre_process and not self.post_process:
self.shared_embedding_or_output_weight().shared_embedding = True
if self.post_process and not self.pre_process:
assert not parallel_state.is_pipeline_first_stage()
# set word_embeddings weights to 0 here, then copy first
# stage's weights using all_reduce below.
self.lm_head.weight.data.fill_(0)
self.lm_head.weight.shared = True
self.lm_head.weight.shared_embedding = True
if torch.distributed.is_initialized() and parallel_state.is_rank_in_embedding_group():
weight = self.shared_embedding_or_output_weight()
weight.data = weight.data.to(get_device_name())
torch.distributed.all_reduce(weight.data, group=parallel_state.get_embedding_group())
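    # Summary of the weight-tying protocol above (comments only): when embeddings and the output
    # projection are shared across pipeline stages, the last stage zero-fills lm_head.weight and
    # every rank in the embedding group all-reduces the shared tensor, so the first-stage
    # embedding and the last-stage output layer start from identical values.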
def shared_embedding_or_output_weight(self) -> torch.Tensor:
if self.pre_process:
return self.model.embed_tokens.weight
elif self.post_process:
return self.lm_head.weight
return None
def _forward_head(self, hidden_states):
# all_gather from sequence parallel region is performed inside lm_head
# print(f'logits shape before forward_head: {hidden_states.shape}, vocab_size = '
# f'{self.config.vocab_size}') # [4, 32, 4096]
output_weight = None
if self.share_embeddings_and_output_weights:
output_weight = self.shared_embedding_or_output_weight()
logits = self.lm_head(hidden_states, weight=output_weight)[0]
# print(f'logits shape after forward_head: {logits.shape}') # [8, 32, 8]
logits = logits.float() # (total_nnz_padded, 1, vocab_size // tp)
return logits
def forward(
self,
# original input
*,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
) -> tuple | CausalLMOutputWithPast:
r"""
Args:
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Returns:
        """
# Note that input_ids, attention_mask and position_ids should be passed to every pp layer.
# In the first pp, input_ids will be used, in other pp layers hidden_states will be used inside self.model
batch_size, sequence_length = input_ids.shape
# remove padding here
input_ids_rmpad, indices, cu_seqlens, max_seqlen_in_batch, *_ = unpad_input(
input_ids.unsqueeze(dim=-1), attention_mask
) # (total_nnz, 1)
# pad input_ids to multiple of tp for all tp ranks
# TODO: for better performance, the sp padding should be removed at each layer. Not sure the performance gap
if self.megatron_config.sequence_parallel:
input_ids_rmpad = sp_utils.pad_to_sequence_parallel(input_ids_rmpad)
input_ids_rmpad = input_ids_rmpad.transpose(0, 1) # (1, total_nnz+pad)
outputs = self.model(
input_ids=input_ids_rmpad,
position_ids=position_ids,
sequence_length=sequence_length,
indices=indices,
cu_seqlens=cu_seqlens,
max_seqlen_in_batch=max_seqlen_in_batch,
)
if self.post_process:
hidden_states = outputs
logits = self._forward_head(hidden_states)
logits = torch.squeeze(logits, dim=1) # remove the artificial batch dimension # torch.Size([8, 32, 16])
# remove padding from sequence parallel
if self.megatron_config.sequence_parallel:
                total_nnz = cu_seqlens[-1]
                logits = logits[:total_nnz]  # sp padding removed
# add removed padding back. If input is already rmpad, we let the caller pad_input
logits = pad_input(
logits, indices, batch_size, seqlen=sequence_length
) # (batch_size, sequence_length, vocab_size)
return CausalLMOutputWithPast(
loss=None,
logits=logits,
past_key_values=None,
hidden_states=None,
attentions=None,
)
else:
return outputs
class ParallelQwen2ForValueRmPadPP(ParallelQwen2ForCausalLMRmPadPP):
def _init_head(self, config):
column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear()
if self.megatron_config is not None:
assert column_kwargs.get("config", False), "must have ModelParallelConfig"
tp_utils.update_kwargs_with_config(column_kwargs, self.megatron_config)
self.lm_head = nn.Linear(in_features=config.hidden_size, out_features=1, bias=False)
# lm_head is effectively the same as sequence parallel
sp_utils.mark_parameter_as_sequence_parallel(self.lm_head.weight)
def _forward_head(self, hidden_states):
logits = self.lm_head(hidden_states) # (total_nnz_padded // tp, 1, 1)
logits = logits.float()
if self.megatron_config.sequence_parallel:
logits = tensor_parallel.gather_from_sequence_parallel_region(logits, tensor_parallel_output_grad=False)
return logits
def forward(
self,
*,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
) -> tuple | CausalLMOutputWithPast:
output = super().forward(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids)
if self.post_process:
output.logits = torch.squeeze(output.logits, dim=-1)
return output
else:
return output
|
verl__models__registry.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
from typing import Optional
import torch.nn as nn
# Supported models in Megatron-LM
# Architecture -> (module, class).
_MODELS = {
"LlamaForCausalLM": (
"llama",
("ParallelLlamaForCausalLMRmPadPP", "ParallelLlamaForValueRmPadPP", "ParallelLlamaForCausalLMRmPad"),
),
"Qwen2ForCausalLM": (
"qwen2",
("ParallelQwen2ForCausalLMRmPadPP", "ParallelQwen2ForValueRmPadPP", "ParallelQwen2ForCausalLMRmPad"),
),
"MistralForCausalLM": (
"mistral",
("ParallelMistralForCausalLMRmPadPP", "ParallelMistralForValueRmPadPP", "ParallelMistralForCausalLMRmPad"),
),
"ApertusForCausalLM": (
"apertus",
("ParallelApertusForCausalLMRmPadPP", "ParallelApertusForValueRmPadPP", "ParallelApertusForCausalLMRmPad"),
),
}
# return model class
class ModelRegistry:
@staticmethod
def load_model_cls(model_arch: str, value=False) -> Optional[type[nn.Module]]:
if model_arch not in _MODELS:
return None
megatron = "megatron"
module_name, model_cls_name = _MODELS[model_arch]
        if not value:  # actor/ref
            model_cls_name = model_cls_name[0]
        else:  # critic/rm
            model_cls_name = model_cls_name[1]
module = importlib.import_module(f"verl.models.{module_name}.{megatron}.modeling_{module_name}_megatron")
return getattr(module, model_cls_name, None)
@staticmethod
def get_supported_archs() -> list[str]:
return list(_MODELS.keys())
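# Usage sketch (illustrative only; the two static methods above are the whole public surface):
#
#     actor_cls = ModelRegistry.load_model_cls("Qwen2ForCausalLM", value=False)   # actor/ref
#     critic_cls = ModelRegistry.load_model_cls("Qwen2ForCausalLM", value=True)   # critic/rm
#     assert "Qwen2ForCausalLM" in ModelRegistry.get_supported_archs()
#
# load_model_cls returns None for unsupported architectures, so callers should check the result
# before instantiating the returned class.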
|
verl__models__transformers__dense_common.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Optional, Union
import torch
from transformers.cache_utils import Cache
from transformers.modeling_outputs import CausalLMOutputWithPast
@dataclass
class CausalLMOutputForPPO(CausalLMOutputWithPast):
log_probs: Optional[torch.FloatTensor] = None
entropy: Optional[torch.FloatTensor] = None
def forward_base_model(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
) -> CausalLMOutputWithPast:
r"""
Copy paste LLaMa's forward
https://github.com/linkedin/Liger-Kernel/blob/main/src/liger_kernel/transformers/model/llama.py
This function should be generic enough for all pure text models.
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
return outputs
def forward_with_torch_backend(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Union["Cache", list[torch.FloatTensor]]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: int | torch.Tensor = 0,
temperature: float = 1.0,
**loss_kwargs,
) -> tuple | CausalLMOutputForPPO:
from verl.utils.experimental.torch_functional import FusedLinearForPPO
outputs = forward_base_model(
self,
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
cache_position=cache_position,
)
hidden_states = outputs[0]
if not return_dict:
raise NotImplementedError("forward_with_torch_backend has to return_dict")
# Loss calculations
if labels is not None:
rolled_labels = torch.roll(labels, shifts=-1, dims=-1)
elif input_ids is not None:
rolled_labels = torch.roll(input_ids, shifts=-1, dims=-1)
else:
raise RuntimeError("To use forward_with_torch_backend, either labels or input_ids must be provided.")
fused_linear_for_ppo = FusedLinearForPPO()
log_probs, entropy = fused_linear_for_ppo.forward(
hidden_states=hidden_states,
vocab_weights=self.lm_head.weight,
input_ids=rolled_labels,
temperature=temperature,
)
return CausalLMOutputForPPO(
log_probs=log_probs,
entropy=entropy,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def forward_with_triton_backend(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Union["Cache", list[torch.FloatTensor]]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: int | torch.Tensor = 0,
temperature: float = 1.0,
**loss_kwargs,
) -> tuple | CausalLMOutputForPPO:
from verl.utils.kernel.linear_cross_entropy import linear_cross_entropy
outputs = forward_base_model(
self,
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
hidden_states = outputs[0]
if not return_dict:
raise NotImplementedError("forward_with_triton_backend has to return_dict")
# Loss calculations
if labels is not None:
rolled_labels = torch.roll(labels, shifts=-1, dims=-1)
elif input_ids is not None:
rolled_labels = torch.roll(input_ids, shifts=-1, dims=-1)
else:
raise RuntimeError("To use forward_with_triton_backend, either labels or input_ids must be provided.")
log_probs, entropy = linear_cross_entropy(
hidden_states,
self.lm_head.weight,
rolled_labels,
temperature,
"none",
)
return CausalLMOutputForPPO(
log_probs=log_probs,
entropy=entropy,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
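# Usage sketch (a hedged illustration; the real wiring is done by verl's monkey-patch utilities,
# not in this file). The functions above are written as unbound `forward` replacements that take
# `self` explicitly, so they can be bound to a HF causal-LM instance:
#
#     import types
#     model.forward = types.MethodType(forward_with_torch_backend, model)
#     out = model.forward(input_ids=input_ids, attention_mask=attention_mask,
#                         position_ids=position_ids, return_dict=True, temperature=1.0)
#     # out.log_probs and out.entropy are the per-token quantities consumed by PPO.
#
# Both backends require return_dict=True and either `labels` or `input_ids`.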
|
verl__models__transformers__glm4v.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import itertools
import logging
import os
from dataclasses import dataclass
from typing import Optional
import torch
import torch.distributed as dist
from transformers.modeling_flash_attention_utils import _flash_attention_forward, fa_peft_integration_check
from transformers.models.glm4v.modeling_glm4v import (
Glm4vCausalLMOutputWithPast,
Glm4vForConditionalGeneration,
Glm4vTextAttention,
)
from transformers.utils import is_flash_attn_2_available, is_flash_attn_greater_or_equal_2_10
from verl.utils.device import is_npu_available
from verl.utils.ulysses import (
gather_heads_scatter_seq,
gather_seq_scatter_heads,
get_ulysses_sequence_parallel_group,
get_ulysses_sequence_parallel_world_size,
validate_ulysses_config,
)
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
if is_flash_attn_2_available():
from flash_attn import flash_attn_func, flash_attn_varlen_func
_flash_supports_window_size = "window_size" in inspect.signature(flash_attn_func).parameters
_flash_supports_deterministic = "deterministic" in inspect.signature(flash_attn_func).parameters
_flash_use_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
if is_npu_available:
from transformers.integrations.npu_flash_attention import npu_flash_attn_func as flash_attn_func
from transformers.integrations.npu_flash_attention import npu_flash_attn_varlen_func as flash_attn_varlen_func
from transformers.modeling_flash_attention_utils import flash_attn_supports_top_left_mask
_flash_supports_window_size = "window_size" in inspect.signature(flash_attn_func).parameters
_flash_supports_deterministic = "deterministic" in inspect.signature(flash_attn_func).parameters
_flash_use_top_left_mask = flash_attn_supports_top_left_mask()
_flash_deterministic_enabled = os.getenv("FLASH_ATTENTION_DETERMINISTIC", "0") == "1"
def get_rope_index(
processor,
input_ids: torch.Tensor,
image_grid_thw: Optional[torch.LongTensor] = None,
video_grid_thw: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
"""
Gets the position ids for GLM4V in padding-free format.
The batch dim has been removed and the input_ids should be a 1D tensor representing a single example.
"""
spatial_merge_size = processor.image_processor.merge_size
image_token_id = processor.tokenizer.convert_tokens_to_ids("<|image|>")
video_start_token_id = processor.tokenizer.convert_tokens_to_ids("<|begin_of_video|>")
video_end_token_id = processor.tokenizer.convert_tokens_to_ids("<|end_of_video|>")
if input_ids is not None and (image_grid_thw is not None or video_grid_thw is not None):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
position_ids = torch.ones(3, input_ids.size(0), dtype=input_ids.dtype, device=input_ids.device) # (3, seqlen)
image_index, video_index = 0, 0
video_group_index = 0
input_ids_filtered = input_ids[attention_mask == 1]
input_tokens = input_ids_filtered.tolist()
input_token_type = []
video_check_flg = False
for token in input_tokens:
if token == video_start_token_id:
video_check_flg = True
elif token == video_end_token_id:
video_check_flg = False
if token == image_token_id and not video_check_flg:
input_token_type.append("image")
elif token == image_token_id and video_check_flg:
input_token_type.append("video")
else:
input_token_type.append("text")
input_type_group = []
for key, group in itertools.groupby(enumerate(input_token_type), lambda x: x[1]):
group = list(group)
start_index = group[0][0]
end_index = group[-1][0] + 1
input_type_group.append((key, start_index, end_index))
llm_pos_ids_list = []
video_frame_num = 1
for modality_type, start_idx, end_idx in input_type_group:
st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
if modality_type == "image":
t, h, w = (
image_grid_thw[image_index][0],
image_grid_thw[image_index][1],
image_grid_thw[image_index][2],
)
llm_grid_t, llm_grid_h, llm_grid_w = (
t.item(),
h.item() // spatial_merge_size,
w.item() // spatial_merge_size,
)
t_index = torch.arange(llm_grid_t).view(-1, 1).expand(-1, llm_grid_h * llm_grid_w).flatten()
h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(llm_grid_t, -1, llm_grid_w).flatten()
w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(llm_grid_t, llm_grid_h, -1).flatten()
llm_pos_ids_list.append(torch.stack([t_index, h_index, w_index]) + st_idx)
image_index += 1
video_frame_num = 1
elif modality_type == "video":
t, h, w = (
video_frame_num,
video_grid_thw[video_index][1],
video_grid_thw[video_index][2],
)
llm_grid_t, llm_grid_h, llm_grid_w = (
t,
h.item() // spatial_merge_size,
w.item() // spatial_merge_size,
)
for t_idx in range(llm_grid_t):
t_index = torch.tensor(t_idx).view(-1, 1).expand(-1, llm_grid_h * llm_grid_w).flatten()
h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(1, -1, llm_grid_w).flatten()
w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(1, llm_grid_h, -1).flatten()
llm_pos_ids_list.append(torch.stack([t_index, h_index, w_index]) + st_idx)
video_group_index += 1
if video_group_index >= video_grid_thw[video_index][0]:
video_index += 1
video_group_index = 0
video_frame_num += 1
else:
text_len = end_idx - start_idx
llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx)
video_frame_num = 1
llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1)
position_ids[..., attention_mask == 1] = llm_positions.to(position_ids.device)
else:
if attention_mask is not None:
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
position_ids = position_ids.unsqueeze(0).expand(3, -1).to(input_ids.device)
else:
position_ids = torch.arange(input_ids.shape[0], device=input_ids.device).view(1, -1).expand(3, -1)
return position_ids
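# Illustrative output (comments only, following the loop above): for a single image whose merged
# grid is (t=1, h=2, w=2), the four image tokens receive
#     t_index = [0, 0, 0, 0], h_index = [0, 0, 1, 1], w_index = [0, 1, 0, 1]
# (each shifted by st_idx), while text tokens advance all three rows by 1 per token. The returned
# tensor has shape (3, seq_len); downstream code is expected to combine it with the plain text
# position ids into the (4, batch_size, seq_length) layout that process_position_ids validates.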
def prepare_fa2_from_position_ids(
query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, position_ids: torch.Tensor
):
assert position_ids.ndim == 2 # (batch_size, seq_length)
query = query.contiguous().view(-1, query.size(-2), query.size(-1))
key = key.contiguous().view(-1, key.size(-2), key.size(-1))
value = value.contiguous().view(-1, value.size(-2), value.size(-1))
position_ids = position_ids.view(-1)
cu_seqlens = torch.cat(
(
(position_ids == 0).nonzero().view(-1).to(torch.int32),
torch.tensor(position_ids.size(), device=position_ids.device, dtype=torch.int32),
)
)
max_length = cu_seqlens.diff().max() # use cu_seqlens to infer max_length for qwen2vl mrope
return (query, key, value, (cu_seqlens, cu_seqlens), (max_length, max_length))
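# Worked example (comments only): two packed sequences of lengths 3 and 2 give
# position_ids = [0, 1, 2, 0, 1]; the zeros sit at indices [0, 3], so cu_seqlens = [0, 3, 5] and
# max_length = max(diff) = 3, exactly the (cu_seqlens, max_seqlen) pair consumed by
# flash_attn_varlen_func below.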
def _custom_flash_attention_forward(
query_states: torch.Tensor,
key_states: torch.Tensor,
value_states: torch.Tensor,
attention_mask: Optional[torch.Tensor],
query_length: int,
is_causal: bool = True,
position_ids: Optional[torch.Tensor] = None,
use_top_left_mask: bool = False,
deterministic: Optional[bool] = None,
**kwargs,
):
"""
Patches flash attention forward to handle 3D position ids in mrope. (3, batch_size, seq_length)
"""
# Assuming 4D tensors, key_states.shape[1] is the key/value sequence length (source length).
flash_kwargs = {}
if _flash_supports_deterministic:
flash_kwargs["deterministic"] = deterministic if deterministic is not None else _flash_deterministic_enabled
if kwargs.get("softcap") is not None:
flash_kwargs["softcap"] = kwargs.pop("softcap")
query_states, key_states, value_states = fa_peft_integration_check(
query_states, key_states, value_states, target_dtype=torch.bfloat16
)
    sp_size = get_ulysses_sequence_parallel_world_size()
    if position_ids is not None:
        assert position_ids.ndim == 2  # (batch_size, seq_length / sp_size)
        if sp_size > 1:
# qkv: (batch_size, seq_length / sp_size, num_head, head_size)
validate_ulysses_config(query_states.size(2), sp_size)
query_states = gather_seq_scatter_heads(query_states, seq_dim=1, head_dim=2)
key_states = gather_seq_scatter_heads(key_states, seq_dim=1, head_dim=2)
value_states = gather_seq_scatter_heads(value_states, seq_dim=1, head_dim=2)
position_ids_lst = [torch.empty_like(position_ids) for _ in range(sp_size)]
            dist.all_gather(position_ids_lst, position_ids, group=get_ulysses_sequence_parallel_group())
position_ids = torch.cat(position_ids_lst, dim=-1) # (batch_size, seq_length)
if position_ids is not None and query_length != 1 and not (torch.diff(position_ids, dim=-1) >= 0).all():
batch_size = query_states.size(0)
q, k, v, (cu_seqlens_q, cu_seqlens_k), (max_seqlen_q, max_seqlen_k) = prepare_fa2_from_position_ids(
query_states, key_states, value_states, position_ids
)
attn_output = flash_attn_varlen_func(
q=q,
k=k,
v=v,
cu_seqlens_q=cu_seqlens_q,
cu_seqlens_k=cu_seqlens_k,
max_seqlen_q=max_seqlen_q,
max_seqlen_k=max_seqlen_k,
dropout_p=kwargs.pop("dropout", 0.0),
softmax_scale=kwargs.pop("softmax_scale", None),
causal=is_causal,
**flash_kwargs,
)
attn_output = attn_output.view(batch_size, -1, attn_output.size(-2), attn_output.size(-1))
else:
attn_output = _flash_attention_forward(
query_states,
key_states,
value_states,
attention_mask,
query_length,
is_causal=is_causal,
use_top_left_mask=use_top_left_mask,
deterministic=deterministic,
**kwargs,
) # do not pass position_ids to old flash_attention_forward
if sp_size > 1:
# (batch_size, seq_length, num_head, head_size)
attn_output = gather_heads_scatter_seq(attn_output, head_dim=2, seq_dim=1)
return attn_output
def glm4v_attn_forward(
self: "Glm4vTextAttention",
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.46
**kwargs,
) -> tuple[torch.Tensor, None]:
from transformers.models.glm4v.modeling_glm4v import apply_multimodal_rotary_pos_emb, repeat_kv
bsz, q_len, _ = hidden_states.size() # q_len = seq_length / sp_size
query_states = self.q_proj(hidden_states) # (batch_size, seq_length / sp_size, num_heads * head_size)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
# Because the input can be padded, the absolute sequence length depends on the max position id.
cos, sin = position_embeddings
query_states, key_states = apply_multimodal_rotary_pos_emb(
query_states, key_states, cos, sin, self.rope_scaling["mrope_section"]
)
key_states = repeat_kv(key_states, self.num_key_value_groups)
value_states = repeat_kv(value_states, self.num_key_value_groups)
dropout_rate = 0.0 if not self.training else self.attention_dropout
# This is before the transpose
q_len = query_states.shape[2]
# FA2 uses non-transposed inputs
query_states = query_states.transpose(1, 2)
key_states = key_states.transpose(1, 2)
value_states = value_states.transpose(1, 2)
attn_output = _custom_flash_attention_forward(
query_states,
key_states,
value_states,
attention_mask,
query_length=q_len,
is_causal=getattr(self, "is_causal", True),
dropout=dropout_rate,
use_top_left_mask=_flash_use_top_left_mask,
position_ids=position_ids, # important: pass position ids
) # (batch_size, seq_length / sp_size, num_head, head_size)
attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, None
def _get_input_embeds(
model: "Glm4vForConditionalGeneration",
input_ids: torch.LongTensor,
attention_mask: Optional[torch.Tensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
pixel_values_videos: Optional[torch.FloatTensor] = None,
image_grid_thw: Optional[torch.LongTensor] = None,
video_grid_thw: Optional[torch.LongTensor] = None,
):
inputs_embeds = model.get_input_embeddings()(input_ids)
if pixel_values is not None:
pixel_values = pixel_values.type(model.visual.dtype)
image_embeds = model.visual(pixel_values, grid_thw=image_grid_thw)
n_image_tokens = (input_ids == model.config.image_token_id).sum().item()
n_image_features = image_embeds.shape[0]
if n_image_tokens != n_image_features:
raise ValueError(
f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {n_image_features}"
)
mask = input_ids == model.config.image_token_id
mask_unsqueezed = mask.unsqueeze(-1)
mask_expanded = mask_unsqueezed.expand_as(inputs_embeds)
image_mask = mask_expanded.to(inputs_embeds.device)
image_embeds = image_embeds.to(inputs_embeds.device, inputs_embeds.dtype)
inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds)
if pixel_values_videos is not None:
pixel_values_videos = pixel_values_videos.type(model.visual.dtype)
video_embeds = model.visual(pixel_values_videos, grid_thw=video_grid_thw)
n_video_tokens = (input_ids == model.config.video_token_id).sum().item()
n_video_features = video_embeds.shape[0]
if n_video_tokens != n_video_features:
raise ValueError(
f"Video features and video tokens do not match: tokens: {n_video_tokens}, features {n_video_features}"
)
mask = input_ids == model.config.video_token_id
mask_unsqueezed = mask.unsqueeze(-1)
mask_expanded = mask_unsqueezed.expand_as(inputs_embeds)
video_mask = mask_expanded.to(inputs_embeds.device)
video_embeds = video_embeds.to(inputs_embeds.device, inputs_embeds.dtype)
inputs_embeds = inputs_embeds.masked_scatter(video_mask, video_embeds)
    if pixel_values is None and pixel_values_videos is None:
        # text-only batch in a mixed text-image dataset: run a dummy visual forward so the vision
        # tower's parameters still participate in the autograd graph
pixel_values = torch.zeros((16, 1176), dtype=inputs_embeds.dtype, device=inputs_embeds.device)
image_grid_thw = torch.tensor([[1, 4, 4]], dtype=torch.long, device=inputs_embeds.device)
image_embeds = model.visual(pixel_values, grid_thw=image_grid_thw)
inputs_embeds += 0.0 * image_embeds.mean()
if attention_mask is not None:
attention_mask = attention_mask.to(inputs_embeds.device)
return inputs_embeds, attention_mask
def process_position_ids(position_ids: torch.Tensor) -> torch.Tensor:
if position_ids.ndim != 3 or position_ids.size(0) != 4:
# we concat the text position ids with the 3D vision position ids by default
# see https://github.com/huggingface/transformers/pull/39447
raise ValueError("position_ids should be a 3D tensor of shape (4, batch_size, seq_length).")
return position_ids
@dataclass
class Glm4vCausalLMOutputForPPO(Glm4vCausalLMOutputWithPast):
log_probs: Optional[torch.FloatTensor] = None
entropy: Optional[torch.FloatTensor] = None
def glm4v_base_forward(
self: "Glm4vForConditionalGeneration",
input_ids: torch.LongTensor,
attention_mask: Optional[torch.Tensor] = None,
labels: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
pixel_values_videos: Optional[torch.FloatTensor] = None,
image_grid_thw: Optional[torch.LongTensor] = None,
video_grid_thw: Optional[torch.LongTensor] = None,
**kwargs,
):
kwargs["inputs_embeds"], kwargs["attention_mask"] = _get_input_embeds(
self, input_ids, attention_mask, pixel_values, pixel_values_videos, image_grid_thw, video_grid_thw
) # avoid lora module having multiple keyword arguments
return self.language_model(
input_ids=None,
**kwargs,
)
def glm4v_forward(
self: "Glm4vForConditionalGeneration",
input_ids: torch.LongTensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
pixel_values_videos: Optional[torch.FloatTensor] = None,
image_grid_thw: Optional[torch.LongTensor] = None,
video_grid_thw: Optional[torch.LongTensor] = None,
**kwargs,
):
return self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=process_position_ids(position_ids),
pixel_values=pixel_values,
pixel_values_videos=pixel_values_videos,
image_grid_thw=image_grid_thw,
video_grid_thw=video_grid_thw,
**kwargs,
)
def forward_with_normal_backend(
self: Glm4vForConditionalGeneration,
input_ids: torch.LongTensor = None,
labels: Optional[torch.LongTensor] = None,
temperature: float = 1.0,
**kwargs,
) -> "Glm4vCausalLMOutputWithPast":
outputs = glm4v_forward(self, input_ids, **kwargs)
hidden_states = outputs[0]
logits = self.lm_head(hidden_states)
return Glm4vCausalLMOutputWithPast(
logits=logits,
hidden_states=outputs.hidden_states,
)
def forward_with_torch_backend(
self: Glm4vForConditionalGeneration,
input_ids: torch.LongTensor = None,
labels: Optional[torch.LongTensor] = None,
temperature: float = 1.0,
**kwargs,
) -> tuple | Glm4vCausalLMOutputForPPO:
from verl.utils.experimental.torch_functional import FusedLinearForPPO
outputs = glm4v_forward(self, input_ids, **kwargs)
hidden_states = outputs[0]
# Loss calculations
if labels is not None:
rolled_labels = torch.roll(labels, shifts=-1, dims=-1)
elif input_ids is not None:
rolled_labels = torch.roll(input_ids, shifts=-1, dims=-1)
else:
raise RuntimeError("To use forward_with_torch_backend, either labels or input_ids must be provided.")
fused_linear_for_ppo = FusedLinearForPPO()
log_probs, entropy = fused_linear_for_ppo.forward(
hidden_states=hidden_states,
vocab_weights=self.lm_head.weight,
input_ids=rolled_labels,
temperature=temperature,
)
return Glm4vCausalLMOutputForPPO(
log_probs=log_probs,
entropy=entropy,
hidden_states=outputs.hidden_states,
)
def forward_with_triton_backend(
self: Glm4vForConditionalGeneration,
input_ids: torch.LongTensor = None,
labels: Optional[torch.LongTensor] = None,
temperature: float = 1.0,
**kwargs,
) -> tuple | Glm4vCausalLMOutputForPPO:
from verl.utils.kernel.linear_cross_entropy import linear_cross_entropy
outputs = glm4v_forward(self, input_ids, **kwargs)
hidden_states = outputs[0]
# Loss calculations
if labels is not None:
rolled_labels = torch.roll(labels, shifts=-1, dims=-1)
elif input_ids is not None:
rolled_labels = torch.roll(input_ids, shifts=-1, dims=-1)
else:
raise RuntimeError("To use forward_with_triton_backend, either labels or input_ids must be provided.")
log_probs, entropy = linear_cross_entropy(
hidden_states,
self.lm_head.weight,
rolled_labels,
temperature,
"none",
)
return Glm4vCausalLMOutputForPPO(
log_probs=log_probs,
entropy=entropy,
hidden_states=outputs.hidden_states,
)
|
verl__models__transformers__kimi_vl.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
import torch.nn.functional as F
from transformers.cache_utils import Cache
from transformers.modeling_flash_attention_utils import _flash_attention_forward
from verl.models.transformers.monkey_patch import is_transformers_version_in_range
# Import compatibility wrapper for flash_attn_supports_top_left_mask
from verl.utils.transformers_compat import flash_attn_supports_top_left_mask
from verl.utils.ulysses import (
gather_heads_scatter_seq,
gather_seq_scatter_heads,
get_ulysses_sequence_parallel_world_size,
validate_ulysses_config,
)
# Copied from transformers.models.llama.modeling_llama.rotate_half
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
# Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb
def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`):
The position indices of the tokens corresponding to the query and key tensors. For example, this can be
used to pass offsetted position ids when working with a KV-cache.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos[position_ids].unsqueeze(unsqueeze_dim)
sin = sin[position_ids].unsqueeze(unsqueeze_dim)
b, h, s, d = q.shape
q = q.view(b, h, s, d // 2, 2).transpose(4, 3).reshape(b, h, s, d)
b, h, s, d = k.shape
k = k.view(b, h, s, d // 2, 2).transpose(4, 3).reshape(b, h, s, d)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
# Copied from transformers.models.llama.modeling_llama.repeat_kv
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
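# Shape example (comments only): hidden_states of shape (2, 4, 128, 64) with n_rep=3 becomes
# (2, 12, 128, 64); each kv head is duplicated 3 times along dim 1, matching
# torch.repeat_interleave(hidden_states, repeats=3, dim=1).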
def _ulysses_flash_attn_forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_value: Optional[Cache] = None,
output_attentions: bool = False,
use_cache: bool = False,
**kwargs,
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
bsz, q_len, _ = hidden_states.size()
if self.q_lora_rank is None:
q = self.q_proj(hidden_states)
else:
q = self.q_b_proj(self.q_a_layernorm(self.q_a_proj(hidden_states)))
q = q.view(bsz, q_len, self.num_heads, self.q_head_dim).transpose(1, 2)
# Flash attention requires the input to have the shape
# batch_size x seq_length x head_dim x hidden_dim
# therefore we just need to keep the original shape
compressed_kv = self.kv_a_proj_with_mqa(hidden_states)
compressed_kv, k_pe = torch.split(compressed_kv, [self.kv_lora_rank, self.qk_rope_head_dim], dim=-1)
k_pe = k_pe.view(bsz, q_len, 1, self.qk_rope_head_dim).transpose(1, 2)
kv = (
self.kv_b_proj(self.kv_a_layernorm(compressed_kv))
.view(bsz, q_len, self.num_heads, self.qk_nope_head_dim + self.v_head_dim)
.transpose(1, 2)
)
k_nope, value_states = torch.split(kv, [self.qk_nope_head_dim, self.v_head_dim], dim=-1)
# patch
ulysses_sp_size = get_ulysses_sequence_parallel_world_size()
if ulysses_sp_size > 1:
validate_ulysses_config(self.num_heads, ulysses_sp_size)
num_key_value_groups = self.config.num_attention_heads // self.config.num_key_value_heads
k_pe = repeat_kv(k_pe, ulysses_sp_size) # to keep heads=1 after a2a
k_nope = repeat_kv(k_nope, num_key_value_groups)
value_states = repeat_kv(value_states, num_key_value_groups)
q = gather_seq_scatter_heads(q, seq_dim=2, head_dim=1)
k_pe = gather_seq_scatter_heads(k_pe, seq_dim=2, head_dim=1)
k_nope = gather_seq_scatter_heads(k_nope, seq_dim=2, head_dim=1)
value_states = gather_seq_scatter_heads(value_states, seq_dim=2, head_dim=1)
# (batch_size, num_head / sp_size, seq_length, head_size)
full_q_len = q.size(2) # full_q_len = seq_length
else:
full_q_len = q_len
q_nope, q_pe = torch.split(q, [self.qk_nope_head_dim, self.qk_rope_head_dim], dim=-1)
cos, sin = self.rotary_emb(value_states, seq_len=full_q_len)
q_pe, k_pe = apply_rotary_pos_emb(q_pe, k_pe, cos, sin, position_ids)
query_states = k_pe.new_empty(bsz, self.num_heads // ulysses_sp_size, full_q_len, self.q_head_dim)
query_states[:, :, :, : self.qk_nope_head_dim] = q_nope
query_states[:, :, :, self.qk_nope_head_dim :] = q_pe
key_states = k_pe.new_empty(bsz, self.num_heads // ulysses_sp_size, full_q_len, self.q_head_dim)
key_states[:, :, :, : self.qk_nope_head_dim] = k_nope
key_states[:, :, :, self.qk_nope_head_dim :] = k_pe
if self.q_head_dim != self.v_head_dim:
value_states = F.pad(value_states, [0, self.q_head_dim - self.v_head_dim])
# TODO: These transpose are quite inefficient but Flash Attention requires the layout
# [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
# to be able to avoid many of these transpose/reshape/view.
query_states = query_states.transpose(1, 2)
key_states = key_states.transpose(1, 2)
value_states = value_states.transpose(1, 2)
dropout_rate = self.attention_dropout if self.training else 0.0
attn_output = _flash_attention_forward(
query_states,
key_states,
value_states,
attention_mask,
full_q_len,
dropout=dropout_rate,
sliding_window=None,
is_causal=self.is_causal,
use_top_left_mask=flash_attn_supports_top_left_mask(),
position_ids=position_ids, # important: pass position ids
softmax_scale=self.softmax_scale,
)
if ulysses_sp_size > 1:
attn_output = gather_heads_scatter_seq(attn_output, head_dim=2, seq_dim=1)
if self.q_head_dim != self.v_head_dim:
attn_output = attn_output[:, :, :, : self.v_head_dim]
attn_output = attn_output.reshape(bsz, q_len, self.num_heads * self.v_head_dim).contiguous()
attn_output = self.o_proj(attn_output)
if is_transformers_version_in_range(min_version="4.53.0"):
return attn_output, None
else:
return attn_output, None, None
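# Note on head dims (comments only): q_head_dim = qk_nope_head_dim + qk_rope_head_dim, while
# values use v_head_dim. Since flash attention needs q, k and v to share one head size,
# value_states are right-padded up to q_head_dim before the kernel call and the padding is
# sliced off the attention output again (the two q_head_dim != v_head_dim branches above).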
|
verl__models__transformers__llama.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from typing import Callable, Optional
import torch
if sys.version_info >= (3, 11):
pass
else:
pass
from transformers.cache_utils import Cache
from transformers.modeling_flash_attention_utils import _flash_attention_forward
from transformers.models.llama.modeling_llama import apply_rotary_pos_emb
from transformers.utils import logging
# Import compatibility wrapper for flash_attn_supports_top_left_mask
from verl.utils.transformers_compat import flash_attn_supports_top_left_mask
from verl.utils.ulysses import (
gather_heads_scatter_seq,
gather_seq_scatter_heads,
get_ulysses_sequence_parallel_world_size,
validate_ulysses_config,
)
logger = logging.get_logger(__name__)
def llama_flash_attn_forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_value: Optional[Cache] = None,
output_attentions: bool = False,
use_cache: bool = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.46
**kwargs,
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""
Adapted from transformers 4.47.1 to support Ulysses sequence parallelism.
NOTE: This function is used for transformers versions in the range [4.45.0, 4.47.1].
"""
output_attentions = False
bsz, q_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
# Flash attention requires the input to have the shape
# batch_size x seq_length x head_dim x hidden_dim
# therefore we just need to keep the original shape
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
# trade off: repeat first and then all to all
# key_states = repeat_kv(key_states, self.num_key_value_groups)
# value_states = repeat_kv(value_states, self.num_key_value_groups)
########## AlltoAll for Ulysses ##########
ulysses_sp_size = get_ulysses_sequence_parallel_world_size()
if ulysses_sp_size > 1:
validate_ulysses_config(self.num_heads, ulysses_sp_size)
# (bsz, n_head, seq_len/n, head_dim) -> (bsz, n_head/n, seq_len, head_dim)
query_states = gather_seq_scatter_heads(query_states, seq_dim=2, head_dim=1)
key_states = gather_seq_scatter_heads(key_states, seq_dim=2, head_dim=1)
value_states = gather_seq_scatter_heads(value_states, seq_dim=2, head_dim=1)
full_q_len = query_states.size(2) # full seq length
if position_embeddings is None:
logger.warning_once(
"The attention layers in this model are transitioning from computing the RoPE embeddings internally "
"through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed "
"`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.46 `position_ids` will be "
"removed and `position_embeddings` will be mandatory."
)
cos, sin = self.rotary_emb(value_states, position_ids)
else:
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_value is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
# TODO: These transpose are quite inefficient but Flash Attention requires the layout
# [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
# to be able to avoid many of these transpose/reshape/view.
query_states = query_states.transpose(1, 2)
key_states = key_states.transpose(1, 2)
value_states = value_states.transpose(1, 2)
dropout_rate = self.attention_dropout if self.training else 0.0
# In PEFT, usually we cast the layer norms in float32 for training stability reasons
# therefore the input hidden states gets silently casted in float32. Hence, we need
# cast them back in the correct dtype just to be sure everything works as expected.
# This might slowdown training & inference so it is recommended to not cast the LayerNorms
# in fp32. (LlamaRMSNorm handles it correctly)
input_dtype = query_states.dtype
if input_dtype == torch.float32:
if torch.is_autocast_enabled():
target_dtype = torch.get_autocast_gpu_dtype()
# Handle the case where the model is quantized
elif hasattr(self.config, "_pre_quantization_dtype"):
target_dtype = self.config._pre_quantization_dtype
else:
target_dtype = self.q_proj.weight.dtype
logger.warning_once(
f"The input hidden states seems to be silently casted in float32, this might be related to "
f"the fact you have upcasted embedding or layer norm layers in float32. We will cast back the "
f"input in {target_dtype}."
)
query_states = query_states.to(target_dtype)
key_states = key_states.to(target_dtype)
value_states = value_states.to(target_dtype)
attn_output = _flash_attention_forward(
query_states,
key_states,
value_states,
attention_mask,
full_q_len,
position_ids=position_ids,
dropout=dropout_rate,
sliding_window=getattr(self, "sliding_window", None),
use_top_left_mask=flash_attn_supports_top_left_mask(),
is_causal=self.is_causal,
**kwargs,
)
attn_output = attn_output.reshape(bsz, full_q_len, -1, self.head_dim).contiguous()
########## AlltoAll for Ulysses ##########
if ulysses_sp_size > 1:
attn_output = gather_heads_scatter_seq(attn_output, seq_dim=1, head_dim=2)
attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
attn_output = self.o_proj(attn_output)
if not output_attentions:
attn_weights = None
return attn_output, attn_weights, past_key_value
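# Shape sketch (illustrative only, not part of the patch): the Ulysses all-to-all used above
# trades a sequence shard for a head shard. With sp_size=4, each rank holds
# (bsz, n_head, seq_len/4, head_dim) before gather_seq_scatter_heads and
# (bsz, n_head/4, seq_len, head_dim) afterwards; gather_heads_scatter_seq inverts this after
# attention so the output is again sharded along the sequence dimension.
def _ulysses_shape_sketch(bsz: int = 2, n_head: int = 32, seq_len: int = 4096, head_dim: int = 128, sp: int = 4):
    sharded_seq = (bsz, n_head, seq_len // sp, head_dim)  # input layout per rank
    sharded_heads = (bsz, n_head // sp, seq_len, head_dim)  # layout per rank during attention
    return sharded_seq, sharded_heads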
def llama_attn_forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor],
past_key_value: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs,
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""
Adapted from transformers 4.49.0 to support Ulysses sequence parallelism for transformers >= 4.48.0.
NOTE: This function has been tested only on transformers versions between 4.48.0 and 4.50.0.
"""
from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS
from transformers.models.llama.modeling_llama import eager_attention_forward
bsz, q_len, _ = hidden_states.shape
query_states = self.q_proj(hidden_states).view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
########## AlltoAll for Ulysses ##########
ulysses_sp_size = get_ulysses_sequence_parallel_world_size()
if ulysses_sp_size > 1:
validate_ulysses_config(self.config.num_attention_heads, ulysses_sp_size)
query_states = gather_seq_scatter_heads(query_states, seq_dim=2, head_dim=1)
key_states = gather_seq_scatter_heads(key_states, seq_dim=2, head_dim=1)
value_states = gather_seq_scatter_heads(value_states, seq_dim=2, head_dim=1)
full_q_len = query_states.size(2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_value is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
logger.warning_once(
"`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. "
"Falling back to eager attention. This warning can be removed using the argument "
'`attn_implementation="eager"` when loading the model.'
)
else:
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(bsz, full_q_len, -1, self.head_dim).contiguous()
########## AlltoAll for Ulysses ##########
if ulysses_sp_size > 1:
attn_output = gather_heads_scatter_seq(attn_output, seq_dim=1, head_dim=2)
attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
|
verl__models__transformers__monkey_patch.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Apply monkey-patch functions to models.
"""
import sys
from types import SimpleNamespace
from typing import Optional
import torch
from transformers.modeling_flash_attention_utils import _flash_attention_forward
from transformers.modeling_utils import PreTrainedModel
from verl.utils.import_utils import is_trl_available
from verl.utils.transformers_compat import is_transformers_version_in_range
from verl.utils.ulysses import (
gather_heads_scatter_seq,
gather_seq_scatter_heads,
get_ulysses_sequence_parallel_group,
get_ulysses_sequence_parallel_world_size,
slice_input_tensor,
)
_PREFIX_GROUPER_PATCHED = False
_PREFIX_GROUPER_SUPPORTED_ATTENTIONS = {"flash_attention_2", "flash_attention_3", "sdpa", "flex_attention", "eager"}
def _create_prefix_grouper_wrapper(original_fn):
"""Wrap attention function to support prefix_grouper in kwargs."""
def wrapped(module, query, key, value, attention_mask, *args, **kwargs):
prefix_grouper = kwargs.pop("prefix_grouper", None)
if prefix_grouper is None:
return original_fn(module, query, key, value, attention_mask, *args, **kwargs)
def attn_func(q, k, v, attn_mask, *inner_args, **inner_kwargs):
out, _ = original_fn(module, q, k, v, attn_mask, *inner_args, **inner_kwargs)
return out
return prefix_grouper.forward(attn_func, query, key, value, *args, **kwargs), None
return wrapped
def apply_prefix_grouper_patch():
"""Patch ALL_ATTENTION_FUNCTIONS to support prefix_grouper parameter."""
global _PREFIX_GROUPER_PATCHED
if _PREFIX_GROUPER_PATCHED:
return
from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS
patched = []
for name in list(ALL_ATTENTION_FUNCTIONS.keys()):
if name in _PREFIX_GROUPER_SUPPORTED_ATTENTIONS:
ALL_ATTENTION_FUNCTIONS[name] = _create_prefix_grouper_wrapper(ALL_ATTENTION_FUNCTIONS[name])
patched.append(name)
_PREFIX_GROUPER_PATCHED = True
print(f"[PrefixGrouper] Patched: {patched}")
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=2, repeats=n_rep). The hidden states go from (batch,
seqlen, num_key_value_heads, head_dim) to (batch, seqlen, num_key_value_heads * n_rep, head_dim)
"""
batch, slen, num_key_value_heads, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, :, None, :].expand(batch, slen, num_key_value_heads, n_rep, head_dim)
return hidden_states.reshape(batch, slen, num_key_value_heads * n_rep, head_dim)
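# Minimal check of repeat_kv's contract (illustrative): 2 KV heads repeated 3 times become
# 6 heads (each original head repeated consecutively), while batch, sequence length and
# head_dim stay unchanged.
def _repeat_kv_shape_demo() -> tuple[int, ...]:
    kv = torch.zeros(1, 8, 2, 64)  # (batch, seqlen, num_key_value_heads, head_dim)
    out = repeat_kv(kv, n_rep=3)
    assert out.shape == (1, 8, 6, 64)
    return tuple(out.shape)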
def _ulysses_flash_attention_forward(
query_states: torch.Tensor,
key_states: torch.Tensor,
value_states: torch.Tensor,
attention_mask: Optional[torch.Tensor],
query_length: int,
*args,
position_ids: Optional[torch.Tensor] = None,
**kwargs,
):
"""Insert all-to-all before and after flash attention.
DeepSpeed-Ulysses: https://arxiv.org/pdf/2309.14509
For transformers>=4.55, the flash attention API has changed; we need to pass the
query_length after doing the Ulysses all2all.
See https://github.com/huggingface/transformers/issues/40399
Args:
query_states (torch.Tensor): (batch_size, seqlen/sp_size, nheads, head_dim)
key_states (torch.Tensor): (batch_size, seqlen/sp_size, nheads_k, head_dim)
value_states (torch.Tensor): (batch_size, seqlen/sp_size, nheads_k, head_dim)
position_ids (torch.Tensor, optional): (batch_size, seqlen/sp_size)
Returns:
torch.Tensor: (batch_size, seqlen/sp_size, nheads, head_dim)
"""
ulysses_sp_size = get_ulysses_sequence_parallel_world_size()
########## AlltoAll for Ulysses ##########
# TODO: Disable SP for ViT; there's no elegant way to determine whether it's ViT or not.
# Use `position_ids` as condition since ViT doesn't pass it to flash attention.
if ulysses_sp_size > 1 and position_ids is not None:
# NOTE: repeat kv heads so they are divisible by the sequence parallel size. Instead of repeating
# nheads_q//nheads_k times, we repeat sp_size//nheads_k times, since flash_attention supports
# MQA/GQA (see the worked example after this function).
# For example:
# - nheads_k=4, sp=8, repeats=2
# - nheads_k=8, sp=8, repeats=1
# - nheads_k=16, sp=8, repeats=1
repeats = max(ulysses_sp_size // key_states.size(2), 1)
key_states = repeat_kv(key_states, repeats)
value_states = repeat_kv(value_states, repeats)
# (bsz, seq_len/n, n_head, head_dim) -> (bsz, seq_len, n_head/n, head_dim)
query_states = gather_seq_scatter_heads(query_states, seq_dim=1, head_dim=2)
key_states = gather_seq_scatter_heads(key_states, seq_dim=1, head_dim=2)
value_states = gather_seq_scatter_heads(value_states, seq_dim=1, head_dim=2)
# TODO: all_gather position_ids because `prepare_fa2_from_position_ids` needs it, we can eliminate
# this all_gather by passing cu_seq_lens_q, cu_seq_lens_k, max_length_k, max_length_q explicitly.
# https://github.com/huggingface/transformers/pull/33932
# (bsz, seq_len/n) -> (bsz, seq_len)
position_ids_list = [torch.empty_like(position_ids) for _ in range(ulysses_sp_size)]
torch.distributed.all_gather(position_ids_list, position_ids, group=get_ulysses_sequence_parallel_group())
position_ids = torch.concat(position_ids_list, dim=-1)
# (bsz, seq_len, n_head/n, head_dim)
query_length = query_states.size(1)
attn_output = _flash_attention_forward(
query_states, key_states, value_states, attention_mask, query_length, *args, position_ids=position_ids, **kwargs
)
########## AlltoAll for Ulysses ##########
if ulysses_sp_size > 1 and position_ids is not None:
# (bsz, seq_len, n_head/n, head_dim) -> (bsz, seq_len/n, n_head, head_dim)
attn_output = gather_heads_scatter_seq(attn_output, seq_dim=1, head_dim=2)
return attn_output
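# Worked example of the `repeats` rule above (never called; it only documents the MQA/GQA-aware
# head repetition): KV heads are repeated just enough to cover the Ulysses SP size, e.g.
# nheads_k=4 with sp_size=8 gives 2, while nheads_k >= sp_size gives 1.
def _ulysses_kv_repeats(nheads_k: int, ulysses_sp_size: int) -> int:
    return max(ulysses_sp_size // nheads_k, 1)
assert _ulysses_kv_repeats(nheads_k=4, ulysses_sp_size=8) == 2
assert _ulysses_kv_repeats(nheads_k=8, ulysses_sp_size=8) == 1
assert _ulysses_kv_repeats(nheads_k=16, ulysses_sp_size=8) == 1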
def patch_vlm_for_ulysses_input_slicing(model_class: type):
"""
Applies a monkey patch to the forward method of a given model class
to enable Ulysses sequence parallelism input slicing.
"""
def _create_ulysses_wrapped_decoder_forward(original_forward):
def ulysses_wrapped_decoder_forward(self, *args, **kwargs):
inputs_embeds = kwargs.get("inputs_embeds")
position_ids = kwargs.get("position_ids")
visual_pos_masks = kwargs.get("visual_pos_masks")
deepstack_visual_embeds = kwargs.get("deepstack_visual_embeds")
call_kwargs = kwargs.copy()
current_ulysses_sp_size = get_ulysses_sequence_parallel_world_size()
slice_now = (
inputs_embeds is not None
and current_ulysses_sp_size > 1
and getattr(self, "_needs_initial_slice", True)
)
if slice_now:
call_kwargs["inputs_embeds"] = slice_input_tensor(inputs_embeds, dim=1, padding=False)
call_kwargs["position_ids"] = slice_input_tensor(position_ids, dim=-1, padding=False)
# Also slice visual_pos_masks and deepstack_visual_embeds for Qwen3 VL models
if visual_pos_masks is not None:
original_visual_mask = visual_pos_masks
sliced_visual_mask = slice_input_tensor(visual_pos_masks, dim=1, padding=False)
call_kwargs["visual_pos_masks"] = sliced_visual_mask
if deepstack_visual_embeds is not None:
sliced_embeds = []
num_visual_before = original_visual_mask.sum().item()
num_visual_in_shard = sliced_visual_mask.sum().item()
if num_visual_in_shard > 0 and num_visual_before > 0:
# Calculate which visual embeddings belong to this shard
# We need to find the offset of visual tokens in this shard
from verl.utils.ulysses import get_ulysses_sequence_parallel_rank
rank = get_ulysses_sequence_parallel_rank()
seq_len = original_visual_mask.shape[1]
local_seq_len = seq_len // current_ulysses_sp_size
start_idx = rank * local_seq_len
end_idx = start_idx + local_seq_len
# Get total visual tokens before and up to the end of the shard's sequence slice
# This correctly handles batches by summing across all samples
visual_start = original_visual_mask[:, :start_idx].sum().item() if start_idx > 0 else 0
visual_end = original_visual_mask[:, :end_idx].sum().item()
# Slice each tensor in deepstack_visual_embeds
for embed in deepstack_visual_embeds:
sliced_embeds.append(embed[visual_start:visual_end])
else:
# No visual tokens in this shard, create empty tensors to maintain gradient flow
for embed in deepstack_visual_embeds:
sliced_embeds.append(embed[:0])
call_kwargs["deepstack_visual_embeds"] = sliced_embeds
self._needs_initial_slice = False
try:
return original_forward(self, *args, **call_kwargs)
finally:
if slice_now:
self._needs_initial_slice = True
return ulysses_wrapped_decoder_forward
original_forward = model_class.forward
wrapped_forward = _create_ulysses_wrapped_decoder_forward(original_forward)
model_class.forward = wrapped_forward
print(f"Monkey patch {model_class.__name__}.forward for Ulysses SP input slicing.")
def patch_forward_with_backends(
model: PreTrainedModel,
use_fused_kernels: bool = False,
fused_kernels_backend: str = None,
):
"""
Choose the forward function based on the model and backend.
Args:
model (PreTrainedModel): The model to apply the monkey patch.
use_fused_kernels (bool): Whether to use fused kernels.
fused_kernels_backend (str): The backend to use for fused kernels.
"""
if not use_fused_kernels or fused_kernels_backend not in ["triton", "torch"]:
print(
f"Skipping monkey patch for {model.__class__.__name__} as use_fused_kernels is "
f"{use_fused_kernels} or fused_kernels_backend is {fused_kernels_backend}"
)
return
forward_with_torch_backend_function = model.__class__.forward
forward_with_triton_backend_function = model.__class__.forward
if model.config.model_type in ["qwen2_5_vl", "qwen2_vl"]:
from verl.models.transformers.qwen2_vl import forward_with_torch_backend, forward_with_triton_backend
forward_with_torch_backend_function = forward_with_torch_backend
forward_with_triton_backend_function = forward_with_triton_backend
elif model.config.model_type in ["qwen3_vl", "qwen3_vl_moe"]:
from verl.models.transformers.qwen3_vl import forward_with_torch_backend, forward_with_triton_backend
forward_with_torch_backend_function = forward_with_torch_backend
forward_with_triton_backend_function = forward_with_triton_backend
elif model.config.model_type == "glm4v":
from verl.models.transformers.glm4v import forward_with_torch_backend, forward_with_triton_backend
forward_with_torch_backend_function = forward_with_torch_backend
forward_with_triton_backend_function = forward_with_triton_backend
else:
from verl.models.transformers.dense_common import forward_with_torch_backend, forward_with_triton_backend
forward_with_torch_backend_function = forward_with_torch_backend
forward_with_triton_backend_function = forward_with_triton_backend
if fused_kernels_backend == "triton":
model.__class__.forward = forward_with_triton_backend_function
print(f"Using Triton backend for fused kernels in {model.__class__.__name__}")
elif fused_kernels_backend == "torch":
model.__class__.forward = forward_with_torch_backend_function
print(f"Using Torch backend for fused kernels in {model.__class__.__name__}")
else:
raise ValueError(f"Unsupported fused_kernels_backend: {fused_kernels_backend}. Choose 'triton' or 'torch'.")
def apply_monkey_patch(
model: PreTrainedModel,
ulysses_sp_size: int = 1,
use_remove_padding: bool = True,
use_fused_kernels: bool = False,
fused_kernels_backend: str = None,
use_prefix_grouper: bool = False,
use_tiled_mlp: bool = False,
tiled_mlp_shards: int = 4,
):
"""
Apply monkey patches to the model for Ulysses sequence parallelism, fused kernels, TiledMLP and PrefixGrouper.
At the end of this function, the model's forward function is patched for fused kernels.
If a model does not support fused kernels, return right after applying its other patches (as done for KimiVL).
Args:
model: The model to apply the monkey patch.
ulysses_sp_size: The size of ulysses sequence parallel.
use_remove_padding: Whether to use remove padding.
use_fused_kernels: Whether to use fused kernels.
fused_kernels_backend: The backend to use for fused kernels.
use_prefix_grouper: Whether to patch the attention functions to support PrefixGrouper.
use_tiled_mlp: Whether to use TiledMLP for memory-efficient MLP computation.
tiled_mlp_shards: Number of shards for TiledMLP (higher = lower memory, slightly slower).
"""
# Apply TiledMLP monkey patch for memory-efficient MLP computation
if use_tiled_mlp:
from verl.models.transformers.tiled_mlp import apply_tiled_mlp_monkey_patch
model_type = getattr(model.config, "model_type", None)
apply_tiled_mlp_monkey_patch(num_shards=tiled_mlp_shards, model_type=model_type)
# Apply PrefixGrouper patch if enabled
if use_prefix_grouper:
apply_prefix_grouper_patch()
"""Replace _flash_attention_forward to _ulysses_flash_attention_forward"""
module = sys.modules[model.__module__]
try:
num_attention_heads, num_key_value_heads = model.config.num_attention_heads, model.config.num_key_value_heads
except AttributeError:
num_attention_heads, num_key_value_heads = (
model.config.text_config.num_attention_heads,
model.config.text_config.num_key_value_heads,
)
assert num_attention_heads % ulysses_sp_size == 0, (
f"num_attention_heads {num_attention_heads} must be divisible by ulysses_sp_size {ulysses_sp_size}"
)
assert num_key_value_heads % ulysses_sp_size == 0 or ulysses_sp_size % num_key_value_heads == 0, (
f"num_key_value_heads {num_key_value_heads} must be divisible by ulysses_sp_size "
f"{ulysses_sp_size}or vise versa. Upon ulysses_sp_size % num_key_value_heads == 0,"
f"kv heads are repeated to ensure correctness."
)
if is_trl_available():
from trl import AutoModelForCausalLMWithValueHead # type: ignore
def state_dict(self, *args, **kwargs):
return torch.nn.Module.state_dict(self, *args, **kwargs)
AutoModelForCausalLMWithValueHead.state_dict = state_dict
print("Monkey patch state_dict in AutoModelForCausalLMWithValueHead. ")
# TODO: VLM models only, unify monkey patch to LLM models.
if model.config.model_type in ["qwen2_5_vl", "qwen2_vl"]:
# Step 1: patch model to support image-text mixed data
if is_transformers_version_in_range(min_version="4.52.0"):
from transformers.models.qwen2_5_vl.modeling_qwen2_5_vl import (
Qwen2_5_VLForConditionalGeneration,
Qwen2_5_VLModel,
Qwen2_5_VLTextModel,
)
from transformers.models.qwen2_vl.modeling_qwen2_vl import (
Qwen2VLForConditionalGeneration,
Qwen2VLModel,
Qwen2VLTextModel,
)
else:
from transformers.models.qwen2_5_vl.modeling_qwen2_5_vl import Qwen2_5_VLForConditionalGeneration
from transformers.models.qwen2_5_vl.modeling_qwen2_5_vl import Qwen2_5_VLModel as Qwen2_5_VLTextModel
from transformers.models.qwen2_vl.modeling_qwen2_vl import Qwen2VLForConditionalGeneration
from transformers.models.qwen2_vl.modeling_qwen2_vl import Qwen2VLModel as Qwen2VLTextModel
Qwen2_5_VLModel = SimpleNamespace(forward=None)
Qwen2VLModel = SimpleNamespace(forward=None)
from verl.models.transformers.qwen2_vl import forward_with_normal_backend, qwen2_vl_base_forward
Qwen2_5_VLModel.forward = qwen2_vl_base_forward
Qwen2VLModel.forward = qwen2_vl_base_forward
Qwen2_5_VLForConditionalGeneration.forward = forward_with_normal_backend
Qwen2VLForConditionalGeneration.forward = forward_with_normal_backend
print(f"Monkey patch {model.__class__.__name__} model forward")
# Step 2: patch attention to support ulysses parallelism
if is_transformers_version_in_range(min_version="4.54.0"):
from transformers.models.qwen2_5_vl.modeling_qwen2_5_vl import Qwen2_5_VLAttention
from transformers.models.qwen2_vl.modeling_qwen2_vl import Qwen2VLAttention
elif is_transformers_version_in_range(min_version="4.53.0"):
raise RuntimeError("Transformers 4.53.* is bugged. Use transformers 4.54.0 or later.")
else:
from transformers.models.qwen2_5_vl.modeling_qwen2_5_vl import (
Qwen2_5_VLFlashAttention2 as Qwen2_5_VLAttention,
)
from transformers.models.qwen2_vl.modeling_qwen2_vl import Qwen2VLFlashAttention2 as Qwen2VLAttention
if use_remove_padding or ulysses_sp_size > 1:
from verl.models.transformers.qwen2_vl import qwen2_vl_attn_forward
Qwen2_5_VLAttention.forward = qwen2_vl_attn_forward
Qwen2VLAttention.forward = qwen2_vl_attn_forward
print(f"Monkey patch {model.__class__.__name__} attention layer")
# Step 3: patch input for multimodal sequence parallelism
if ulysses_sp_size > 1:
patch_vlm_for_ulysses_input_slicing(Qwen2_5_VLTextModel)
patch_vlm_for_ulysses_input_slicing(Qwen2VLTextModel)
elif model.config.model_type in ["qwen3_vl", "qwen3_vl_moe"]:
# Step 1: patch model to support image-text mixed data
from transformers.models.qwen3_vl.modeling_qwen3_vl import (
Qwen3VLForConditionalGeneration,
Qwen3VLModel,
Qwen3VLTextModel,
)
from transformers.models.qwen3_vl_moe.modeling_qwen3_vl_moe import (
Qwen3VLMoeForConditionalGeneration,
Qwen3VLMoeModel,
Qwen3VLMoeTextModel,
)
from verl.models.transformers.qwen3_vl import (
forward_with_normal_backend,
patch_qwen3_vl_moe_sparse_moe_block_forward,
qwen3_vl_base_forward,
)
Qwen3VLModel.forward = qwen3_vl_base_forward
Qwen3VLMoeModel.forward = qwen3_vl_base_forward
Qwen3VLForConditionalGeneration.forward = forward_with_normal_backend
Qwen3VLMoeForConditionalGeneration.forward = forward_with_normal_backend
print(f"Monkey patch {model.__class__.__name__} model forward")
# Step 1.5: patch Qwen3VLMoeTextSparseMoeBlock to fix transformers 4.57.3 bug
if model.config.model_type == "qwen3_vl_moe" and is_transformers_version_in_range(max_version="4.57.3"):
patch_qwen3_vl_moe_sparse_moe_block_forward()
# Step 2: patch input for multimodal sequence parallelism
if ulysses_sp_size > 1:
patch_vlm_for_ulysses_input_slicing(Qwen3VLTextModel)
patch_vlm_for_ulysses_input_slicing(Qwen3VLMoeTextModel)
elif model.config.model_type == "glm4v":
# Step 1: patch model to support image-text mixed data
from transformers.models.glm4v.modeling_glm4v import (
Glm4vForConditionalGeneration,
Glm4vModel,
Glm4vTextAttention,
Glm4vTextModel,
)
from verl.models.transformers.glm4v import forward_with_normal_backend, glm4v_base_forward
Glm4vModel.forward = glm4v_base_forward
Glm4vForConditionalGeneration.forward = forward_with_normal_backend
print(f"Monkey patch {model.__class__.__name__} model forward")
# Step 2: patch attention to support ulysses parallelism
if use_remove_padding or ulysses_sp_size > 1:
from verl.models.transformers.glm4v import glm4v_attn_forward
Glm4vTextAttention.forward = glm4v_attn_forward
print(f"Monkey patch {model.__class__.__name__} attention layer")
# Step 3: patch input for multimodal sequence parallelism
if ulysses_sp_size > 1:
patch_vlm_for_ulysses_input_slicing(Glm4vTextModel)
elif model.config.model_type == "kimi_vl":
if use_remove_padding or ulysses_sp_size > 1:
# TODO: Changes need to be made when transformers are adapted.
from verl.models.transformers.kimi_vl import _ulysses_flash_attn_forward
module.DeepseekV3FlashAttention2.forward = _ulysses_flash_attn_forward
print("Monkey patch FlashAttention2.forward in KimiVL")
if ulysses_sp_size > 1:
patch_vlm_for_ulysses_input_slicing(module.DeepseekV3ForCausalLM)
if use_fused_kernels:
print("Not support fused kernels for KimiVL")
return
if use_remove_padding or ulysses_sp_size > 1:
if hasattr(module, "_flash_attention_forward"): # transformers <= 4.47.1 or legacy models
module._flash_attention_forward = _ulysses_flash_attention_forward
print(f"Monkey patch _flash_attention_forward in {model.__module__}")
else:
from transformers.integrations import flash_attention
flash_attention._flash_attention_forward = _ulysses_flash_attention_forward
print(f"Monkey patch _flash_attention_forward in {flash_attention.__name__}")
patch_forward_with_backends(model, use_fused_kernels=use_fused_kernels, fused_kernels_backend=fused_kernels_backend)
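# End-to-end sketch (hypothetical model name and sizes): load a HF model and apply the verl
# patches for Ulysses SP size 2 with padding removal; fused kernels stay disabled here, so
# patch_forward_with_backends is a no-op for this call.
def _apply_monkey_patch_example():
    from transformers import AutoModelForCausalLM
    model = AutoModelForCausalLM.from_pretrained(
        "Qwen/Qwen2.5-0.5B-Instruct", attn_implementation="flash_attention_2"
    )
    apply_monkey_patch(model, ulysses_sp_size=2, use_remove_padding=True, use_fused_kernels=False)
    return model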
|
verl__models__transformers__npu_patch.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Copyright 2025 The Qwen Team and The HuggingFace Inc. team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn.functional as F
import torch_npu
from torch import nn
from transformers.activations import ACT2FN
from transformers.models.qwen2 import modeling_qwen2
from transformers.models.qwen2_5_vl import modeling_qwen2_5_vl
from transformers.models.qwen3 import modeling_qwen3
from transformers.models.qwen3_moe import modeling_qwen3_moe
from transformers.models.qwen3_next import modeling_qwen3_next
from transformers.models.qwen3_vl import modeling_qwen3_vl
from transformers.models.qwen3_vl_moe import modeling_qwen3_vl_moe
from transformers.utils import logging
logger = logging.get_logger(__name__)
def rms_norm_forward_npu(self, x):
"""NPU optimized implementation for RMSNorm."""
if x.dtype != self.weight.dtype:
x = x.to(self.weight.dtype)
return torch_npu.npu_rms_norm(x, self.weight, epsilon=self.variance_epsilon)[0]
def silu_forward_npu(self, hidden_state):
"""NPU optimized implementation for SiLU in `forward` func in MLP layer."""
gate_up = torch.cat((self.gate_proj(hidden_state), self.up_proj(hidden_state)), dim=-1)
return self.down_proj(torch_npu.npu_swiglu(gate_up, dim=-1))
def apply_rotary_pos_emb_npu(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""NPU optimized implementation for RoPE."""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = torch_npu.npu_rotary_mul(q, cos, sin)
k_embed = torch_npu.npu_rotary_mul(k, cos, sin)
return q_embed.to(q.dtype), k_embed.to(k.dtype)
def qwen3_next_rms_norm_forward_npu(self, x):
return torch_npu.npu_rms_norm(x.float(), 1.0 + self.weight.float(), epsilon=self.eps)[0].type_as(x)
def qwen3_next_rms_norm_forward_gated_npu(self, hidden_states, gate=None):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
hidden_states = torch_npu.npu_rms_norm(hidden_states, self.weight.float(), epsilon=self.variance_epsilon)[0]
hidden_states = hidden_states * F.silu(gate.to(torch.float32))
return hidden_states.to(input_dtype)
def qwen3_next_apply_rotary_pos_emb_npu(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
# Keep half or full tensor for later concatenation
rotary_dim = cos.shape[-1]
q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:]
q_embed = torch_npu.npu_rotary_mul(q_rot, cos, sin).to(q.dtype)
k_embed = torch_npu.npu_rotary_mul(k_rot, cos, sin).to(k.dtype)
q_embed = torch.cat([q_embed, q_pass], dim=-1)
k_embed = torch.cat([k_embed, k_pass], dim=-1)
return q_embed, k_embed
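# Pure-torch sketch of the partial-RoPE split used above (no NPU op involved; it only
# documents that just the first `rotary_dim` channels are rotated while the rest pass
# through unchanged, so head_dim is preserved after re-concatenation).
def _partial_rope_split_demo(q: torch.Tensor, rotary_dim: int) -> bool:
    q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
    rebuilt = torch.cat([q_rot, q_pass], dim=-1)
    return rebuilt.shape == q.shape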
class NPUGmmFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, x, weight, group_list, group_list_type=1):
"""
Grouped Matmul(GMM) for Ascend NPU.
Args:
x (torch.Tensor): Input tensor, shape (tokens_num * top_k, hidden_size)
weight (torch.Tensor): Expert weights, shape (n_experts, hidden_size, intermediate_size)
group_list (torch.Tensor): Expert token counts, shape (n_experts,)
- type 0: cumsum of tokens per expert
- type 1: direct tokens per expert (default)
"""
ctx.save_for_backward(x, weight)
ctx.group_list = group_list
ctx.group_list_type = group_list_type
output = torch_npu.npu_grouped_matmul(
[x], [weight], bias=None, group_list=group_list, split_item=2, group_type=0, group_list_type=group_list_type
)[0]
return output
@staticmethod
def backward(ctx, grad_output):
x, weight = ctx.saved_tensors
group_list = ctx.group_list
group_list_type = ctx.group_list_type
dx = torch_npu.npu_grouped_matmul(
[grad_output],
[weight.transpose(1, 2)],
bias=None,
group_list=group_list,
split_item=2,
group_type=0,
group_list_type=group_list_type,
)[0]
dw = torch_npu.npu_grouped_matmul(
[x.transpose(0, 1)],
[grad_output],
bias=None,
group_list=group_list,
split_item=3,
group_type=2,
group_list_type=group_list_type,
)[0]
return dx, dw, None, None
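# Shape sketch for NPUGmmFunction.apply (illustrative; it requires an Ascend NPU at runtime,
# so this helper only documents the contract and is never called here): tokens are already
# permuted/grouped by expert and `tokens_per_expert` holds per-expert counts
# (group_list_type=1, the default).
def _gmm_shape_sketch(permuted_tokens: torch.Tensor, expert_weights: torch.Tensor, tokens_per_expert: torch.Tensor):
    # permuted_tokens: (sum(tokens_per_expert), hidden_size)
    # expert_weights:  (n_experts, hidden_size, intermediate_size)
    out = NPUGmmFunction.apply(permuted_tokens, expert_weights, tokens_per_expert)
    # out: (sum(tokens_per_expert), intermediate_size)
    return out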
def _qwen3_sparse_moe_routed_forward_npu(self, hidden_states: torch.Tensor):
"""
Shared NPU routed-expert path for Qwen3Moe/Qwen3Next sparse MoE blocks.
Returns:
tuple: (flattened_input, routed_hidden_states, router_logits)
"""
hidden_dim = hidden_states.shape[-1]
hidden_states = hidden_states.view(-1, hidden_dim)
# router_logits: (batch * sequence_length, n_experts)
router_logits = self.gate(hidden_states)
routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)
routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
if self.norm_topk_prob: # only diff with mixtral sparse moe block!
routing_weights /= routing_weights.sum(dim=-1, keepdim=True)
# we cast back to the input dtype
routing_weights = routing_weights.to(hidden_states.dtype)
# Instead of looping over each expert, concatenate all expert weights and use grouped matmul (GMM)
input_dtype = hidden_states.dtype
up_weight_list = [e.up_proj.weight for e in self.experts]
gate_weight_list = [e.gate_proj.weight for e in self.experts]
down_weight_list = [e.down_proj.weight for e in self.experts]
w1 = torch.stack(up_weight_list).transpose(1, 2).to(input_dtype)
w2 = torch.stack(gate_weight_list).transpose(1, 2).to(input_dtype)
w3 = torch.stack(down_weight_list).transpose(1, 2).to(input_dtype)
permuted_tokens, row_ids_map = torch_npu.npu_moe_token_permute(hidden_states, selected_experts.to(torch.int32))
tokens_per_expert = torch.histc(selected_experts, bins=self.num_experts, min=0, max=self.num_experts)
up_res = NPUGmmFunction.apply(permuted_tokens, w1, tokens_per_expert)
gate_res = NPUGmmFunction.apply(permuted_tokens, w2, tokens_per_expert)
act_res = torch_npu.npu_swiglu(torch.cat([gate_res, up_res], dim=-1))
down_res = NPUGmmFunction.apply(act_res, w3, tokens_per_expert)
routed_hidden_states = torch_npu.npu_moe_token_unpermute(down_res, row_ids_map, probs=routing_weights)
return hidden_states, routed_hidden_states, router_logits
def qwen3_moe_sparse_moe_block_forward_npu(self, hidden_states: torch.Tensor) -> torch.Tensor:
"""NPU optimized implementation for `forward` in Qwen3MoeSparseMoeBlock."""
output_shape = hidden_states.shape
_, routed_hidden_states, router_logits = _qwen3_sparse_moe_routed_forward_npu(self, hidden_states)
final_hidden_states = routed_hidden_states.reshape(output_shape)
return final_hidden_states, router_logits
def qwen3_next_sparse_moe_block_forward_npu(self, hidden_states: torch.Tensor) -> torch.Tensor:
"""NPU optimized implementation for `forward` in Qwen3NextSparseMoeBlock."""
output_shape = hidden_states.shape
hidden_states, routed_hidden_states, router_logits = _qwen3_sparse_moe_routed_forward_npu(self, hidden_states)
shared_expert_output = self.shared_expert(hidden_states)
shared_expert_output = torch.sigmoid(self.shared_expert_gate(hidden_states)) * shared_expert_output
final_hidden_states = (routed_hidden_states + shared_expert_output).reshape(output_shape)
return final_hidden_states, router_logits
class NPUQwen3VLMoeTextExperts(nn.Module):
"""NPU optimized implementation for Qwen3VLMoeTextExperts."""
def __init__(self, config):
super().__init__()
self.num_experts = config.num_experts
self.intermediate_size = config.moe_intermediate_size
self.hidden_size = config.hidden_size
self.expert_dim = self.intermediate_size
self.gate_up_proj = nn.Parameter(torch.empty(self.num_experts, self.hidden_size, 2 * self.expert_dim))
self.down_proj = nn.Parameter(torch.empty((self.num_experts, self.expert_dim, self.hidden_size)))
self.act_fn = ACT2FN[config.hidden_act]
def forward(
self, hidden_states: torch.Tensor, routing_weights: torch.Tensor, router_indices: torch.Tensor
) -> torch.Tensor:
"""
When training, tokens are permuted per expert and processed with grouped matmul (GMM),
which keeps memory bounded.
For inference we can sacrifice some memory and compute the output for all experts at once
by repeating the inputs.
Args:
hidden_states (torch.Tensor): (batch_size * token_num, hidden_size)
routing_weights (torch.Tensor): (batch_size * token_num, num_experts)
router_indices (torch.Tensor): (batch_size * token_num, top_k)
Returns:
torch.Tensor
"""
batch_size = hidden_states.shape[0]
hidden_states = hidden_states.reshape(-1, self.hidden_size) # (num_tokens, hidden_size)
if self.training:
permuted_hidden_states, row_ids_map = torch_npu.npu_moe_token_permute(
hidden_states, router_indices.to(torch.int32)
)
tokens_per_expert = torch.histc(router_indices, bins=self.num_experts, min=0, max=self.num_experts)
intermediate_hidden_states = NPUGmmFunction.apply(
permuted_hidden_states, self.gate_up_proj, tokens_per_expert
)
intermediate_activations = torch_npu.npu_swiglu(intermediate_hidden_states, dim=-1)
output = NPUGmmFunction.apply(intermediate_activations, self.down_proj, tokens_per_expert)
num_tokens = hidden_states.shape[0]
top_k = router_indices.shape[1]
batch_idx = torch.arange(num_tokens, device=routing_weights.device)
batch_idx = batch_idx.unsqueeze(1).expand(-1, top_k)
selected_probs = routing_weights[batch_idx, router_indices]
next_states = torch_npu.npu_moe_token_unpermute(output, row_ids_map, probs=selected_probs)
next_states = next_states.view(batch_size, -1, self.hidden_size)
else:
hidden_states = hidden_states.repeat(self.num_experts, 1)
hidden_states = hidden_states.view(self.num_experts, -1, self.hidden_size)
gate_up = torch.bmm(hidden_states, self.gate_up_proj)
gate, up = gate_up.chunk(2, dim=-1) # not supported for DTensors
next_states = torch.bmm((up * self.act_fn(gate)), self.down_proj)
next_states = next_states.reshape(self.num_experts, batch_size, -1, self.hidden_size)
next_states = (
next_states * routing_weights.transpose(0, 1).view(self.num_experts, batch_size, -1)[..., None]
)
next_states = next_states.sum(dim=0)
return next_states
class NPUQwen3VLMoeTextSparseMoeBlock(nn.Module):
"""NPU optimized implementation for Qwen3VLMoeTextSparseMoeBlock."""
def __init__(self, config):
super().__init__()
self.hidden_size = config.hidden_size
self.num_experts = config.num_experts
self.top_k = config.num_experts_per_tok
self.gate = nn.Linear(config.hidden_size, config.num_experts, bias=False)
self.experts = NPUQwen3VLMoeTextExperts(config)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
batch_size = hidden_states.shape[0]
hidden_states = hidden_states.reshape(-1, self.hidden_size)
router_logits = self.gate(hidden_states)
routing_weights = torch.nn.functional.softmax(router_logits, dim=-1, dtype=torch.float)
routing_weights, router_indices = torch.topk(routing_weights, self.top_k, dim=-1)
routing_weights = routing_weights / routing_weights.sum(dim=-1, keepdim=True)
routing_weights = routing_weights.to(router_logits.dtype)
hidden_states = hidden_states.reshape(batch_size, -1, self.hidden_size)
if not self.training:
routing_weights = torch.zeros_like(router_logits).scatter_(1, router_indices, routing_weights)
routed_out = self.experts(hidden_states, routing_weights, router_indices)
return routed_out
# Patches for Qwen2 Model
modeling_qwen2.Qwen2RMSNorm.forward = rms_norm_forward_npu
modeling_qwen2.Qwen2MLP.forward = silu_forward_npu
modeling_qwen2.apply_rotary_pos_emb = apply_rotary_pos_emb_npu
# Patches for Qwen2.5-VL Model
modeling_qwen2_5_vl.Qwen2RMSNorm.forward = rms_norm_forward_npu
modeling_qwen2_5_vl.Qwen2_5_VLMLP.forward = silu_forward_npu
# Patches for Qwen3 Model
modeling_qwen3.Qwen3RMSNorm.forward = rms_norm_forward_npu
modeling_qwen3.Qwen3MLP.forward = silu_forward_npu
modeling_qwen3.apply_rotary_pos_emb = apply_rotary_pos_emb_npu
# Patches for Qwen3 MoE Model
modeling_qwen3_moe.Qwen3MoeRMSNorm.forward = rms_norm_forward_npu
modeling_qwen3_moe.Qwen3MoeSparseMoeBlock.forward = qwen3_moe_sparse_moe_block_forward_npu
modeling_qwen3_moe.apply_rotary_pos_emb = apply_rotary_pos_emb_npu
# Patches for Qwen3 VL Model
modeling_qwen3_vl.Qwen3VLTextRMSNorm.forward = rms_norm_forward_npu
modeling_qwen3_vl.Qwen3VLTextMLP.forward = silu_forward_npu
# Patches for Qwen3-VL MoE Model
modeling_qwen3_vl_moe.Qwen3VLMoeTextSparseMoeBlock = NPUQwen3VLMoeTextSparseMoeBlock
modeling_qwen3_vl_moe.Qwen3VLMoeTextRMSNorm.forward = rms_norm_forward_npu
modeling_qwen3_vl_moe.apply_rotary_pos_emb = apply_rotary_pos_emb_npu
# Patches for Qwen3 Next Model
modeling_qwen3_next.Qwen3NextSparseMoeBlock.forward = qwen3_next_sparse_moe_block_forward_npu
modeling_qwen3_next.Qwen3NextRMSNormGated.forward = qwen3_next_rms_norm_forward_gated_npu
modeling_qwen3_next.Qwen3NextRMSNorm.forward = qwen3_next_rms_norm_forward_npu
modeling_qwen3_next.apply_rotary_pos_emb = qwen3_next_apply_rotary_pos_emb_npu
|
verl__models__transformers__qwen2.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, Optional
import torch
from transformers.cache_utils import Cache
from transformers.modeling_flash_attention_utils import _flash_attention_forward
from transformers.models.llama.modeling_llama import apply_rotary_pos_emb, repeat_kv
from transformers.utils import logging
# Import compatibility wrapper for flash_attn_supports_top_left_mask
from verl.utils.transformers_compat import flash_attn_supports_top_left_mask
from verl.utils.ulysses import (
gather_heads_scatter_seq,
gather_seq_scatter_heads,
get_ulysses_sequence_parallel_world_size,
validate_ulysses_config,
)
logger = logging.get_logger(__name__)
def qwen2_flash_attn_forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_value: Optional[Cache] = None,
output_attentions: bool = False,
use_cache: bool = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.46
):
"""
Adapted from transformers 4.47.1 to support Ulysses sequence parallelism.
NOTE: This function is only tested on transformers versions between 4.45.0 and 4.47.1.
"""
bsz, q_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
query_states = query_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
########## AlltoAll for Ulysses ##########
ulysses_sp_size = get_ulysses_sequence_parallel_world_size()
if ulysses_sp_size > 1:
validate_ulysses_config(self.num_heads, ulysses_sp_size)
# (bsz, n_head, seq_len/n, head_dim) -> (bsz, n_head/n, seq_len, head_dim)
query_states = gather_seq_scatter_heads(query_states, seq_dim=2, head_dim=1)
key_states = gather_seq_scatter_heads(key_states, seq_dim=2, head_dim=1)
value_states = gather_seq_scatter_heads(value_states, seq_dim=2, head_dim=1)
full_q_len = query_states.size(2) # full seq length
if position_embeddings is None:
logger.warning_once(
"The attention layers in this model are transitioning from computing the RoPE embeddings internally "
"through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed "
"`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.46 `position_ids` will be "
"removed and `position_embeddings` will be mandatory."
)
cos, sin = self.rotary_emb(value_states, position_ids)
else:
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_value is not None:
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models
key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
# repeat k/v heads if n_kv_heads < n_heads
key_states = repeat_kv(key_states, self.num_key_value_groups)
value_states = repeat_kv(value_states, self.num_key_value_groups)
dropout_rate = 0.0 if not self.training else self.attention_dropout
# In PEFT, the layer norms are usually cast to float32 for training stability,
# so the input hidden states get silently cast to float32. Hence, we need to
# cast them back to float16 just to be sure everything works as expected.
input_dtype = query_states.dtype
if input_dtype == torch.float32:
if torch.is_autocast_enabled():
target_dtype = torch.get_autocast_gpu_dtype()
# Handle the case where the model is quantized
elif hasattr(self.config, "_pre_quantization_dtype"):
target_dtype = self.config._pre_quantization_dtype
else:
target_dtype = self.q_proj.weight.dtype
logger.warning_once(
f"The input hidden states seems to be silently casted in float32, this might be related to "
f"the fact you have upcasted embedding or layer norm layers in float32. We will cast back the "
f"input in {target_dtype}."
)
query_states = query_states.to(target_dtype)
key_states = key_states.to(target_dtype)
value_states = value_states.to(target_dtype)
# Reshape to the expected shape for Flash Attention
query_states = query_states.transpose(1, 2)
key_states = key_states.transpose(1, 2)
value_states = value_states.transpose(1, 2)
if (
self.config.use_sliding_window
and getattr(self.config, "sliding_window", None) is not None
and self.layer_idx >= self.config.max_window_layers
):
sliding_window = self.config.sliding_window
else:
sliding_window = None
attn_output = _flash_attention_forward(
query_states,
key_states,
value_states,
attention_mask,
full_q_len,
position_ids=position_ids,
dropout=dropout_rate,
sliding_window=sliding_window,
is_causal=self.is_causal,
use_top_left_mask=flash_attn_supports_top_left_mask(),
)
# use full_q_len to reshape
attn_output = attn_output.reshape(bsz, full_q_len, -1, self.head_dim).contiguous()
########## AlltoAll for Ulysses ##########
if ulysses_sp_size > 1:
attn_output = gather_heads_scatter_seq(attn_output, seq_dim=1, head_dim=2)
attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
attn_output = self.o_proj(attn_output)
if not output_attentions:
attn_weights = None
return attn_output, attn_weights, past_key_value
def qwen2_attn_forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor],
past_key_value: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs,
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""
Adapted from transformers 4.49.0 to support Ulysses sequence parallelism for transformers >= 4.48.0.
NOTE: This function has been tested only on transformers versions between 4.48.0 and 4.50.0.
"""
from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS
bsz, q_len, _ = hidden_states.shape
hidden_shape = (bsz, q_len, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
########## AlltoAll for Ulysses ##########
ulysses_sp_size = get_ulysses_sequence_parallel_world_size()
if ulysses_sp_size > 1:
validate_ulysses_config(self.config.num_attention_heads, ulysses_sp_size)
# (bsz, n_head, seq_len/n, head_dim) -> (bsz, n_head/n, seq_len, head_dim)
query_states = gather_seq_scatter_heads(query_states, seq_dim=2, head_dim=1)
key_states = gather_seq_scatter_heads(key_states, seq_dim=2, head_dim=1)
value_states = gather_seq_scatter_heads(value_states, seq_dim=2, head_dim=1)
full_q_len = query_states.size(2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_value is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
sliding_window = None
if (
self.config.use_sliding_window
and getattr(self.config, "sliding_window", None) is not None
and self.layer_idx >= self.config.max_window_layers
):
sliding_window = self.config.sliding_window
from transformers.models.qwen2.modeling_qwen2 import eager_attention_forward
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
logger.warning_once(
"`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. "
"Falling back to eager attention. This warning can be removed using the argument "
'`attn_implementation="eager"` when loading the model.'
)
else:
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
sliding_window=sliding_window, # main diff with Llama
**kwargs,
)
attn_output = attn_output.reshape(bsz, full_q_len, -1, self.head_dim).contiguous()
########## AlltoAll for Ulysses ##########
if ulysses_sp_size > 1:
# (bsz, seq_len, n_head/n, head_dim) -> (bsz, seq_len/n, n_head, head_dim)
attn_output = gather_heads_scatter_seq(attn_output, seq_dim=1, head_dim=2)
attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
|
verl__models__transformers__qwen2_vl.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import logging
import os
from dataclasses import dataclass
from typing import Optional
import torch
import torch.distributed as dist
from transformers.modeling_flash_attention_utils import _flash_attention_forward, fa_peft_integration_check
from transformers.models.qwen2_vl.modeling_qwen2_vl import (
Qwen2VLAttention,
Qwen2VLCausalLMOutputWithPast,
Qwen2VLForConditionalGeneration,
)
from transformers.utils import is_flash_attn_2_available, is_flash_attn_greater_or_equal_2_10
from verl.utils.device import is_npu_available
from verl.utils.transformers_compat import is_transformers_version_in_range
from verl.utils.ulysses import (
gather_heads_scatter_seq,
gather_seq_scatter_heads,
get_ulysses_sequence_parallel_group,
get_ulysses_sequence_parallel_world_size,
validate_ulysses_config,
)
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
if is_flash_attn_2_available():
from flash_attn import flash_attn_func, flash_attn_varlen_func
_flash_supports_window_size = "window_size" in inspect.signature(flash_attn_func).parameters
_flash_supports_deterministic = "deterministic" in inspect.signature(flash_attn_func).parameters
_flash_use_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
if is_npu_available:
from transformers.integrations.npu_flash_attention import npu_flash_attn_func as flash_attn_func
from transformers.integrations.npu_flash_attention import npu_flash_attn_varlen_func as flash_attn_varlen_func
from transformers.modeling_flash_attention_utils import flash_attn_supports_top_left_mask
_flash_supports_window_size = "window_size" in inspect.signature(flash_attn_func).parameters
_flash_supports_deterministic = "deterministic" in inspect.signature(flash_attn_func).parameters
_flash_use_top_left_mask = flash_attn_supports_top_left_mask()
_flash_deterministic_enabled = os.getenv("FLASH_ATTENTION_DETERMINISTIC", "0") == "1"
def get_rope_index(
processor,
input_ids: torch.Tensor,
image_grid_thw: Optional[torch.Tensor] = None,
video_grid_thw: Optional[torch.Tensor] = None,
second_per_grid_ts: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
"""
Gets the position ids for Qwen2-VL; they should be generated before sharding the sequence.
The batch dim has been removed and the input_ids should be a 1D tensor representing a single example.
https://github.com/huggingface/transformers/blob/v4.52.4/src/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py#L1405
"""
spatial_merge_size = processor.image_processor.merge_size
tokens_per_second = 2
image_token_id = processor.tokenizer.convert_tokens_to_ids("<|image_pad|>")
video_token_id = processor.tokenizer.convert_tokens_to_ids("<|video_pad|>")
vision_start_token_id = processor.tokenizer.convert_tokens_to_ids("<|vision_start|>")
if input_ids is not None and (image_grid_thw is not None or video_grid_thw is not None):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
position_ids = torch.ones(3, input_ids.size(0), dtype=input_ids.dtype, device=input_ids.device) # (3, seqlen)
image_index, video_index = 0, 0
input_ids = input_ids[attention_mask == 1]
image_nums, video_nums = 0, 0
vision_start_indices = torch.argwhere(input_ids == vision_start_token_id)
vision_tokens = input_ids[vision_start_indices + 1]
image_nums = (vision_tokens == image_token_id).sum()
video_nums = (vision_tokens == video_token_id).sum()
input_tokens = input_ids.tolist()
llm_pos_ids_list: list = []
st = 0
remain_images, remain_videos = image_nums, video_nums
for _ in range(image_nums + video_nums):
if image_token_id in input_tokens and remain_images > 0:
ed_image = input_tokens.index(image_token_id, st)
else:
ed_image = len(input_tokens) + 1
if video_token_id in input_tokens and remain_videos > 0:
ed_video = input_tokens.index(video_token_id, st)
else:
ed_video = len(input_tokens) + 1
if ed_image < ed_video:
t, h, w = (
image_grid_thw[image_index][0],
image_grid_thw[image_index][1],
image_grid_thw[image_index][2],
)
second_per_grid_t = 0
image_index += 1
remain_images -= 1
ed = ed_image
else:
t, h, w = (
video_grid_thw[video_index][0],
video_grid_thw[video_index][1],
video_grid_thw[video_index][2],
)
second_per_grid_t = second_per_grid_ts[video_index] if second_per_grid_ts is not None else 1.0
video_index += 1
remain_videos -= 1
ed = ed_video
llm_grid_t, llm_grid_h, llm_grid_w = (
t.item(),
h.item() // spatial_merge_size,
w.item() // spatial_merge_size,
)
text_len = ed - st
st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx)
t_index = torch.arange(llm_grid_t).view(-1, 1).expand(-1, llm_grid_h * llm_grid_w)
t_index = (t_index * second_per_grid_t * tokens_per_second).long().flatten()
h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(llm_grid_t, -1, llm_grid_w).flatten()
w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(llm_grid_t, llm_grid_h, -1).flatten()
llm_pos_ids_list.append(torch.stack([t_index, h_index, w_index]) + text_len + st_idx)
st = ed + llm_grid_t * llm_grid_h * llm_grid_w
if st < len(input_tokens):
st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
text_len = len(input_tokens) - st
llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx)
llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1)
position_ids[..., attention_mask == 1] = llm_positions.to(position_ids.device)
else:
if attention_mask is not None:
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
position_ids = position_ids.unsqueeze(0).expand(3, -1).to(input_ids.device)
else:
position_ids = torch.arange(input_ids.shape[1], device=input_ids.device).view(1, -1).expand(3, -1)
return position_ids
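# Usage sketch (hypothetical tensors): build the 3D mrope position ids for a single, unpadded
# text+image example before Ulysses sharding. `processor` is the Qwen2-VL AutoProcessor; for
# text-only inputs the function falls back to the cumulative-sum branch and still returns a
# tensor with a leading dimension of 3.
def _get_rope_index_example(processor, input_ids: torch.Tensor, image_grid_thw: torch.Tensor):
    attention_mask = torch.ones_like(input_ids)
    return get_rope_index(
        processor,
        input_ids=input_ids,
        image_grid_thw=image_grid_thw,
        attention_mask=attention_mask,
    )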
def prepare_fa2_from_position_ids(
query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, position_ids: torch.Tensor
):
assert position_ids.ndim == 2 # (batch_size, seq_length)
query = query.contiguous().view(-1, query.size(-2), query.size(-1))
key = key.contiguous().view(-1, key.size(-2), key.size(-1))
value = value.contiguous().view(-1, value.size(-2), value.size(-1))
position_ids = position_ids.view(-1)
cu_seqlens = torch.cat(
(
(position_ids == 0).nonzero().view(-1).to(torch.int32),
torch.tensor(position_ids.size(), device=position_ids.device, dtype=torch.int32),
)
)
max_length = cu_seqlens.diff().max() # use cu_seqlens to infer max_length for qwen2vl mrope
return (query, key, value, (cu_seqlens, cu_seqlens), (max_length, max_length))
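# Worked example of the cu_seqlens logic above (pure torch, illustrative): position ids that
# restart at 0 mark packed-sequence boundaries, so [[0, 1, 2, 0, 1, 0]] yields cu_seqlens
# [0, 3, 5, 6] and a max segment length of 3, which is what prepare_fa2_from_position_ids
# feeds to flash_attn_varlen_func.
def _cu_seqlens_demo() -> tuple[list, int]:
    position_ids = torch.tensor([[0, 1, 2, 0, 1, 0]]).view(-1)
    cu_seqlens = torch.cat(
        (
            (position_ids == 0).nonzero().view(-1).to(torch.int32),
            torch.tensor(position_ids.size(), device=position_ids.device, dtype=torch.int32),
        )
    )
    return cu_seqlens.tolist(), cu_seqlens.diff().max().item()  # ([0, 3, 5, 6], 3)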
def _custom_flash_attention_forward(
query_states: torch.Tensor,
key_states: torch.Tensor,
value_states: torch.Tensor,
attention_mask: Optional[torch.Tensor],
query_length: int,
is_causal: bool = True,
position_ids: Optional[torch.Tensor] = None,
sliding_window: Optional[int] = None,
use_top_left_mask: bool = False,
deterministic: Optional[bool] = None,
**kwargs,
):
"""
Patches the flash attention forward to handle 3D mrope position ids of shape (3, batch_size, seq_length).
"""
# Assuming 4D tensors, key_states.shape[1] is the key/value sequence length (source length).
use_sliding_windows = (
_flash_supports_window_size and sliding_window is not None and key_states.shape[1] > sliding_window
)
flash_kwargs = {"window_size": (sliding_window, sliding_window)} if use_sliding_windows else {}
if _flash_supports_deterministic:
flash_kwargs["deterministic"] = deterministic if deterministic is not None else _flash_deterministic_enabled
if kwargs.get("softcap") is not None:
flash_kwargs["softcap"] = kwargs.pop("softcap")
query_states, key_states, value_states = fa_peft_integration_check(
query_states, key_states, value_states, target_dtype=torch.bfloat16
)
if position_ids is not None:
assert position_ids.ndim == 2 # (batch_size, seq_length / sp_size)
sp_size = get_ulysses_sequence_parallel_world_size()
if sp_size > 1:
# qkv: (batch_size, seq_length / sp_size, num_head, head_size)
validate_ulysses_config(query_states.size(2), sp_size)
query_states = gather_seq_scatter_heads(query_states, seq_dim=1, head_dim=2)
key_states = gather_seq_scatter_heads(key_states, seq_dim=1, head_dim=2)
value_states = gather_seq_scatter_heads(value_states, seq_dim=1, head_dim=2)
position_ids_lst = [torch.empty_like(position_ids) for _ in range(sp_size)]
dist.all_gather(position_ids_lst, position_ids, group=get_ulysses_sequence_parallel_group())
position_ids = torch.cat(position_ids_lst, dim=-1) # (batch_size, seq_length)
if position_ids is not None and query_length != 1 and not (torch.diff(position_ids, dim=-1) >= 0).all():
batch_size = query_states.size(0)
q, k, v, (cu_seqlens_q, cu_seqlens_k), (max_seqlen_q, max_seqlen_k) = prepare_fa2_from_position_ids(
query_states, key_states, value_states, position_ids
)
attn_output = flash_attn_varlen_func(
q=q,
k=k,
v=v,
cu_seqlens_q=cu_seqlens_q,
cu_seqlens_k=cu_seqlens_k,
max_seqlen_q=max_seqlen_q,
max_seqlen_k=max_seqlen_k,
dropout_p=kwargs.pop("dropout", 0.0),
softmax_scale=kwargs.pop("softmax_scale", None),
causal=is_causal,
**flash_kwargs,
)
attn_output = attn_output.view(batch_size, -1, attn_output.size(-2), attn_output.size(-1))
else:
attn_output = _flash_attention_forward(
query_states,
key_states,
value_states,
attention_mask,
query_length,
is_causal=is_causal,
sliding_window=sliding_window,
use_top_left_mask=use_top_left_mask,
deterministic=deterministic,
**kwargs,
) # do not pass position_ids to old flash_attention_forward
if sp_size > 1:
# (batch_size, seq_length, num_head, head_size)
attn_output = gather_heads_scatter_seq(attn_output, head_dim=2, seq_dim=1)
return attn_output
def qwen2_vl_attn_forward(
self: "Qwen2VLAttention",
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.46
**kwargs,
) -> tuple[torch.Tensor, None, None]:
from transformers.models.qwen2_vl.modeling_qwen2_vl import apply_multimodal_rotary_pos_emb, repeat_kv
bsz, q_len, _ = hidden_states.size() # q_len = seq_length / sp_size
query_states = self.q_proj(hidden_states) # (batch_size, seq_length / sp_size, num_heads * head_size)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
# Because the input can be padded, the absolute sequence length depends on the max position id.
cos, sin = position_embeddings
query_states, key_states = apply_multimodal_rotary_pos_emb(
query_states, key_states, cos, sin, self.rope_scaling["mrope_section"]
)
key_states = repeat_kv(key_states, self.num_key_value_groups)
value_states = repeat_kv(value_states, self.num_key_value_groups)
dropout_rate = 0.0 if not self.training else self.attention_dropout
sliding_window = None
if (
self.config.use_sliding_window
and getattr(self.config, "sliding_window", None) is not None
and self.layer_idx >= self.config.max_window_layers
):
sliding_window = self.config.sliding_window
# This is before the transpose
q_len = query_states.shape[2]
# FA2 uses non-transposed inputs
query_states = query_states.transpose(1, 2)
key_states = key_states.transpose(1, 2)
value_states = value_states.transpose(1, 2)
if position_ids.ndim == 3:
position_ids = position_ids[0]
attn_output = _custom_flash_attention_forward(
query_states,
key_states,
value_states,
attention_mask,
query_length=q_len,
is_causal=getattr(self, "is_causal", True),
dropout=dropout_rate,
sliding_window=sliding_window,
use_top_left_mask=_flash_use_top_left_mask,
position_ids=position_ids, # important: pass position ids
) # (batch_size, seq_length / sp_size, num_head, head_size)
attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
attn_output = self.o_proj(attn_output)
if is_transformers_version_in_range(min_version="4.54.0"):
return attn_output, None
else:
return attn_output, None, None
def _get_input_embeds(
model: "Qwen2VLForConditionalGeneration",
input_ids: torch.LongTensor,
attention_mask: Optional[torch.Tensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
pixel_values_videos: Optional[torch.FloatTensor] = None,
image_grid_thw: Optional[torch.LongTensor] = None,
video_grid_thw: Optional[torch.LongTensor] = None,
):
inputs_embeds = model.get_input_embeddings()(input_ids)
if pixel_values is not None:
pixel_values = pixel_values.type(model.visual.dtype)
image_embeds = model.visual(pixel_values, grid_thw=image_grid_thw)
n_image_tokens = (input_ids == model.config.image_token_id).sum().item()
n_image_features = image_embeds.shape[0]
if n_image_tokens != n_image_features:
raise ValueError(
f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {n_image_features}"
)
mask = input_ids == model.config.image_token_id
mask_unsqueezed = mask.unsqueeze(-1)
mask_expanded = mask_unsqueezed.expand_as(inputs_embeds)
image_mask = mask_expanded.to(inputs_embeds.device)
image_embeds = image_embeds.to(inputs_embeds.device, inputs_embeds.dtype)
inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds)
if pixel_values_videos is not None:
pixel_values_videos = pixel_values_videos.type(model.visual.dtype)
video_embeds = model.visual(pixel_values_videos, grid_thw=video_grid_thw)
n_video_tokens = (input_ids == model.config.video_token_id).sum().item()
n_video_features = video_embeds.shape[0]
if n_video_tokens != n_video_features:
raise ValueError(
f"Video features and video tokens do not match: tokens: {n_video_tokens}, features {n_video_features}"
)
mask = input_ids == model.config.video_token_id
mask_unsqueezed = mask.unsqueeze(-1)
mask_expanded = mask_unsqueezed.expand_as(inputs_embeds)
video_mask = mask_expanded.to(inputs_embeds.device)
video_embeds = video_embeds.to(inputs_embeds.device, inputs_embeds.dtype)
inputs_embeds = inputs_embeds.masked_scatter(video_mask, video_embeds)
    if pixel_values is None and pixel_values_videos is None:
        # text-only batch in mixed text-image training: run a dummy visual forward so the vision
        # tower still participates in autograd (keeps gradient sync consistent across ranks)
config = model.config.vision_config
patch_dim = config.in_channels * config.temporal_patch_size * config.patch_size**2
pixel_values = torch.zeros((16, patch_dim), dtype=inputs_embeds.dtype, device=inputs_embeds.device)
image_grid_thw = torch.tensor([[1, 4, 4]], dtype=torch.long, device=inputs_embeds.device)
image_embeds = model.visual(pixel_values, grid_thw=image_grid_thw)
inputs_embeds += 0.0 * image_embeds.mean()
if attention_mask is not None:
attention_mask = attention_mask.to(inputs_embeds.device)
return inputs_embeds, attention_mask
def process_position_ids(position_ids: torch.Tensor) -> torch.Tensor:
if position_ids.ndim != 3 or position_ids.size(0) != 4:
# we concat the text position ids with the 3D vision position ids by default
# see https://github.com/huggingface/transformers/pull/39447
raise ValueError("position_ids should be a 3D tensor of shape (4, batch_size, seq_length).")
if is_transformers_version_in_range(max_version="4.53.3"):
# transformers < 4.54.0 only accepts vision position ids, so we discard the text position ids here
position_ids = position_ids[1:]
return position_ids
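# Example (assumed shapes, illustration only): with transformers <= 4.53.3 a (4, 2, 1024) position_ids tensor
# (text row followed by the t/h/w mrope rows) is sliced to (3, 2, 1024); with >= 4.54.0 it passes through unchanged.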
@dataclass
class Qwen2VLCausalLMOutputForPPO(Qwen2VLCausalLMOutputWithPast):
log_probs: Optional[torch.FloatTensor] = None
entropy: Optional[torch.FloatTensor] = None
def qwen2_vl_base_forward(
self: "Qwen2VLForConditionalGeneration",
input_ids: torch.LongTensor,
attention_mask: Optional[torch.Tensor] = None,
labels: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
pixel_values_videos: Optional[torch.FloatTensor] = None,
image_grid_thw: Optional[torch.LongTensor] = None,
video_grid_thw: Optional[torch.LongTensor] = None,
**kwargs,
):
kwargs["inputs_embeds"], kwargs["attention_mask"] = _get_input_embeds(
self, input_ids, attention_mask, pixel_values, pixel_values_videos, image_grid_thw, video_grid_thw
) # avoid lora module having multiple keyword arguments
return self.language_model(input_ids=None, **kwargs)
def qwen2_vl_forward(
self: "Qwen2VLForConditionalGeneration",
input_ids: torch.LongTensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
pixel_values_videos: Optional[torch.FloatTensor] = None,
image_grid_thw: Optional[torch.LongTensor] = None,
video_grid_thw: Optional[torch.LongTensor] = None,
**kwargs,
):
if is_transformers_version_in_range(min_version="4.52.0"):
return self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=process_position_ids(position_ids),
pixel_values=pixel_values,
pixel_values_videos=pixel_values_videos,
image_grid_thw=image_grid_thw,
video_grid_thw=video_grid_thw,
**kwargs,
)
else:
inputs_embeds, attention_mask = _get_input_embeds(
self, input_ids, attention_mask, pixel_values, pixel_values_videos, image_grid_thw, video_grid_thw
)
return self.model(
input_ids=None,
attention_mask=attention_mask,
position_ids=process_position_ids(position_ids),
inputs_embeds=inputs_embeds,
**kwargs,
)
def forward_with_normal_backend(
self: Qwen2VLForConditionalGeneration,
input_ids: torch.LongTensor = None,
labels: Optional[torch.LongTensor] = None,
temperature: float = 1.0,
**kwargs,
) -> "Qwen2VLCausalLMOutputWithPast":
outputs = qwen2_vl_forward(self, input_ids, **kwargs)
hidden_states = outputs[0]
logits = self.lm_head(hidden_states)
return Qwen2VLCausalLMOutputWithPast(
logits=logits,
hidden_states=outputs.hidden_states,
)
def forward_with_torch_backend(
self: Qwen2VLForConditionalGeneration,
input_ids: torch.LongTensor = None,
labels: Optional[torch.LongTensor] = None,
temperature: float = 1.0,
**kwargs,
) -> tuple | Qwen2VLCausalLMOutputForPPO:
from verl.utils.experimental.torch_functional import FusedLinearForPPO
outputs = qwen2_vl_forward(self, input_ids, **kwargs)
hidden_states = outputs[0]
# Loss calculations
if labels is not None:
rolled_labels = torch.roll(labels, shifts=-1, dims=-1)
elif input_ids is not None:
rolled_labels = torch.roll(input_ids, shifts=-1, dims=-1)
else:
raise RuntimeError("To use forward_with_torch_backend, either labels or input_ids must be provided.")
fused_linear_for_ppo = FusedLinearForPPO()
log_probs, entropy = fused_linear_for_ppo.forward(
hidden_states=hidden_states,
vocab_weights=self.lm_head.weight,
input_ids=rolled_labels,
temperature=temperature,
)
return Qwen2VLCausalLMOutputForPPO(
log_probs=log_probs,
entropy=entropy,
hidden_states=outputs.hidden_states,
)
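# Worked example of the label shift above (illustration only): for labels = [t0, t1, t2, t3],
# torch.roll(labels, shifts=-1, dims=-1) gives [t1, t2, t3, t0], pairing each position with the token it
# predicts; the wrapped-around final entry is assumed to be masked out by the downstream loss masking.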
def forward_with_triton_backend(
self: Qwen2VLForConditionalGeneration,
input_ids: torch.LongTensor = None,
labels: Optional[torch.LongTensor] = None,
temperature: float = 1.0,
**kwargs,
) -> tuple | Qwen2VLCausalLMOutputForPPO:
from verl.utils.kernel.linear_cross_entropy import linear_cross_entropy
outputs = qwen2_vl_forward(self, input_ids, **kwargs)
hidden_states = outputs[0]
# Loss calculations
if labels is not None:
rolled_labels = torch.roll(labels, shifts=-1, dims=-1)
elif input_ids is not None:
rolled_labels = torch.roll(input_ids, shifts=-1, dims=-1)
else:
raise RuntimeError("To use forward_with_triton_backend, either labels or input_ids must be provided.")
log_probs, entropy = linear_cross_entropy(
hidden_states,
self.lm_head.weight,
rolled_labels,
temperature,
"none",
)
return Qwen2VLCausalLMOutputForPPO(
log_probs=log_probs,
entropy=entropy,
hidden_states=outputs.hidden_states,
)
|
verl__models__transformers__qwen3_vl.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logging
import os
from dataclasses import dataclass
from typing import Optional
import torch
from transformers.models.qwen3_vl.modeling_qwen3_vl import (
Qwen3VLCausalLMOutputWithPast,
Qwen3VLForConditionalGeneration,
)
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
def get_rope_index(
processor,
input_ids: torch.Tensor,
image_grid_thw: Optional[torch.Tensor] = None,
video_grid_thw: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
**kwargs,
) -> torch.Tensor:
"""
    Gets the position ids for Qwen3-VL; they should be generated before sharding the sequence.
The batch dim has been removed and the input_ids should be a 1D tensor representing a single example.
https://github.com/huggingface/transformers/blob/v4.57.0/src/transformers/models/qwen3_vl/modeling_qwen3_vl.py#L916
"""
spatial_merge_size = processor.image_processor.merge_size
image_token_id = processor.image_token_id
video_token_id = processor.video_token_id
vision_start_token_id = processor.vision_start_token_id
# Since we use timestamps to separate videos,
# like <t1> <vision_start> <frame1> <vision_end> <t2> <vision_start> <frame2> <vision_end>,
# the video_grid_thw should also be split
if video_grid_thw is not None:
video_grid_thw = torch.repeat_interleave(video_grid_thw, video_grid_thw[:, 0], dim=0)
video_grid_thw[:, 0] = 1
if input_ids is not None and (image_grid_thw is not None or video_grid_thw is not None):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
position_ids = torch.ones(3, input_ids.shape[0], dtype=input_ids.dtype, device=input_ids.device)
image_index, video_index = 0, 0
attention_mask = attention_mask.to(input_ids.device)
input_ids = input_ids[attention_mask == 1]
image_nums, video_nums = 0, 0
vision_start_indices = torch.argwhere(input_ids == vision_start_token_id)
vision_tokens = input_ids[vision_start_indices + 1]
image_nums = (vision_tokens == image_token_id).sum()
video_nums = (vision_tokens == video_token_id).sum()
input_tokens = input_ids.tolist()
llm_pos_ids_list: list = []
st = 0
remain_images, remain_videos = image_nums, video_nums
for _ in range(image_nums + video_nums):
if image_token_id in input_tokens and remain_images > 0:
ed_image = input_tokens.index(image_token_id, st)
else:
ed_image = len(input_tokens) + 1
if video_token_id in input_tokens and remain_videos > 0:
ed_video = input_tokens.index(video_token_id, st)
else:
ed_video = len(input_tokens) + 1
if ed_image < ed_video:
t, h, w = (
image_grid_thw[image_index][0],
image_grid_thw[image_index][1],
image_grid_thw[image_index][2],
)
image_index += 1
remain_images -= 1
ed = ed_image
else:
t, h, w = (
video_grid_thw[video_index][0],
video_grid_thw[video_index][1],
video_grid_thw[video_index][2],
)
video_index += 1
remain_videos -= 1
ed = ed_video
llm_grid_t, llm_grid_h, llm_grid_w = (
t.item(),
h.item() // spatial_merge_size,
w.item() // spatial_merge_size,
)
text_len = ed - st
st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx)
# t_index is always 0 because llm_grid_t is always 1
# (we use timestamps to encode the temporal information for videos)
t_index = torch.arange(llm_grid_t).view(-1, 1).expand(-1, llm_grid_h * llm_grid_w).flatten()
h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(llm_grid_t, -1, llm_grid_w).flatten()
w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(llm_grid_t, llm_grid_h, -1).flatten()
llm_pos_ids_list.append(torch.stack([t_index, h_index, w_index]) + text_len + st_idx)
st = ed + llm_grid_t * llm_grid_h * llm_grid_w
if st < len(input_tokens):
st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
text_len = len(input_tokens) - st
llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx)
llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1)
position_ids[..., attention_mask == 1] = llm_positions.to(position_ids.device)
else:
if attention_mask is not None:
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
position_ids = position_ids.unsqueeze(0).expand(3, -1).to(attention_mask.device)
else:
position_ids = torch.arange(input_ids.shape[1], device=input_ids.device).view(1, -1).expand(3, -1)
return position_ids
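# Minimal illustration of the text-only fallback above (toy 1D inputs): with attention_mask = [1, 1, 1, 0]
# and no image/video grids, cumsum(-1) - 1 gives [0, 1, 2, 2], masked_fill_ sets the padded slot to 1, and the
# result is expanded to the three mrope axes, i.e. position_ids of shape (3, 4) = [[0, 1, 2, 1]] * 3.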
def _get_input_embeds(
model: "Qwen3VLForConditionalGeneration",
input_ids: torch.LongTensor,
attention_mask: Optional[torch.Tensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
pixel_values_videos: Optional[torch.FloatTensor] = None,
image_grid_thw: Optional[torch.LongTensor] = None,
video_grid_thw: Optional[torch.LongTensor] = None,
):
inputs_embeds = model.get_input_embeddings()(input_ids)
image_mask, video_mask = None, None
if pixel_values is not None:
pixel_values = pixel_values.type(model.visual.dtype)
image_embeds, deepstack_image_embeds = model.visual(pixel_values, grid_thw=image_grid_thw)
n_image_tokens = (input_ids == model.config.image_token_id).sum().item()
n_image_features = image_embeds.shape[0]
if n_image_tokens != n_image_features:
raise ValueError(
f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {n_image_features}"
)
mask = input_ids == model.config.image_token_id
mask_unsqueezed = mask.unsqueeze(-1)
mask_expanded = mask_unsqueezed.expand_as(inputs_embeds)
image_mask = mask_expanded.to(inputs_embeds.device)
image_embeds = image_embeds.to(inputs_embeds.device, inputs_embeds.dtype)
inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds)
if pixel_values_videos is not None:
pixel_values_videos = pixel_values_videos.type(model.visual.dtype)
video_embeds, deepstack_video_embeds = model.visual(pixel_values_videos, grid_thw=video_grid_thw)
n_video_tokens = (input_ids == model.config.video_token_id).sum().item()
n_video_features = video_embeds.shape[0]
if n_video_tokens != n_video_features:
raise ValueError(
f"Video features and video tokens do not match: tokens: {n_video_tokens}, features {n_video_features}"
)
mask = input_ids == model.config.video_token_id
mask_unsqueezed = mask.unsqueeze(-1)
mask_expanded = mask_unsqueezed.expand_as(inputs_embeds)
video_mask = mask_expanded.to(inputs_embeds.device)
video_embeds = video_embeds.to(inputs_embeds.device, inputs_embeds.dtype)
inputs_embeds = inputs_embeds.masked_scatter(video_mask, video_embeds)
visual_pos_masks = None
deepstack_visual_embeds = None
if image_mask is not None and video_mask is not None:
# aggregate visual_pos_masks and deepstack_visual_embeds
image_mask = image_mask[..., 0]
video_mask = video_mask[..., 0]
visual_pos_masks = image_mask | video_mask
deepstack_visual_embeds = []
image_mask_joint = image_mask[visual_pos_masks]
video_mask_joint = video_mask[visual_pos_masks]
for img_embed, vid_embed in zip(deepstack_image_embeds, deepstack_video_embeds, strict=False):
embed_joint = img_embed.new_zeros(visual_pos_masks.sum(), img_embed.shape[-1]).to(img_embed.device)
embed_joint[image_mask_joint, :] = img_embed
embed_joint[video_mask_joint, :] = vid_embed
deepstack_visual_embeds.append(embed_joint)
elif image_mask is not None:
image_mask = image_mask[..., 0]
visual_pos_masks = image_mask
deepstack_visual_embeds = deepstack_image_embeds
elif video_mask is not None:
video_mask = video_mask[..., 0]
visual_pos_masks = video_mask
deepstack_visual_embeds = deepstack_video_embeds
if pixel_values is None and pixel_values_videos is None:
config = model.config.vision_config
patch_dim = config.in_channels * config.temporal_patch_size * config.patch_size**2
pixel_values = torch.zeros((16, patch_dim), dtype=inputs_embeds.dtype, device=inputs_embeds.device)
image_grid_thw = torch.tensor([[1, 4, 4]], dtype=torch.long, device=inputs_embeds.device)
image_embeds, dummy_deepstack_image_embeds = model.visual(pixel_values, grid_thw=image_grid_thw)
inputs_embeds += 0.0 * image_embeds.mean()
for emb in dummy_deepstack_image_embeds or []:
inputs_embeds += 0.0 * emb.mean()
if attention_mask is not None:
attention_mask = attention_mask.to(inputs_embeds.device)
return {
"inputs_embeds": inputs_embeds,
"attention_mask": attention_mask,
"visual_pos_masks": visual_pos_masks,
"deepstack_visual_embeds": deepstack_visual_embeds,
}
@dataclass
class Qwen3VLCausalLMOutputForPPO(Qwen3VLCausalLMOutputWithPast):
log_probs: Optional[torch.FloatTensor] = None
entropy: Optional[torch.FloatTensor] = None
def qwen3_vl_base_forward(
self: "Qwen3VLForConditionalGeneration",
input_ids: torch.LongTensor,
attention_mask: Optional[torch.Tensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
pixel_values_videos: Optional[torch.FloatTensor] = None,
image_grid_thw: Optional[torch.LongTensor] = None,
video_grid_thw: Optional[torch.LongTensor] = None,
**kwargs,
):
input_kwargs = _get_input_embeds(
self, input_ids, attention_mask, pixel_values, pixel_values_videos, image_grid_thw, video_grid_thw
) # avoid lora module having multiple keyword arguments
kwargs.update(input_kwargs)
return self.language_model(
input_ids=None,
**kwargs,
)
def forward_with_normal_backend(
self: "Qwen3VLForConditionalGeneration",
input_ids: torch.LongTensor = None,
labels: Optional[torch.LongTensor] = None,
temperature: float = 1.0,
**kwargs,
) -> "Qwen3VLCausalLMOutputForPPO":
outputs = self.model(input_ids, **kwargs)
hidden_states = outputs[0]
logits = self.lm_head(hidden_states)
return Qwen3VLCausalLMOutputForPPO(
logits=logits,
hidden_states=outputs.hidden_states,
)
def forward_with_torch_backend(
self: "Qwen3VLForConditionalGeneration",
input_ids: torch.LongTensor = None,
labels: Optional[torch.LongTensor] = None,
temperature: float = 1.0,
**kwargs,
) -> "Qwen3VLCausalLMOutputForPPO":
from verl.utils.experimental.torch_functional import FusedLinearForPPO
outputs = self.model(input_ids, **kwargs)
hidden_states = outputs[0]
# Loss calculations
if labels is not None:
rolled_labels = torch.roll(labels, shifts=-1, dims=-1)
elif input_ids is not None:
rolled_labels = torch.roll(input_ids, shifts=-1, dims=-1)
else:
raise RuntimeError("To use forward_with_torch_backend, either labels or input_ids must be provided.")
fused_linear_for_ppo = FusedLinearForPPO()
log_probs, entropy = fused_linear_for_ppo.forward(
hidden_states=hidden_states,
vocab_weights=self.lm_head.weight,
input_ids=rolled_labels,
temperature=temperature,
)
return Qwen3VLCausalLMOutputForPPO(
log_probs=log_probs,
entropy=entropy,
hidden_states=outputs.hidden_states,
)
def forward_with_triton_backend(
self: "Qwen3VLForConditionalGeneration",
input_ids: torch.LongTensor = None,
labels: Optional[torch.LongTensor] = None,
temperature: float = 1.0,
**kwargs,
) -> "Qwen3VLCausalLMOutputForPPO":
from verl.utils.kernel.linear_cross_entropy import linear_cross_entropy
outputs = self.model(input_ids, **kwargs)
hidden_states = outputs[0]
# Loss calculations
if labels is not None:
rolled_labels = torch.roll(labels, shifts=-1, dims=-1)
elif input_ids is not None:
rolled_labels = torch.roll(input_ids, shifts=-1, dims=-1)
else:
raise RuntimeError("To use forward_with_triton_backend, either labels or input_ids must be provided.")
log_probs, entropy = linear_cross_entropy(
hidden_states,
self.lm_head.weight,
rolled_labels,
temperature,
"none",
)
return Qwen3VLCausalLMOutputForPPO(
log_probs=log_probs,
entropy=entropy,
hidden_states=outputs.hidden_states,
)
def patch_qwen3_vl_moe_sparse_moe_block_forward():
"""
Monkey patch to fix a bug in transformers 4.57.3 where Qwen3VLMoeTextSparseMoeBlock.forward
incorrectly uses torch.zeros_like(hidden_states) instead of torch.zeros_like(router_logits)
when creating router_weights (line 148 in modeling_qwen3_vl_moe.py).
This is a minimal fix that only changes the problematic line while keeping the rest of the
original implementation intact.
"""
try:
from transformers.models.qwen3_vl_moe.modeling_qwen3_vl_moe import Qwen3VLMoeTextSparseMoeBlock
except ImportError:
# Model not available, skip patching
return
# Store the original forward method for reference
original_forward = Qwen3VLMoeTextSparseMoeBlock.forward
@functools.wraps(original_forward)
def patched_forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
batch_size = hidden_states.shape[0]
hidden_states = hidden_states.reshape(-1, self.hidden_size)
router_logits = self.gate(hidden_states)
routing_weights = torch.nn.functional.softmax(router_logits, dim=-1, dtype=torch.float)
routing_weights, router_indices = torch.topk(routing_weights, self.top_k, dim=-1)
routing_weights = routing_weights / routing_weights.sum(dim=-1, keepdim=True)
# BUG FIX: Original code incorrectly uses hidden_states here, should use router_logits
routing_weights = routing_weights.to(router_logits.dtype)
router_weights = torch.zeros_like(router_logits).scatter_(1, router_indices, routing_weights)
hidden_states = hidden_states.reshape(batch_size, -1, self.hidden_size)
routed_out = self.experts(hidden_states, router_weights, router_indices)
return routed_out
# Apply the patch
Qwen3VLMoeTextSparseMoeBlock.forward = patched_forward
logger.info("Monkey patched Qwen3VLMoeTextSparseMoeBlock.forward to fix router_weights bug")
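# Hedged usage sketch (module path inferred from this file's location; not executed here): the patch replaces
# the class-level forward, so it should be applied once at start-up, before any MoE forward pass, e.g.
#
#     from verl.models.transformers.qwen3_vl import patch_qwen3_vl_moe_sparse_moe_block_forward
#     patch_qwen3_vl_moe_sparse_moe_block_forward()
#     # ... then build/run the Qwen3-VL-MoE model as usual; every Qwen3VLMoeTextSparseMoeBlock picks up the fix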
|
verl__models__transformers__tiled_mlp.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
FSDP2-compatible TiledMLP implementation for memory-efficient MLP computation.
This module provides a tiled MLP implementation that reduces peak memory usage
by processing the MLP forward/backward pass in chunks (tiles). This is particularly
useful for large models with FSDP2 training.
"""
import threading
from typing import Optional
import torch
import torch.nn as nn
class GradientAccumulator:
"""Gradient accumulator for TiledMLP (FSDP compatible).
This class manages gradient accumulation across multiple shards during
the backward pass of TiledMLP. It ensures correct gradient computation
when processing input in chunks.
"""
def __init__(self, params: list[torch.nn.Parameter], total_shards: int, dtype: torch.dtype = None):
self.params = params
self.total_shards = total_shards
self.grad_accumulation_dtype = dtype or torch.float32
self.accumulated_grads = {}
self.hooks = []
self.lock = threading.Lock()
for param in self.params:
if param.grad is not None:
self.accumulated_grads[param] = param.grad.to(self.grad_accumulation_dtype)
param.grad = None
else:
self.accumulated_grads[param] = torch.zeros_like(param, dtype=self.grad_accumulation_dtype)
def install_hooks(self, is_last_shard: bool):
"""Install gradient hooks for the current shard."""
self._remove_hooks()
def create_hook(param):
def hook(grad):
with self.lock:
grad_to_accum_dtype = grad.to(self.grad_accumulation_dtype)
self.accumulated_grads[param] += grad_to_accum_dtype
if is_last_shard:
param.grad = None # Critical: prevent double accumulation
final_grad = self.accumulated_grads[param].to(param.dtype)
return final_grad
return None
return hook
for param in self.params:
if param.requires_grad:
hook = param.register_hook(create_hook(param))
self.hooks.append(hook)
def _remove_hooks(self):
"""Remove all registered hooks."""
for hook in self.hooks:
hook.remove()
self.hooks.clear()
def cleanup(self):
"""Cleanup hooks and resources."""
self._remove_hooks()
class TiledMLP(torch.autograd.Function):
"""TiledMLP implementation for memory-efficient MLP computation.
This autograd function processes MLP forward/backward in tiles (chunks)
to reduce peak memory usage. Compatible with FSDP2.
"""
@staticmethod
def forward(ctx, fn, module, x, shards, compute_params):
ctx.fn = fn
ctx.module = module
ctx.shards = shards
ctx.compute_params = [p for p in compute_params if p.requires_grad]
ctx.save_for_backward(x)
# Split on dim=-2 (seqlen dimension) following Liger Kernel style
x_shards = list(torch.chunk(x, chunks=shards, dim=-2))
with torch.no_grad():
output_shards = [fn(module, x_shard) for x_shard in x_shards]
output_unsharded = torch.cat(output_shards, dim=-2)
return output_unsharded
@staticmethod
def backward(ctx, *grads):
fn = ctx.fn
(x,) = ctx.saved_tensors
module = ctx.module
shards = ctx.shards
compute_params = ctx.compute_params
x_requires_grad = x.requires_grad
x = x.detach()
x.requires_grad_(x_requires_grad)
# Flatten to [bs*seqlen, hidden_size]
hidden_size = x.shape[-1]
x_shape_orig = x.shape
x = x.view(-1, hidden_size)
incoming_grad = grads[0].view(-1, hidden_size)
# Pre-allocate input gradient
x_grad = torch.zeros_like(x)
# Split on dim=0
x_shards = list(torch.chunk(x, chunks=shards, dim=0))
grad_accumulator = GradientAccumulator(compute_params, shards, dtype=x.dtype)
for i, x_shard in enumerate(x_shards):
x_shard.requires_grad_(x_requires_grad)
shard_step = x_shards[i].shape[0]
shard_offset = i * x_shards[0].shape[0]
# narrow(0, ...) creates a contiguous view that can receive gradients
x_shard.grad = x_grad.narrow(0, shard_offset, shard_step)
incoming_grad_shard = incoming_grad.narrow(0, shard_offset, shard_step)
is_last_shard = i + 1 == shards
grad_accumulator.install_hooks(is_last_shard)
with torch.enable_grad():
output = fn(module, x_shard)
torch.autograd.backward(output, incoming_grad_shard)
grad_accumulator.cleanup()
del grad_accumulator
# Restore original shape
x_grad = x_grad.view(x_shape_orig) if x_requires_grad else None
return (None, None, x_grad, None, None)
def _mlp_forward_fn(module, x):
"""Forward function for LlamaMLP / Qwen2MLP / Qwen3MLP style."""
return module.down_proj(module.act_fn(module.gate_proj(x)) * module.up_proj(x))
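# Rough intuition (illustration, not a measured figure): the dominant activation in this SwiGLU block is the
# intermediate act_fn(gate_proj(x)) * up_proj(x) tensor of shape [*, intermediate_size]; running the forward
# shard-by-shard means only roughly 1/shards of that intermediate activation is alive at any one time.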
# ============================================================================
# Monkey Patch Functions
# ============================================================================
# Model type to MLP class mapping
_MODEL_TYPE_TO_MLP_CLASS = {
"llama": ("transformers.models.llama.modeling_llama", "LlamaMLP"),
"qwen2": ("transformers.models.qwen2.modeling_qwen2", "Qwen2MLP"),
"qwen2_5": ("transformers.models.qwen2.modeling_qwen2", "Qwen2MLP"), # Qwen2.5 uses Qwen2 MLP
"qwen3": ("transformers.models.qwen3.modeling_qwen3", "Qwen3MLP"),
}
def apply_tiled_mlp_monkey_patch(
num_shards: int = 4,
model_type: Optional[str] = None,
):
"""Apply TiledMLP monkey patch based on model_type.
This function MUST be called BEFORE model instantiation to take effect.
It patches the MLP classes in transformers library to use TiledMLP for
memory-efficient computation during training.
Args:
num_shards: Number of shards to split the input into. Higher values
reduce peak memory but may slightly impact performance.
model_type: The model type string (e.g., "llama", "qwen2", "qwen3").
If None, patches all supported model types.
Returns:
List of patched class names.
"""
if model_type is None:
types_to_patch = list(_MODEL_TYPE_TO_MLP_CLASS.keys())
elif model_type in _MODEL_TYPE_TO_MLP_CLASS:
types_to_patch = [model_type]
else:
raise ValueError(
f"TiledMLP does not support model_type='{model_type}'. "
f"Supported types: {list(_MODEL_TYPE_TO_MLP_CLASS.keys())}. "
f"For SwiGLU-style MLPs, you can add support by extending _MODEL_TYPE_TO_MLP_CLASS "
f"in verl/models/transformers/tiled_mlp.py"
)
patched_classes = []
for mtype in types_to_patch:
module_path, class_name = _MODEL_TYPE_TO_MLP_CLASS[mtype]
try:
import importlib
module = importlib.import_module(module_path)
mlp_class = getattr(module, class_name)
_patch_mlp_class(mlp_class, _mlp_forward_fn, num_shards)
if class_name not in patched_classes:
patched_classes.append(class_name)
except (ImportError, AttributeError) as e:
print(f"Warning: Could not patch {mtype} MLP: {e}")
if patched_classes:
print(f"TiledMLP monkey patch applied to: {', '.join(patched_classes)} (shards={num_shards})")
return patched_classes
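# Hedged usage sketch (checkpoint name is a placeholder): per the docstring above, the patch must run before the
# model is instantiated so the tiled forward is in place when the MLP modules are built, e.g.
#
#     from transformers import AutoModelForCausalLM
#     from verl.models.transformers.tiled_mlp import apply_tiled_mlp_monkey_patch
#
#     apply_tiled_mlp_monkey_patch(num_shards=4, model_type="qwen2")
#     model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-7B-Instruct")  # placeholder checkpoint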
def _patch_mlp_class(mlp_class: type[nn.Module], forward_fn, num_shards: int):
"""Patch a single MLP class to use TiledMLP."""
def tiled_forward(self, x):
compute_params = [p for p in self.parameters() if p.requires_grad]
return TiledMLP.apply(forward_fn, self, x, num_shards, compute_params)
mlp_class.forward = tiled_forward
|
verl__single_controller__base__decorator.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from functools import partial, wraps
from types import FunctionType
from tensordict import TensorDict
from verl.protocol import DataProtoFuture, _padding_size_key
from verl.utils.py_functional import DynamicEnum
from verl.utils.tensordict_utils import chunk_tensordict, concat_tensordict, contiguous
from verl.utils.transferqueue_utils import BatchMeta
# here we add a magic number to avoid clashing with an attribute that a user-defined function may already have
MAGIC_ATTR = "attrs_3141562937"
class Dispatch(DynamicEnum):
"""Enum class defining different dispatch modes for distributed computation.
Each mode represents a specific strategy for distributing data across
different ranks in a distributed system. The modes are used to control
how data is partitioned and processed across different worker groups.
"""
_registry = {}
_next_value = 0
def init_predefined_dispatch_mode():
Dispatch.register("RANK_ZERO")
Dispatch.register("ONE_TO_ALL")
Dispatch.register("ALL_TO_ALL")
Dispatch.register("DP_COMPUTE")
Dispatch.register("DP_COMPUTE_PROTO")
Dispatch.register("DP_COMPUTE_PROTO_WITH_FUNC")
Dispatch.register("DP_COMPUTE_METRIC")
# This is a special dispatch mode for vllm ExternalRayDistributedExecutor
Dispatch.register("DIRECT_ROLLOUT_METHOD")
class Execute(DynamicEnum):
"""Enum class defining different execution modes for distributed computation.
These modes control how a function should be executed across different ranks
in a distributed system.
"""
_registry = {}
_next_value = 0
def init_predefined_execute_mode():
Execute.register("ALL")
Execute.register("RANK_ZERO")
# Initialize the two Dynamic Enum Classes
init_predefined_dispatch_mode()
init_predefined_execute_mode()
def _consolidate_tuple_td(chunked_arg):
return tuple(contiguous(val).consolidate() for val in chunked_arg)
def _split_args_kwargs_data_proto(chunks, *args, **kwargs):
from verl.protocol import DataProto, DataProtoFuture
splitted_args = []
for arg in args:
assert isinstance(arg, DataProto | DataProtoFuture | BatchMeta | TensorDict)
if isinstance(arg, TensorDict):
chunked_arg = chunk_tensordict(arg, chunks)
chunked_arg = _consolidate_tuple_td(chunked_arg)
else:
chunked_arg = arg.chunk(chunks=chunks)
assert len(chunked_arg) == chunks
splitted_args.append(chunked_arg)
splitted_kwargs = {}
for key, val in kwargs.items():
assert isinstance(val, DataProto | DataProtoFuture | BatchMeta | TensorDict)
if isinstance(val, TensorDict):
chunked_kwarg = chunk_tensordict(val, chunks)
chunked_kwarg = _consolidate_tuple_td(chunked_kwarg)
else:
chunked_kwarg = val.chunk(chunks=chunks)
assert len(chunked_kwarg) == chunks
splitted_kwargs[key] = chunked_kwarg
return splitted_args, splitted_kwargs
def _split_args_kwargs_data_proto_with_auto_padding(chunks, *args, **kwargs):
from verl.protocol import DataProto, DataProtoFuture
data_proto_len = None
padding_size = None
def _padding_and_split_data(obj, chunks):
nonlocal data_proto_len, padding_size
assert isinstance(obj, DataProto | DataProtoFuture)
if isinstance(obj, DataProto) and obj.is_padding_enabled():
# for padding, we only support DataProto with same length
if data_proto_len is None:
data_proto_len = len(obj)
padding_size = (chunks - (data_proto_len % chunks)) if (data_proto_len % chunks > 0) else 0
else:
assert data_proto_len == len(obj), (
f"expecting all arg share same length of {data_proto_len}, but got {len(obj)}"
)
obj.padding(padding_size=padding_size)
return obj.chunk(chunks=chunks)
splitted_args = [_padding_and_split_data(arg, chunks) for arg in args]
splitted_kwargs = {key: _padding_and_split_data(val, chunks) for key, val in kwargs.items()}
if padding_size is not None:
splitted_kwargs[_padding_size_key] = padding_size
return splitted_args, splitted_kwargs
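# Worked example of the padding arithmetic above (illustration only): with data_proto_len = 10 and chunks = 4,
# 10 % 4 = 2 > 0 so padding_size = 4 - 2 = 2; the DataProto is padded to length 12, chunked into 4 pieces of 3,
# and padding_size is forwarded under _padding_size_key so the collect side can drop the padded rows afterwards.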
def dispatch_one_to_all(worker_group, *args, **kwargs):
args = tuple([arg] * worker_group.world_size for arg in args)
kwargs = {k: [v] * worker_group.world_size for k, v in kwargs.items()}
return args, kwargs
def dummy_direct_rollout_call(worker_group, *args, **kwargs):
raise NotImplementedError("Direct rollout call is forbidden.")
def dispatch_all_to_all(worker_group, *args, **kwargs):
return args, kwargs
def collect_all_to_all(worker_group, output):
return output
def _concat_data_proto_or_future(output: list):
import ray
from verl.protocol import DataProto, DataProtoFuture
    # make sure all the elements in output have the same type
for o in output:
assert type(o) is type(output[0])
o = output[0]
if isinstance(o, DataProto):
return DataProto.concat(output)
elif isinstance(o, ray.ObjectRef):
return DataProtoFuture.concat(output)
elif isinstance(o, BatchMeta):
return BatchMeta.concat(output)
elif isinstance(o, TensorDict):
return concat_tensordict(output)
else:
raise NotImplementedError
def dispatch_dp_compute(worker_group, *args, **kwargs):
from verl.single_controller.base.worker_group import WorkerGroup
assert isinstance(worker_group, WorkerGroup)
for arg in args:
assert isinstance(arg, tuple | list) and len(arg) == worker_group.world_size
for k, v in kwargs.items():
assert isinstance(v, tuple | list) and len(v) == worker_group.world_size
return args, kwargs
def collect_dp_compute(worker_group, output):
from verl.single_controller.base.worker_group import WorkerGroup
assert isinstance(worker_group, WorkerGroup)
assert len(output) == worker_group.world_size
return output
def dispatch_dp_compute_data_proto(worker_group, *args, **kwargs):
from verl.single_controller.base.worker_group import WorkerGroup
assert isinstance(worker_group, WorkerGroup)
    # Note: enable auto padding for dp compute DataProto
splitted_args, splitted_kwargs = _split_args_kwargs_data_proto_with_auto_padding(
worker_group.world_size,
*args,
**kwargs,
)
return splitted_args, splitted_kwargs
def dispatch_dp_compute_data_proto_with_func(worker_group, *args, **kwargs):
from verl.single_controller.base.worker_group import WorkerGroup
assert isinstance(worker_group, WorkerGroup)
    assert isinstance(args[0], FunctionType)  # NOTE: the first positional arg must be a function!
splitted_args, splitted_kwargs = _split_args_kwargs_data_proto(worker_group.world_size, *args[1:], **kwargs)
splitted_args_with_func = [[args[0]] * worker_group.world_size] + splitted_args
return splitted_args_with_func, splitted_kwargs
def collect_dp_compute_data_proto(worker_group, output):
import ray
from verl.protocol import DataProto
for o in output:
assert isinstance(o, DataProto | ray.ObjectRef), f"expecting {o} to be DataProto, but got {type(o)}"
output = collect_dp_compute(worker_group, output)
return _concat_data_proto_or_future(output)
def dispatch_nd_compute(dp_rank_mapping: list[int], dp_size, worker_group, *args, **kwargs):
import os
from verl.single_controller.base.worker_group import WorkerGroup
from verl.utils.ray_utils import parallel_put
assert isinstance(worker_group, WorkerGroup)
max_workers = max(1, min(len(args[0]), os.cpu_count()))
args = [parallel_put(arg, max_workers=max_workers) for arg in args]
kwargs = {k: parallel_put(v, max_workers=max_workers) for k, v in kwargs.items()}
all_args = []
for arg in args:
assert isinstance(arg, tuple | list) and len(arg) == dp_size
transformed_args = []
for i in range(worker_group.world_size):
local_dp_rank = dp_rank_mapping[i]
transformed_args.append(arg[local_dp_rank])
all_args.append(transformed_args)
all_args = tuple(all_args)
all_kwargs = {}
for k, v in kwargs.items():
assert isinstance(v, tuple | list) and len(v) == dp_size
transformed_v = []
for i in range(worker_group.world_size):
local_dp_rank = dp_rank_mapping[i]
transformed_v.append(v[local_dp_rank])
all_kwargs[k] = transformed_v
return all_args, all_kwargs
def collect_nd_compute(collect_mask: list[bool], worker_group, output):
from verl.single_controller.base.worker_group import WorkerGroup
assert isinstance(worker_group, WorkerGroup)
assert len(output) == worker_group.world_size
output_in_dp = []
for global_rank in range(worker_group.world_size):
collect_dp_rank = collect_mask[global_rank]
if collect_dp_rank:
output_in_dp.append(output[global_rank])
return output_in_dp
def dispatch_nd_compute_dataproto(dp_rank_mapping: list[int], dp_size, worker_group, *args, **kwargs):
splitted_args, splitted_kwargs = _split_args_kwargs_data_proto(dp_size, *args, **kwargs)
return dispatch_nd_compute(dp_rank_mapping, dp_size, worker_group, *splitted_args, **splitted_kwargs)
def collect_nd_compute_dataproto(collect_mask: list[bool], worker_group, output):
output = collect_nd_compute(collect_mask, worker_group, output)
import ray
from verl.protocol import DataProto
for o in output:
assert isinstance(o, DataProto | ray.ObjectRef | BatchMeta | TensorDict), (
f"expecting {o} to be DataProto | ray.ObjectRef | BatchMeta | TensorDict, but got {type(o)}"
)
return _concat_data_proto_or_future(output)
def dispatch_lazy_compute_data_proto(mesh_name, worker_group, *args, **kwargs):
from verl.single_controller.base.worker_group import WorkerGroup
assert isinstance(worker_group, WorkerGroup)
# query dispatch info of the worker group
if mesh_name not in worker_group._dispatch_info:
worker_group._dispatch_info[mesh_name] = worker_group._query_dispatch_info(mesh_name)
assert len(worker_group._dispatch_info[mesh_name]) == worker_group.world_size
dp_rank_mapping = worker_group._dispatch_info[mesh_name]
# perform dispatch
dp_size = max(dp_rank_mapping) + 1
return dispatch_nd_compute_dataproto(dp_rank_mapping, dp_size, worker_group, *args, **kwargs)
def collect_lazy_compute_data_proto(mesh_name, worker_group, *args, **kwargs):
from verl.single_controller.base.worker_group import WorkerGroup
assert isinstance(worker_group, WorkerGroup)
# the dispatch info is stored in the worker group
assert mesh_name in worker_group._dispatch_info
if mesh_name not in worker_group._collect_info:
worker_group._collect_info[mesh_name] = worker_group._query_collect_info(mesh_name)
assert len(worker_group._collect_info[mesh_name]) == worker_group.world_size
    # a list of booleans indicating, per global rank, whether its output should be collected
collect_mask = worker_group._collect_info[mesh_name]
    # perform collect
return collect_nd_compute_dataproto(collect_mask, worker_group, *args, **kwargs)
def make_nd_compute_dataproto_dispatch_fn(mesh_name):
return {
"dispatch_fn": partial(dispatch_lazy_compute_data_proto, mesh_name),
"collect_fn": partial(collect_lazy_compute_data_proto, mesh_name),
}
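# Hedged usage sketch (method and mesh names are illustrative): the returned dict is a custom dispatch_mode
# for the `register` decorator defined below, e.g.
#
#     @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="actor"))
#     def update_actor(self, data):
#         ...
#
# On first use it queries and caches the worker group's dispatch/collect info for the "actor" mesh.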
# Global registry for dispatch mode.
DISPATCH_MODE_FN_REGISTRY = {
Dispatch.ONE_TO_ALL: {
"dispatch_fn": dispatch_one_to_all,
"collect_fn": collect_all_to_all,
},
Dispatch.ALL_TO_ALL: {
"dispatch_fn": dispatch_all_to_all,
"collect_fn": collect_all_to_all,
},
Dispatch.DP_COMPUTE: {"dispatch_fn": dispatch_dp_compute, "collect_fn": collect_dp_compute},
Dispatch.DP_COMPUTE_PROTO: {
"dispatch_fn": dispatch_dp_compute_data_proto,
"collect_fn": collect_dp_compute_data_proto,
},
Dispatch.DP_COMPUTE_PROTO_WITH_FUNC: {
"dispatch_fn": dispatch_dp_compute_data_proto_with_func,
"collect_fn": collect_dp_compute_data_proto,
},
Dispatch.DP_COMPUTE_METRIC: {"dispatch_fn": dispatch_dp_compute_data_proto, "collect_fn": collect_dp_compute},
Dispatch.DIRECT_ROLLOUT_METHOD: {
"dispatch_fn": dummy_direct_rollout_call,
"collect_fn": dummy_direct_rollout_call,
},
}
def get_predefined_dispatch_fn(dispatch_mode):
return DISPATCH_MODE_FN_REGISTRY[dispatch_mode]
def register_dispatch_mode(dispatch_mode_name, dispatch_fn, collect_fn):
"""
Register a new dispatch mode.
"""
dispatch_mode = Dispatch.register(dispatch_mode_name)
_check_dispatch_mode(dispatch_mode)
assert dispatch_mode not in DISPATCH_MODE_FN_REGISTRY, f"dispatch_mode_name {dispatch_mode_name} already exists"
DISPATCH_MODE_FN_REGISTRY[dispatch_mode] = {"dispatch_fn": dispatch_fn, "collect_fn": collect_fn}
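# Hedged sketch of registering a custom mode (names are illustrative): dispatch_fn/collect_fn follow the same
# signatures as the predefined ones above, e.g.
#
#     def my_dispatch(worker_group, *args, **kwargs):
#         return dispatch_one_to_all(worker_group, *args, **kwargs)
#
#     def my_collect(worker_group, output):
#         return output
#
#     register_dispatch_mode("MY_MODE", my_dispatch, my_collect)
#     # assuming DynamicEnum exposes the new name, it can then be used as @register(dispatch_mode=Dispatch.MY_MODE)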
def update_dispatch_mode(dispatch_mode, dispatch_fn, collect_fn):
"""
Update the dispatch mode.
"""
_check_dispatch_mode(dispatch_mode)
assert dispatch_mode in DISPATCH_MODE_FN_REGISTRY, f"dispatch_mode {dispatch_mode} not found"
DISPATCH_MODE_FN_REGISTRY[dispatch_mode] = {"dispatch_fn": dispatch_fn, "collect_fn": collect_fn}
def get_predefined_execute_fn(execute_mode):
"""
    Note that here we only ask execute_all and execute_rank_zero to be implemented;
    the choice of how these two functions handle the 'blocking' argument is left to users.
"""
predefined_execute_mode_fn = {
Execute.ALL: {"execute_fn_name": "execute_all"},
Execute.RANK_ZERO: {"execute_fn_name": "execute_rank_zero"},
}
return predefined_execute_mode_fn[execute_mode]
def _check_dispatch_mode(dispatch_mode):
assert isinstance(dispatch_mode, Dispatch | dict), (
f"dispatch_mode must be a Dispatch or a Dict. Got {dispatch_mode}"
)
if isinstance(dispatch_mode, dict):
necessary_keys = ["dispatch_fn", "collect_fn"]
for key in necessary_keys:
assert key in dispatch_mode, f"key {key} should be in dispatch_mode if it is a dictionary"
def _check_execute_mode(execute_mode):
    assert isinstance(execute_mode, Execute), f"execute_mode must be an Execute. Got {execute_mode}"
def _materialize_futures(*args, **kwargs):
new_args = []
for arg in args:
if isinstance(arg, DataProtoFuture):
arg = arg.get()
        # add more types to materialize here if needed
new_args.append(arg)
for k, v in kwargs.items():
if isinstance(v, DataProtoFuture):
kwargs[k] = v.get()
new_args = tuple(new_args)
return new_args, kwargs
def register(dispatch_mode=Dispatch.ALL_TO_ALL, execute_mode=Execute.ALL, blocking=True, materialize_futures=True):
"""Register a function with distributed execution configuration.
This decorator registers a function with specific dispatch and execution modes
for distributed computation. It handles both synchronous and asynchronous
functions, and optionally materializes futures before execution.
Args:
dispatch_mode:
Dispatch mode for computation distribution. Default: Dispatch.ALL_TO_ALL.
execute_mode:
Execute mode for computation distribution. Default: Execute.ALL.
blocking:
Whether the execution should be blocking. Defaults to True.
materialize_futures:
Whether to materialize the data before dispatching. Defaults to True.
Returns:
A decorator that wraps the original function with distributed execution
configuration.
"""
from verl.utils.transferqueue_utils import tqbridge
_check_dispatch_mode(dispatch_mode=dispatch_mode)
_check_execute_mode(execute_mode=execute_mode)
def decorator(func):
func = tqbridge(dispatch_mode=dispatch_mode)(func)
@wraps(func)
def inner(*args, **kwargs):
if materialize_futures:
args, kwargs = _materialize_futures(*args, **kwargs)
return func(*args, **kwargs)
@wraps(func)
async def async_inner(*args, **kwargs):
if materialize_futures:
args, kwargs = _materialize_futures(*args, **kwargs)
return await func(*args, **kwargs)
wrapper = async_inner if inspect.iscoroutinefunction(func) else inner
attrs = {"dispatch_mode": dispatch_mode, "execute_mode": execute_mode, "blocking": blocking}
setattr(wrapper, MAGIC_ATTR, attrs)
return wrapper
return decorator
|
verl__single_controller__base__worker.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
the class for Worker
"""
import os
import socket
import warnings
from dataclasses import dataclass
import ray
from verl.utils.device import (
get_torch_device,
get_visible_devices_keyword,
is_npu_available,
)
from .decorator import Dispatch, Execute, register
@dataclass
class DistRankInfo:
tp_rank: int
dp_rank: int
pp_rank: int
cp_rank: int
@dataclass
class DistGlobalInfo:
tp_size: int
dp_size: int
pp_size: int
cp_size: int
class WorkerHelper:
@staticmethod
def _get_node_ip():
if os.getenv("WG_BACKEND", None) == "ray":
return ray.util.get_node_ip_address()
else:
raise NotImplementedError("WG_BACKEND now just support ray mode.")
@staticmethod
def _get_free_port():
with socket.socket() as sock:
sock.bind(("", 0))
return sock.getsockname()[1]
def get_availale_master_addr_port(self):
warnings.warn(
"This function is deprecated due to typo in name; Please use `get_available_master_addr_port` instead",
stacklevel=2,
)
return self.get_available_master_addr_port()
def get_available_master_addr_port(self):
return self._get_node_ip().strip("[]"), str(self._get_free_port())
# we assume that in each WorkerGroup, there is a Master Worker
class Worker(WorkerHelper):
"""A distributed worker that handles initialization and configuration for distributed training.
This class manages worker initialization, configuration, and provides methods for executing
distributed operations. It handles communication settings, device configuration, and worker
metadata management.
"""
fused_worker_attr_name = "fused_worker_dict"
def _register_dispatch_collect_info(self, mesh_name: str, dp_rank: int, is_collect: bool):
"""Register the dp_rank for a given mesh name. This function is meant to be called by the worker
Args:
mesh_name (str):
Name of the mesh to register dp_rank for.
dp_rank (int):
dp_rank to register for the given mesh name.
is_collect (bool):
Whether the dp_rank is used for collect.
"""
if mesh_name in self.__dispatch_dp_rank or mesh_name in self.__collect_dp_rank:
raise ValueError(f"mesh_name {mesh_name} has been registered")
self.__dispatch_dp_rank[mesh_name] = dp_rank
self.__collect_dp_rank[mesh_name] = is_collect
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def _query_dispatch_info(self, mesh_name: str):
"""Query the dispatch info for a given mesh name.
Args:
mesh_name (str):
Name of the mesh to query dispatch info for.
Returns:
int:
The dp_rank for the given mesh name.
"""
assert mesh_name in self.__dispatch_dp_rank, f"{mesh_name} is not registered in {self.__class__.__name__}"
        # note that each rank stores its own dp_rank
return self.__dispatch_dp_rank[mesh_name]
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def _query_collect_info(self, mesh_name: str):
return self.query_collect_info(mesh_name)
def query_collect_info(self, mesh_name: str):
"""Query the collect info for a given mesh name.
Args:
mesh_name (str):
Name of the mesh to query collect info for.
Returns:
bool:
Whether the dp_rank is used for collect.
"""
assert mesh_name in self.__collect_dp_rank, f"{mesh_name} is not registered in {self.__class__.__name__}"
return self.__collect_dp_rank[mesh_name]
def get_dispatch_collect(self):
"""Get all registered dispatch and collect dp_ranks.
Returns:
dict[str, int]:
A dictionary mapping mesh names to their dispatch dp_ranks.
dict[str, bool]:
A dictionary mapping mesh names to whether they are used for collect.
"""
return {"dispatch_dp_rank": self.__dispatch_dp_rank, "collect_dp_rank": self.__collect_dp_rank}
def set_dispatch_collect(self, mesh_name: str, dispatch_dp_rank: dict[str, int], collect_dp_rank: dict[str, bool]):
"""Set the dispatch and collect dp_ranks for all registered meshes.
Args:
mesh_name (str): Mesh name to set dispatch and collect dp_ranks for.
dispatch_dp_rank (dict[str, int]):
A dictionary mapping mesh names to their dispatch dp_ranks.
collect_dp_rank (dict[str, bool]):
A dictionary mapping mesh names to whether they are used for collect.
"""
assert mesh_name not in self.__dispatch_dp_rank, (
f"{mesh_name} is already registered, {self.__dispatch_dp_rank.keys()}"
)
assert mesh_name not in self.__collect_dp_rank, (
f"{mesh_name} is already registered, {self.__collect_dp_rank.keys()}"
)
for dp_rank in dispatch_dp_rank.values():
self.__dispatch_dp_rank[mesh_name] = dp_rank
for is_collect in collect_dp_rank.values():
self.__collect_dp_rank[mesh_name] = is_collect
@register(dispatch_mode=Dispatch.ONE_TO_ALL, blocking=True)
def create_transferqueue_client(self, config):
from verl.utils.transferqueue_utils import create_transferqueue_client
create_transferqueue_client(
client_id=f"worker_{self.rank}",
config=config.transfer_queue,
)
@classmethod
def env_keys(cls):
"""The keys of the environment variables that are used to configure the Worker."""
return [
"WORLD_SIZE",
"RANK",
"LOCAL_WORLD_SIZE",
"LOCAL_RANK",
"MASTER_ADDR",
"MASTER_PORT",
get_visible_devices_keyword().upper(),
]
def __init__(self, cuda_visible_devices=None) -> None:
"""Initialize the worker with environment settings and device configuration.
Args:
cuda_visible_devices (str, optional):
CUDA visible devices configuration. Defaults to None.
"""
        # construct metadata from environment variables. Note that the import must be inside the method
        # because it is executed remotely
import os
self._setup_env_cuda_visible_devices()
world_size = int(os.environ["WORLD_SIZE"])
rank = int(os.environ["RANK"])
self._rank = rank
self._world_size = world_size
master_addr = os.environ["MASTER_ADDR"]
master_port = os.environ["MASTER_PORT"]
local_world_size = int(os.getenv("LOCAL_WORLD_SIZE", "1"))
local_rank = int(os.getenv("LOCAL_RANK", "0"))
store = {
"_world_size": world_size,
"_rank": rank,
"_local_world_size": local_world_size,
"_local_rank": local_rank,
"_master_addr": master_addr,
"_master_port": master_port,
}
if cuda_visible_devices is not None:
store[f"_{get_visible_devices_keyword()}".lower()] = cuda_visible_devices
self._configure_with_store(store=store)
self.fused_worker_dict = {}
self.__dispatch_dp_rank = {}
self.__collect_dp_rank = {}
def get_fused_worker_by_name(self, worker_name: str):
"""Get a fused worker by its name.
Args:
worker_name (str):
Name of the worker to retrieve
"""
return self.fused_worker_dict.get(worker_name, None)
def _setup_env_cuda_visible_devices(self):
from verl.utils.ray_utils import ray_noset_visible_devices
is_ray_noset_visible_devices = ray_noset_visible_devices()
        # Prevent use of clashing `{CUDA/HIP/ROCR}_VISIBLE_DEVICES`
rocr_val = os.environ.get("ROCR_VISIBLE_DEVICES", None)
hip_val = os.environ.get("HIP_VISIBLE_DEVICES", None)
cuda_val = os.environ.get("CUDA_VISIBLE_DEVICES", None)
if hip_val:
# Switch the use of HIP_VISIBLE_DEVICES to CUDA_VISIBLE_DEVICES for consistency.
# Make sure that the HIP_VISIBLE_DEVICES is set to the same value as CUDA_VISIBLE_DEVICES
# at this point.
val = os.environ.pop("HIP_VISIBLE_DEVICES")
hip_val = None
if cuda_val:
assert val == cuda_val, (
f"Please use the same HIP_VISIBLE_DEVICES or CUDA_VISIBLE_DEVICES, inconsistant values "
f"found: {val} and {cuda_val}."
)
else:
cuda_val = val
os.environ["CUDA_VISIBLE_DEVICES"] = val
# os.environ["HIP_VISIBLE_DEVICES"] = val
if rocr_val:
# You must take care if both HIP/CUDA and ROCR env vars are set as they have
# different meanings. Both env vars accept either a list of ints or a
# list of UUIDs. The ROCR env var is processed first which then reduces
# the number of GPUs that HIP can select from.
# https://github.com/pytorch/pytorch/pull/144026
            # To avoid this complexity, we simply raise an error if both are set
            # (also to stay consistent with Ray's behavior as of 2.45.0).
            # Otherwise, we copy ROCR_VISIBLE_DEVICES into CUDA_VISIBLE_DEVICES
# and remove ROCR_VISIBLE_DEVICES.
if cuda_val:
raise ValueError("Please don't set ROCR_VISIBLE_DEVICES when HIP/CUDA_VISIBLE_DEVICES is set.")
cuda_val = os.environ.pop("ROCR_VISIBLE_DEVICES")
os.environ["CUDA_VISIBLE_DEVICES"] = cuda_val
rocr_val = None
if is_ray_noset_visible_devices:
# NOTE: Ray will automatically set the *_VISIBLE_DEVICES
# environment variable for each actor, unless
# RAY_EXPERIMENTAL_NOSET_*_VISIBLE_DEVICES is set,
# so we need to set local rank when the flag is set.
device_name = "NPU" if is_npu_available else "GPU"
local_rank = ray.get_runtime_context().get_accelerator_ids()[device_name][0]
os.environ["LOCAL_RANK"] = local_rank
get_torch_device().set_device(int(local_rank))
def _configure_with_store(self, store: dict):
"""
        This function should only be called from within WorkerGroup
"""
store_env_dict = {f"_{key.lower()}": store.get(f"_{key.lower()}", None) for key in type(self).env_keys()}
self.__dict__.update(store_env_dict) # this is hacky
# print(f"__dict__: {self.__dict__}")
for key in type(self).env_keys():
val = self.__dict__.get(f"_{key.lower()}", None)
if val is not None:
# print(f"set {key} to {val}")
os.environ[key] = str(val)
os.environ["REDIS_STORE_SERVER_HOST"] = (
str(self._master_addr).replace("[", "").replace("]", "") if self._master_addr else ""
)
def get_master_addr_port(self):
"""Get the master address and port for distributed communication."""
return self._master_addr, self._master_port
def get_cuda_visible_devices(self):
"""Get the CUDA visible devices configuration."""
import os
visible_devices = os.environ.get(get_visible_devices_keyword().upper(), "not set")
return visible_devices
@property
def world_size(self):
"""Get the total number of workers in the distributed setup."""
return self._world_size
@property
def rank(self):
"""Get the rank of this worker in the distributed setup."""
return self._rank
@register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO_WITH_FUNC)
def execute_with_func_generator(self, func, *args, **kwargs):
"""Execute a function with function generator dispatch mode.
Args:
func:
Function to execute
*args:
Positional arguments for the function
**kwargs:
Keyword arguments for the function
"""
ret_proto = func(self, *args, **kwargs)
return ret_proto
@register(dispatch_mode=Dispatch.ALL_TO_ALL, execute_mode=Execute.RANK_ZERO)
def execute_func_rank_zero(self, func, *args, **kwargs):
"""Execute a function in rank zero execution mode.
Args:
func:
Function to execute
*args:
Positional arguments for the function
**kwargs:
Keyword arguments for the function
"""
result = func(*args, **kwargs)
return result
|
verl__single_controller__ray__base.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import logging
import os
import socket
from copy import deepcopy
from dataclasses import dataclass, field
from typing import Any, Optional
import numpy as np
import ray
from ray.experimental.state.api import get_actor
from ray.util.placement_group import PlacementGroup, placement_group
from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy, PlacementGroupSchedulingStrategy
from verl.protocol import DataProto, _padding_size_key
from verl.single_controller.base import ClassWithInitArgs, ResourcePool, Worker, WorkerGroup
from verl.single_controller.base.decorator import MAGIC_ATTR, Dispatch
from verl.utils.device import get_device_name
from verl.utils.py_functional import temp_env_var
__all__ = ["Worker"]
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
def get_random_string(length: int) -> str:
import random
import string
letters_digits = string.ascii_letters + string.digits
return "".join(random.choice(letters_digits) for _ in range(length))
def func_generator(self, method_name, dispatch_fn, collect_fn, execute_fn, blocking):
class Functor:
def __call__(this, *args, **kwargs):
args, kwargs = dispatch_fn(self, *args, **kwargs)
padding_count = kwargs.pop(_padding_size_key, 0)
output = execute_fn(method_name, *args, **kwargs)
if blocking:
output = ray.get(output)
output = collect_fn(self, output)
if padding_count > 0:
if isinstance(output, DataProto):
indices = [i for i in range(len(output))][:-padding_count]
output = output.select_idxs(indices)
elif isinstance(output, list):
output = output[:-padding_count]
return output
# use class type to pass the method_name to get a better observability
return type(method_name, (Functor,), {})()
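# Editorial note: the generated callable runs the registered pipeline
# dispatch_fn -> execute_fn -> (ray.get when blocking) -> collect_fn, and finally
# strips any trailing rows that were added by dispatch-side padding.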
def sort_placement_group_by_node_ip(pgs: list[PlacementGroup]) -> list[PlacementGroup]:
"""
Sort the placement groups by node ip, all bundles in a single placement group should be on the same node.
FSDPCheckpointManager saves sharded model states and optimizer states in local storage, which requires RANK
to be consistent across nodes when resuming from a checkpoint.
With this function, if there's only one resource pool and there's no node change, RANK should be consistent
across nodes in multiple ray jobs, even if the whole ray cluster is restarted.
"""
node_ip = {node["NodeID"]: node["NodeManagerAddress"] for node in ray.nodes()}
pg_ip = {}
for pg in pgs:
specs = ray._private.state.state.placement_group_table(pg.id)
# all bundles should be on the same node
node_id = specs["bundles_to_node_id"][0]
pg_ip[pg.id] = node_ip[node_id]
return sorted(pgs, key=lambda pg: pg_ip[pg.id])
@ray.remote
def get_master_addr_port(master_port_range: Optional[list[int]] = None) -> tuple[str, str]:
addr = ray.util.get_node_ip_address().strip("[]")
if master_port_range is None:
with socket.socket() as s:
s.bind(("", 0))
port = s.getsockname()[1]
else:
port = master_port_range[0]
while port < master_port_range[1]:
try:
with socket.socket() as s:
s.bind(("", port))
break
except OSError:
port += 1 # Increment port number if already in use
logger.info("Port %d is already in use, trying port %d", port - 1, port)
else:
raise RuntimeError(f"Could not find a free port in range {master_port_range}")
return addr, str(port)
class RayResourcePool(ResourcePool):
def __init__(
self,
process_on_nodes: Optional[list[int]] = None,
use_gpu: bool = True,
name_prefix: str = None,
max_colocate_count: int = 10,
detached=False,
accelerator_type: Optional[str] = None,
) -> None:
super().__init__(process_on_nodes, max_colocate_count)
self.use_gpu = use_gpu
# print(f"in RayProcessDispatchConfiguration: name_prefix = {name_prefix}")
self.name_prefix = get_random_string(length=6) if name_prefix is None else name_prefix
self.pgs = None
self.detached = detached
self.accelerator_type = accelerator_type
def get_placement_groups(self, strategy="STRICT_PACK", name=None, device_name="cuda"):
if self.pgs is not None:
return self.pgs
pg_name_prefix = (
name if name else f"{self.name_prefix}verl_group_{'_'.join([str(count) for count in self._store])}:"
)
# print(f"pg_name_prefix = {pg_name_prefix}")
if device_name == "npu":
device_name = "NPU"
elif device_name == "cuda":
device_name = "GPU"
bundle = {"CPU": self.max_colocate_count}
if self.use_gpu:
bundle[device_name] = 1
if self.accelerator_type is not None:
bundle[self.accelerator_type] = 1e-4
pg_scheme = [[bundle.copy() for _ in range(process_count)] for process_count in self._store]
lifetime = "detached" if self.detached else None
pgs = [
placement_group(bundles=bundles, strategy=strategy, name=pg_name_prefix + str(idx), lifetime=lifetime)
for idx, bundles in enumerate(pg_scheme)
]
ray.get([pg.ready() for pg in pgs])
self.pgs = sort_placement_group_by_node_ip(pgs)
return pgs
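# A minimal usage sketch (editorial addition, not part of verl): creating a pool over two
# 4-GPU nodes and materializing its placement groups. Assumes ray.init() has already been
# called on a cluster with enough resources; the function name is hypothetical.
def _example_ray_resource_pool():
    pool = RayResourcePool(process_on_nodes=[4, 4], use_gpu=True, max_colocate_count=1)
    pgs = pool.get_placement_groups(strategy="STRICT_PACK", device_name="cuda")
    # one placement group per node; each bundle reserves max_colocate_count CPUs and 1 GPU
    assert len(pgs) == 2 and pool.world_size == 8
    return pgs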
class SubRayResourcePool(RayResourcePool):
def __init__(
self,
placement_groups: list[PlacementGroup],
start_bundle_index: int,
subgroup_world_size: int,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.pgs = placement_groups
self.start_bundle_index = start_bundle_index
self.subgroup_world_size = subgroup_world_size
@property
def world_size(self):
return self.subgroup_world_size
@dataclass
class ResourcePoolManager:
"""
Define a resource pool specification. Resource pool will be initialized first.
"""
resource_pool_spec: dict[str, list[int]]
mapping: dict[int, str]
resource_pool_dict: dict[str, RayResourcePool] = field(default_factory=dict)
def create_resource_pool(self):
"""Create Ray resource pools for distributed training.
Initializes resource pools based on the resource pool specification,
with each pool managing GPU resources across multiple nodes.
For FSDP backend, uses max_colocate_count=1 to merge WorkerGroups.
For Megatron backend, uses max_colocate_count>1 for different models.
"""
for resource_pool_name, process_on_nodes in self.resource_pool_spec.items():
# max_colocate_count means the number of WorkerGroups (i.e. processes) in each RayResourcePool
# For FSDP backend, using max_colocate_count=3: actor_critic_ref, rollout, reward model (optional)
# For Megatron backend, we recommend using max_colocate_count>1
# so that different WorkerGroups can be used for different models
resource_pool = RayResourcePool(
process_on_nodes=process_on_nodes, use_gpu=True, max_colocate_count=3, name_prefix=resource_pool_name
)
self.resource_pool_dict[resource_pool_name] = resource_pool
self._check_resource_available()
def get_resource_pool(self, role) -> RayResourcePool:
"""Get the resource pool of the worker_cls"""
return self.resource_pool_dict[self.mapping[role]]
def get_n_gpus(self) -> int:
"""Get the number of gpus in this cluster."""
return sum([n_gpus for process_on_nodes in self.resource_pool_spec.values() for n_gpus in process_on_nodes])
def _check_resource_available(self):
"""Check if the resource pool can be satisfied in this ray cluster."""
node_available_resources = ray._private.state.available_resources_per_node()
node_available_gpus = {
node: node_info.get("GPU", 0) if "GPU" in node_info else node_info.get("NPU", 0)
for node, node_info in node_available_resources.items()
}
# check total required gpus can be satisfied
total_available_gpus = sum(node_available_gpus.values())
total_required_gpus = sum(
[n_gpus for process_on_nodes in self.resource_pool_spec.values() for n_gpus in process_on_nodes]
)
if total_available_gpus < total_required_gpus:
raise ValueError(
f"Total available GPUs {total_available_gpus} is less than total desired GPUs {total_required_gpus}"
)
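# A hedged sketch (editorial addition): mapping roles to pools with ResourcePoolManager.
# The integer role ids and the pool name are illustrative placeholders; assumes a running
# Ray cluster with at least 8 free GPUs.
def _example_resource_pool_manager():
    manager = ResourcePoolManager(
        resource_pool_spec={"global_pool": [8]},  # one pool: 8 processes on a single node
        mapping={0: "global_pool", 1: "global_pool"},  # role -> pool name
    )
    manager.create_resource_pool()  # also checks the cluster can satisfy the request
    assert manager.get_n_gpus() == 8
    return manager.get_resource_pool(role=0)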
def extract_pg_from_exist(
resource_pools: dict[str, RayResourcePool], src_role_names: list[str], resource_pool: RayResourcePool
) -> list:
src_pgs = [
pg
for role_name, resource_pool in resource_pools.items()
for pg in resource_pool.get_placement_groups()
if role_name in src_role_names
]
sorted_src_pgs = sorted(src_pgs, key=lambda pg: pg.bundle_count, reverse=True)
sorted_process_on_nodes = sorted([(val, idx) for idx, val in enumerate(resource_pool.store)], reverse=True)
unsorted_pgs: list[tuple[int, PlacementGroup]] = []
searching_idx = 0
for request_process, original_idx in sorted_process_on_nodes:
assert searching_idx < len(sorted_src_pgs), f"not enough nodes for request: searching the {searching_idx}-th node"
assert request_process <= sorted_src_pgs[searching_idx].bundle_count, (
f"requesting {request_process} processes, bundle count cannot satisfy"
)
unsorted_pgs.append((original_idx, sorted_src_pgs[searching_idx]))
searching_idx += 1
return [pg for _, pg in sorted(unsorted_pgs)]
# split a RayResourcePool or SubRayResourcePool into multiple SubRayResourcePools
def split_resource_pool(
resource_pool: RayResourcePool | SubRayResourcePool, split_size: int | list[int]
) -> list[SubRayResourcePool]:
"""
Split a RayResourcePool into multiple SubRayResourcePools.
resource_pool can also be a SubRayResourcePool (i.e. already split), which allows splitting a pool multiple times.
Args:
resource_pool (RayResourcePool | SubRayResourcePool): The resource pool to split.
split_size (int | list[int]): The size of each split. If int, all splits will have the same size.
If list[int], each element in the list represents the size of a split.
Returns:
list[SubRayResourcePool]: A list of SubRayResourcePool after splitting.
"""
# convert split_size to list[int]
if isinstance(split_size, int):
assert resource_pool.world_size % split_size == 0, "split_size must be a divisor of world_size"
num_replica = resource_pool.world_size // split_size
split_size_list = [split_size] * num_replica
else:
split_size_list = split_size
assert sum(split_size_list) == resource_pool.world_size, "split_size must sum up to world_size"
# check whether this resource pool has already been split
if isinstance(resource_pool, SubRayResourcePool):
start_bundle_idx_list = np.cumsum([resource_pool.start_bundle_index] + split_size_list[:-1])
else:
start_bundle_idx_list = np.cumsum([0] + split_size_list[:-1])
# ensure resource_pool.pgs has been initialized
placement_groups = resource_pool.get_placement_groups()
split_resource_pools = [
SubRayResourcePool(
process_on_nodes=resource_pool.store,
use_gpu=resource_pool.use_gpu,
name_prefix=f"{resource_pool.name_prefix}_split_{split_idx}",
max_colocate_count=resource_pool.max_colocate_count,
placement_groups=placement_groups,
start_bundle_index=start_bundle_idx_list[split_idx],
subgroup_world_size=split_size_list[split_idx],
)
for split_idx in range(len(split_size_list))
]
return split_resource_pools
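# A hedged sketch (editorial addition): splitting one 8-process pool into two 4-process
# sub-pools. Each SubRayResourcePool reuses the parent's placement groups and only narrows
# the bundle range it owns. Assumes a running Ray cluster.
def _example_split_resource_pool():
    pool = RayResourcePool(process_on_nodes=[8], use_gpu=True)
    sub_pools = split_resource_pool(pool, split_size=4)
    assert len(sub_pools) == 2 and all(sub.world_size == 4 for sub in sub_pools)
    # uneven splits are also allowed, e.g. split_resource_pool(pool, split_size=[2, 6])
    return sub_pools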
def merge_resource_pool(rp1: RayResourcePool, rp2: RayResourcePool) -> RayResourcePool:
assert rp1.use_gpu == rp2.use_gpu, "Both RayResourcePools must agree on use_gpu"
assert rp1.max_colocate_count == rp2.max_colocate_count, "Both RayResourcePools must have the same max_colocate_count"
assert rp1.n_gpus_per_node == rp2.n_gpus_per_node, "Both RayResourcePools must have the same n_gpus_per_node"
assert rp1.detached == rp2.detached, "Detached ResourcePool cannot be merged with non-detached ResourcePool"
new_store = rp1.store + rp2.store
merged = type(rp1)(
new_store, rp1.use_gpu, f"{rp1.name_prefix}_{rp2.name_prefix}", rp1.max_colocate_count, rp1.detached
)
merged.pgs = rp1.get_placement_groups(device_name=get_device_name()) + rp2.get_placement_groups(
device_name=get_device_name()
)
return merged
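# A hedged sketch (editorial addition): merging two pools created with compatible settings.
# Assumes a running Ray cluster, since merging materializes both pools' placement groups.
def _example_merge_resource_pool():
    rp1 = RayResourcePool(process_on_nodes=[4], use_gpu=True, max_colocate_count=1)
    rp2 = RayResourcePool(process_on_nodes=[4], use_gpu=True, max_colocate_count=1)
    merged = merge_resource_pool(rp1, rp2)
    assert merged.store == [4, 4]
    return merged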
class RayClassWithInitArgs(ClassWithInitArgs):
"""A wrapper class for Ray actors with initialization arguments.
This class extends ClassWithInitArgs to provide additional functionality for
configuring and creating Ray actors with specific resource requirements and
scheduling strategies.
"""
def __init__(self, cls, *args, **kwargs) -> None:
# self._options = kwargs.pop('options', dict())
super().__init__(cls, *args, **kwargs)
self._options = {}
self._additional_resource = {}
def set_additional_resource(self, additional_resource):
"""Set additional resource requirements for the actor.
Args:
additional_resource: Dictionary specifying additional resource requirements
"""
self._additional_resource = additional_resource
def update_options(self, options: dict):
"""Update the Ray actor creation options.
Args:
options: Dictionary of options to update
"""
self._options.update(options)
def __call__(
self,
placement_group,
placement_group_bundle_idx,
use_gpu: bool = True,
num_gpus=1,
sharing_with=None,
device_name="cuda",
) -> Any:
"""Create and return a Ray actor with the configured options.
Args:
placement_group: Ray placement group for scheduling
placement_group_bundle_idx: Index of the bundle in the placement group
use_gpu: Whether to use GPU resources
num_gpus: Number of GPUs to allocate
sharing_with: Actor to share resources with
device_name: Device for training
Returns:
A Ray actor handle with the configured options
"""
if sharing_with is not None:
target_node_id = ray.get(sharing_with.get_node_id.remote())
visible_devices = ray.get(sharing_with.get_cuda_visible_devices.remote())
options = {"scheduling_strategy": NodeAffinitySchedulingStrategy(node_id=target_node_id, soft=False)}
return self.cls.options(**options).remote(*self.args, cuda_visible_devices=visible_devices, **self.kwargs)
options = {
"scheduling_strategy": PlacementGroupSchedulingStrategy(
placement_group=placement_group, placement_group_bundle_index=placement_group_bundle_idx
)
}
options.update(self._options)
if use_gpu and device_name == "cuda":
options["num_gpus"] = num_gpus
if use_gpu and device_name == "npu":
options["resources"] = {"NPU": num_gpus}
if len(self._additional_resource) > 1:
for k, v in self._additional_resource.items():
options[k] = v
# print("cls:", self.cls)
# print("args: ", self.args)
# print("kwargs: ", self.kwargs)
return self.cls.options(**options).remote(*self.args, **self.kwargs)
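# A hedged sketch (editorial addition): configuring a RayClassWithInitArgs before actor
# creation. `MyWorker` is a placeholder Worker subclass and `pg` an existing placement group;
# in normal use RayWorkerGroup performs this call internally when creating its workers.
def _example_class_with_init_args(MyWorker, pg):
    cls_with_args = RayClassWithInitArgs(cls=ray.remote(MyWorker))
    cls_with_args.update_options({"max_concurrency": 2})  # extra Ray actor options
    actor = cls_with_args(placement_group=pg, placement_group_bundle_idx=0, use_gpu=True, num_gpus=1)
    return actor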
class RayWorkerGroup(WorkerGroup):
"""A group of Ray workers that can be managed collectively.
This class extends WorkerGroup to provide Ray-specific functionality for
creating and managing groups of Ray actors with specific resource requirements
and scheduling strategies.
"""
def __init__(
self,
resource_pool: RayResourcePool = None,
ray_cls_with_init: RayClassWithInitArgs = None,
bin_pack: bool = True,
name_prefix: str = None,
detached=False,
worker_names=None,
worker_handles: list[ray.actor.ActorHandle] = None,
ray_wait_register_center_timeout: int = 300,
**kwargs,
) -> None:
"""Initialize a RayWorkerGroup.
Args:
resource_pool: Resource pool for worker allocation
ray_cls_with_init: Class with initialization arguments for workers
bin_pack: Whether to use strict bin packing for resource allocation
name_prefix: Prefix for worker names
detached: Whether workers should be detached
worker_names: Names of existing workers to attach to
worker_handles: Actor handles of existing workers to attach to
ray_wait_register_center_timeout: Timeout for waiting on register center
**kwargs: Additional keyword arguments
"""
self._master_addr = kwargs.pop("master_addr", None)
self._master_port = kwargs.pop("master_port", None)
self.use_gpu = kwargs.pop("use_gpu", resource_pool.use_gpu if resource_pool is not None else True)
self._ray_master_port_range = kwargs.pop("master_port_range", None)
super().__init__(resource_pool=resource_pool, **kwargs)
self.ray_cls_with_init = ray_cls_with_init
self.name_prefix = get_random_string(length=6) if name_prefix is None else name_prefix
self._ray_wait_register_center_timeout = ray_wait_register_center_timeout
# Whether the WorkerGroup is a Colocate WorkerGroup created by FusedWorker.
self.fused_worker_used = False if ray_cls_with_init is None else ray_cls_with_init.fused_worker_used
# if a WorkerGroup is spawned from a Colocate WorkerGroup, this indicates which sub-class is bound to
# this WorkerGroup.
self.sub_cls_name = ""
self.device_name = kwargs.get("device_name", "cuda")
self.profile_steps = kwargs.get("profile_steps", None)
self.worker_nsight_options = kwargs.get("worker_nsight_options", None)
self.customized_worker_env = kwargs.get("worker_env", {})
if self.worker_nsight_options is not None and self.worker_nsight_options["capture-range-end"] is None:
self.worker_nsight_options["capture-range-end"] = f"repeat-shutdown:{6 * len(self.profile_steps)}"
if worker_names is not None and (not self.fused_worker_used):
assert self._is_init_with_detached_workers
self._worker_names = worker_names
if self._is_init_with_detached_workers:
self._init_with_detached_workers(worker_names=worker_names, worker_handles=worker_handles)
elif isinstance(resource_pool, SubRayResourcePool):
self._init_with_subresource_pool(
resource_pool=resource_pool,
ray_cls_with_init=ray_cls_with_init,
bin_pack=bin_pack,
detached=detached,
worker_env=self.customized_worker_env,
)
else:
self._init_with_resource_pool(
resource_pool=resource_pool,
ray_cls_with_init=ray_cls_with_init,
bin_pack=bin_pack,
detached=detached,
worker_env=self.customized_worker_env,
)
if ray_cls_with_init is not None:
self._bind_worker_method(self.ray_cls_with_init.cls, func_generator)
self.wg_dict = None
self.method_names = []
def _is_worker_alive(self, worker: ray.actor.ActorHandle):
"""Check if a worker actor is still alive.
Args:
worker: Ray actor handle to check
Returns:
bool: True if the worker is alive, False otherwise
"""
worker_state_dict = get_actor(worker._actor_id.hex())
return worker_state_dict.get("state", "undefined") == "ALIVE" if worker_state_dict is not None else False
def _init_with_detached_workers(self, worker_names, worker_handles):
# ray.get_actor holds a weak reference to the actor, which can cause actors to be garbage collected
# unexpectedly if we only hold the spawned RayWorkerGroup. By passing actor handles explicitly, the
# spawned RayWorkerGroup keeps strong references to these actors.
# https://github.com/ray-project/ray/pull/45699
workers = worker_handles if worker_handles else [ray.get_actor(name=name) for name in worker_names]
self._workers = workers
self._world_size = len(workers)
def _get_master_addr_port(self, pg, bundle_index=0, master_port_range=None):
"""Get master addr and port for this worker group"""
if self._master_addr is None and self._master_port is None:
self._master_addr, self._master_port = ray.get(
get_master_addr_port.options(
scheduling_strategy=PlacementGroupSchedulingStrategy(
placement_group=pg, placement_group_bundle_index=bundle_index
),
).remote(master_port_range=master_port_range)
)
elif self._master_addr is not None and self._master_port is not None:
logger.debug(f"{self._master_addr=} {self._master_port=}")
else:
raise ValueError(
"Both 'master_addr' and 'master_port' must be provided if you intend to manually specify them, "
"or neither should be provided to use Ray's default assignment."
)
def _init_with_resource_pool(
self,
resource_pool,
ray_cls_with_init,
bin_pack,
detached,
worker_env=None,
):
"""Initialize the worker group by creating new workers from a resource pool.
Args:
resource_pool: Resource pool for worker allocation
ray_cls_with_init: Class with initialization arguments for workers
bin_pack: Whether to use strict bin packing for resource allocation
detached: Whether workers should be detached
"""
self.resource_pool = resource_pool
strategy = "PACK"
if bin_pack:
strategy = "STRICT_PACK"
pgs = resource_pool.get_placement_groups(strategy=strategy, device_name=self.device_name)
world_size = resource_pool.world_size
self._world_size = world_size
# cia.add_kwarg("_world_size", world_size)
rank = -1
local_world_size = resource_pool.store[0]
for pg_idx, pg in enumerate(sort_placement_group_by_node_ip(pgs)):
assert local_world_size <= pg.bundle_count, f"when generating for {self.name_prefix}, for the "
if pg_idx == 0:
self._get_master_addr_port(pg, bundle_index=0, master_port_range=self._ray_master_port_range)
for local_rank in range(local_world_size):
rank += 1
self._create_worker(
rank=rank,
pg_idx=pg_idx,
pg=pg,
local_rank=local_rank,
resource_pool=resource_pool,
ray_cls_with_init=ray_cls_with_init,
worker_env=worker_env,
detached=detached,
)
def _init_with_subresource_pool(self, resource_pool, ray_cls_with_init, bin_pack, detached, worker_env=None):
"""Initialize the worker group by creating new workers from a resource pool or sub resource pool.
Args:
resource_pool: Resource pool for worker allocation
ray_cls_with_init: Class with initialization arguments for workers
bin_pack: Whether to use strict bin packing for resource allocation
detached: Whether workers should be detached
"""
strategy = "PACK"
if bin_pack:
strategy = "STRICT_PACK"
pgs = resource_pool.get_placement_groups(strategy=strategy, device_name=self.device_name)
world_size = resource_pool.world_size
self._world_size = world_size
rank = -1
local_world_size = resource_pool.store[0]
self._get_master_addr_port(
pgs[resource_pool.start_bundle_index // local_world_size],
bundle_index=resource_pool.start_bundle_index % local_world_size,
master_port_range=self._ray_master_port_range,
)
for curr_rank in range(resource_pool.start_bundle_index, resource_pool.start_bundle_index + world_size):
pg_idx = curr_rank // local_world_size
pg = pgs[pg_idx]
local_rank = curr_rank % local_world_size
assert local_world_size <= pg.bundle_count, f"when generating for {self.name_prefix}, for the "
rank += 1
self._create_worker(
rank=rank,
pg_idx=pg_idx,
pg=pg,
local_rank=local_rank,
resource_pool=resource_pool,
ray_cls_with_init=ray_cls_with_init,
worker_env=worker_env,
detached=detached,
)
def _create_worker(self, rank, pg_idx, pg, local_rank, resource_pool, ray_cls_with_init, worker_env, detached):
world_size = resource_pool.world_size
use_gpu = resource_pool.use_gpu
if self.use_gpu and not use_gpu:
raise ValueError("use_gpu is True but resource_pool.use_gpu is False")
local_world_size = resource_pool.store[0]
num_gpus = 1 / resource_pool.max_colocate_count
# we pass environment variables via the actor options so that the Worker can configure itself from them
env_vars = {
"WORLD_SIZE": str(world_size),
"RANK": str(rank),
"WG_PREFIX": self.name_prefix,
"WG_BACKEND": "ray",
"RAY_LOCAL_WORLD_SIZE": str(local_world_size),
"MASTER_ADDR": self._master_addr,
"MASTER_PORT": self._master_port,
}
if worker_env is not None:
logging.debug(f"Appending ray class env, origin: {env_vars}, customized env: {worker_env}")
conflict_env_vars = set(env_vars.keys()) & set(worker_env.keys())
if len(conflict_env_vars) > 0:
logging.error(
f"User customized env vars conflict with system env: {conflict_env_vars} "
f"Overriding may cause unexpected behavior."
)
raise ValueError(f"Cannot override protected system env: {conflict_env_vars}")
env_vars.update(worker_env)
import re
cia_name = type(ray_cls_with_init.cls).__name__
match = re.search(r"ActorClass\(([^)]+)\)", cia_name) # ray.remote(Obj) -> "ActorClass(Obj)"
cia_name = match.group(1) if match else cia_name # "ActorClass(Obj)" -> "Obj"
name = f"{self.name_prefix}{cia_name}_{pg_idx}:{local_rank}" # e.g. Worker_2:5
if self.profile_steps and self.device_name == "cuda":
ray_cls_with_init.update_options(
{
"runtime_env": {
"env_vars": env_vars,
"nsight": self.worker_nsight_options,
},
"name": name,
}
)
else:
ray_cls_with_init.update_options({"runtime_env": {"env_vars": env_vars}, "name": name})
if detached:
ray_cls_with_init.update_options({"lifetime": "detached"})
# create a worker
worker = ray_cls_with_init(
placement_group=pg,
placement_group_bundle_idx=local_rank,
use_gpu=self.use_gpu,
num_gpus=num_gpus,
device_name=self.device_name,
)
self._workers.append(worker)
self._worker_names.append(name)
@property
def worker_names(self):
return self._worker_names
@classmethod
def from_detached(
cls,
name_prefix=None,
worker_names=None,
worker_handles=None,
ray_cls_with_init=None,
**kwargs,
):
"""Create a worker group from existing detached workers.
Args:
name_prefix: Prefix for worker names
worker_names: Names of existing workers to attach to
ray_cls_with_init: Class with initialization arguments for workers
Returns:
A new RayWorkerGroup instance
"""
worker_group = cls(
resource_pool=None,
ray_cls_with_init=ray_cls_with_init,
name_prefix=name_prefix,
worker_names=worker_names,
worker_handles=worker_handles,
**kwargs,
)
return worker_group
def spawn(self, prefix_set):
"""Spawn to a dictionary of worker groups, each with a subset of method with prefix.
Args:
prefix_set: Set of prefixes to create worker groups for
Returns:
Dictionary of worker groups keyed by prefix
"""
if self.fused_worker_used:
return self.spawn_fused(prefix_set)
def _rebind_actor_methods(worker_group, actor_name):
prefix: str = actor_name + "_"
for method_name in dir(worker_group):
if method_name.startswith(prefix):
original_method_name = method_name.removeprefix(prefix)
method = getattr(worker_group, method_name)
setattr(worker_group, original_method_name, method)
new_worker_group_dict = {}
for prefix in prefix_set:
new_worker_group = self.from_detached(
name_prefix=self.name_prefix,
worker_names=self._worker_names,
worker_handles=self._workers,
ray_cls_with_init=self.ray_cls_with_init,
profile_steps=self.profile_steps,
worker_nsight_options=self.worker_nsight_options,
)
_rebind_actor_methods(new_worker_group, prefix)
new_worker_group_dict[prefix] = new_worker_group
return new_worker_group_dict
def spawn_fused(self, prefix_set):
"""Create a dictionary of worker groups for fused workers.
Args:
prefix_set: Set of prefixes to create worker groups for
Returns:
Dictionary of worker groups keyed by prefix
"""
wg_dict = dict()
for key in prefix_set:
new_wg = deepcopy(self)
new_wg._bind_worker_method(self.ray_cls_with_init.cls.raw_cls_dict[key], func_generator)
new_wg.sub_cls_name = key
wg_dict[key] = new_wg
return wg_dict
def fuse(self, prefix_set):
"""Fuse multiple worker groups into the current worker group.
Args:
prefix_set: Set of prefixes to fuse into the worker group
"""
if self.wg_dict is None:
self.wg_dict = self.spawn(prefix_set)
for role_name, role_wg in self.wg_dict.items():
setattr(self, role_name, role_wg)
self.method_names = self._bind_worker_method(self.ray_cls_with_init.cls, func_generator)
def _execute_remote_single_worker(self, worker, method_name: str, *args, **kwargs):
"""Execute a method on a single worker remotely.
Args:
worker: The worker actor handle
method_name: Name of the method to execute
*args: Positional arguments for the method
**kwargs: Keyword arguments for the method
Returns:
Remote object reference to the method execution
"""
if self.fused_worker_used and method_name not in self.method_names:
remote_call = getattr(worker, self.fused_worker_execute_fn_name)
return remote_call.remote(f"{self.sub_cls_name}_fwmn_{method_name}", *args, **kwargs)
# fused worker not used
remote_call = getattr(worker, method_name)
return remote_call.remote(*args, **kwargs)
def execute_rank_zero_sync(self, method_name: str, *args, **kwargs):
"""Execute a method on rank zero worker synchronously.
Args:
method_name: Name of the method to execute
*args: Positional arguments for the method
**kwargs: Keyword arguments for the method
Returns:
Result of the method execution
"""
return ray.get(self.execute_rank_zero_async(method_name, *args, **kwargs))
def execute_rank_zero_async(self, method_name: str, *args, **kwargs):
"""Execute a method on rank zero worker asynchronously.
Args:
method_name: Name of the method to execute
*args: Positional arguments for the method
**kwargs: Keyword arguments for the method
Returns:
Remote object reference to the method execution
"""
return self._execute_remote_single_worker(self._workers[0], method_name, *args, **kwargs)
def execute_rank_zero(self, method_name: str, *args, **kwargs):
"""Alias for execute_rank_zero_async.
Args:
method_name: Name of the method to execute
*args: Positional arguments for the method
**kwargs: Keyword arguments for the method
Returns:
Remote object reference to the method execution
"""
return self.execute_rank_zero_async(method_name, *args, **kwargs)
def execute_all(self, method_name: str, *args, **kwargs):
"""Alias for execute_all_async.
Args:
method_name: Name of the method to execute
*args: Positional arguments for the method
**kwargs: Keyword arguments for the method
Returns:
List of remote object references to the method executions
"""
return self.execute_all_async(method_name, *args, **kwargs)
def execute_all_sync(self, method_name: str, *args, **kwargs):
"""Execute a method on all workers synchronously.
Args:
method_name: Name of the method to execute
*args: Positional arguments for the method
**kwargs: Keyword arguments for the method
Returns:
List of results from all workers
"""
return ray.get(self.execute_all_async(method_name, *args, **kwargs))
def execute_all_async(self, method_name: str, *args, **kwargs):
"""Execute a method on all workers asynchronously.
Args:
method_name: Name of the method to execute
*args: Positional arguments for the method
**kwargs: Keyword arguments for the method
Returns:
List of remote object references to the method executions
"""
# Here, we assume that if all arguments in args and kwargs are lists,
# and their lengths match len(self._workers), we'll distribute each
# element in these lists to the corresponding worker
# print(f"execute_all_async: method {method_name}({args}, {kwargs})")
length = len(self._workers)
if all(isinstance(arg, list) for arg in args) and all(isinstance(kwarg, list) for kwarg in kwargs.values()):
if all(len(arg) == length for arg in args) and all(len(kwarg) == length for kwarg in kwargs.values()):
# print(f"splitting args and kwargs into {length} shards")
result = []
for i in range(length):
sliced_args = tuple(arg[i] for arg in args)
sliced_kwargs = {k: v[i] for k, v in kwargs.items()}
result.append(
self._execute_remote_single_worker(self._workers[i], method_name, *sliced_args, **sliced_kwargs)
)
return result
return [self._execute_remote_single_worker(worker, method_name, *args, **kwargs) for worker in self._workers]
@property
def master_address(self):
return self._master_addr
@property
def master_port(self):
return self._master_port
@property
def workers(self):
return self._workers
@property
def world_size(self):
return self._world_size
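# A hedged sketch (editorial addition): building a worker group and fanning a registered
# method out to every worker. `MyWorker` and its `ping` method are placeholders, not verl APIs;
# assumes a running Ray cluster with at least 4 free GPUs.
def _example_ray_worker_group(MyWorker):
    pool = RayResourcePool(process_on_nodes=[4], use_gpu=True)
    cls_with_args = RayClassWithInitArgs(cls=ray.remote(MyWorker))
    wg = RayWorkerGroup(resource_pool=pool, ray_cls_with_init=cls_with_args)
    results = wg.execute_all_sync("ping")  # blocks until every worker has replied
    assert len(results) == wg.world_size == 4
    return wg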
"""
Utilities that enable creating multiple workers inside the same ray.Actor,
while their code is written as separate ray.Actor classes.
"""
# deprecated, switching to FusedWorker
def _bind_workers_method_to_parent(cls, key, user_defined_cls):
"""
Binds the methods of each worker to the WorkerDict.
Note that we only bind public methods that are decorated by register
"""
for method_name in dir(user_defined_cls):
try:
method = getattr(user_defined_cls, method_name)
assert callable(method), f"{method_name} in {user_defined_cls} is not callable"
except Exception:
# if it is a property, the callable check fails (class-level access returns the descriptor), so skip it
continue
if hasattr(method, MAGIC_ATTR):
def generate_function(name, key=key):
def func(self, *args, **kwargs):
# dispatch to the actual worker
return getattr(self.worker_dict[key], name)(*args, **kwargs)
async def async_func(self, *args, **kwargs):
# dispatch to the actual worker
return await getattr(self.worker_dict[key], name)(*args, **kwargs)
wrapper = async_func if inspect.iscoroutinefunction(method) else func # noqa: B023
return wrapper
func = generate_function(method_name)
# pass MAGIC_ATTR for outer worker group
attrs = getattr(method, MAGIC_ATTR)
setattr(func, MAGIC_ATTR, attrs)
try:
# bind direct rollout method to class without prefix
if attrs["dispatch_mode"] == Dispatch.DIRECT_ROLLOUT_METHOD and "rollout" in key:
assert not hasattr(cls, method_name), (
f"conflict direct rollout method {method_name} with role {key}"
)
setattr(cls, method_name, func)
print(f"bind role {key} method {method_name} to class {cls}")
else:
method_name_with_prefix = key + "_" + method_name
setattr(cls, method_name_with_prefix, func)
except Exception as e:
raise ValueError(f"Fail to set method_name {method_name}") from e
def _unwrap_ray_remote(cls):
if hasattr(cls, "__ray_actor_class__"):
cls = cls.__ray_actor_class__
return cls
def _determine_fsdp_megatron_base_class(mros: list):
"""
- megatron: base class should be MegatronWorker
- fsdp: base class should be Worker
"""
for cls in mros[0]:
if cls.__name__ == "MegatronWorker":
return cls
if cls.__name__ == "Worker":
return cls
raise ValueError(f"Cannot determine base class for {mros}")
# deprecated, switching to FusedWorker
def create_colocated_worker_cls(class_dict: dict[str, RayClassWithInitArgs]):
"""
This function returns a RayClassWithInitArgs wrapping a colocated WorkerDict class that
delegates method calls to every cls in cls_dict.
"""
cls_dict = {}
init_args_dict = {}
worker_cls = _determine_fsdp_megatron_base_class(
[cls.cls.__ray_actor_class__.__mro__ for cls in class_dict.values()]
)
assert issubclass(worker_cls, Worker), f"worker_cls {worker_cls} should be a subclass of Worker"
print(f"colocated worker base class {worker_cls}")
for key, cls in class_dict.items():
cls_dict[key] = cls.cls
init_args_dict[key] = {"args": cls.args, "kwargs": cls.kwargs}
assert cls_dict.keys() == init_args_dict.keys()
# TODO: create a class with customizable name
class WorkerDict(worker_cls):
def __init__(self):
super().__init__()
self.worker_dict = {}
for key, user_defined_cls in cls_dict.items():
user_defined_cls = _unwrap_ray_remote(user_defined_cls)
# directly instantiate the class without remote
# in worker class, e.g. <verl.single_controller.base.worker.Worker>
# when DISABLE_WORKER_INIT == 1 it will return immediately
with temp_env_var("DISABLE_WORKER_INIT", "1"):
self.worker_dict[key] = user_defined_cls(
*init_args_dict[key].get("args", ()), **init_args_dict[key].get("kwargs", {})
)
# now monkey-patch the methods from inner class to WorkerDict
for key, user_defined_cls in cls_dict.items():
user_defined_cls = _unwrap_ray_remote(user_defined_cls)
_bind_workers_method_to_parent(WorkerDict, key, user_defined_cls)
remote_cls = ray.remote(WorkerDict)
remote_cls = RayClassWithInitArgs(cls=remote_cls)
return remote_cls
FusedWorkerCLSName = "FusedWorker"
def create_colocated_worker_raw_cls(class_dict: dict[str, RayClassWithInitArgs]):
"""
This function returns a FusedWorker class.
`FusedWorker.{class_name}` -> FusedClass
Use `class_name` as a param to directly access the underlying class.
`FusedWorker._fuw_execute("{class_name}_fwmn_{method_name}", *args, **kwargs)`
First param must be "{class_name}_fwmn_{method_name}" in order to access `method_name`
of underlying class `{class_name}`.
`FusedWorker.fused_worker_dict` -> {"class_name": FusedClass}
Stores all underlying classes.
`FusedClass.fused_worker_dict` -> {"class_name": FusedClass}
The same as `FusedWorker.fused_worker_dict`, enables underlying class to access other
underlying classes.
"""
raw_cls_dict = {cls_name: _unwrap_ray_remote(cia.cls) for cls_name, cia in class_dict.items()}
init_args_dict = {cls_name: cia.args for cls_name, cia in class_dict.items()}
init_kwargs_dict = {cls_name: cia.kwargs for cls_name, cia in class_dict.items()}
cls_names = list(class_dict.keys())
# FusedWorker_Actor_Critic
class_name_renamed = "_".join([FusedWorkerCLSName] + cls_names)
class FusedWorker(Worker):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.cls_names = cls_names
self.raw_cls_dict = raw_cls_dict
self.init_args_dict = init_args_dict
self.init_kwargs_dict = init_kwargs_dict
for cls_name, udc, ud_args, ud_kwargs in zip(
self.cls_names,
self.raw_cls_dict.values(),
self.init_args_dict.values(),
self.init_kwargs_dict.values(),
strict=True,
):
with temp_env_var("DISABLE_WORKER_INIT", "1"):
udc._get_ray_actor_cls_name = lambda x, name_renamed=class_name_renamed: name_renamed
udc._get_ray_method_prefix = lambda x, name_prefixed=cls_name: f"{name_prefixed}_"
# cls_name = "actor", "critic", udc = ActorWorker, CriticWorker
self.fused_worker_dict[cls_name] = udc(*ud_args, **ud_kwargs)
setattr(self, cls_name, self.fused_worker_dict[cls_name])
# inject fused_worker_dict into each sub-worker so they are aware of each other's existence
for _, worker in self.fused_worker_dict.items():
setattr(worker, Worker.fused_worker_attr_name, self.fused_worker_dict)
def _fuw_execute(self, method_name: str, *args, **kwargs):
# for fused_worker, method_name is in a form of "{cls_name}_fwmn_{method_name}"
# where fwmn stands "fused worker method name"
names = method_name.split("_fwmn_")
cls_name = names[0]
method_name = names[1]
assert cls_name in self.fused_worker_dict, (
f"calling {cls_name}'s {method_name}, but {cls_name} not in fused_worker_dict"
)
udc_method = getattr(self.fused_worker_dict[cls_name], method_name)
return udc_method(*args, **kwargs)
renamed_fused_worker_cls = type(class_name_renamed, (FusedWorker,), {})
renamed_fused_worker_cls.is_fused_worker = True
renamed_fused_worker_cls.raw_cls_dict = raw_cls_dict
return renamed_fused_worker_cls
def create_colocated_worker_cls_fused(class_dict: dict[str, RayClassWithInitArgs]):
"""
This function returns a RayClassWithInitArgs instance of FusedWorker, which is a replacement
for `create_colocated_worker_cls`. A WorkerGroup constructed using this class will be a colocated
WorkerGroup, referred to as `ColocateWorkerGroup` below.
`ColocateWorkerGroup.spawn(prefix_set)`
returns a dict of WorkerGroup {"class_name": WorkerGroup}, WorkerGroup in this dict will
have methods of underlying class `class_name` attached.
`ColocateWorkerGroup.fuse(prefix_set)`
After executing this function, `ColocateWorkerGroup.{class_name}` will return WorkerGroup
with methods of underlying class `class_name` attached.
"""
raw_colocated_worker_cls = create_colocated_worker_raw_cls(class_dict)
remote_cls = ray.remote(raw_colocated_worker_cls)
cia = RayClassWithInitArgs(cls=remote_cls)
cia.fused_worker_used = True
return cia
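# A hedged sketch (editorial addition): colocating two roles inside one FusedWorker actor per
# GPU, then splitting the colocated group back into per-role views that share the same actors.
# `ActorWorker`, `CriticWorker` and their configs are placeholders, not verl APIs.
def _example_fused_colocation(ActorWorker, CriticWorker, actor_config, critic_config):
    class_dict = {
        "actor": RayClassWithInitArgs(cls=ray.remote(ActorWorker), config=actor_config),
        "critic": RayClassWithInitArgs(cls=ray.remote(CriticWorker), config=critic_config),
    }
    fused_cia = create_colocated_worker_cls_fused(class_dict)
    pool = RayResourcePool(process_on_nodes=[8], use_gpu=True)
    wg = RayWorkerGroup(resource_pool=pool, ray_cls_with_init=fused_cia)
    role_groups = wg.spawn(prefix_set={"actor", "critic"})
    return role_groups["actor"], role_groups["critic"]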
|
verl__trainer__config__config.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from typing import Any, Optional
from verl.base_config import BaseConfig
__all__ = ["CheckpointConfig", "ProfileConfig", "BaseModelConfig"]
@dataclass
class CheckpointConfig(BaseConfig):
"""Configuration for model checkpointing.
The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config.
Args:
save_contents (list[str]): What to include in saved checkpoints.
Options: 'model', 'optimizer', 'extra', 'hf_model'.
load_contents (list[str]): Contents to load from checkpoint. Defaults to same as save_contents.
async_save (bool): Whether to save checkpoints asynchronously. Only implemented for Megatron as of now.
"""
save_contents: list[str] = field(default_factory=lambda: ["model", "optimizer", "extra"])
load_contents: list[str] = field(default_factory=lambda: ["model", "optimizer", "extra"])
async_save: bool = False
mbridge_config: dict[str, Any] = field(default_factory=dict)
@dataclass
class ProfileConfig(BaseConfig):
"""Configuration for profiling.
The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config.
Args:
profile_ranks (Optional[list[int]]): List of ranks to profile. None means all ranks.
step_start (int): Starting step for profiling.
step_end (int): Ending step for profiling.
save_path (Optional[str]): Path to save profiling results.
"""
profile_ranks: Optional[list[int]] = None
step_start: int = -1
step_end: int = -1
save_path: Optional[str] = None
@dataclass
class BaseModelConfig(BaseConfig):
"""Base configuration for a model.
Contains core settings for loading and initializing a pretrained model checkpoint.
Args:
path (str): Path to pretrained model weights.
tokenizer_path (Optional[str]): Tokenizer path (defaults to actor's model path if not set).
override_config (dict): Hugging Face config override.
external_lib (Optional[str]): External model implementation (optional).
trust_remote_code (bool): Whether to trust remote code from Hugging Face models.
lora (dict[str, Any]): LoRA configuration dictionary.
"""
path: str = "~/models/deepseek-llm-7b-chat"
tokenizer_path: Optional[str] = None
override_config: dict[str, Any] = field(default_factory=dict)
external_lib: Optional[str] = None
trust_remote_code: bool = False
lora: dict[str, Any] = field(default_factory=dict)
@dataclass
class ModuleConfig(BaseConfig):
"""Configuration for external Python module, which can be loaded, executed (and optionally, ``import``ed).
Args:
path (str, optional): Path to the module file to load and execute.
name (str, optional): Name of the module to ``import``. Format: ``"import.path.to.module"``.
If ``None``, the module will be loaded under a hashed name and
will not be added to ``sys.modules``, and thus cannot be ``import``ed as ``name``.
"""
path: Optional[str] = None
name: Optional[str] = None
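# A hedged sketch (editorial addition): the BaseConfig-derived dataclasses above behave like
# read-only mappings, so they can be consumed by attribute or by key.
def _example_checkpoint_config_usage():
    ckpt = CheckpointConfig(save_contents=["model", "optimizer", "extra", "hf_model"])
    assert ckpt["async_save"] is False  # dict-style access
    assert ckpt.get("missing_key", "n/a") == "n/a"  # .get() with a default
    assert set(ckpt) >= {"save_contents", "load_contents", "async_save"}  # iterates field names
    return dict(ckpt)  # the Mapping protocol allows conversion to a plain dict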
|
verl__trainer__fsdp_sft_trainer.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A lightweight one-file FSDP SFT Trainer
TODO(zhangchi.usc1992)
- Add calculation of mfu
- Add validation
"""
import os
os.environ["NCCL_DEBUG"] = "WARN"
os.environ["TOKENIZERS_PARALLELISM"] = "true"
import logging
import re
import time
from contextlib import nullcontext
import hydra
import torch
import torch.distributed
from omegaconf import DictConfig, OmegaConf
from peft import LoraConfig, TaskType, get_peft_model
from tensordict import TensorDict
from torch import nn
from torch.distributed.device_mesh import DeviceMesh, init_device_mesh
from torch.distributed.fsdp import CPUOffload, MixedPrecision, ShardingStrategy
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.utils.data import Dataset, DistributedSampler
from torchdata.stateful_dataloader import StatefulDataLoader
from tqdm import tqdm
from transformers import AutoConfig, AutoModelForCausalLM, PreTrainedModel
import verl.utils.hdfs_io as hdfs_io
from verl.utils.attention_utils import index_first_axis, pad_input, rearrange, unpad_input
from verl.utils.checkpoint.checkpoint_manager import find_latest_ckpt_path, get_checkpoint_tracker_filename
from verl.utils.checkpoint.fsdp_checkpoint_manager import FSDPCheckpointManager
from verl.utils.dataset import SFTDataset
from verl.utils.dataset.multiturn_sft_dataset import MultiTurnSFTDataset
from verl.utils.device import (
auto_set_device,
get_device_id,
get_device_name,
is_cuda_available,
is_npu_available,
)
from verl.utils.distributed import destroy_global_process_group, initialize_global_process_group
from verl.utils.fs import copy_to_local
from verl.utils.fsdp_utils import (
CPUOffloadPolicy,
MixedPrecisionPolicy,
apply_fsdp2,
fsdp2_clip_grad_norm_,
fsdp2_load_full_state_dict,
get_fsdp_wrap_policy,
get_init_weight_context_manager,
init_fn,
)
from verl.utils.logger import log_with_rank
from verl.utils.profiler import log_gpu_memory_usage
from verl.utils.py_functional import convert_to_regular_types
from verl.utils.torch_dtypes import PrecisionType
from verl.utils.torch_functional import get_cosine_schedule_with_warmup, get_wsd_schedule_with_warmup
from verl.utils.tracking import Tracking
from verl.utils.ulysses import (
gather_outputs_and_unpad,
get_ulysses_sequence_parallel_world_size,
ulysses_pad_and_slice_inputs,
)
from verl.workers.config.optimizer import build_optimizer
from verl.workers.sharding_manager.fsdp_ulysses import FSDPUlyssesShardingManager
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_SFT_LOGGING_LEVEL", "WARN"))
def extract_step(path):
match = re.search(r"global_step_(\d+)", path)
if match:
return int(match.group(1))
return None
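# e.g. extract_step("/ckpts/global_step_300") -> 300; extract_step("/ckpts/latest") -> None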
class FSDPSFTTrainer:
def __init__(
self,
config,
device_mesh: DeviceMesh,
ulysses_device_mesh: DeviceMesh,
tokenizer,
train_dataset: Dataset,
val_dataset: Dataset,
):
self.config = config
self.device_mesh = device_mesh
self.ulysses_device_mesh = ulysses_device_mesh
self.sharding_manager = FSDPUlyssesShardingManager(self.ulysses_device_mesh)
self.tokenizer = tokenizer
if self.config.data.chat_template is not None:
raise ValueError("Apply Chat template from config is not supported yet.")
# normalize dp size
self._normalize_config_bsz()
# Set sequence parallel size
self.config.ulysses_sequence_parallel_size = getattr(self.config, "ulysses_sequence_parallel_size", 1)
self.use_remove_padding = getattr(self.config, "use_remove_padding", False)
if self.device_mesh.get_rank() == 0:
print(f"Using sequence parallel size: {self.config.ulysses_sequence_parallel_size}")
print(f"Using remove padding: {self.use_remove_padding}")
self._build_dataloader(train_dataset, val_dataset)
self.lora = self.config.model.get("lora_adapter_path") is not None or self.config.model.lora_rank > 0
# Initialize resume-related variables
self.resume_global_step = 0
# build model
self._build_model_optimizer()
# Initialize checkpoint manager
self._init_checkpoint_manager()
self.load_checkpoint()
if self.device_mesh.get_rank() == 0:
print(self.config)
self.device_name = self.config.trainer.device
def _normalize_config_bsz(self):
dp_size = self.device_mesh.size(0) if not self.ulysses_device_mesh else self.ulysses_device_mesh.size(0)
if self.device_mesh.get_rank() == 0:
print(f"Normalize batch size by dp {dp_size}")
assert self.config.data.train_batch_size % dp_size == 0, (
f"Global batch size {self.config.data.train_batch_size} is not divisible by dp size {dp_size}"
)
self.config.data.train_batch_size //= dp_size
assert self.config.data.train_batch_size % self.config.data.micro_batch_size_per_gpu == 0
def _build_dataloader(self, train_dataset, val_dataset):
# build dataset
config = self.config
self.train_dataset, self.val_dataset = train_dataset, val_dataset
# build dataloader
# Use data parallel rank and size instead of global rank and world size
# If doing SP, we need to use the local rank and size
if self.config.ulysses_sequence_parallel_size > 1:
rank = self.ulysses_device_mesh.get_local_rank("dp")
world_size = self.ulysses_device_mesh.size(0)
if self.ulysses_device_mesh.get_rank() == 0:
print(f"Using SP rank {rank} and size {world_size} for data distribution")
print("Each SP rank gets different data, but the same data WITHIN the same rank")
else:
rank = self.device_mesh.get_rank()
world_size = self.device_mesh.size()
if self.device_mesh.get_rank() == 0:
print(f"Using FSDP rank {rank} and size {world_size} for data distribution")
# Set pin_memory_device when pin_memory is enabled.
device_name = get_device_name()
self.train_sampler = DistributedSampler(
self.train_dataset, shuffle=True, num_replicas=world_size, rank=rank, drop_last=True
)
self.train_dataloader = StatefulDataLoader(
dataset=self.train_dataset,
batch_size=config.data.train_batch_size,
sampler=self.train_sampler,
num_workers=8,
pin_memory=True,
drop_last=True,
pin_memory_device=device_name,
)
self.val_sampler = DistributedSampler(
self.val_dataset, shuffle=False, num_replicas=world_size, rank=rank, drop_last=True
)
self.val_dataloader = StatefulDataLoader(
dataset=self.val_dataset,
batch_size=config.data.micro_batch_size_per_gpu,
sampler=self.val_sampler,
num_workers=8,
pin_memory=True,
drop_last=True,
pin_memory_device=device_name,
)
def _build_model_optimizer(self):
# TODO (zhangchi.usc1992):
# 1. support pretrain from random weights
# 2. support init directly from sharded weights
local_model_path = copy_to_local(src=self.config.model.partial_pretrain, verbose=True)
if self.config.model.get("external_lib", None) is not None:
# This is used to import external_lib into the huggingface systems
import importlib
importlib.import_module(self.config.model.external_lib)
log_gpu_memory_usage("Before model allocation", logger=logger)
trust_remote_code = self.config.model.trust_remote_code
torch_dtype = self.config.model.fsdp_config.get("model_dtype", "fp32")
torch_dtype = PrecisionType.to_dtype(torch_dtype)
# load config first
config = AutoConfig.from_pretrained(local_model_path, trust_remote_code=trust_remote_code)
self.model_config = config
if hasattr(self.model_config, "max_position_embeddings"):
self.model_config.max_position_embeddings = max(
self.model_config.max_position_embeddings, self.config.data.max_length
)
if self.config.ulysses_sequence_parallel_size > 1:
assert self.use_remove_padding, "Sequence parallel is only supported when remove_padding is enabled"
# This may be very large
init_context = get_init_weight_context_manager(
use_meta_tensor=not config.tie_word_embeddings, mesh=self.device_mesh
)
with init_context():
self.model: PreTrainedModel = AutoModelForCausalLM.from_pretrained(
local_model_path,
config=config,
torch_dtype=torch_dtype,
attn_implementation="flash_attention_2",
trust_remote_code=trust_remote_code,
)
if self.use_remove_padding or self.config.ulysses_sequence_parallel_size > 1:
from verl.models.transformers.monkey_patch import apply_monkey_patch
apply_monkey_patch(model=self.model, ulysses_sp_size=self.config.ulysses_sequence_parallel_size)
# Apply Liger kernel if use_liger is enabled
if self.config.model.get("use_liger", False):
from liger_kernel.transformers.monkey_patch import _apply_liger_kernel_to_instance
_apply_liger_kernel_to_instance(model=self.model)
if self.lora:
self.model.enable_input_require_grads()
lora_adapter_path = self.config.model.get("lora_adapter_path")
if lora_adapter_path is not None:
from peft import PeftModel
print(f"Loading pre-trained LoRA adapter for sft from: {lora_adapter_path}")
local_adapter_path = copy_to_local(lora_adapter_path, use_shm=self.config.model.use_shm)
self.model = PeftModel.from_pretrained(self.model, local_adapter_path, is_trainable=True)
peft_config = self.model.peft_config["default"]
# Ensure task_type is TaskType enum, not string
if isinstance(peft_config.task_type, str):
peft_config.task_type = TaskType.CAUSAL_LM
else:
# Convert config to regular Python types before creating PEFT model
lora_config = {
"task_type": TaskType.CAUSAL_LM,
"r": self.config.model.lora_rank,
"lora_alpha": self.config.model.lora_alpha,
"target_modules": convert_to_regular_types(self.config.model.target_modules),
"bias": "none",
}
self.model = get_peft_model(self.model, LoraConfig(**lora_config))
self.model = self.model.to(torch_dtype)
if self.config.model.enable_gradient_checkpointing:
self.model.gradient_checkpointing_enable(gradient_checkpointing_kwargs={"use_reentrant": False})
log_gpu_memory_usage("After model allocation", logger=logger)
mixed_precision = MixedPrecision(
param_dtype=torch.bfloat16, reduce_dtype=torch.float32, buffer_dtype=torch.float32
)
auto_wrap_policy = get_fsdp_wrap_policy(
self.model,
config=self.config.model.fsdp_config.wrap_policy,
is_lora=self.lora,
)
if self.device_mesh.get_rank() == 0:
print(auto_wrap_policy)
if not self.config.model.fsdp_config.cpu_offload:
cpu_offload = None
else:
cpu_offload = CPUOffload(offload_params=self.config.model.fsdp_config.offload_params)
fsdp_strategy = self.config.model.strategy
if fsdp_strategy == "fsdp":
self.fsdp_model = FSDP(
self.model,
cpu_offload=cpu_offload,
param_init_fn=init_fn,
use_orig_params=False,
auto_wrap_policy=auto_wrap_policy,
device_id=get_device_id(),
sharding_strategy=ShardingStrategy.FULL_SHARD,
mixed_precision=mixed_precision,
sync_module_states=True,
device_mesh=self.device_mesh,
forward_prefetch=False,
)
elif fsdp_strategy == "fsdp2":
assert CPUOffloadPolicy is not None, "PyTorch version >= 2.4 is required for using fully_shard API (FSDP2)"
mp_policy = MixedPrecisionPolicy(
param_dtype=torch.bfloat16, reduce_dtype=torch.float32, cast_forward_inputs=True
)
fsdp_kwargs = {
"mesh": self.device_mesh,
"mp_policy": mp_policy,
"offload_policy": cpu_offload,
"reshard_after_forward": True,
}
full_state = self.model.state_dict()
apply_fsdp2(self.model, fsdp_kwargs, self.config.model.fsdp_config)
fsdp2_load_full_state_dict(self.model, full_state, self.device_mesh, cpu_offload)
self.fsdp_model = self.model
else:
raise NotImplementedError(f"not implement {fsdp_strategy}")
log_gpu_memory_usage("After FSDP wrapping", logger=logger)
self.optimizer = build_optimizer(self.fsdp_model.parameters(), self.config.optim)
log_gpu_memory_usage("After initialize optimizer", logger=logger)
self.steps_per_epoch = len(self.train_dataloader)
self.total_steps = self.steps_per_epoch * self.config.trainer.total_epochs
if self.device_mesh.get_rank() == 0:
print(
f"Number of steps/epoch {self.steps_per_epoch}, number of epochs "
f"{self.config.trainer.total_epochs}, total number of steps {self.total_steps}"
)
num_warmup_steps = int(self.total_steps * self.config.optim.lr_warmup_steps_ratio)
if not hasattr(self.config.optim, "lr_scheduler") or self.config.optim.lr_scheduler == "cosine":
self.lr_scheduler = get_cosine_schedule_with_warmup(
optimizer=self.optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=self.total_steps
)
elif self.config.optim.lr_scheduler == "wsd":
self.lr_scheduler = get_wsd_schedule_with_warmup(
optimizer=self.optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=self.total_steps
)
else:
raise ValueError(f"Unknown lr scheduler: {self.config.optim.lr_scheduler}")
def _compute_loss_and_backward(self, batch, do_backward=True, n_micro_batches=1):
"""Compute loss with optional sequence parallelism and remove padding features"""
use_sp = self.use_remove_padding and self.config.ulysses_sequence_parallel_size > 1
# Move inputs to GPU and prepare loss mask
input_ids = batch["input_ids"].to(self.device_name)
attention_mask = batch["attention_mask"].to(self.device_name)
position_ids = batch["position_ids"].to(self.device_name)
loss_mask = batch.pop("loss_mask")[:, 1:].reshape(-1).to(self.device_name)
loss_fct = nn.CrossEntropyLoss(reduction="none")
# Context manager for sequence parallel if needed
context = self.sharding_manager if use_sp else nullcontext()
with context, torch.autocast(device_type=self.device_name, dtype=torch.bfloat16):
if not use_sp:
# Standard forward pass without sequence parallel
labels = input_ids[:, 1:].contiguous()
output = self.fsdp_model(
input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, use_cache=False
)
logits = output.logits
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = labels.contiguous()
# Flatten the tokens
shift_logits = shift_logits.view(-1, self.model.config.vocab_size)
shift_labels = shift_labels.view(-1)
# Enable model parallelism
shift_labels = shift_labels.to(shift_logits.device)
loss = loss_fct(shift_logits, shift_labels)
loss = loss * loss_mask.to(loss.device)
else:
# IMPORTANT: We make a big assumption here so that we can shard the SAME sequence across SP ranks
# i.e., each GPU has <1 sequence, and each SP group has 1 sequence
# 1. All SP ranks will receive the *SAME* batch
# 2. Different SP groups will receive *DIFFERENT* batches
# This is implemented by the DistributedSampler
batch_size, seqlen = input_ids.shape
# Remove padding
input_ids_rmpad, indices, *_ = unpad_input(
input_ids.unsqueeze(-1), attention_mask
) # input_ids_rmpad (total_nnz, ...)
input_ids_rmpad = input_ids_rmpad.transpose(0, 1) # (1, total_nnz)
# Unpad position_ids to align rotary
position_ids_rmpad = index_first_axis(
rearrange(position_ids.unsqueeze(-1), "b s ... -> (b s) ..."), indices
).transpose(0, 1)
# Pad and slice inputs for sequence parallelism
input_ids_rmpad_sliced, position_ids_rmpad_padded, pad_size = ulysses_pad_and_slice_inputs(
input_ids_rmpad, position_ids_rmpad, sp_size=get_ulysses_sequence_parallel_world_size()
)
# For computing loss
input_ids_rmpad_rolled = torch.roll(input_ids_rmpad, shifts=-1, dims=1) # (1, total_nnz)
input_ids_rmpad_rolled, _, _ = ulysses_pad_and_slice_inputs(
input_ids_rmpad_rolled, None, get_ulysses_sequence_parallel_world_size()
)
input_ids_rmpad_rolled = input_ids_rmpad_rolled.squeeze(0) # ((total_nnz / sp) + pad)
# Forward pass
output = self.fsdp_model(
input_ids=input_ids_rmpad_sliced,
attention_mask=None, # Not needed with flash attention varlen
position_ids=position_ids_rmpad_padded,
use_cache=False,
)
# Compute loss locally then aggregate
logits_rmpad = output.logits.squeeze(0)
input_ids_rmpad_rolled = input_ids_rmpad_rolled.to(logits_rmpad.device)
loss = loss_fct(logits_rmpad, input_ids_rmpad_rolled)
# Gather and unpad for sequence parallelism
loss = gather_outputs_and_unpad(loss, gather_dim=0, unpad_dim=0, padding_size=pad_size)
# This is the loss collected from all ulysses ranks
full_loss = pad_input(
hidden_states=loss.unsqueeze(-1), indices=indices, batch=batch_size, seqlen=seqlen
)
full_loss = full_loss.squeeze(-1)[:, :-1] # Remove last token's loss
full_loss = full_loss.reshape(-1)
loss_mask = loss_mask.to(full_loss.device)
loss = full_loss * loss_mask
valid_token_this_rank = torch.sum(loss_mask)
if self.config.data.balance_dp_token:
torch.distributed.all_reduce(valid_token_this_rank)
dp_size = self.ulysses_device_mesh.size("dp") if use_sp else torch.distributed.get_world_size()
else:
dp_size = 1
loss = torch.sum(loss) / (valid_token_this_rank + 1e-8) * dp_size
loss = loss / n_micro_batches # normalize loss
if do_backward:
loss.backward()
return loss
def training_step(self, batch: TensorDict):
start_time = time.time()
self.fsdp_model.train()
log_gpu_memory_usage("Before optimizer zero_grad", logger=logger)
self.optimizer.zero_grad()
log_gpu_memory_usage("After optimizer zero_grad", logger=logger)
micro_batches = batch.split(self.config.data.micro_batch_size_per_gpu)
n_micro_batches = len(micro_batches)
step_loss = 0
for micro_batch in micro_batches:
loss = self._compute_loss_and_backward(batch=micro_batch, n_micro_batches=n_micro_batches)
step_loss += loss.item()
if self.config.model.strategy == "fsdp":
grad_norm = self.fsdp_model.clip_grad_norm_(max_norm=self.config.optim.clip_grad)
elif self.config.model.strategy == "fsdp2":
grad_norm = fsdp2_clip_grad_norm_(self.fsdp_model.parameters(), max_norm=self.config.optim.clip_grad)
else:
raise NotImplementedError(f"not implement {self.config.model.strategy}")
log_gpu_memory_usage("Before optimizer step", logger=logger)
# if grad_norm is not finite, skip the update
if not torch.isfinite(grad_norm):
print(f"WARN: grad_norm is not finite: {grad_norm}")
self.optimizer.zero_grad()
else:
self.optimizer.step()
log_gpu_memory_usage("After optimizer step", logger=logger)
self.lr_scheduler.step()
# reduce loss across dp ranks
lr = self.lr_scheduler.get_last_lr()[0]
log_gpu_memory_usage("After offload weights", logger=logger)
step_loss = torch.tensor(step_loss).to(self.device_name)
# compute time spent per step
end_time = time.time()
spend_time_per_step = end_time - start_time
if is_cuda_available:
torch.distributed.all_reduce(step_loss, op=torch.distributed.ReduceOp.AVG)
elif is_npu_available:
torch.distributed.all_reduce(step_loss)
step_loss /= self.device_mesh.size(0)
return {
"train/loss": step_loss.detach().item(),
"train/lr(1e-3)": lr * 1e3,
"train/time(s)": spend_time_per_step,
}
def validation_step(self, batch: TensorDict):
self.fsdp_model.eval()
with torch.no_grad():
loss = self._compute_loss_and_backward(batch, do_backward=False)
if is_cuda_available:
torch.distributed.all_reduce(loss, op=torch.distributed.ReduceOp.AVG)
elif is_npu_available:
torch.distributed.all_reduce(loss)
loss /= self.device_mesh.size(0)
return loss
def save_checkpoint(self, step):
"""Save checkpoint using FSDPCheckpointManager with improved tracking"""
from verl.utils.fs import local_mkdir_safe
# Determine checkpoint path
local_global_step_folder = os.path.join(self.config.trainer.default_local_dir, f"global_step_{step}")
if self.device_mesh.get_rank() == 0:
print(f"Saving checkpoint to: {local_global_step_folder}")
# Get max checkpoints to keep
max_ckpt_to_keep = getattr(self.config.trainer, "max_ckpt_to_keep", None)
# Use checkpoint manager to save
self.checkpoint_manager.save_checkpoint(
local_path=local_global_step_folder, global_step=step, max_ckpt_to_keep=max_ckpt_to_keep
)
# Save dataloader state
if self.device_mesh.get_rank() == 0:
local_mkdir_safe(local_global_step_folder)
dataloader_local_path = os.path.join(local_global_step_folder, "data.pt")
# Use StatefulDataLoader's built-in state dict functionality
dataloader_state_dict = self.train_dataloader.state_dict()
torch.save(dataloader_state_dict, dataloader_local_path)
print(f"Saved dataloader state to: {dataloader_local_path}")
# Update latest checkpoint tracker (atomic write)
tracker_file = get_checkpoint_tracker_filename(self.config.trainer.default_local_dir)
temp_tracker_file = tracker_file + ".tmp"
with open(temp_tracker_file, "w") as f:
f.write(str(step))
os.rename(temp_tracker_file, tracker_file)
print(f"Updated checkpoint tracker: {tracker_file}")
# Copy to HDFS if configured
if self.device_mesh.get_rank() == 0 and getattr(self.config.trainer, "default_hdfs_dir", None):
hdfs_io.makedirs(self.config.trainer.default_hdfs_dir, exist_ok=True)
hdfs_io.copy(src=local_global_step_folder, dst=self.config.trainer.default_hdfs_dir, dirs_exist_ok=True)
torch.distributed.barrier()
def _init_checkpoint_manager(self):
"""Initialize checkpoint manager with proper configuration"""
# Get checkpoint configuration from config, with defaults
checkpoint_config = getattr(self.config.trainer, "checkpoint", {})
# Set default values if not specified
save_contents = checkpoint_config.get("save_contents", ["model", "optimizer", "extra"])
load_contents = checkpoint_config.get("load_contents", save_contents)
# Create checkpoint config dict
checkpoint_config_dict = {
"load_contents": load_contents,
"save_contents": save_contents,
}
# Convert to DictConfig for compatibility
checkpoint_config_dict = DictConfig(checkpoint_config_dict)
# Initialize checkpoint manager
self.checkpoint_manager = FSDPCheckpointManager(
model=self.fsdp_model,
optimizer=self.optimizer,
lr_scheduler=self.lr_scheduler,
processing_class=self.tokenizer,
checkpoint_config=checkpoint_config_dict,
trust_remote_code=self.config.model.trust_remote_code,
)
def load_checkpoint(self):
# Determine resume path based on configuration
checkpoint_path = self._determine_resume_path()
if checkpoint_path is None:
return 0
# extract resume step from checkpoint path
resume_step = extract_step(checkpoint_path)
if resume_step is None:
log_with_rank(
f"Warning: Could not extract step number from {checkpoint_path}, starting from step 0",
logger=logger,
rank=self.device_mesh.get_rank(),
level=logging.WARNING,
log_only_rank_0=True,
)
return 0
self.resume_global_step = resume_step
# Use checkpoint manager to load model state
self.checkpoint_manager.load_checkpoint(checkpoint_path)
log_with_rank(
f"Successfully loaded model checkpoint from {checkpoint_path} (step {resume_step})",
logger=logger,
rank=self.device_mesh.get_rank(),
log_only_rank_0=True,
)
# Always load dataloader state for StatefulDataLoader
self._load_dataloader_state(checkpoint_path)
return resume_step
def _load_dataloader_state(self, checkpoint_path: str):
"""Load dataloader state from checkpoint"""
dataloader_path = os.path.join(checkpoint_path, "data.pt")
if os.path.exists(dataloader_path):
# Use StatefulDataLoader's built-in state dict functionality
dataloader_state_dict = torch.load(dataloader_path, map_location="cpu", weights_only=False)
self.train_dataloader.load_state_dict(dataloader_state_dict)
log_with_rank(
f"Successfully loaded dataloader state from {dataloader_path}",
logger=logger,
rank=self.device_mesh.get_rank(),
log_only_rank_0=True,
)
else:
log_with_rank(
f"Warning: No dataloader state found at {dataloader_path}, will start from scratch",
logger=logger,
rank=self.device_mesh.get_rank(),
level=logging.WARNING,
log_only_rank_0=True,
)
def _determine_resume_path(self):
"""Determine the path to resume from based on resume_mode configuration"""
resume_mode = getattr(self.config.trainer, "resume_mode", "auto")
resume_from_path = getattr(self.config.trainer, "resume_from_path", None)
if resume_mode == "disable":
return None
elif resume_mode == "auto":
if resume_from_path is not None:
assert os.path.exists(resume_from_path), (
"resume_from_path must be null or an existing path when resume_mode is 'auto'"
)
assert "global_step_" in resume_from_path, "resume_from_path must specify the global_steps"
return resume_from_path
# Try to find the latest checkpoint in the default directory
return self._find_latest_checkpoint()
elif resume_mode == "resume_path":
assert os.path.exists(resume_from_path), (
"resume_from_path must be an existing path when resume_mode is 'resume_path'"
)
assert "global_step_" in resume_from_path, "resume_from_path must specify the global_steps"
return resume_from_path
else:
raise ValueError(f"Invalid resume_mode: {resume_mode}. Must be 'auto', 'disable', or 'resume_path'")
def _find_latest_checkpoint(self):
"""Find the latest checkpoint in the default local directory"""
checkpoint_dir = self.config.trainer.default_local_dir
if not os.path.exists(checkpoint_dir):
return None
latest_checkpoint = find_latest_ckpt_path(checkpoint_dir)
if latest_checkpoint and self.device_mesh.get_rank() == 0:
step_num = extract_step(latest_checkpoint)
print(f"Found latest checkpoint: {latest_checkpoint} (step {step_num})")
return latest_checkpoint
def fit(self):
rank = self.device_mesh.get_rank()
# TODO: add a unified tracking
if rank == 0:
tracking = Tracking(
project_name=self.config.trainer.project_name,
experiment_name=self.config.trainer.experiment_name,
default_backend=self.config.trainer.logger,
config=OmegaConf.to_container(self.config, resolve=True),
)
global_step = self.resume_global_step # Start from resumed step
last_valid_metric = None
# compute the total training steps.
# the total training steps in SFT is mainly for early exit
total_training_steps = len(self.train_dataloader) * self.config.trainer.total_epochs
if self.config.trainer.total_training_steps is not None:
total_training_steps = self.config.trainer.total_training_steps
self.total_training_steps = total_training_steps
log_with_rank(
f"Total training steps: {self.total_training_steps},",
logger=logger,
rank=self.device_mesh.get_rank(),
log_only_rank_0=True,
)
# With StatefulDataLoader, we don't need to manually calculate epochs and steps
# The dataloader will automatically resume from where it left off
if global_step > 0:
log_with_rank(
f"StatefulDataLoader will automatically resume from global step: {global_step}",
logger=logger,
rank=self.device_mesh.get_rank(),
log_only_rank_0=True,
)
# Calculate which epoch we're starting from for sampler.set_epoch()
start_epoch = global_step // self.steps_per_epoch
train_time = 0
for epoch in range(start_epoch, self.config.trainer.total_epochs):
self.train_sampler.set_epoch(epoch=epoch)
for step_in_epoch, data in enumerate(
tqdm(
self.train_dataloader,
initial=global_step % self.steps_per_epoch if epoch == start_epoch else 0,
total=self.steps_per_epoch,
desc=f"Epoch {epoch + 1}/{self.config.trainer.total_epochs}",
disable=rank != 0,
)
):
global_step += 1
data = TensorDict(data, batch_size=self.config.data.train_batch_size).to(self.device_name)
metric = self.training_step(data)
train_time += metric["train/time(s)"]
if rank == 0:
tracking.log(data=metric, step=global_step)
is_last_step = global_step >= self.total_training_steps
is_valid_step = global_step % self.config.trainer.test_freq == 0
is_save_step = global_step % self.config.trainer.save_freq == 0
# early exit or validation step
if is_last_step or (self.config.trainer.test_freq > 0 and is_valid_step):
# Perform validation
val_losses = []
for val_data in self.val_dataloader:
val_data = TensorDict(val_data, batch_size=self.config.data.micro_batch_size_per_gpu).to(
self.device_name
)
val_loss = self.validation_step(val_data)
val_losses.append(val_loss)
if rank == 0:
val_loss = torch.mean(torch.stack(val_losses))
metric = {"val/loss": val_loss.detach().item()}
tracking.log(data=metric, step=global_step)
last_valid_metric = metric
torch.distributed.barrier()
if is_last_step or (self.config.trainer.save_freq > 0 and is_save_step):
self.save_checkpoint(step=global_step)
if is_last_step:
if rank == 0:
print(f"Total time for train steps: {train_time:.2f}s")
print(f"Final validation metrics: {last_valid_metric}")
return
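# Hedged illustration (not part of verl): a minimal numeric sketch of the loss normalization used in
# `_compute_loss_and_backward` and `training_step` above. Each micro-batch contributes
# sum(masked_token_losses) / (valid_tokens + eps) * dp_size, divided by the number of micro-batches,
# so gradient accumulation approximates a token-level mean. All tensor values below are made up.
def _example_microbatch_loss_normalization():
    import torch

    eps = 1e-8
    dp_size = 1  # single rank, so no all_reduce of the valid-token count is needed
    micro_token_losses = [torch.tensor([1.0, 2.0, 3.0]), torch.tensor([4.0, 5.0, 6.0])]
    micro_loss_masks = [torch.tensor([1.0, 1.0, 0.0]), torch.tensor([1.0, 0.0, 0.0])]
    n_micro_batches = len(micro_token_losses)
    step_loss = 0.0
    for token_loss, loss_mask in zip(micro_token_losses, micro_loss_masks):
        masked = token_loss * loss_mask
        valid_tokens = loss_mask.sum()
        # same normalization as the trainer: per-micro-batch token mean, scaled by dp_size,
        # then divided by the number of micro-batches (gradient accumulation)
        loss = masked.sum() / (valid_tokens + eps) * dp_size / n_micro_batches
        step_loss += loss.item()
    return step_loss  # ~ (1.5 + 4.0) / 2 = 2.75 for the toy values above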
def run_sft(config):
device_name = get_device_name()
local_rank, rank, world_size = initialize_global_process_group()
device_mesh = init_device_mesh(device_type=device_name, mesh_shape=(world_size,), mesh_dim_names=("fsdp",))
dp_size = world_size // config.ulysses_sequence_parallel_size
ulysses_device_mesh = init_device_mesh(
device_type=device_name,
mesh_shape=(dp_size, config.ulysses_sequence_parallel_size),
mesh_dim_names=("dp", "sp"),
)
# build tokenizer and datasets first
from verl.utils import hf_tokenizer
local_model_path = copy_to_local(src=config.model.partial_pretrain, verbose=True)
tokenizer = hf_tokenizer(local_model_path, trust_remote_code=config.model.trust_remote_code)
train_dataset = create_sft_dataset(
config.data.train_files, config.data, tokenizer, max_samples=config.data.get("train_max_samples", -1)
)
val_dataset = create_sft_dataset(
config.data.val_files, config.data, tokenizer, max_samples=config.data.get("val_max_samples", -1)
)
trainer = FSDPSFTTrainer(
config=config,
device_mesh=device_mesh,
ulysses_device_mesh=ulysses_device_mesh,
tokenizer=tokenizer,
train_dataset=train_dataset,
val_dataset=val_dataset,
)
trainer.fit()
destroy_global_process_group()
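# Hedged sketch (not part of verl): how the two device meshes built in `run_sft` above relate.
# With world_size GPUs and a Ulysses sequence-parallel size `sp`, the (dp, sp) mesh arranges the
# same ranks so that each group of `sp` consecutive ranks forms one sequence-parallel group that
# sees the same data, while the flat "fsdp" mesh spans all ranks. Numbers below are examples only.
def _example_device_mesh_shapes(world_size: int = 8, sp_size: int = 2):
    assert world_size % sp_size == 0, "ulysses_sequence_parallel_size must divide world_size"
    dp_size = world_size // sp_size
    fsdp_mesh_shape = (world_size,)          # mesh_dim_names=("fsdp",)
    ulysses_mesh_shape = (dp_size, sp_size)  # mesh_dim_names=("dp", "sp")
    return fsdp_mesh_shape, ulysses_mesh_shape  # e.g. (8,) and (4, 2)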
@hydra.main(config_path="config", config_name="sft_trainer", version_base=None)
def main(config):
# Automatically set `config.trainer.device = npu` when running on Ascend NPU.
auto_set_device(config)
run_sft(config)
def create_sft_dataset(data_paths, data_config, tokenizer, max_samples=-1):
"""Create a dataset."""
# build dataset
# First check if a custom dataset class is specified
if data_config.custom_cls.get("path", None):
from verl.utils.import_utils import load_extern_object
dataset_cls = load_extern_object(data_config.custom_cls.path, data_config.custom_cls.name)
# Then check if multi-turn dataset should be used
elif data_config.get("multiturn", {}).get("enable", False):
dataset_cls = MultiTurnSFTDataset
# Default to single-turn dataset
else:
dataset_cls = SFTDataset
# Create datasets based on the selected class
dataset = dataset_cls(parquet_files=data_paths, tokenizer=tokenizer, config=data_config, max_samples=max_samples)
return dataset
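# Hedged sketch (not part of verl): the "write to a temp file, then os.rename" pattern used in
# `save_checkpoint` above to update the latest-checkpoint tracker. On POSIX filesystems the rename
# is atomic, so readers never observe a partially written tracker file. The tracker filename below
# is an illustrative placeholder rather than verl's real helper.
def _example_atomic_tracker_write(checkpoint_dir: str, step: int) -> str:
    import os

    tracker_file = os.path.join(checkpoint_dir, "latest_checkpointed_iteration.txt")
    temp_tracker_file = tracker_file + ".tmp"
    with open(temp_tracker_file, "w") as f:
        f.write(str(step))
    os.rename(temp_tracker_file, tracker_file)  # atomic replacement on POSIX
    return tracker_file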
if __name__ == "__main__":
main()
|
verl__trainer__main_eval.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Offline evaluation of a generation file using a reward model and a ground-truth verifier.
The input is a parquet file that contains N generated sequences and, optionally, the ground truth.
"""
from collections import defaultdict
import hydra
import numpy as np
import pandas as pd
import ray
from omegaconf import OmegaConf
from tqdm import tqdm
from verl.trainer.ppo.reward import get_custom_reward_fn
from verl.utils.fs import copy_to_local
@ray.remote
def process_item(config, data_source, response_lst, reward_data):
reward_fn = get_custom_reward_fn(config)
ground_truth = reward_data["ground_truth"]
score_lst = [reward_fn(data_source, r, ground_truth) for r in response_lst]
return data_source, np.mean(score_lst)
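# Hedged sketch (not part of verl): the call above implies the custom reward function returned by
# `get_custom_reward_fn` is invoked as reward_fn(data_source, response, ground_truth) and returns a
# scalar score. A minimal exact-match scorer of that shape might look like the following; the name
# and scoring rule are illustrative assumptions only.
def example_exact_match_reward(data_source: str, response: str, ground_truth: str) -> float:
    del data_source  # unused in this toy scorer
    return 1.0 if response.strip() == ground_truth.strip() else 0.0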
@hydra.main(config_path="config", config_name="evaluation", version_base=None)
def main(config):
local_path = copy_to_local(config.data.path, use_shm=config.data.get("use_shm", False))
dataset = pd.read_parquet(local_path)
responses = dataset[config.data.response_key]
data_sources = dataset[config.data.data_source_key]
reward_model_data = dataset[config.data.reward_model_key]
total = len(dataset)
# Initialize Ray
if not ray.is_initialized():
ray.init(**OmegaConf.to_container(config.ray_kwargs.get("ray_init", {})))
# evaluate test_score based on data source
data_source_reward = defaultdict(list)
# Create remote tasks
remote_tasks = [
process_item.remote(config, data_sources[i], responses[i], reward_model_data[i]) for i in range(total)
]
# Process results as they come in
with tqdm(total=total) as pbar:
while len(remote_tasks) > 0:
# Use ray.wait to get completed tasks
done_ids, remote_tasks = ray.wait(remote_tasks)
for result_id in done_ids:
data_source, score = ray.get(result_id)
data_source_reward[data_source].append(score)
pbar.update(1)
metric_dict = {}
for data_source, rewards in data_source_reward.items():
metric_dict[f"test_score/{data_source}"] = np.mean(rewards)
print(metric_dict)
if __name__ == "__main__":
main()
|
verl__trainer__main_generation.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Generate responses given a dataset of prompts
"""
import os
import hydra
import numpy as np
import ray
os.environ["NCCL_DEBUG"] = "WARN"
os.environ["TOKENIZERS_PARALLELISM"] = "true"
# os.environ['TORCH_COMPILE_DISABLE'] = '1'
from pprint import pprint
import pandas as pd
from omegaconf import OmegaConf
from verl import DataProto
from verl.protocol import pad_dataproto_to_divisor, unpad_dataproto
from verl.single_controller.ray import RayClassWithInitArgs, RayResourcePool, RayWorkerGroup
from verl.utils import hf_tokenizer
from verl.utils.fs import copy_to_local
from verl.utils.hdfs_io import makedirs
from verl.utils.model import compute_position_id_with_mask
from verl.workers.fsdp_workers import ActorRolloutRefWorker
@hydra.main(config_path="config", config_name="generation", version_base=None)
def main(config):
run_generation(config)
def run_generation(config) -> None:
if not ray.is_initialized():
# this is for local ray cluster
default_runtime_env = {"env_vars": {"TOKENIZERS_PARALLELISM": "true", "NCCL_DEBUG": "WARN"}}
ray_init_kwargs = config.ray_kwargs.get("ray_init", {})
runtime_env_kwargs = ray_init_kwargs.get("runtime_env", {})
runtime_env = OmegaConf.merge(default_runtime_env, runtime_env_kwargs)
ray_init_kwargs = OmegaConf.create({**ray_init_kwargs, "runtime_env": runtime_env})
print(f"ray init kwargs: {ray_init_kwargs}")
ray.init(**OmegaConf.to_container(ray_init_kwargs))
ray.get(main_task.remote(config))
@ray.remote(num_cpus=1)
def main_task(config):
pprint(OmegaConf.to_container(config, resolve=True)) # resolve=True will eval symbol values
OmegaConf.resolve(config)
local_path = copy_to_local(config.model.path)
trust_remote_code = config.data.get("trust_remote_code", False)
tokenizer = hf_tokenizer(local_path, trust_remote_code=trust_remote_code)
if config.rollout.temperature == 0.0:
assert config.data.n_samples == 1, "When temperature=0, n_samples must be 1."
    assert config.data.n_samples >= 1, "n_samples should always be >= 1"
    # Read the dataset. Note that the dataset should already be in chat-template format (e.g., a list of dictionaries).
dataset = pd.read_parquet(config.data.path)
chat_lst = dataset[config.data.prompt_key].tolist()
chat_lst = [chat.tolist() for chat in chat_lst]
tokenizer.padding_side = "left"
if tokenizer.pad_token is None:
tokenizer.pad_token = tokenizer.eos_token
ray_cls_with_init = RayClassWithInitArgs(cls=ray.remote(ActorRolloutRefWorker), config=config, role="rollout")
resource_pool = RayResourcePool(process_on_nodes=[config.trainer.n_gpus_per_node] * config.trainer.nnodes)
wg = RayWorkerGroup(
resource_pool=resource_pool,
ray_cls_with_init=ray_cls_with_init,
device_name=config.trainer.device,
)
wg.init_model()
total_samples = len(dataset)
config_batch_size = config.data.batch_size
apply_chat_template_kwargs = config.data.get("apply_chat_template_kwargs", {})
num_batch = -(-total_samples // config_batch_size)
output_lst = [[] for _ in range(config.data.n_samples)]
for batch_idx in range(num_batch):
print(f"[{batch_idx + 1}/{num_batch}] Start to process.")
batch_chat_lst = chat_lst[batch_idx * config_batch_size : (batch_idx + 1) * config_batch_size]
inputs = tokenizer.apply_chat_template(
batch_chat_lst,
add_generation_prompt=True,
padding=True,
truncation=True,
max_length=config.rollout.prompt_length,
return_tensors="pt",
return_dict=True,
tokenize=True,
**apply_chat_template_kwargs,
)
input_ids = inputs["input_ids"]
attention_mask = inputs["attention_mask"]
position_ids = compute_position_id_with_mask(attention_mask)
batch_dict = {"input_ids": input_ids, "attention_mask": attention_mask, "position_ids": position_ids}
data = DataProto.from_dict(batch_dict)
data_padded, pad_size = pad_dataproto_to_divisor(data, wg.world_size)
# START TO GENERATE FOR n_samples TIMES
print(f"[{batch_idx + 1}/{num_batch}] Start to generate.")
for n_sample in range(config.data.n_samples):
output_padded = wg.generate_sequences(data_padded)
output = unpad_dataproto(output_padded, pad_size=pad_size)
output_texts = []
for i in range(len(output)):
data_item = output[i]
prompt_length = data_item.batch["prompts"].shape[-1]
valid_response_length = data_item.batch["attention_mask"][prompt_length:].sum()
valid_response_ids = data_item.batch["responses"][:valid_response_length]
response_str = tokenizer.decode(valid_response_ids, skip_special_tokens=True)
output_texts.append(response_str)
output_lst[n_sample].extend(output_texts)
    # convert output_lst from (n_samples, n_data) to (n_data, n_samples)
output_lst = np.array(output_lst, dtype=object)
output_lst = np.transpose(output_lst, axes=(1, 0)).tolist()
# add to the data frame
dataset["responses"] = output_lst
# write to a new parquet
output_dir = os.path.dirname(config.data.output_path)
makedirs(output_dir, exist_ok=True)
dataset.to_parquet(config.data.output_path)
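# Hedged sketch (not part of verl): why `pad_dataproto_to_divisor` is used above. The rollout worker
# group dispatches the batch evenly across `wg.world_size` workers, so the batch is first padded
# (e.g., by repeating rows) until its length is divisible by the world size, and the padding is
# removed from the generated output afterwards. Pure-arithmetic illustration:
def _example_pad_size(batch_size: int = 10, world_size: int = 4) -> int:
    pad_size = (-batch_size) % world_size  # 2 extra rows so that 12 % 4 == 0
    assert (batch_size + pad_size) % world_size == 0
    return pad_size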
if __name__ == "__main__":
main()
|
verl__trainer__main_generation_server.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Generate responses given a dataset of prompts
"""
import os
import aiohttp
import hydra
import numpy as np
import ray
os.environ["NCCL_DEBUG"] = "WARN"
os.environ["TOKENIZERS_PARALLELISM"] = "true"
# os.environ['TORCH_COMPILE_DISABLE'] = '1'
import asyncio
from pprint import pprint
import pandas as pd
from omegaconf import OmegaConf
from openai.types.chat import ChatCompletion
from verl.utils.hdfs_io import makedirs
from verl.workers.rollout.replica import get_rollout_replica_class
async def start_server(config):
tp_size = config.actor_rollout_ref.rollout.tensor_model_parallel_size
num_replicas = (config.trainer.n_gpus_per_node * config.trainer.nnodes) // tp_size
rollout_config = config.actor_rollout_ref.rollout
model_config = config.actor_rollout_ref.model
# create standalone rollout server
rollout_server_class = get_rollout_replica_class(config.actor_rollout_ref.rollout.name)
rollout_servers = [
rollout_server_class(
replica_rank=replica_rank,
config=rollout_config,
model_config=model_config,
gpus_per_node=config.trainer.n_gpus_per_node,
)
for replica_rank in range(num_replicas)
]
await asyncio.gather(*[server.init_standalone() for server in rollout_servers])
server_handles = [server._server_handle for server in rollout_servers]
server_addresses = [server._server_address for server in rollout_servers]
assert len(server_handles) == num_replicas
assert len(server_addresses) == num_replicas
return server_handles, server_addresses
async def submit_request(server_address, **chat_complete_request):
try:
extra_headers = chat_complete_request.pop("extra_headers", {})
timeout = aiohttp.ClientTimeout(total=None)
session = aiohttp.ClientSession(timeout=timeout)
async with session.post(
url=f"http://{server_address}/v1/chat/completions",
headers={"Authorization": "Bearer token-abc123", **extra_headers},
json=chat_complete_request,
) as resp:
data = await resp.json()
return ChatCompletion(**data)
finally:
await session.close()
async def generate_per_replica(server_address, model_path: str, n_samples: int, sampling_params: dict, chat_lst: list):
    # Sample n_samples responses for each conversation in chat_lst.
    # We use aiohttp instead of AsyncOpenAI to avoid hangs when the number of concurrent requests is large.
# client = AsyncOpenAI(
# api_key="123-abc",
# base_url=f"http://{server_address}/v1",
# )
chat_complete_request = [
{
"model": model_path,
"messages": messages,
**sampling_params,
}
for messages in chat_lst
for _ in range(n_samples)
]
tasks = [submit_request(server_address, **req) for req in chat_complete_request]
results = await asyncio.gather(*tasks)
return results
async def generate(
server_addresses: list, model_path: str, n_samples: int, sampling_params: dict, chat_numpy: np.ndarray
):
num_replicas = len(server_addresses)
chat_sub_array = np.array_split(chat_numpy, num_replicas)
chat_sub_array = [chat.tolist() for chat in chat_sub_array]
assert len(server_addresses) == len(chat_sub_array)
results = await asyncio.gather(
*[
generate_per_replica(server_addresses[i], model_path, n_samples, sampling_params, chat_sub_array[i])
for i in range(num_replicas)
]
)
return results
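# Hedged sketch (not part of verl): `generate` above shards the prompt list across rollout replicas
# with np.array_split, which tolerates sizes that do not divide evenly. Example:
def _example_prompt_sharding():
    import numpy as np

    prompts = np.arange(10)  # stand-in for 10 conversations
    shards = np.array_split(prompts, 3)  # sizes 4, 3, 3 -- no prompt is dropped
    return [len(s) for s in shards]  # [4, 3, 3]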
@hydra.main(config_path="config", config_name="ppo_trainer", version_base=None)
def main(config):
ray.init(runtime_env={"env_vars": {"TOKENIZERS_PARALLELISM": "true", "NCCL_DEBUG": "WARN", "VLLM_USE_V1": "1"}})
pprint(OmegaConf.to_container(config, resolve=True)) # resolve=True will eval symbol values
OmegaConf.resolve(config)
n_samples = config.actor_rollout_ref.rollout.n
if config.actor_rollout_ref.rollout.temperature == 0.0:
assert n_samples == 1, "When temperature=0, n_samples must be 1."
    assert n_samples >= 1, "n_samples should always be >= 1"
sampling_params = {
"temperature": config.actor_rollout_ref.rollout.temperature,
"top_p": config.actor_rollout_ref.rollout.top_p,
# "top_k": config.actor_rollout_ref.rollout.top_k,
"max_tokens": config.actor_rollout_ref.rollout.response_length,
}
from omegaconf import ListConfig
train_files = config.data.train_files
if not isinstance(train_files, list | ListConfig):
train_files = [train_files]
    # Read the dataset. Note that the dataset should already be in chat-template format (e.g., a list of dictionaries).
datasets = []
for train_file in train_files:
dataset = pd.read_parquet(train_file)
datasets.append(dataset)
# concat dataset
dataset = pd.concat(datasets, axis=0, ignore_index=True)
chat_lst = dataset[config.data.prompt_key].tolist()
chat_lst = [chat.tolist() for chat in chat_lst]
chat_numpy = np.array(chat_lst)
# start native server
server_handles, server_addresses = asyncio.run(start_server(config))
# run generate
gen_results = asyncio.run(
generate(server_addresses, config.actor_rollout_ref.model.path, n_samples, sampling_params, chat_numpy)
)
# reshape results into a numpy array
import itertools
results = list(itertools.chain.from_iterable(gen_results))
# extract content from results
results = np.array([result.choices[0].message.content for result in results])
results = np.reshape(results, (-1, n_samples))
assert results.shape == (len(chat_lst), n_samples)
results = results.tolist()
# add to the data frame
dataset["responses"] = results
# write to a new parquet
output_dir = os.path.dirname(config.data.output_path)
makedirs(output_dir, exist_ok=True)
print(f"Saving results to {config.data.output_path}")
dataset.to_parquet(config.data.output_path)
if __name__ == "__main__":
main()
|
verl__trainer__main_ppo.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Note that we don't combine the main with ray_trainer, as ray_trainer is used by other main entry points.
"""
import os
import socket
import hydra
import ray
from omegaconf import OmegaConf
from verl.experimental.dataset.sampler import AbstractSampler
from verl.experimental.reward_loop import migrate_legacy_reward_impl
from verl.trainer.constants_ppo import get_ppo_ray_runtime_env
from verl.trainer.ppo.ray_trainer import RayPPOTrainer
from verl.trainer.ppo.utils import need_critic, need_reference_policy
from verl.utils.config import validate_config
from verl.utils.device import auto_set_device, is_cuda_available
from verl.utils.import_utils import load_extern_object
@hydra.main(config_path="config", config_name="ppo_trainer", version_base=None)
def main(config):
"""Main entry point for PPO training with Hydra configuration management.
Args:
config: Hydra configuration dictionary containing training parameters.
"""
# Automatically set `config.trainer.device = npu` when running on Ascend NPU.
auto_set_device(config)
config = migrate_legacy_reward_impl(config)
run_ppo(config)
# Define a function to run the PPO-like training process
def run_ppo(config, task_runner_class=None) -> None:
"""Initialize Ray cluster and run distributed PPO training process.
Args:
config: Training configuration object containing all necessary parameters
for distributed PPO training including Ray initialization settings,
model paths, and training hyperparameters.
task_runner_class: For recipe to change TaskRunner.
"""
# Check if Ray is not initialized
if not ray.is_initialized():
# Initialize Ray with a local cluster configuration
# Set environment variables in the runtime environment to control tokenizer parallelism,
# NCCL debug level, VLLM logging level, and allow runtime LoRA updating
# `num_cpus` specifies the number of CPU cores Ray can use, obtained from the configuration
default_runtime_env = get_ppo_ray_runtime_env()
ray_init_kwargs = config.ray_kwargs.get("ray_init", {})
runtime_env_kwargs = ray_init_kwargs.get("runtime_env", {})
if config.transfer_queue.enable:
# Add runtime environment variables for transfer queue
runtime_env_vars = runtime_env_kwargs.get("env_vars", {})
runtime_env_vars["TRANSFER_QUEUE_ENABLE"] = "1"
runtime_env_kwargs["env_vars"] = runtime_env_vars
runtime_env = OmegaConf.merge(default_runtime_env, runtime_env_kwargs)
ray_init_kwargs = OmegaConf.create({**ray_init_kwargs, "runtime_env": runtime_env})
print(f"ray init kwargs: {ray_init_kwargs}")
ray.init(**OmegaConf.to_container(ray_init_kwargs))
if task_runner_class is None:
task_runner_class = ray.remote(num_cpus=1)(TaskRunner) # please make sure main_task is not scheduled on head
# Create a remote instance of the TaskRunner class, and
# Execute the `run` method of the TaskRunner instance remotely and wait for it to complete
if (
is_cuda_available
and config.global_profiler.tool == "nsys"
and config.global_profiler.get("steps") is not None
and len(config.global_profiler.get("steps", [])) > 0
):
from verl.utils.import_utils import is_nvtx_available
assert is_nvtx_available(), "nvtx is not available in CUDA platform. Please 'pip3 install nvtx'"
nsight_options = OmegaConf.to_container(
config.global_profiler.global_tool_config.nsys.controller_nsight_options
)
runner = task_runner_class.options(runtime_env={"nsight": nsight_options}).remote()
else:
runner = task_runner_class.remote()
ray.get(runner.run.remote(config))
# [Optional] get the path of the timeline trace file from the configuration, default to None
# This file is used for performance analysis
timeline_json_file = config.ray_kwargs.get("timeline_json_file", None)
if timeline_json_file:
ray.timeline(filename=timeline_json_file)
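# Hedged sketch (not part of verl): how the runtime_env override in `run_ppo` above behaves.
# OmegaConf.merge merges configs left to right, so keys from the user-provided
# `ray_kwargs.ray_init.runtime_env` override the defaults while unrelated defaults survive.
# The concrete values below are illustrative only.
def _example_runtime_env_merge():
    from omegaconf import OmegaConf

    default_runtime_env = OmegaConf.create({"env_vars": {"TOKENIZERS_PARALLELISM": "true", "NCCL_DEBUG": "WARN"}})
    user_runtime_env = OmegaConf.create({"env_vars": {"NCCL_DEBUG": "INFO"}})
    merged = OmegaConf.merge(default_runtime_env, user_runtime_env)
    # merged.env_vars == {"TOKENIZERS_PARALLELISM": "true", "NCCL_DEBUG": "INFO"}
    return OmegaConf.to_container(merged)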
class TaskRunner:
"""Ray remote class for executing distributed PPO training tasks.
This class encapsulates the main training logic and runs as a Ray remote actor
to enable distributed execution across multiple nodes and GPUs.
Attributes:
role_worker_mapping: Dictionary mapping Role enums to Ray remote worker classes
mapping: Dictionary mapping Role enums to resource pool IDs for GPU allocation
"""
def __init__(self):
self.role_worker_mapping = {}
self.mapping = {}
def add_actor_rollout_worker(self, config):
"""Add actor rollout worker based on the actor strategy."""
from verl.single_controller.ray import RayWorkerGroup
from verl.trainer.ppo.ray_trainer import Role
use_legacy_worker_impl = config.trainer.get("use_legacy_worker_impl", "auto")
# use new model engine implementation
if use_legacy_worker_impl == "disable":
from verl.workers.engine_workers import ActorRolloutRefWorker
actor_rollout_cls = ActorRolloutRefWorker
ray_worker_group_cls = RayWorkerGroup
lora_rank = config.actor_rollout_ref.model.get("lora", {}).get("rank", 0)
if lora_rank <= 0:
lora_rank = config.actor_rollout_ref.model.get("lora_rank", 0)
ref_in_actor = lora_rank > 0 or config.actor_rollout_ref.model.get("lora_adapter_path") is not None
# NOTE: In new model engine, ref policy and actor rollout are in same ActorRolloutRefWorker,
# while in legacy model engine, ref policy is in a separate ActorRolloutRefWorker.
if need_reference_policy(config) and not ref_in_actor:
role = Role.ActorRolloutRef
else:
role = Role.ActorRollout
self.role_worker_mapping[role] = ray.remote(actor_rollout_cls)
self.mapping[role] = "global_pool"
return actor_rollout_cls, ray_worker_group_cls
# Note: sync mode validation is now handled in RolloutConfig.__post_init__
# Always use async worker since sync mode is deprecated and rejected
if config.actor_rollout_ref.actor.strategy in {"fsdp", "fsdp2"}:
from verl.workers.fsdp_workers import AsyncActorRolloutRefWorker
actor_rollout_cls = AsyncActorRolloutRefWorker
ray_worker_group_cls = RayWorkerGroup
elif config.actor_rollout_ref.actor.strategy == "megatron":
from verl.workers.megatron_workers import AsyncActorRolloutRefWorker
actor_rollout_cls = AsyncActorRolloutRefWorker
ray_worker_group_cls = RayWorkerGroup
elif config.actor_rollout_ref.actor.strategy == "veomni":
raise NotImplementedError("VeOmni does not support legacy worker implementation")
else:
raise NotImplementedError
self.role_worker_mapping[Role.ActorRollout] = ray.remote(actor_rollout_cls)
self.mapping[Role.ActorRollout] = "global_pool"
return actor_rollout_cls, ray_worker_group_cls
def add_critic_worker(self, config):
"""Add critic worker to role mapping."""
use_legacy_worker_impl = config.trainer.get("use_legacy_worker_impl", "auto")
if config.critic.strategy in {"fsdp", "fsdp2"}:
if use_legacy_worker_impl in ["auto", "enable"]:
from verl.workers.fsdp_workers import CriticWorker
elif use_legacy_worker_impl == "disable":
# we don't need to specialize critic worker. Just use TrainingWorker
from verl.workers.engine_workers import TrainingWorker
CriticWorker = TrainingWorker
print("Using new worker implementation")
else:
raise ValueError(f"Invalid use_legacy_worker_impl: {use_legacy_worker_impl}")
elif config.critic.strategy == "megatron":
# TODO: switch this to TrainingWorker as well
from verl.workers.megatron_workers import CriticWorker
elif config.critic.strategy == "veomni":
if use_legacy_worker_impl == "disable":
from verl.workers.engine_workers import TrainingWorker
CriticWorker = TrainingWorker
print("Using new worker implementation")
else:
raise ValueError(f"Invalid use_legacy_worker_impl: {use_legacy_worker_impl}")
else:
raise NotImplementedError
from verl.trainer.ppo.ray_trainer import Role
self.role_worker_mapping[Role.Critic] = ray.remote(CriticWorker)
self.mapping[Role.Critic] = "global_pool"
def init_resource_pool_mgr(self, config):
"""Initialize resource pool manager."""
global_pool_id = "global_pool"
resource_pool_spec = {
global_pool_id: [config.trainer.n_gpus_per_node] * config.trainer.nnodes,
}
if config.reward.reward_model.enable_resource_pool:
if config.reward.reward_model.n_gpus_per_node <= 0:
raise ValueError("config.reward.reward_model.n_gpus_per_node must be greater than 0")
if config.reward.reward_model.nnodes <= 0:
raise ValueError("config.reward.reward_model.nnodes must be greater than 0")
reward_pool = [config.reward.reward_model.n_gpus_per_node] * config.reward.reward_model.nnodes
resource_pool_spec["reward_pool"] = reward_pool
else:
config.reward.reward_model.nnodes = config.trainer.nnodes
config.reward.reward_model.n_gpus_per_node = config.trainer.n_gpus_per_node
from verl.trainer.ppo.ray_trainer import ResourcePoolManager
resource_pool_manager = ResourcePoolManager(resource_pool_spec=resource_pool_spec, mapping=self.mapping)
return resource_pool_manager
def add_reward_model_resource_pool(self, config):
"""Add reward model worker if enabled."""
from verl.trainer.ppo.ray_trainer import Role
if config.reward.reward_model.enable:
            # We do not use dedicated reward model workers, so we only register the reward model
            # in the resource pool without also registering a reward model worker in the role mapping.
if config.reward.reward_model.enable_resource_pool:
self.mapping[Role.RewardModel] = "reward_pool"
else:
self.mapping[Role.RewardModel] = "global_pool"
def add_ref_policy_worker(self, config, ref_policy_cls):
"""Add reference policy worker if KL loss or KL reward is used."""
from verl.trainer.ppo.ray_trainer import Role
# Ref policy has been fused into ActorRolloutRefWorker in new model engine,
# we don't need to add a separate ref policy worker group.
use_legacy_worker_impl = config.trainer.get("use_legacy_worker_impl", "auto")
if use_legacy_worker_impl == "disable":
return
if need_reference_policy(config):
self.role_worker_mapping[Role.RefPolicy] = ray.remote(ref_policy_cls)
self.mapping[Role.RefPolicy] = "global_pool"
def run(self, config):
"""Execute the main PPO training workflow.
This method sets up the distributed training environment, initializes
workers, datasets, and reward functions, then starts the training process.
Args:
config: Training configuration object containing all parameters needed
for setting up and running the PPO training process.
"""
# Print the initial configuration. `resolve=True` will evaluate symbolic values.
from pprint import pprint
from omegaconf import OmegaConf
from verl.utils.fs import copy_to_local
print(f"TaskRunner hostname: {socket.gethostname()}, PID: {os.getpid()}")
pprint(OmegaConf.to_container(config, resolve=True))
OmegaConf.resolve(config)
actor_rollout_cls, ray_worker_group_cls = self.add_actor_rollout_worker(config)
self.add_critic_worker(config)
self.add_reward_model_resource_pool(config)
# Add a reference policy worker if KL loss or KL reward is used.
self.add_ref_policy_worker(config, actor_rollout_cls)
# validate config
validate_config(
config=config,
use_reference_policy=need_reference_policy(config),
use_critic=need_critic(config),
)
# Download the checkpoint from HDFS to the local machine.
# `use_shm` determines whether to use shared memory, which could lead to faster model loading if turned on
local_path = copy_to_local(
config.actor_rollout_ref.model.path, use_shm=config.actor_rollout_ref.model.get("use_shm", False)
)
# Instantiate the tokenizer and processor.
from verl.utils import hf_processor, hf_tokenizer
trust_remote_code = config.data.get("trust_remote_code", False)
tokenizer = hf_tokenizer(local_path, trust_remote_code=trust_remote_code)
# Used for multimodal LLM, could be None
processor = hf_processor(local_path, trust_remote_code=trust_remote_code, use_fast=True)
resource_pool_manager = self.init_resource_pool_mgr(config)
from verl.utils.dataset.rl_dataset import collate_fn
# Create training and validation datasets.
train_dataset = create_rl_dataset(
config.data.train_files,
config.data,
tokenizer,
processor,
is_train=True,
max_samples=config.data.get("train_max_samples", -1),
)
val_dataset = create_rl_dataset(
config.data.val_files,
config.data,
tokenizer,
processor,
is_train=False,
max_samples=config.data.get("val_max_samples", -1),
)
train_sampler = create_rl_sampler(config.data, train_dataset)
# Initialize the PPO trainer.
trainer = RayPPOTrainer(
config=config,
tokenizer=tokenizer,
processor=processor,
role_worker_mapping=self.role_worker_mapping,
resource_pool_manager=resource_pool_manager,
ray_worker_group_cls=ray_worker_group_cls,
train_dataset=train_dataset,
val_dataset=val_dataset,
collate_fn=collate_fn,
train_sampler=train_sampler,
)
# Initialize the workers of the trainer.
trainer.init_workers()
# Start the training process.
trainer.fit()
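# Hedged sketch (not part of verl): the rough shape of the data structures TaskRunner builds. For a
# default single-pool run with a separate reference policy, the role/worker and role/resource-pool
# mappings end up like the dictionaries below; Role enum keys and worker classes are replaced by
# strings for readability, and the exact classes depend on the configured strategy.
def _example_task_runner_mappings():
    role_worker_mapping = {
        "ActorRollout": "ray.remote(AsyncActorRolloutRefWorker)",
        "Critic": "ray.remote(CriticWorker)",
        "RefPolicy": "ray.remote(AsyncActorRolloutRefWorker)",
    }
    mapping = {
        "ActorRollout": "global_pool",
        "Critic": "global_pool",
        "RefPolicy": "global_pool",
    }
    resource_pool_spec = {"global_pool": [8] * 1}  # n_gpus_per_node=8, nnodes=1 (illustrative)
    return role_worker_mapping, mapping, resource_pool_spec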
def create_rl_dataset(data_paths, data_config, tokenizer, processor, is_train=True, max_samples: int = -1):
"""Create a dataset.
Arguments:
data_paths: List of paths to data files.
data_config: The data config.
tokenizer (Tokenizer): The tokenizer.
processor (Processor): The processor.
Returns:
dataset (Dataset): The dataset.
"""
from verl.utils.dataset.rl_dataset import get_dataset_class
# Get the dataset class
dataset_cls = get_dataset_class(data_config)
# Instantiate the dataset using the determined dataset class
dataset = dataset_cls(
data_files=data_paths,
tokenizer=tokenizer,
processor=processor,
config=data_config,
max_samples=max_samples,
)
return dataset
def create_rl_sampler(data_config, dataset):
"""Create a sampler for the dataset.
Arguments:
data_config: The data config.
dataset (Dataset): The dataset.
Returns:
sampler (Sampler): The sampler.
"""
import torch
from torch.utils.data import SequentialSampler
    # torch.utils.data.RandomSampler cannot restore its state when resuming, so use the stateful variant below
from torchdata.stateful_dataloader.sampler import RandomSampler
if data_config.sampler is not None and data_config.sampler.get("class_path", None) is not None:
curriculum_class = load_extern_object(
data_config.sampler.class_path,
data_config.sampler.class_name,
)
sampler = curriculum_class(
data_source=dataset,
data_config=data_config,
)
assert isinstance(sampler, AbstractSampler)
assert data_config.get("dataloader_num_workers", 8) == 0, (
"If using curriculum, num_workers must be 0 to prevent data caching. "
"If the dataloader caches data before the batch is done the "
"curriculum sampler won't have the opportunity to reorder it. "
)
# Use a sampler to facilitate checkpoint resumption.
# If shuffling is enabled in the data configuration, create a random sampler.
elif data_config.shuffle:
train_dataloader_generator = torch.Generator()
seed = data_config.get("seed")
if seed is not None:
train_dataloader_generator.manual_seed(seed)
sampler = RandomSampler(data_source=dataset, generator=train_dataloader_generator)
else:
# If shuffling is disabled, use a sequential sampler to iterate through the dataset in order.
sampler = SequentialSampler(data_source=dataset)
return sampler
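# Hedged sketch (not part of verl): the effect of seeding the generator passed to the random sampler
# in `create_rl_sampler` above. With the same seed the shuffling order is reproducible across runs,
# which is what makes checkpoint resumption deterministic. torch.randperm stands in for the sampler.
def _example_seeded_shuffle(num_items: int = 8, seed: int = 1):
    import torch

    gen_a = torch.Generator()
    gen_a.manual_seed(seed)
    gen_b = torch.Generator()
    gen_b.manual_seed(seed)
    order_a = torch.randperm(num_items, generator=gen_a)
    order_b = torch.randperm(num_items, generator=gen_b)
    assert torch.equal(order_a, order_b)  # identical shuffles for the same seed
    return order_a.tolist()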
if __name__ == "__main__":
main()
|
verl__trainer__ppo__metric_utils.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Metrics related to the PPO trainer.
"""
from collections import defaultdict
from functools import partial
from typing import Any, Callable
import numpy as np
import torch
import verl.utils.torch_functional as verl_F
from verl import DataProto
from verl.utils.import_utils import deprecated
@deprecated("verl.utils.metric.reduce_metrics")
def reduce_metrics(metrics: dict[str, list[Any]]) -> dict[str, Any]:
"""
Reduces a dictionary of metric lists by computing the mean of each list.
Args:
metrics: A dictionary mapping metric names to lists of metric values.
Returns:
A dictionary with the same keys but with each list replaced by its mean value.
Example:
>>> metrics = {"loss": [1.0, 2.0, 3.0], "accuracy": [0.8, 0.9, 0.7]}
>>> reduce_metrics(metrics)
{"loss": 2.0, "accuracy": 0.8}
"""
from verl.utils.metric import reduce_metrics
return reduce_metrics(metrics)
def _compute_response_info(batch: DataProto) -> dict[str, Any]:
"""
Computes information about prompts and responses from a batch.
This is an internal helper function that extracts masks and lengths for prompts and responses.
Args:
batch: A DataProto object containing batch data with responses and attention masks.
Returns:
A dictionary containing:
- response_mask: Attention mask for the response tokens
- prompt_length: Tensor of prompt lengths for each item in the batch
- response_length: Tensor of response lengths for each item in the batch
"""
response_length = batch.batch["responses"].shape[-1]
prompt_mask = batch.batch["attention_mask"][:, :-response_length]
response_mask = batch.batch["attention_mask"][:, -response_length:]
prompt_length = prompt_mask.sum(-1).float()
response_length = response_mask.sum(-1).float() # (batch_size,)
return dict(
response_mask=response_mask,
prompt_length=prompt_length,
response_length=response_length,
)
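# Hedged sketch (not part of verl): how `_compute_response_info` splits the attention mask. The last
# `responses.shape[-1]` columns of the attention mask correspond to response tokens; everything
# before them belongs to the (left-padded) prompt. Toy example with made-up values:
def _example_response_info():
    import torch

    responses = torch.zeros(1, 4, dtype=torch.long)  # max response length = 4
    # first 4 columns: prompt region (2 real tokens); last 4 columns: response region (3 real tokens)
    attention_mask = torch.tensor([[0, 0, 1, 1, 1, 1, 1, 0]])
    response_length = responses.shape[-1]
    prompt_mask = attention_mask[:, :-response_length]
    response_mask = attention_mask[:, -response_length:]
    return prompt_mask.sum(-1).float(), response_mask.sum(-1).float()  # tensor([2.]), tensor([3.])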
def compute_data_metrics(batch: DataProto, use_critic: bool = True) -> dict[str, Any]:
"""
Computes various metrics from a batch of data for PPO training.
This function calculates metrics related to scores, rewards, advantages, returns, values,
and sequence lengths from a batch of data. It provides statistical information (mean, max, min)
for each metric category.
Args:
batch: A DataProto object containing batch data with token-level scores, rewards, advantages, etc.
use_critic: Whether to include critic-specific metrics. Defaults to True.
Returns:
A dictionary of metrics including:
- critic/score/mean, max, min: Statistics about sequence scores
- critic/rewards/mean, max, min: Statistics about sequence rewards
- critic/advantages/mean, max, min: Statistics about advantages
- critic/returns/mean, max, min: Statistics about returns
- critic/values/mean, max, min: Statistics about critic values (if use_critic=True)
- critic/vf_explained_var: Explained variance of the value function (if use_critic=True)
- response_length/mean, max, min, clip_ratio: Statistics about response lengths
- prompt_length/mean, max, min, clip_ratio: Statistics about prompt lengths
- num_turns/mean, max, min: Statistics about the number of multi-turn conversations
"""
sequence_score = batch.batch["token_level_scores"].sum(-1)
sequence_reward = batch.batch["token_level_rewards"].sum(-1)
advantages = batch.batch["advantages"]
returns = batch.batch["returns"]
max_response_length = batch.batch["responses"].shape[-1]
prompt_mask = batch.batch["attention_mask"][:, :-max_response_length].bool()
response_mask = batch.batch["response_mask"].bool()
max_prompt_length = prompt_mask.size(-1)
response_info = _compute_response_info(batch)
prompt_length = response_info["prompt_length"]
response_length = response_info["response_length"]
aborted_mask = (response_length == 0).bool()
non_aborted_mask = ~aborted_mask
non_aborted_sequence_score = sequence_score[non_aborted_mask]
non_aborted_sequence_reward = sequence_reward[non_aborted_mask]
score_mean = torch.mean(non_aborted_sequence_score).detach().item()
score_max = torch.max(non_aborted_sequence_score).detach().item()
score_min = torch.min(non_aborted_sequence_score).detach().item()
reward_mean = torch.mean(non_aborted_sequence_reward).detach().item()
reward_max = torch.max(non_aborted_sequence_reward).detach().item()
reward_min = torch.min(non_aborted_sequence_reward).detach().item()
valid_adv = torch.masked_select(advantages, response_mask)
valid_returns = torch.masked_select(returns, response_mask)
if use_critic:
values = batch.batch["values"]
valid_values = torch.masked_select(values, response_mask)
return_diff_var = torch.var(valid_returns - valid_values)
return_var = torch.var(valid_returns)
# Aborted samples and non-aborted response length statistics
# response_length_non_aborted/*: statistics computed on non-aborted samples only
aborted_ratio = torch.mean(aborted_mask.float()).detach().item()
non_aborted_response_length = response_length[non_aborted_mask]
if non_aborted_response_length.numel() > 0:
non_aborted_response_length_mean = torch.mean(non_aborted_response_length).detach().item()
non_aborted_response_length_max = torch.max(non_aborted_response_length).detach().item()
non_aborted_response_length_min = torch.min(non_aborted_response_length).detach().item()
non_aborted_response_length_clip_ratio = (
torch.mean(torch.eq(non_aborted_response_length, max_response_length).float()).detach().item()
)
else:
raise ValueError("All samples are aborted, this should not happen.")
metrics = {
# score
"critic/score/mean": score_mean,
"critic/score/max": score_max,
"critic/score/min": score_min,
# reward
"critic/rewards/mean": reward_mean,
"critic/rewards/max": reward_max,
"critic/rewards/min": reward_min,
# adv
"critic/advantages/mean": torch.mean(valid_adv).detach().item(),
"critic/advantages/max": torch.max(valid_adv).detach().item(),
"critic/advantages/min": torch.min(valid_adv).detach().item(),
# returns
"critic/returns/mean": torch.mean(valid_returns).detach().item(),
"critic/returns/max": torch.max(valid_returns).detach().item(),
"critic/returns/min": torch.min(valid_returns).detach().item(),
**(
{
# values
"critic/values/mean": torch.mean(valid_values).detach().item(),
"critic/values/max": torch.max(valid_values).detach().item(),
"critic/values/min": torch.min(valid_values).detach().item(),
# vf explained var
"critic/vf_explained_var": (1.0 - return_diff_var / (return_var + 1e-5)).detach().item(),
}
if use_critic
else {}
),
# response length
"response_length/mean": torch.mean(response_length).detach().item(),
"response_length/max": torch.max(response_length).detach().item(),
"response_length/min": torch.min(response_length).detach().item(),
"response_length/clip_ratio": torch.mean(torch.eq(response_length, max_response_length).float())
.detach()
.item(),
# response length (non-aborted only)
# These statistics exclude aborted samples to avoid skew from zeros
"response_length_non_aborted/mean": non_aborted_response_length_mean,
"response_length_non_aborted/max": non_aborted_response_length_max,
"response_length_non_aborted/min": non_aborted_response_length_min,
"response_length_non_aborted/clip_ratio": non_aborted_response_length_clip_ratio,
# aborted ratio
# Fraction of samples whose response length is zero
"response/aborted_ratio": aborted_ratio,
# prompt length
"prompt_length/mean": torch.mean(prompt_length).detach().item(),
"prompt_length/max": torch.max(prompt_length).detach().item(),
"prompt_length/min": torch.min(prompt_length).detach().item(),
"prompt_length/clip_ratio": torch.mean(torch.eq(prompt_length, max_prompt_length).float()).detach().item(),
}
# multi-turn conversation
if "__num_turns__" in batch.non_tensor_batch:
num_turns = batch.non_tensor_batch["__num_turns__"]
metrics["num_turns/min"] = num_turns.min()
metrics["num_turns/max"] = num_turns.max()
metrics["num_turns/mean"] = num_turns.mean()
if "tool_call_counts" in batch.non_tensor_batch:
tool_call_counts = batch.non_tensor_batch["tool_call_counts"]
metrics["tool_call_counts/min"] = tool_call_counts.min()
metrics["tool_call_counts/max"] = tool_call_counts.max()
metrics["tool_call_counts/mean"] = tool_call_counts.mean()
return metrics
def compute_timing_metrics(batch: DataProto, timing_raw: dict[str, float]) -> dict[str, Any]:
"""
Computes timing metrics for different processing stages in PPO training.
This function calculates both raw timing metrics (in seconds) and per-token timing metrics
(in milliseconds) for various processing stages like generation, reference computation,
value computation, advantage computation, and model updates.
Args:
batch: A DataProto object containing batch data with responses and attention masks.
timing_raw: A dictionary mapping stage names to their execution times in seconds.
Returns:
A dictionary containing:
- timing_s/{name}: Raw timing in seconds for each stage
- timing_per_token_ms/{name}: Per-token timing in milliseconds for each stage
Note:
Different stages use different token counts for normalization:
- "gen" uses only response tokens
- Other stages ("ref", "values", "adv", "update_critic", "update_actor") use all tokens
(prompt + response)
"""
response_info = _compute_response_info(batch)
num_prompt_tokens = torch.sum(response_info["prompt_length"]).item()
num_response_tokens = torch.sum(response_info["response_length"]).item()
num_overall_tokens = num_prompt_tokens + num_response_tokens
num_tokens_of_section = {
"gen": num_response_tokens,
**{name: num_overall_tokens for name in ["ref", "values", "adv", "update_critic", "update_actor"]},
}
return {
**{f"timing_s/{name}": value for name, value in timing_raw.items()},
**{
f"timing_per_token_ms/{name}": timing_raw[name] * 1000 / num_tokens_of_section[name]
for name in set(num_tokens_of_section.keys()) & set(timing_raw.keys())
},
}
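# Hedged sketch (not part of verl): the per-token timing conversion used above. A stage that took
# `t` seconds over `n` tokens is reported as t * 1000 / n milliseconds per token; "gen" is
# normalized by response tokens only, the other stages by all tokens. Values below are illustrative.
def _example_per_token_timing():
    timing_raw = {"gen": 12.0, "update_actor": 30.0}  # seconds
    num_response_tokens = 40_000
    num_overall_tokens = 100_000
    num_tokens_of_section = {"gen": num_response_tokens, "update_actor": num_overall_tokens}
    return {
        f"timing_per_token_ms/{name}": timing_raw[name] * 1000 / num_tokens_of_section[name]
        for name in num_tokens_of_section
    }  # {"timing_per_token_ms/gen": 0.3, "timing_per_token_ms/update_actor": 0.3}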
def compute_throughout_metrics(batch: DataProto, timing_raw: dict[str, float], n_gpus: int) -> dict[str, Any]:
"""
Computes throughput metrics for PPO training.
This function calculates performance metrics related to token processing speed,
including the total number of tokens processed, time per step, and throughput
(tokens per second per GPU).
Args:
batch: A DataProto object containing batch data with meta information about token counts.
timing_raw: A dictionary mapping stage names to their execution times in seconds.
Must contain a "step" key with the total step time.
n_gpus: Number of GPUs used for training.
Returns:
A dictionary containing:
- perf/total_num_tokens: Total number of tokens processed in the batch
- perf/time_per_step: Time taken for the step in seconds
- perf/throughput: Tokens processed per second per GPU
Note:
The throughput is calculated as total_tokens / (time * n_gpus) to normalize
across different GPU counts.
"""
total_num_tokens = sum(batch.meta_info["global_token_num"])
time = timing_raw["step"]
# estimated_flops, promised_flops = flops_function.estimate_flops(num_tokens, time)
# f'Actual TFLOPs/s/GPU': estimated_flops/(n_gpus),
# f'Theoretical TFLOPs/s/GPU': promised_flops,
return {
"perf/total_num_tokens": total_num_tokens,
"perf/time_per_step": time,
"perf/throughput": total_num_tokens / (time * n_gpus),
}
def compute_variance_proxy_metrics(batch: DataProto, gradient_norm: float = None) -> dict[str, float]:
"""
Compute variance proxy metrics using the simplified expected squared norm approach.
This metric provides a computationally efficient way to monitor gradient variance
during training. It works for any advantage estimator as long as sum_pi_squared
is available from the actor.
Theory:
- Full variance: Var(g̃) = E[||g̃||²] - ||g_true||²
- Simplified proxy (when ||g_true||² ≈ 0): Var(g̃) ≈ E[||g̃||²]
- Using W-score approximation: E[||g̃||²] ≈ E[A² × W(τ)]
Where W(τ) = Σ_t[1 - 2π_t(y_t) + Σπ²] is the score-norm proxy.
"""
metrics = {}
# Check if we have the necessary data (sum_pi_squared is required for W-score)
if "sum_pi_squared" not in batch.batch or "old_log_probs" not in batch.batch or "advantages" not in batch.batch:
return metrics
# Compute W(τ) = Σ_t[1 - 2π_t(y_t) + Σπ²]
pi_t = torch.exp(batch.batch["old_log_probs"])
w_per_timestep = 1 - 2 * pi_t + batch.batch["sum_pi_squared"]
# Get response mask to only consider valid tokens
response_mask = batch.batch["response_mask"]
# Use pre-computed rollout IS weights from batch (for variance proxy consistency with training loss)
# IS weights are computed centrally in ray_trainer.py to avoid duplication
rollout_is_weights = None
if "rollout_is_weights" in batch.batch:
# Extract pre-computed IS weights from batch (already computed in trainer)
rollout_is_weights = batch.batch["rollout_is_weights"]
# Scale W by (rollout IS weight)² for optimal baseline under biased estimation
w_per_timestep = w_per_timestep * (rollout_is_weights**2).detach()
# Note: IS weight statistics and mismatch metrics are logged in ray_trainer.py
# Get scalar advantages (mean over timesteps)
advantages = batch.batch["advantages"]
# Compute mean advantage per trajectory using masked_mean
advantages_scalar = verl_F.masked_mean(advantages, response_mask, axis=-1)
# Compute W values (sum over timesteps)
w_values = verl_F.masked_sum(w_per_timestep, response_mask, axis=-1)
# ====== COMPUTE VARIANCE PROXIES ======
# Variance proxy should match the actual gradient computation:
# - If IS weights were computed/applied: use them in variance proxy calculation
# - Otherwise: compute on-policy variance proxy
# ====== PROXY 1: Signal Strength ||ḡ||² ======
# The squared norm of the mean gradient (provided from training loop)
proxy1_signal_strength = gradient_norm**2 if gradient_norm is not None else None
# ====== PROXY 2: Total Power E[||ĝ_τ||²] ======
# Measures the average of squared gradient norms (Signal + Noise)
if rollout_is_weights is not None:
# Off-policy with IS correction applied: use clamped weights consistently with actual gradient computation
rollout_is_weights_scalar = verl_F.masked_mean(rollout_is_weights, response_mask, axis=-1)
# Recover original W (undo the IS correction applied a few lines above)
# Clamp to avoid division by zero when IS weights are zero
w_original = verl_F.masked_sum(
w_per_timestep / torch.clamp((rollout_is_weights**2).detach(), min=1e-10), response_mask, axis=-1
)
# Clamp W to avoid negative values (which would cause NaN in sqrt)
w_original = torch.clamp(w_original, min=0.0)
# Proxy 2 for off-policy: E[ρ̄² × A² × W]
proxy2_total_power = ((rollout_is_weights_scalar**2) * (advantages_scalar**2) * w_original).mean()
else:
# On-policy Proxy 2: E[A² × W]
# Clamp W to avoid negative values (which would cause NaN in sqrt)
w_values_clamped = torch.clamp(w_values, min=0.0)
proxy2_total_power = (advantages_scalar**2 * w_values_clamped).mean()
# ====== PROXY 3: Pure Noise - Variance of Mean Vector ======
# Requires ||ḡ||² from actual batch gradient
# Formula: (1/(N-1)) × (Proxy2 - Proxy1)
proxy3_pure_noise = None
if proxy1_signal_strength is not None:
batch_size = advantages_scalar.shape[0]
if batch_size > 1:
proxy3_pure_noise = (1.0 / (batch_size - 1)) * (proxy2_total_power - proxy1_signal_strength)
# Ensure non-negative (can be negative due to numerical errors)
proxy3_pure_noise = max(
0.0, proxy3_pure_noise.item() if torch.is_tensor(proxy3_pure_noise) else proxy3_pure_noise
)
# Decompose into components for analysis
expected_a_squared = (advantages_scalar**2).mean()
expected_w = w_values.mean()
metrics.update(
{
# Proxy 1: Signal Strength ||ḡ||²
"variance_proxy/proxy1_signal_strength": (
proxy1_signal_strength if proxy1_signal_strength is not None else 0.0
),
# Proxy 2: Total Power E[||ĝ_τ||²]
"variance_proxy/proxy2_total_power": proxy2_total_power.detach().item(),
# Proxy 3: Pure Noise - Variance of Mean Vector
"variance_proxy/proxy3_pure_noise": proxy3_pure_noise if proxy3_pure_noise is not None else 0.0,
# Component metrics for debugging
"variance_proxy/expected_a_squared": expected_a_squared.detach().item(),
"variance_proxy/expected_w": expected_w.detach().item(),
}
)
return metrics
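# Minimal sketch (illustrative only, not part of verl) of the on-policy "total power"
# proxy computed above; the argument names and shapes (batch, response_len) are
# assumptions for illustration, not the trainer's API.
def _sketch_on_policy_total_power(old_log_probs, sum_pi_squared, response_mask, adv_scalar):
    # Per-token W: 1 - 2*pi_t(y_t) + sum_j pi_t(j)^2
    w = 1.0 - 2.0 * torch.exp(old_log_probs) + sum_pi_squared
    # W(tau): sum over valid response tokens, clamped to avoid negative values
    w_traj = (w * response_mask).sum(dim=-1).clamp(min=0.0)
    # Proxy 2: E[A^2 * W(tau)] over the batch
    return (adv_scalar**2 * w_traj).mean()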
def bootstrap_metric(
data: list[Any],
subset_size: int,
reduce_fns: list[Callable[[np.ndarray], float]],
n_bootstrap: int = 1000,
seed: int = 42,
) -> list[tuple[float, float]]:
"""
Performs bootstrap resampling to estimate statistics of metrics.
This function uses bootstrap resampling to estimate the mean and standard deviation
of metrics computed by the provided reduction functions on random subsets of the data.
Args:
data: List of data points to bootstrap from.
subset_size: Size of each bootstrap sample.
reduce_fns: List of functions that compute a metric from a subset of data.
n_bootstrap: Number of bootstrap iterations. Defaults to 1000.
seed: Random seed for reproducibility. Defaults to 42.
Returns:
A list of tuples, where each tuple contains (mean, std) for a metric
corresponding to each reduction function in reduce_fns.
Example:
>>> data = [1, 2, 3, 4, 5]
>>> reduce_fns = [np.mean, np.max]
>>> bootstrap_metric(data, 3, reduce_fns)
[(3.0, 0.5), (4.5, 0.3)] # Example values
"""
np.random.seed(seed)
data_np = np.array(data, dtype=object)
n_data = len(data_np)
# generate bootstrap indices, shape: (n_bootstrap, subset_size)
bootstrap_idxs = np.random.choice(n_data, size=(n_bootstrap, subset_size), replace=True)
# pre-allocate result array, shape: (n_fns, n_bootstrap)
n_fns = len(reduce_fns)
metric_results = np.empty((n_fns, n_bootstrap), dtype=np.float64)
# compute metric results for each bootstrap sample
for fn_idx, reduce_fn in enumerate(reduce_fns):
# bootstrap sample and compute metric
for boot_idx in range(n_bootstrap):
sample = data_np[bootstrap_idxs[boot_idx]]
metric_results[fn_idx, boot_idx] = reduce_fn(sample)
# compute mean and std for each metric function
result = [
(float(np.mean(metric_results[fn_idx])), float(np.std(metric_results[fn_idx]))) for fn_idx in range(n_fns)
]
return result
def calc_maj_val(data: list[dict[str, Any]], vote_key: str, val_key: str) -> float:
"""
Calculate a value based on majority voting.
This function identifies the most common value for a specified vote key
in the data, then returns the corresponding value for that majority vote.
Args:
data: List of dictionaries, where each dictionary contains both vote_key and val_key.
vote_key: The key in each dictionary used for voting/counting.
val_key: The key in each dictionary whose value will be returned for the majority vote.
Returns:
The value associated with the most common vote.
Example:
>>> data = [
... {"pred": "A", "val": 0.9},
... {"pred": "B", "val": 0.8},
... {"pred": "A", "val": 0.7}
... ]
>>> calc_maj_val(data, vote_key="pred", val_key="val")
0.9 # Returns the first "val" for the majority vote "A"
"""
vote2vals = defaultdict(list)
for d in data:
vote2vals[d[vote_key]].append(d[val_key])
vote2cnt = {k: len(v) for k, v in vote2vals.items()}
maj_vote = max(vote2cnt, key=vote2cnt.get)
maj_val = vote2vals[maj_vote][0]
return maj_val
def process_validation_metrics(
data_sources: list[str], sample_uids: list[str], infos_dict: dict[str, list[Any]], seed: int = 42
) -> dict[str, dict[str, dict[str, float]]]:
"""
Process validation metrics into a structured format with statistical analysis.
This function organizes validation metrics by data source and prompt, then computes
various statistical measures including means, standard deviations, best/worst values,
and majority voting results. It also performs bootstrap sampling to estimate statistics
for different sample sizes.
Args:
data_sources: List of data source identifiers for each sample.
sample_uids: List of sample uids corresponding to each sample.
infos_dict: Dictionary mapping variable names to lists of values for each sample.
seed: Random seed for bootstrap sampling. Defaults to 42.
Returns:
A nested dictionary with the structure:
{
data_source: {
variable_name: {
metric_name: value
}
}
}
Where metric_name includes:
- "mean@N": Mean value across N samples
- "std@N": Standard deviation across N samples
- "best@N/mean": Mean of the best values in bootstrap samples of size N
- "best@N/std": Standard deviation of the best values in bootstrap samples
- "worst@N/mean": Mean of the worst values in bootstrap samples
- "worst@N/std": Standard deviation of the worst values in bootstrap samples
- "maj@N/mean": Mean of majority voting results in bootstrap samples (if "pred" exists)
- "maj@N/std": Standard deviation of majority voting results (if "pred" exists)
Example:
>>> data_sources = ["source1", "source1", "source2"]
>>> sample_uids = ["uid1", "uid1", "uid2"]
>>> infos_dict = {"score": [0.8, 0.9, 0.7], "pred": ["A", "A", "B"]}
>>> result = process_validation_metrics(data_sources, sample_uids, infos_dict)
>>> # result will contain statistics for each data source and variable
"""
# Group metrics by data source, prompt and variable
data_src2uid2var2vals = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
for sample_idx, data_source in enumerate(data_sources):
uid = sample_uids[sample_idx]
var2vals = data_src2uid2var2vals[data_source][uid]
for var_name, var_vals in infos_dict.items():
var2vals[var_name].append(var_vals[sample_idx])
np_mean = np.mean
np_std = np.std
reduce_fns_best_worst = [np.max, np.min]
n_bootstrap = 1000
# 2. cache ns list
def gen_ns(n_resps: int) -> list[int]:
if n_resps <= 1:
return []
ns = []
n = 2
while n < n_resps:
ns.append(n)
n *= 2
ns.append(n_resps)
return ns
ns_cache = {}
# 3. cache metric results
data_src2uid2var2metric = {}
# 4. flatten loop
for data_source, uid2var2vals in data_src2uid2var2vals.items():
# create uid dict
uid_dict = data_src2uid2var2metric.setdefault(data_source, {})
for uid, var2vals in uid2var2vals.items():
pred_vals = var2vals.get("pred")
has_pred = pred_vals is not None
var_dict = uid_dict.setdefault(uid, {})
for var_name, var_vals in var2vals.items():
# skip empty or string values
if not var_vals or isinstance(var_vals[0], str):
continue
# compute mean and std
n_resps = len(var_vals)
metric = {f"mean@{n_resps}": float(np_mean(var_vals))}
if n_resps > 1:
metric[f"std@{n_resps}"] = float(np_std(var_vals))
# cache ns list
if n_resps not in ns_cache:
ns_cache[n_resps] = gen_ns(n_resps)
ns = ns_cache[n_resps]
# compute best/worst metrics
for n in ns:
# compute best/worst metrics
(bon_mean, bon_std), (won_mean, won_std) = bootstrap_metric(
data=var_vals,
subset_size=n,
reduce_fns=reduce_fns_best_worst,
n_bootstrap=n_bootstrap,
seed=seed,
)
metric[f"best@{n}/mean"] = bon_mean
metric[f"best@{n}/std"] = bon_std
metric[f"worst@{n}/mean"] = won_mean
metric[f"worst@{n}/std"] = won_std
# compute maj metrics
if has_pred:
# create vote_data
vote_data = [
{"val": val, "pred": pred} for val, pred in zip(var_vals, pred_vals, strict=True)
]
# compute maj metrics
[(maj_n_mean, maj_n_std)] = bootstrap_metric(
data=vote_data,
subset_size=n,
reduce_fns=[partial(calc_maj_val, vote_key="pred", val_key="val")],
n_bootstrap=n_bootstrap,
seed=seed,
)
metric[f"maj@{n}/mean"] = maj_n_mean
metric[f"maj@{n}/std"] = maj_n_std
var_dict[var_name] = metric
# Aggregate metrics across uids
data_src2var2metric2uid_vals = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
for data_source, uid2var2metric in data_src2uid2var2metric.items():
for uid, var2metric in uid2var2metric.items():
for var_name, metric in var2metric.items():
for metric_name, metric_val in metric.items():
data_src2var2metric2uid_vals[data_source][var_name][metric_name].append(metric_val)
data_src2var2metric2val = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))
for data_source, var2metric2uid_vals in data_src2var2metric2uid_vals.items():
for var_name, metric2uid_vals in var2metric2uid_vals.items():
for metric_name, uid_vals in metric2uid_vals.items():
data_src2var2metric2val[data_source][var_name][metric_name] = np.mean(uid_vals)
return data_src2var2metric2val
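# Illustrative note (not part of verl): gen_ns doubles n until it reaches the number of
# responses, so n_resps = 8 yields ns = [2, 4, 8] (reporting best@/worst@/maj@ at those
# sizes) and n_resps = 5 yields [2, 4, 5]; a single response produces no @N metrics
# beyond mean@1.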
|
verl__trainer__ppo__prefix_grouper_utils.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import torch
from prefix_grouper import PrefixGrouper
from verl.utils.torch_functional import logprobs_from_logits
def build_position_ids_for_prefix_grouper(prefix_grouper: PrefixGrouper) -> torch.Tensor:
"""Build position_ids for PrefixGrouper where each response restarts from prefix_len."""
num_samples = len(prefix_grouper.group_info)
max_len = prefix_grouper.padding_mask.size(1)
device = prefix_grouper.padding_mask.device
position_ids = torch.zeros(num_samples, max_len, dtype=torch.long, device=device)
for i, group in enumerate(prefix_grouper.group_info):
prefix_len = group.prefix_len
position_ids[i, :prefix_len] = torch.arange(prefix_len, device=device)
cur_pos = prefix_len
for suffix_len in group.suffix_lens:
if suffix_len > 0:
position_ids[i, cur_pos : cur_pos + suffix_len] = torch.arange(
prefix_len, prefix_len + suffix_len, device=device
)
cur_pos += suffix_len
return position_ids
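# Illustrative layout (not part of verl): for a group with prefix_len = 3 and two
# responses of lengths 2 and 2 packed back-to-back, the row of position_ids reads
#   prefix: 0 1 2 | response 1: 3 4 | response 2: 3 4
# i.e. every response restarts its positions at prefix_len, as stated in the docstring.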
def build_pg_from_micro_batch(
micro_batch: dict,
pad_token_id: int,
padding_mode: str = "right",
):
"""Build PrefixGrouper from micro_batch dict containing prompts, responses, response_mask, uid."""
prompts = micro_batch["prompts"]
responses = micro_batch["responses"]
response_mask = micro_batch["response_mask"]
uids = micro_batch["uid"]
bs = responses.size(0)
group_sizes = []
cur = 1
for i in range(1, bs):
if uids[i] == uids[i - 1]:
cur += 1
else:
group_sizes.append(cur)
cur = 1
group_sizes.append(cur)
prefix_indices = []
cursor = 0
for gs in group_sizes:
prefix_indices.append(cursor)
cursor += gs
prefix_indices = torch.tensor(prefix_indices, device=prompts.device)
prefix_ids = prompts.index_select(0, prefix_indices)
prefix_mask = prefix_ids.ne(pad_token_id)
prefix_grouper = PrefixGrouper.from_ungrouped_masks(
prefix_mask=prefix_mask,
suffix_mask=response_mask,
group_sizes=group_sizes,
padding_mode=padding_mode,
device=prompts.device,
)
concat_input_ids = prefix_grouper.concat_input(prefix_ids, prefix_mask, responses, response_mask)
attention_mask = prefix_grouper.padding_mask
position_ids = build_position_ids_for_prefix_grouper(prefix_grouper)
return (
prefix_grouper,
concat_input_ids,
attention_mask,
position_ids,
responses,
response_mask,
)
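# Illustrative note (not part of verl): with uid = ["a", "a", "a", "b", "b"] the loop
# above produces group_sizes = [3, 2] and prefix_indices = [0, 3], i.e. one shared
# prompt row is selected per run of consecutive equal uids (same-uid samples are
# therefore expected to be adjacent in the micro batch).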
def pg_forward(
model,
prefix_grouper,
concat_input_ids,
attention_mask,
position_ids,
completion_ids,
completion_mask,
*,
temperature=1.0,
padding_mode="right",
include_prefix_last=1,
calculate_entropy=False,
entropy_fn=None,
):
logits = model(
input_ids=concat_input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
use_cache=False,
prefix_grouper=prefix_grouper,
).logits
prefix_out, prefix_mask, suffix_out_raw, suffix_mask_raw = prefix_grouper.split_output(
logits, include_prefix_last=include_prefix_last
)
completion_ids_right = prefix_grouper.convert_padding(
completion_ids,
completion_mask,
padding_mode=padding_mode,
)
suffix_out = suffix_out_raw[:, :-1].float()
suffix_mask = suffix_mask_raw[:, 1:]
suffix_out /= temperature
log_probs = logprobs_from_logits(suffix_out, completion_ids_right)
entropy = None
if calculate_entropy and entropy_fn is not None:
entropy = entropy_fn(suffix_out)
return log_probs, entropy, suffix_mask
def forward_micro_batch_with_prefix_grouper(
micro_batch: dict,
model,
temperature: float,
calculate_entropy: bool,
device_name: str,
param_dtype,
use_chunking_entropy: bool = False,
):
"""
Forward pass using PrefixGrouper for shared-prefix optimization.
Args:
micro_batch: Dict containing prompts, responses, response_mask, uid, etc.
model: The actor module.
temperature: Temperature for logits scaling.
calculate_entropy: Whether to compute entropy.
device_name: Device name for autocast.
param_dtype: Parameter dtype for autocast.
use_chunking_entropy: Whether to use chunking entropy function.
Returns:
tuple: (entropy, log_probs) where entropy may be None if not calculated.
"""
import verl.utils.torch_functional as verl_F
entropy_fn = None
if calculate_entropy:
if use_chunking_entropy:
entropy_fn = verl_F.entropy_from_logits_with_chunking
else:
entropy_fn = verl_F.entropy_from_logits
pad_token_id = micro_batch.get("pad_token_id", 0)
(
prefix_grouper,
concat_input_ids,
attention_mask,
position_ids,
responses,
response_mask,
) = build_pg_from_micro_batch(
micro_batch,
pad_token_id=pad_token_id,
padding_mode="right",
)
with torch.autocast(device_type=device_name, dtype=param_dtype):
log_probs, entropy, suffix_mask_from_pg = pg_forward(
model=model,
prefix_grouper=prefix_grouper,
concat_input_ids=concat_input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
completion_ids=responses,
completion_mask=response_mask,
temperature=temperature,
padding_mode="right",
include_prefix_last=1,
calculate_entropy=calculate_entropy,
entropy_fn=entropy_fn,
)
# Zero out padding positions
padding_mask = suffix_mask_from_pg == 0
log_probs = log_probs.masked_fill(padding_mask, 0.0)
if entropy is not None:
entropy = entropy.masked_fill(padding_mask, 0.0)
# Pad to target response length if needed
target_response_length = responses.size(1)
if log_probs.size(1) != target_response_length:
batch_size = log_probs.size(0)
current_len = log_probs.size(1)
full_log_probs = log_probs.new_zeros(batch_size, target_response_length)
full_log_probs[:, :current_len] = log_probs
log_probs = full_log_probs
if entropy is not None:
full_entropy = entropy.new_zeros(batch_size, target_response_length)
full_entropy[:, :current_len] = entropy
entropy = full_entropy
return entropy, log_probs
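# Hypothetical call sketch (illustrative only; the concrete values are assumptions):
#   entropy, log_probs = forward_micro_batch_with_prefix_grouper(
#       micro_batch, model, temperature=1.0, calculate_entropy=True,
#       device_name="cuda", param_dtype=torch.bfloat16,
#   )
# log_probs comes back with shape (batch, response_len); positions beyond each response
# (and any length shortfall versus the padded target) are filled with zeros.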
|
verl__trainer__ppo__ray_trainer.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023-2024 SGLang Team
# Copyright 2025 ModelBest Inc. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
PPO Trainer with Ray-based single controller.
This trainer supports model-agnostic model initialization with HuggingFace.
"""
import json
import os
import uuid
from collections import defaultdict
from copy import deepcopy
from pprint import pprint
from typing import Any, Optional
import numpy as np
import torch
from omegaconf import OmegaConf, open_dict
from torch.utils.data import Dataset, Sampler
from torchdata.stateful_dataloader import StatefulDataLoader
from tqdm import tqdm
from verl import DataProto
from verl.checkpoint_engine import CheckpointEngineManager
from verl.experimental.dataset.sampler import AbstractCurriculumSampler
from verl.protocol import pad_dataproto_to_divisor, unpad_dataproto
from verl.single_controller.ray import RayClassWithInitArgs, RayWorkerGroup, ResourcePoolManager
from verl.single_controller.ray.base import create_colocated_worker_cls
from verl.trainer.config import AlgoConfig
from verl.trainer.ppo import core_algos
from verl.trainer.ppo.core_algos import AdvantageEstimator, agg_loss
from verl.trainer.ppo.metric_utils import (
compute_data_metrics,
compute_throughout_metrics,
compute_timing_metrics,
compute_variance_proxy_metrics,
process_validation_metrics,
)
from verl.trainer.ppo.reward import extract_reward
from verl.trainer.ppo.utils import Role, WorkerType, need_critic, need_reference_policy, need_reward_model
from verl.utils import tensordict_utils as tu
from verl.utils.checkpoint.checkpoint_manager import find_latest_ckpt_path, should_save_ckpt_esi
from verl.utils.config import omega_conf_to_dataclass
from verl.utils.debug import marked_timer
from verl.utils.import_utils import load_class_from_fqn
from verl.utils.metric import reduce_metrics
from verl.utils.py_functional import rename_dict
from verl.utils.rollout_skip import RolloutSkip
from verl.utils.seqlen_balancing import calculate_workload, get_seqlen_balanced_partitions, log_seqlen_unbalance
from verl.utils.torch_functional import masked_mean
from verl.utils.tracking import ValidationGenerationsLogger
from verl.workers.config import FSDPEngineConfig
from verl.workers.utils.padding import left_right_2_no_padding, no_padding_2_padding
def apply_kl_penalty(data: DataProto, kl_ctrl: core_algos.AdaptiveKLController, kl_penalty="kl"):
"""Apply KL penalty to the token-level rewards.
This function computes the KL divergence between the reference policy and current policy,
then applies a penalty to the token-level rewards based on this divergence.
Args:
data (DataProto): The data containing batched model outputs and inputs.
kl_ctrl (core_algos.AdaptiveKLController): Controller for adaptive KL penalty.
kl_penalty (str, optional): Type of KL penalty to apply. Defaults to "kl".
Returns:
tuple: A tuple containing:
- The updated data with token-level rewards adjusted by KL penalty
- A dictionary of metrics related to the KL penalty
"""
response_mask = data.batch["response_mask"]
token_level_scores = data.batch["token_level_scores"]
batch_size = data.batch.batch_size[0]
# compute kl between ref_policy and current policy
# When apply_kl_penalty is called, algorithm.use_kl_in_reward=True, so the reference model has been enabled.
kld = core_algos.kl_penalty(
data.batch["old_log_probs"], data.batch["ref_log_prob"], kl_penalty=kl_penalty
) # (batch_size, response_length)
kld = kld * response_mask
beta = kl_ctrl.value
token_level_rewards = token_level_scores - beta * kld
current_kl = masked_mean(kld, mask=response_mask, axis=-1) # average over sequence
current_kl = torch.mean(current_kl, dim=0).item()
# according to https://github.com/huggingface/trl/blob/951ca1841f29114b969b57b26c7d3e80a39f75a0/trl/trainer/ppo_trainer.py#L837
kl_ctrl.update(current_kl=current_kl, n_steps=batch_size)
data.batch["token_level_rewards"] = token_level_rewards
metrics = {"actor/reward_kl_penalty": current_kl, "actor/reward_kl_penalty_coeff": beta}
return data, metrics
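# Illustrative note (not part of verl; numbers are made up): per token the shaped reward is
#   token_level_rewards = token_level_scores - beta * kl(old_log_probs, ref_log_prob)
# so with beta = 0.01 and a token-level KL of 0.5 that token's score drops by 0.005; the
# adaptive controller then updates beta from the sequence-averaged, batch-mean KL.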
def compute_response_mask(data: DataProto):
"""Compute the attention mask for the response part of the sequence.
This function extracts the portion of the attention mask that corresponds to the model's response,
which is used for masking computations that should only apply to response tokens.
Args:
data (DataProto): The data containing batched model outputs and inputs.
Returns:
torch.Tensor: The attention mask for the response tokens.
"""
responses = data.batch["responses"]
response_length = responses.size(1)
attention_mask = data.batch["attention_mask"]
return attention_mask[:, -response_length:]
def compute_advantage(
data: DataProto,
adv_estimator: AdvantageEstimator,
gamma: float = 1.0,
lam: float = 1.0,
num_repeat: int = 1,
norm_adv_by_std_in_grpo: bool = True,
config: Optional[AlgoConfig] = None,
) -> DataProto:
"""Compute advantage estimates for policy optimization.
This function computes advantage estimates using various estimators like GAE, GRPO, REINFORCE++, etc.
The advantage estimates are used to guide policy optimization in RL algorithms.
Args:
data (DataProto): The data containing batched model outputs and inputs.
adv_estimator (AdvantageEstimator): The advantage estimator to use (e.g., GAE, GRPO, REINFORCE++).
gamma (float, optional): Discount factor for future rewards. Defaults to 1.0.
lam (float, optional): Lambda parameter for GAE. Defaults to 1.0.
num_repeat (int, optional): Number of times to repeat the computation. Defaults to 1.
norm_adv_by_std_in_grpo (bool, optional): Whether to normalize advantages by standard deviation in
GRPO. Defaults to True.
config (AlgoConfig, optional): Algorithm configuration object (e.g., PF-PPO settings). Defaults to None.
Returns:
DataProto: The updated data with computed advantages and returns.
"""
# Back-compatible with trainers that do not compute response mask in fit
if "response_mask" not in data.batch.keys():
data.batch["response_mask"] = compute_response_mask(data)
# prepare response group
if adv_estimator == AdvantageEstimator.GAE:
# Compute advantages and returns using Generalized Advantage Estimation (GAE)
advantages, returns = core_algos.compute_gae_advantage_return(
token_level_rewards=data.batch["token_level_rewards"],
values=data.batch["values"],
response_mask=data.batch["response_mask"],
gamma=gamma,
lam=lam,
)
data.batch["advantages"] = advantages
data.batch["returns"] = returns
if config.get("use_pf_ppo", False):
data = core_algos.compute_pf_ppo_reweight_data(
data,
config.pf_ppo.get("reweight_method"),
config.pf_ppo.get("weight_pow"),
)
elif adv_estimator == AdvantageEstimator.GRPO:
# Initialize the mask for GRPO calculation
grpo_calculation_mask = data.batch["response_mask"]
# Call compute_grpo_outcome_advantage with parameters matching its definition
advantages, returns = core_algos.compute_grpo_outcome_advantage(
token_level_rewards=data.batch["token_level_rewards"],
response_mask=grpo_calculation_mask,
index=data.non_tensor_batch["uid"],
norm_adv_by_std_in_grpo=norm_adv_by_std_in_grpo,
)
data.batch["advantages"] = advantages
data.batch["returns"] = returns
else:
# handle all other adv estimator type other than GAE and GRPO
adv_estimator_fn = core_algos.get_adv_estimator_fn(adv_estimator)
adv_kwargs = {
"token_level_rewards": data.batch["token_level_rewards"],
"response_mask": data.batch["response_mask"],
"config": config,
}
if "uid" in data.non_tensor_batch: # optional
adv_kwargs["index"] = data.non_tensor_batch["uid"]
if "reward_baselines" in data.batch: # optional
adv_kwargs["reward_baselines"] = data.batch["reward_baselines"]
# Add sum_pi_squared for Optimal Token Baseline
if adv_estimator in (AdvantageEstimator.OPTIMAL_TOKEN_BASELINE, AdvantageEstimator.TIR_OPTIMAL_TOKEN_BASELINE):
# Check if sum_pi_squared is available
assert "sum_pi_squared" in data.batch, (
"Step-dependent optimal baseline requires sum_pi_squared from actor. "
"Please set actor.calculate_sum_pi_squared=True in config."
)
adv_kwargs["sum_pi_squared"] = data.batch["sum_pi_squared"]
# Get pre-computed rollout IS weights if available
rollout_is_weights = data.batch.get("rollout_is_weights", None)
adv_kwargs["rollout_is_weights"] = rollout_is_weights
# calculate advantage estimator
advantages, returns = adv_estimator_fn(**adv_kwargs)
data.batch["advantages"] = advantages
data.batch["returns"] = returns
return data
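# Illustrative note (not part of verl; follows the standard GRPO formulation): each
# response j sharing uid u gets the outcome advantage (R_j - mean_u) / (std_u + eps)
# when norm_adv_by_std_in_grpo is True (or R_j - mean_u otherwise), broadcast over its
# response tokens, whereas GAE runs the per-token TD(lambda) recursion over values and
# token_level_rewards.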
class RayPPOTrainer:
"""Distributed PPO trainer using Ray for scalable reinforcement learning.
This trainer orchestrates distributed PPO training across multiple nodes and GPUs,
managing actor rollouts, critic training, and reward computation with Ray backend.
Supports various model architectures including FSDP, Megatron, vLLM, and SGLang integration.
"""
# TODO: support each role have individual ray_worker_group_cls,
# i.e., support different backend of different role
def __init__(
self,
config,
tokenizer,
role_worker_mapping: dict[Role, WorkerType],
resource_pool_manager: ResourcePoolManager,
ray_worker_group_cls: type[RayWorkerGroup] = RayWorkerGroup,
processor=None,
train_dataset: Optional[Dataset] = None,
val_dataset: Optional[Dataset] = None,
collate_fn=None,
train_sampler: Optional[Sampler] = None,
device_name=None,
):
"""
Initialize distributed PPO trainer with Ray backend.
Note that this trainer runs on the driver process on a single CPU/GPU node.
Args:
config: Configuration object containing training parameters.
tokenizer: Tokenizer used for encoding and decoding text.
role_worker_mapping (dict[Role, WorkerType]): Mapping from roles to worker classes.
resource_pool_manager (ResourcePoolManager): Manager for Ray resource pools.
ray_worker_group_cls (RayWorkerGroup, optional): Class for Ray worker groups. Defaults to RayWorkerGroup.
processor: Optional data processor, used for multimodal data
train_dataset (Optional[Dataset], optional): Training dataset. Defaults to None.
val_dataset (Optional[Dataset], optional): Validation dataset. Defaults to None.
collate_fn: Function to collate data samples into batches.
train_sampler (Optional[Sampler], optional): Sampler for the training dataset. Defaults to None.
device_name (str, optional): Device name for training (e.g., "cuda", "cpu"). Defaults to None.
"""
# Store the tokenizer for text processing
self.tokenizer = tokenizer
self.processor = processor
self.config = config
self.hybrid_engine = config.actor_rollout_ref.hybrid_engine
assert self.hybrid_engine, "Currently, only the hybrid engine is supported"
if self.hybrid_engine:
assert Role.ActorRollout in role_worker_mapping or Role.ActorRolloutRef in role_worker_mapping, (
f"{role_worker_mapping.keys()=}"
)
self.role_worker_mapping = role_worker_mapping
self.resource_pool_manager = resource_pool_manager
self.use_reference_policy = need_reference_policy(self.config)
self.use_rm = need_reward_model(self.config)
self.use_critic = need_critic(self.config)
self.ray_worker_group_cls = ray_worker_group_cls
self.device_name = device_name if device_name else self.config.trainer.device
self.validation_generations_logger = ValidationGenerationsLogger(
project_name=self.config.trainer.project_name,
experiment_name=self.config.trainer.experiment_name,
)
# if ref_in_actor is True, the reference policy will be actor without lora applied
lora_rank = config.actor_rollout_ref.model.get("lora", {}).get("rank", 0)
if lora_rank <= 0:
lora_rank = config.actor_rollout_ref.model.get("lora_rank", 0)
self.ref_in_actor = lora_rank > 0 or config.actor_rollout_ref.model.get("lora_adapter_path") is not None
# define in-reward KL control
# kl loss control currently not supported
if self.config.algorithm.use_kl_in_reward:
self.kl_ctrl_in_reward = core_algos.get_kl_controller(self.config.algorithm.kl_ctrl)
self.use_prefix_grouper = self.config.actor_rollout_ref.actor.get("use_prefix_grouper", False)
self.use_legacy_worker_impl = config.trainer.get("use_legacy_worker_impl", "auto")
self._create_dataloader(train_dataset, val_dataset, collate_fn, train_sampler)
def _create_dataloader(self, train_dataset, val_dataset, collate_fn, train_sampler: Optional[Sampler]):
"""
Creates the train and validation dataloaders.
"""
# TODO: we have to make sure the batch size is divisible by the dp size
from verl.trainer.main_ppo import create_rl_dataset, create_rl_sampler
if train_dataset is None:
train_dataset = create_rl_dataset(
self.config.data.train_files,
self.config.data,
self.tokenizer,
self.processor,
max_samples=self.config.data.get("train_max_samples", -1),
)
if val_dataset is None:
val_dataset = create_rl_dataset(
self.config.data.val_files,
self.config.data,
self.tokenizer,
self.processor,
max_samples=self.config.data.get("val_max_samples", -1),
)
self.train_dataset, self.val_dataset = train_dataset, val_dataset
if train_sampler is None:
train_sampler = create_rl_sampler(self.config.data, self.train_dataset)
if collate_fn is None:
from verl.utils.dataset.rl_dataset import collate_fn as default_collate_fn
collate_fn = default_collate_fn
num_workers = self.config.data["dataloader_num_workers"]
self.train_dataloader = StatefulDataLoader(
dataset=self.train_dataset,
batch_size=self.config.data.get("gen_batch_size", self.config.data.train_batch_size),
num_workers=num_workers,
drop_last=True,
collate_fn=collate_fn,
sampler=train_sampler,
)
val_batch_size = self.config.data.val_batch_size # Prefer config value if set
if val_batch_size is None:
val_batch_size = len(self.val_dataset)
self.val_dataloader = StatefulDataLoader(
dataset=self.val_dataset,
batch_size=val_batch_size,
num_workers=num_workers,
shuffle=self.config.data.get("validation_shuffle", True),
drop_last=False,
collate_fn=collate_fn,
)
assert len(self.train_dataloader) >= 1, "Train dataloader is empty!"
assert len(self.val_dataloader) >= 1, "Validation dataloader is empty!"
print(
f"Size of train dataloader: {len(self.train_dataloader)}, Size of val dataloader: "
f"{len(self.val_dataloader)}"
)
total_training_steps = len(self.train_dataloader) * self.config.trainer.total_epochs
if self.config.trainer.total_training_steps is not None:
total_training_steps = self.config.trainer.total_training_steps
self.total_training_steps = total_training_steps
print(f"Total training steps: {self.total_training_steps}")
try:
OmegaConf.set_struct(self.config, True)
with open_dict(self.config):
if OmegaConf.select(self.config, "actor_rollout_ref.actor.optim"):
self.config.actor_rollout_ref.actor.optim.total_training_steps = total_training_steps
if OmegaConf.select(self.config, "critic.optim"):
self.config.critic.optim.total_training_steps = total_training_steps
except Exception as e:
print(f"Warning: Could not set total_training_steps in config. Structure missing? Error: {e}")
def _dump_generations(self, inputs, outputs, gts, scores, reward_extra_infos_dict, dump_path):
"""Dump rollout/validation samples as JSONL."""
os.makedirs(dump_path, exist_ok=True)
filename = os.path.join(dump_path, f"{self.global_steps}.jsonl")
n = len(inputs)
base_data = {
"input": inputs,
"output": outputs,
"gts": gts,
"score": scores,
"step": [self.global_steps] * n,
}
for k, v in reward_extra_infos_dict.items():
if len(v) == n:
base_data[k] = v
lines = []
for i in range(n):
entry = {k: v[i] for k, v in base_data.items()}
lines.append(json.dumps(entry, ensure_ascii=False))
with open(filename, "w") as f:
f.write("\n".join(lines) + "\n")
print(f"Dumped generations to {filename}")
def _log_rollout_data(
self, batch: DataProto, reward_extra_infos_dict: dict, timing_raw: dict, rollout_data_dir: str
):
"""Log rollout data to disk.
Args:
batch (DataProto): The batch containing rollout data
reward_extra_infos_dict (dict): Additional reward information to log
timing_raw (dict): Timing information for profiling
rollout_data_dir (str): Directory path to save the rollout data
"""
with marked_timer("dump_rollout_generations", timing_raw, color="green"):
inputs = self.tokenizer.batch_decode(batch.batch["prompts"], skip_special_tokens=True)
outputs = self.tokenizer.batch_decode(batch.batch["responses"], skip_special_tokens=True)
scores = batch.batch["token_level_scores"].sum(-1).cpu().tolist()
sample_gts = [item.non_tensor_batch.get("reward_model", {}).get("ground_truth", None) for item in batch]
reward_extra_infos_to_dump = reward_extra_infos_dict.copy()
if "request_id" in batch.non_tensor_batch:
# add request_id to the dumped copy without mutating the caller's dict
reward_extra_infos_to_dump.setdefault(
"request_id",
batch.non_tensor_batch["request_id"].tolist(),
)
self._dump_generations(
inputs=inputs,
outputs=outputs,
gts=sample_gts,
scores=scores,
reward_extra_infos_dict=reward_extra_infos_to_dump,
dump_path=rollout_data_dir,
)
def _maybe_log_val_generations(self, inputs, outputs, scores):
"""Log a table of validation samples to the configured logger (wandb or swanlab)"""
generations_to_log = self.config.trainer.log_val_generations
if generations_to_log == 0:
return
import numpy as np
# Create tuples of (input, output, score) and sort by input text
samples = list(zip(inputs, outputs, scores, strict=True))
samples.sort(key=lambda x: x[0]) # Sort by input text
# Use fixed random seed for deterministic shuffling
rng = np.random.RandomState(42)
rng.shuffle(samples)
# Take first N samples after shuffling
samples = samples[:generations_to_log]
# Log to each configured logger
self.validation_generations_logger.log(self.config.trainer.logger, samples, self.global_steps)
def _get_gen_batch(self, batch: DataProto) -> DataProto:
reward_keys = set({"data_source", "reward_model", "extra_info", "uid"}) & batch.non_tensor_batch.keys()
# pop all non-tensor keys except the reward-related ones into the generation batch
batch_keys_to_pop = []
non_tensor_batch_keys_to_pop = set(batch.non_tensor_batch.keys()) - reward_keys
gen_batch = batch.pop(
batch_keys=batch_keys_to_pop,
non_tensor_batch_keys=list(non_tensor_batch_keys_to_pop),
)
# For agent loop, we need reward model keys to compute score.
gen_batch.non_tensor_batch.update(batch.non_tensor_batch)
return gen_batch
def _compute_reward_colocate(self, batch: DataProto) -> tuple[torch.Tensor, dict[str, Any]] | torch.Tensor:
"""
compute reward use colocate reward model
"""
assert self.reward_loop_manager is not None, "RewardLoopManager is None"
batch_reward = self.reward_loop_manager.compute_rm_score(batch)
return batch_reward
def _validate(self, merged: bool = False):
data_source_lst = []
reward_extra_infos_dict: dict[str, list] = defaultdict(list)
# Lists to collect samples for the table
sample_inputs = []
sample_outputs = []
sample_gts = []
sample_scores = []
sample_turns = []
sample_uids = []
for test_data in self.val_dataloader:
test_batch = DataProto.from_single_dict(test_data)
if "uid" not in test_batch.non_tensor_batch:
test_batch.non_tensor_batch["uid"] = np.array(
[str(uuid.uuid4()) for _ in range(len(test_batch.batch))], dtype=object
)
# repeat test batch
test_batch = test_batch.repeat(
repeat_times=self.config.actor_rollout_ref.rollout.val_kwargs.n, interleave=True
)
ground_truths = [
item.non_tensor_batch.get("reward_model", {}).get("ground_truth", None) for item in test_batch
]
sample_gts.extend(ground_truths)
test_gen_batch = self._get_gen_batch(test_batch)
test_gen_batch.meta_info = {
"eos_token_id": self.tokenizer.eos_token_id,
"pad_token_id": self.tokenizer.pad_token_id,
"recompute_log_prob": False,
"do_sample": self.config.actor_rollout_ref.rollout.val_kwargs.do_sample,
"validate": True,
"global_steps": self.global_steps,
}
print(f"test_gen_batch meta info: {test_gen_batch.meta_info}")
# pad to be divisible by dp_size
size_divisor = self.config.actor_rollout_ref.rollout.agent.num_workers
test_gen_batch_padded, pad_size = pad_dataproto_to_divisor(test_gen_batch, size_divisor)
test_output_gen_batch_padded = self.async_rollout_manager.generate_sequences(test_gen_batch_padded)
if self.use_rm and "rm_scores" not in test_output_gen_batch_padded.batch.keys():
# for colocate reward models, we need to sleep rollout model
# to spare GPU memory for reward model
self.checkpoint_manager.sleep_replicas()
batch_reward = self._compute_reward_colocate(test_output_gen_batch_padded)
test_output_gen_batch_padded = test_output_gen_batch_padded.union(batch_reward)
# wake up rollout model
# replace with wake_up method once supported
self.checkpoint_manager.update_weights()
# unpad
test_output_gen_batch = unpad_dataproto(test_output_gen_batch_padded, pad_size=pad_size)
print("validation generation end")
# Store generated outputs
output_ids = test_output_gen_batch.batch["responses"]
output_texts = [self.tokenizer.decode(ids, skip_special_tokens=True) for ids in output_ids]
sample_outputs.extend(output_texts)
test_batch = test_batch.union(test_output_gen_batch)
test_batch.meta_info["validate"] = True
# Store original inputs
input_ids = test_batch.batch["prompts"]
# TODO: Can we keep special tokens except for padding tokens?
input_texts = [self.tokenizer.decode(ids, skip_special_tokens=True) for ids in input_ids]
sample_inputs.extend(input_texts)
sample_uids.extend(test_batch.non_tensor_batch["uid"])
# evaluate using reward_function
reward_tensor, reward_extra_info = extract_reward(test_batch)
scores = reward_tensor.sum(-1).cpu().tolist()
sample_scores.extend(scores)
reward_extra_infos_dict["reward"].extend(scores)
for key, values in reward_extra_info.items():
if key not in reward_extra_infos_dict:
reward_extra_infos_dict[key] = []
if isinstance(values, np.ndarray):
reward_extra_infos_dict[key].extend(values.tolist())
else:
reward_extra_infos_dict[key].extend(values if isinstance(values, list) else [values])
# collect num_turns of each prompt
if "__num_turns__" in test_batch.non_tensor_batch:
sample_turns.append(test_batch.non_tensor_batch["__num_turns__"])
data_source_lst.append(test_batch.non_tensor_batch.get("data_source", ["unknown"] * reward_tensor.shape[0]))
self._maybe_log_val_generations(inputs=sample_inputs, outputs=sample_outputs, scores=sample_scores)
# dump generations
val_data_dir = self.config.trainer.get("validation_data_dir", None)
if val_data_dir:
self._dump_generations(
inputs=sample_inputs,
outputs=sample_outputs,
gts=sample_gts,
scores=sample_scores,
reward_extra_infos_dict=reward_extra_infos_dict,
dump_path=val_data_dir,
)
for key_info, lst in reward_extra_infos_dict.items():
assert len(lst) == 0 or len(lst) == len(sample_scores), f"{key_info}: {len(lst)=}, {len(sample_scores)=}"
if merged:
print("_merge_validation_results validate result will be merged")
return {
"data_sources": data_source_lst,
"sample_uids": sample_uids,
"sample_turns": sample_turns,
"reward_extra_infos_dict": reward_extra_infos_dict,
}
data_sources = np.concatenate(data_source_lst, axis=0)
return self._val_metrics_update(data_sources, sample_uids, reward_extra_infos_dict, sample_turns)
def _val_metrics_update(self, data_sources, sample_uids, reward_extra_infos_dict, sample_turns):
data_src2var2metric2val = process_validation_metrics(data_sources, sample_uids, reward_extra_infos_dict)
metric_dict = {}
for data_source, var2metric2val in data_src2var2metric2val.items():
core_var = "acc" if "acc" in var2metric2val else "reward"
for var_name, metric2val in var2metric2val.items():
n_max = max([int(name.split("@")[-1].split("/")[0]) for name in metric2val.keys()])
for metric_name, metric_val in metric2val.items():
if (
(var_name == core_var)
and any(metric_name.startswith(pfx) for pfx in ["mean", "maj", "best"])
and (f"@{n_max}" in metric_name)
):
metric_sec = "val-core"
else:
metric_sec = "val-aux"
pfx = f"{metric_sec}/{data_source}/{var_name}/{metric_name}"
metric_dict[pfx] = metric_val
if len(sample_turns) > 0:
sample_turns = np.concatenate(sample_turns)
metric_dict["val-aux/num_turns/min"] = sample_turns.min()
metric_dict["val-aux/num_turns/max"] = sample_turns.max()
metric_dict["val-aux/num_turns/mean"] = sample_turns.mean()
return metric_dict
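# Illustrative note (not part of verl; "gsm8k" is a made-up data source): a resulting key
# looks like "val-core/gsm8k/acc/mean@8"; only the core variable's mean/maj/best metrics
# at the largest N land under "val-core", everything else is logged under "val-aux".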
def _merge_validation_results(self, result_a, result_b):
if result_a is None and result_b is None:
return {}
if result_a is None:
result_a = {"data_sources": [], "sample_uids": [], "sample_turns": [], "reward_extra_infos_dict": {}}
if result_b is None:
result_b = {"data_sources": [], "sample_uids": [], "sample_turns": [], "reward_extra_infos_dict": {}}
if not result_a.get("data_sources") and not result_b.get("data_sources"):
return {}
data_sources = np.concatenate(result_a["data_sources"] + result_b["data_sources"], axis=0)
sample_uids = result_a["sample_uids"] + result_b["sample_uids"]
sample_turns = result_a["sample_turns"] + result_b["sample_turns"]
reward_extra_infos_dict = {}
all_keys = set(result_a["reward_extra_infos_dict"].keys()) | set(result_b["reward_extra_infos_dict"].keys())
for key in all_keys:
list_a = result_a["reward_extra_infos_dict"].get(key, [])
list_b = result_b["reward_extra_infos_dict"].get(key, [])
reward_extra_infos_dict[key] = list_a + list_b
return self._val_metrics_update(data_sources, sample_uids, reward_extra_infos_dict, sample_turns)
def init_workers(self):
"""Initialize distributed training workers using Ray backend.
Creates:
1. Ray resource pools from configuration
2. Worker groups for each role (actor, critic, etc.)
"""
self.resource_pool_manager.create_resource_pool()
self.resource_pool_to_cls = {pool: {} for pool in self.resource_pool_manager.resource_pool_dict.values()}
# create actor and rollout
actor_role = Role.ActorRolloutRef if Role.ActorRolloutRef in self.role_worker_mapping else Role.ActorRollout
if self.hybrid_engine:
actor_rollout_resource_pool = self.resource_pool_manager.get_resource_pool(actor_role)
actor_rollout_cls = RayClassWithInitArgs(
cls=self.role_worker_mapping[actor_role],
config=self.config.actor_rollout_ref,
role=str(actor_role),
)
self.resource_pool_to_cls[actor_rollout_resource_pool][str(actor_role)] = actor_rollout_cls
else:
raise NotImplementedError
# create critic
if self.use_critic:
resource_pool = self.resource_pool_manager.get_resource_pool(Role.Critic)
from verl.workers.config import CriticConfig
critic_cfg: CriticConfig = omega_conf_to_dataclass(self.config.critic)
if self.use_legacy_worker_impl == "disable":
# convert critic_cfg into TrainingWorkerConfig
from verl.workers.engine_workers import TrainingWorkerConfig
orig_critic_cfg = critic_cfg
if orig_critic_cfg.strategy == "fsdp":
engine_config: FSDPEngineConfig = orig_critic_cfg.model.fsdp_config
engine_config.infer_max_token_len_per_gpu = critic_cfg.ppo_infer_max_token_len_per_gpu
engine_config.max_token_len_per_gpu = critic_cfg.ppo_max_token_len_per_gpu
else:
raise NotImplementedError(f"Unknown strategy {orig_critic_cfg.strategy=}")
critic_cfg = TrainingWorkerConfig(
model_type="value_model",
model_config=orig_critic_cfg.model_config,
engine_config=engine_config,
optimizer_config=orig_critic_cfg.optim,
checkpoint_config=orig_critic_cfg.checkpoint,
)
critic_cls = RayClassWithInitArgs(cls=self.role_worker_mapping[Role.Critic], config=critic_cfg)
self.resource_pool_to_cls[resource_pool][str(Role.Critic)] = critic_cls
# create reference policy if needed
if self.use_reference_policy and Role.RefPolicy in self.role_worker_mapping:
resource_pool = self.resource_pool_manager.get_resource_pool(Role.RefPolicy)
ref_policy_cls = RayClassWithInitArgs(
self.role_worker_mapping[Role.RefPolicy],
config=self.config.actor_rollout_ref,
role=str(Role.RefPolicy),
)
self.resource_pool_to_cls[resource_pool][str(Role.RefPolicy)] = ref_policy_cls
# initialize WorkerGroup
# NOTE: if you want to use a different resource pool for each role, which can support different parallel size,
# you should not use `create_colocated_worker_cls`.
# Instead, directly pass different resource pool to different worker groups.
# See https://github.com/volcengine/verl/blob/master/examples/ray/tutorial.ipynb for more information.
all_wg = {}
wg_kwargs = {} # Setting up kwargs for RayWorkerGroup
if OmegaConf.select(self.config.trainer, "ray_wait_register_center_timeout") is not None:
wg_kwargs["ray_wait_register_center_timeout"] = self.config.trainer.ray_wait_register_center_timeout
if OmegaConf.select(self.config.global_profiler, "steps") is not None:
wg_kwargs["profile_steps"] = OmegaConf.select(self.config.global_profiler, "steps")
# Only require nsight worker options when tool is nsys
if OmegaConf.select(self.config.global_profiler, "tool") == "nsys":
assert (
OmegaConf.select(self.config.global_profiler.global_tool_config.nsys, "worker_nsight_options")
is not None
), "worker_nsight_options must be set when using nsys with profile_steps"
wg_kwargs["worker_nsight_options"] = OmegaConf.to_container(
OmegaConf.select(self.config.global_profiler.global_tool_config.nsys, "worker_nsight_options")
)
wg_kwargs["device_name"] = self.device_name
for resource_pool, class_dict in self.resource_pool_to_cls.items():
worker_dict_cls = create_colocated_worker_cls(class_dict=class_dict)
wg_dict = self.ray_worker_group_cls(
resource_pool=resource_pool,
ray_cls_with_init=worker_dict_cls,
**wg_kwargs,
)
spawn_wg = wg_dict.spawn(prefix_set=class_dict.keys())
all_wg.update(spawn_wg)
if self.use_critic:
self.critic_wg = all_wg[str(Role.Critic)]
if self.use_legacy_worker_impl == "disable":
self.critic_wg.reset()
# assign critic loss
from functools import partial
from verl.workers.utils.losses import value_loss
value_loss_ = partial(value_loss, config=orig_critic_cfg)
self.critic_wg.set_loss_fn(value_loss_)
else:
self.critic_wg.init_model()
if self.use_reference_policy and not self.ref_in_actor:
if str(Role.RefPolicy) in all_wg:
self.ref_policy_wg = all_wg[str(Role.RefPolicy)]
self.ref_policy_wg.init_model()
else:
# Model engine: ActorRolloutRefWorker
assert str(Role.ActorRolloutRef) in all_wg, f"{all_wg.keys()=}"
self.ref_policy_wg = all_wg[str(Role.ActorRolloutRef)]
# we should create rollout at the end so that vllm can have a better estimation of kv cache memory
self.actor_rollout_wg = all_wg[str(actor_role)]
self.actor_rollout_wg.init_model()
if self.ref_in_actor:
self.ref_policy_wg = self.actor_rollout_wg
# create reward loop manager
from verl.experimental.reward_loop import RewardLoopManager
# initialize reward loop manager
# reward model (colocate or standalone): get resource_pool
# no reward model: resource_pool = None
resource_pool = self.resource_pool_manager.get_resource_pool(Role.RewardModel) if self.use_rm else None
self.reward_loop_manager = RewardLoopManager(
config=self.config,
rm_resource_pool=resource_pool,
)
# create async rollout manager and request scheduler
# Note: mode is always "async" since sync mode is deprecated
self.async_rollout_mode = True
# Support custom AgentLoopManager via config
manager_class_fqn = self.config.actor_rollout_ref.rollout.get("agent", {}).get("agent_loop_manager_class")
if manager_class_fqn:
AgentLoopManager = load_class_from_fqn(manager_class_fqn, "AgentLoopManager")
else:
from verl.experimental.agent_loop import AgentLoopManager
# infrastructure overview: https://verl.readthedocs.io/en/latest/advance/reward_loop.html#architecture-design
# agent_reward_loop: streaming reward computation with actor rollout
# enabled when either condition holds: (1) no reward model, or (2) reward model with its own resource pool
enable_agent_reward_loop = not self.use_rm or self.config.reward.reward_model.enable_resource_pool
# if enable_agent_reward_loop, we directly pass reward_loop_workers to agent loop manager
# to stream reward computation with actor rollout
reward_loop_worker_handles = self.reward_loop_manager.reward_loop_workers if enable_agent_reward_loop else None
self.async_rollout_manager = AgentLoopManager(
config=self.config,
worker_group=self.actor_rollout_wg,
rollout_resource_pool=actor_rollout_resource_pool,
reward_loop_worker_handles=reward_loop_worker_handles,
)
self.checkpoint_manager = CheckpointEngineManager(
backend=self.config.actor_rollout_ref.rollout.checkpoint_engine.backend,
trainer=self.actor_rollout_wg,
replicas=self.async_rollout_manager.rollout_replicas,
)
# sleep all replicas to load checkpoint
self.checkpoint_manager.sleep_replicas()
def _save_checkpoint(self):
from verl.utils.fs import local_mkdir_safe
# path: given_path + `/global_step_{global_steps}` + `/actor`
local_global_step_folder = os.path.join(
self.config.trainer.default_local_dir, f"global_step_{self.global_steps}"
)
print(f"local_global_step_folder: {local_global_step_folder}")
actor_local_path = os.path.join(local_global_step_folder, "actor")
actor_remote_path = (
None
if self.config.trainer.default_hdfs_dir is None
else os.path.join(self.config.trainer.default_hdfs_dir, f"global_step_{self.global_steps}", "actor")
)
remove_previous_ckpt_in_save = self.config.trainer.get("remove_previous_ckpt_in_save", False)
if remove_previous_ckpt_in_save:
print(
"Warning: remove_previous_ckpt_in_save is deprecated,"
+ " set max_actor_ckpt_to_keep=1 and max_critic_ckpt_to_keep=1 instead"
)
max_actor_ckpt_to_keep = (
self.config.trainer.get("max_actor_ckpt_to_keep", None) if not remove_previous_ckpt_in_save else 1
)
max_critic_ckpt_to_keep = (
self.config.trainer.get("max_critic_ckpt_to_keep", None) if not remove_previous_ckpt_in_save else 1
)
self.actor_rollout_wg.save_checkpoint(
actor_local_path, actor_remote_path, self.global_steps, max_ckpt_to_keep=max_actor_ckpt_to_keep
)
if self.use_critic:
critic_local_path = os.path.join(local_global_step_folder, str(Role.Critic))
critic_remote_path = (
None
if self.config.trainer.default_hdfs_dir is None
else os.path.join(
self.config.trainer.default_hdfs_dir, f"global_step_{self.global_steps}", str(Role.Critic)
)
)
self.critic_wg.save_checkpoint(
critic_local_path, critic_remote_path, self.global_steps, max_ckpt_to_keep=max_critic_ckpt_to_keep
)
# save dataloader
local_mkdir_safe(local_global_step_folder)
dataloader_local_path = os.path.join(local_global_step_folder, "data.pt")
dataloader_state_dict = self.train_dataloader.state_dict()
torch.save(dataloader_state_dict, dataloader_local_path)
# latest checkpointed iteration tracker (for atomic usage)
if (
hasattr(self.config.actor_rollout_ref.actor.checkpoint, "async_save")
and self.config.actor_rollout_ref.actor.checkpoint.async_save
) or (
"async_save" in self.config.actor_rollout_ref.actor.checkpoint
and self.config.actor_rollout_ref.actor.checkpoint["async_save"]
):
print("skip write latest_checkpointed_iteration.txt when async_save is True")
return
local_latest_checkpointed_iteration = os.path.join(
self.config.trainer.default_local_dir, "latest_checkpointed_iteration.txt"
)
with open(local_latest_checkpointed_iteration, "w") as f:
f.write(str(self.global_steps))
def _load_checkpoint(self):
if self.config.trainer.resume_mode == "disable":
return 0
# load from hdfs
if self.config.trainer.default_hdfs_dir is not None:
raise NotImplementedError("load from hdfs is not implemented yet")
else:
checkpoint_folder = self.config.trainer.default_local_dir # TODO: check path
if not os.path.isabs(checkpoint_folder):
working_dir = os.getcwd()
checkpoint_folder = os.path.join(working_dir, checkpoint_folder)
global_step_folder = find_latest_ckpt_path(checkpoint_folder) # None if no latest
# find global_step_folder
if self.config.trainer.resume_mode == "auto":
if global_step_folder is None:
print("Training from scratch")
return 0
else:
if self.config.trainer.resume_mode == "resume_path":
assert isinstance(self.config.trainer.resume_from_path, str), "resume ckpt must be str type"
assert "global_step_" in self.config.trainer.resume_from_path, (
"resume ckpt must specify the global_steps"
)
global_step_folder = self.config.trainer.resume_from_path
if not os.path.isabs(global_step_folder):
working_dir = os.getcwd()
global_step_folder = os.path.join(working_dir, global_step_folder)
print(f"Load from checkpoint folder: {global_step_folder}")
# set global step
self.global_steps = int(global_step_folder.split("global_step_")[-1])
print(f"Setting global step to {self.global_steps}")
print(f"Resuming from {global_step_folder}")
actor_path = os.path.join(global_step_folder, "actor")
critic_path = os.path.join(global_step_folder, str(Role.Critic))
# load actor
self.actor_rollout_wg.load_checkpoint(
actor_path, del_local_after_load=self.config.trainer.del_local_ckpt_after_load
)
# load critic
if self.use_critic:
self.critic_wg.load_checkpoint(
critic_path, del_local_after_load=self.config.trainer.del_local_ckpt_after_load
)
# load dataloader,
# TODO: from remote not implemented yet
dataloader_local_path = os.path.join(global_step_folder, "data.pt")
if os.path.exists(dataloader_local_path):
dataloader_state_dict = torch.load(dataloader_local_path, weights_only=False)
self.train_dataloader.load_state_dict(dataloader_state_dict)
else:
print(f"Warning: No dataloader state found at {dataloader_local_path}, will start from scratch")
def _start_profiling(self, do_profile: bool) -> None:
"""Start profiling for all worker groups if profiling is enabled."""
if do_profile:
self.actor_rollout_wg.start_profile(role="e2e", profile_step=self.global_steps)
if self.use_reference_policy:
self.ref_policy_wg.start_profile(profile_step=self.global_steps)
if self.use_critic:
self.critic_wg.start_profile(profile_step=self.global_steps)
def _stop_profiling(self, do_profile: bool) -> None:
"""Stop profiling for all worker groups if profiling is enabled."""
if do_profile:
self.actor_rollout_wg.stop_profile()
if self.use_reference_policy:
self.ref_policy_wg.stop_profile()
if self.use_critic:
self.critic_wg.stop_profile()
def _get_dp_size(self, worker_group, role: str) -> int:
"""Get data parallel size from worker group dispatch info.
This method retrieves the data parallel size by querying the dispatch info
for the specified role. The dispatch info is cached for subsequent calls.
Args:
worker_group: The worker group to query dispatch info from.
role: The role name (e.g., "actor", "critic") to get DP size for.
Returns:
The data parallel size (number of DP ranks).
"""
if role not in worker_group._dispatch_info:
dp_rank_mapping = worker_group._query_dispatch_info(role)
worker_group._dispatch_info[role] = dp_rank_mapping
else:
dp_rank_mapping = worker_group._dispatch_info[role]
return max(dp_rank_mapping) + 1
def _balance_batch(self, batch: DataProto, metrics, logging_prefix="global_seqlen", keep_minibatch=False):
"""Reorder the data on single controller such that each dp rank gets similar total tokens.
When use_prefix_grouper is enabled, uses group-level balancing to keep samples with
the same uid together on the same rank for prefix sharing optimization.
"""
attention_mask = batch.batch["attention_mask"]
batch_size = attention_mask.shape[0]
global_seqlen_lst = batch.batch["attention_mask"].view(batch_size, -1).sum(-1) # (train_batch_size,)
workload_lst = calculate_workload(global_seqlen_lst)
# Get dp_size from dispatch info to correctly balance across data parallel ranks
# Note: world_size may include tensor/pipeline parallel dimensions, but we only want DP
dp_size = self._get_dp_size(self.actor_rollout_wg, "actor")
# Use group-level balancing for PrefixGrouper to keep same-uid samples together
if getattr(self, "use_prefix_grouper", False) and "uid" in batch.non_tensor_batch:
from verl.utils.seqlen_balancing import get_group_balanced_partitions
uid_list = list(batch.non_tensor_batch["uid"])
seqlen_list = global_seqlen_lst.tolist()
# Count number of uid groups
num_groups = len(set(uid_list))
if num_groups % dp_size != 0:
raise ValueError(
f"PrefixGrouper with balance_batch requires num_uid_groups ({num_groups}) "
f"% dp_size ({dp_size}) == 0. "
f"This ensures each rank gets equal number of groups. "
f"Current batch_size={batch_size}, adjust batch_size to be a multiple of "
f"dp_size * rollout.n."
)
global_partition_lst = get_group_balanced_partitions(
seqlen_list=seqlen_list,
uid_list=uid_list,
k_partitions=dp_size,
)
elif keep_minibatch:
# Decouple the DP balancing and mini-batching.
minibatch_size = self.config.actor_rollout_ref.actor.get("ppo_mini_batch_size")
minibatch_num = len(workload_lst) // minibatch_size
global_partition_lst = [[] for _ in range(dp_size)]
for i in range(minibatch_num):
rearrange_minibatch_lst = get_seqlen_balanced_partitions(
workload_lst[i * minibatch_size : (i + 1) * minibatch_size],
k_partitions=dp_size,
equal_size=True,
)
for j, part in enumerate(rearrange_minibatch_lst):
global_partition_lst[j].extend([x + minibatch_size * i for x in part])
else:
global_partition_lst = get_seqlen_balanced_partitions(workload_lst, k_partitions=dp_size, equal_size=True)
# Place smaller micro-batches at both ends to reduce the bubbles in pipeline parallel.
# Skip reordering within partitions for PrefixGrouper to maintain uid grouping
if not getattr(self, "use_prefix_grouper", False):
for idx, partition in enumerate(global_partition_lst):
partition.sort(key=lambda x: (workload_lst[x], x))
ordered_partition = partition[::2] + partition[1::2][::-1]
global_partition_lst[idx] = ordered_partition
# reorder based on index. The data will be automatically equally partitioned by dispatch function
global_idx = torch.tensor([j for partition in global_partition_lst for j in partition])
batch.reorder(global_idx)
global_balance_stats = log_seqlen_unbalance(
seqlen_list=global_seqlen_lst.tolist(), partitions=global_partition_lst, prefix=logging_prefix
)
metrics.update(global_balance_stats)
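# --- Hedged sketch (illustration only; not verl's get_seqlen_balanced_partitions). ---
# The balancing idea: assign each sample to the DP rank with the smallest running token
# total so every rank ends up with a similar workload. The real helper additionally
# enforces equal partition sizes when equal_size=True; this toy greedy variant does not.
def _toy_balanced_partitions(seqlens: list[int], k_partitions: int) -> list[list[int]]:
    import heapq
    heap = [(0, p) for p in range(k_partitions)]  # (running_total_tokens, partition_idx)
    heapq.heapify(heap)
    partitions: list[list[int]] = [[] for _ in range(k_partitions)]
    # place longer sequences first so the greedy assignment stays balanced
    for idx in sorted(range(len(seqlens)), key=lambda i: -seqlens[i]):
        total, p = heapq.heappop(heap)
        partitions[p].append(idx)
        heapq.heappush(heap, (total + seqlens[idx], p))
    return partitions
# Example: _toy_balanced_partitions([512, 64, 300, 280, 128, 96, 700, 20], 2)
# gives each of the two ranks roughly 1050 total tokens.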
def _compute_values(self, batch: DataProto) -> DataProto:
if self.use_legacy_worker_impl == "disable":
batch_td = batch.to_tensordict()
# step 2: convert from padding to nopadding
batch_td = left_right_2_no_padding(batch_td)
# step 3: add meta info
tu.assign_non_tensor(batch_td, compute_loss=False)
output = self.critic_wg.infer_batch(batch_td)
output = output.get()
values = tu.get(output, "values")
values = no_padding_2_padding(values, batch_td)
values = tu.get_tensordict({"values": values.float()})
values = DataProto.from_tensordict(values)
else:
values = self.critic_wg.compute_values(batch)
return values
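# --- Hedged sketch (illustration only). left_right_2_no_padding / no_padding_2_padding are
# verl-internal helpers; the toy functions below only show the underlying idea of moving
# between a padded (batch, seqlen, ...) layout and a packed layout using the attention mask.
# `torch` is already imported in this module.
def _toy_to_packed(x: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    # keep only positions where the mask is 1 -> shape (num_valid_tokens, ...)
    return x[attention_mask.bool()]
def _toy_to_padded(packed: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    # scatter packed values back into a zero-initialized padded tensor
    out = packed.new_zeros((*attention_mask.shape, *packed.shape[1:]))
    out[attention_mask.bool()] = packed
    return out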
def _compute_ref_log_prob(self, batch: DataProto) -> DataProto:
if self.use_legacy_worker_impl == "disable":
# step 1: convert dataproto to tensordict.
batch_td = batch.to_tensordict()
# step 2: convert from padding to nopadding
batch_td = left_right_2_no_padding(batch_td)
# step 3: add meta info
metadata = {"calculate_entropy": False, "compute_loss": False}
if self.ref_in_actor:
metadata["no_lora_adapter"] = True
tu.assign_non_tensor(batch_td, **metadata)
if self.ref_in_actor:
output = self.actor_rollout_wg.compute_log_prob(batch_td)
else:
output = self.ref_policy_wg.compute_ref_log_prob(batch_td)
# gather output
log_probs = tu.get(output, "log_probs")
# step 4. No padding to padding
log_probs = no_padding_2_padding(log_probs, batch_td)
# step 5: rebuild a tensordict and convert to dataproto
ref_log_prob = tu.get_tensordict({"ref_log_prob": log_probs.float()})
ref_log_prob = DataProto.from_tensordict(ref_log_prob)
else:
ref_log_prob = self.ref_policy_wg.compute_ref_log_prob(batch)
return ref_log_prob
def _compute_old_log_prob(self, batch: DataProto):
if self.use_legacy_worker_impl == "disable":
# TODO: remove step 1, 2, 4 after we make the whole training tensordict and padding free
# step 1: convert dataproto to tensordict.
batch_td = batch.to_tensordict()
# step 2: convert from padding to nopadding
batch_td = left_right_2_no_padding(batch_td)
# step 3: add meta info
tu.assign_non_tensor(batch_td, calculate_entropy=True, compute_loss=False)
output = self.actor_rollout_wg.compute_log_prob(batch_td)
# gather output
entropy = tu.get(output, "entropy")
log_probs = tu.get(output, "log_probs")
old_log_prob_mfu = tu.get(output, "metrics")["mfu"]
# step 4. No padding to padding
entropy = no_padding_2_padding(entropy, batch_td)
log_probs = no_padding_2_padding(log_probs, batch_td)
# step 5: rebuild a tensordict and convert to dataproto
old_log_prob = tu.get_tensordict({"old_log_probs": log_probs.float(), "entropys": entropy.float()})
old_log_prob = DataProto.from_tensordict(old_log_prob)
else:
old_log_prob = self.actor_rollout_wg.compute_log_prob(batch)
old_log_prob_mfu = 0
return old_log_prob, old_log_prob_mfu
def _update_actor(self, batch: DataProto) -> DataProto:
rollout_config = self.config.actor_rollout_ref.rollout
batch.meta_info["multi_turn"] = rollout_config.multi_turn.enable
# TODO: Make "temperature" single source of truth from generation.
batch.meta_info["temperature"] = rollout_config.temperature
# update actor
if self.use_legacy_worker_impl == "disable":
batch_td = batch.to_tensordict()
# step 2: convert from padding to no-padding
batch_td = left_right_2_no_padding(batch_td)
calculate_entropy = self.config.actor_rollout_ref.actor.entropy_coeff != 0.0
ppo_mini_batch_size = self.config.actor_rollout_ref.actor.ppo_mini_batch_size
ppo_mini_batch_size = ppo_mini_batch_size * self.config.actor_rollout_ref.rollout.n
ppo_epochs = self.config.actor_rollout_ref.actor.ppo_epochs
seed = self.config.actor_rollout_ref.actor.data_loader_seed
shuffle = self.config.actor_rollout_ref.actor.shuffle
tu.assign_non_tensor(
batch_td,
calculate_entropy=calculate_entropy,
global_batch_size=ppo_mini_batch_size,
mini_batch_size=ppo_mini_batch_size,
epochs=ppo_epochs,
seed=seed,
dataloader_kwargs={"shuffle": shuffle},
)
actor_output = self.actor_rollout_wg.update_actor(batch_td)
actor_output = tu.get(actor_output, "metrics")
actor_output = rename_dict(actor_output, "actor/")
# modify key name
actor_output["perf/mfu/actor"] = actor_output.pop("actor/mfu")
actor_output = DataProto.from_single_dict(data={}, meta_info={"metrics": actor_output})
else:
actor_output = self.actor_rollout_wg.update_actor(batch)
return actor_output
def _update_critic(self, batch: DataProto) -> DataProto:
if self.use_legacy_worker_impl == "disable":
batch_td = batch.to_tensordict()
# step 2: convert from padding to no-padding
batch_td = left_right_2_no_padding(batch_td)
ppo_mini_batch_size = self.config.critic.ppo_mini_batch_size
ppo_mini_batch_size = ppo_mini_batch_size * self.config.actor_rollout_ref.rollout.n
ppo_epochs = self.config.critic.ppo_epochs
seed = self.config.critic.data_loader_seed
shuffle = self.config.critic.shuffle
tu.assign_non_tensor(
batch_td,
global_batch_size=ppo_mini_batch_size,
mini_batch_size=ppo_mini_batch_size,
epochs=ppo_epochs,
seed=seed,
dataloader_kwargs={"shuffle": shuffle},
)
output = self.critic_wg.train_mini_batch(batch_td)
output = output.get()
output = tu.get(output, "metrics")
output = rename_dict(output, "critic/")
# modify key name
output["perf/mfu/critic"] = output.pop("critic/mfu")
critic_output = DataProto.from_single_dict(data={}, meta_info={"metrics": output})
else:
critic_output = self.critic_wg.update_critic(batch)
return critic_output
def fit(self):
"""
The training loop of PPO.
The driver process only needs to call the compute functions of the worker group through RPC
to construct the PPO dataflow.
The light-weight advantage computation is done on the driver process.
"""
from omegaconf import OmegaConf
from verl.utils.tracking import Tracking
logger = Tracking(
project_name=self.config.trainer.project_name,
experiment_name=self.config.trainer.experiment_name,
default_backend=self.config.trainer.logger,
config=OmegaConf.to_container(self.config, resolve=True),
)
self.global_steps = 0
# load checkpoint and update weights before doing anything
self._load_checkpoint()
self.checkpoint_manager.update_weights()
current_epoch = self.global_steps // len(self.train_dataloader)
# perform validation before training
# currently, we only support validation using the reward_function.
if self.config.trainer.get("val_before_train", True):
val_metrics = self._validate()
assert val_metrics, f"{val_metrics=}"
pprint(f"Initial validation metrics: {val_metrics}")
logger.log(data=val_metrics, step=self.global_steps)
if self.config.trainer.get("val_only", False):
return
if self.config.actor_rollout_ref.rollout.get("skip_rollout", False):
rollout_skip = RolloutSkip(self.config, self.async_rollout_manager)
rollout_skip.wrap_generate_sequences()
# add tqdm
progress_bar = tqdm(total=self.total_training_steps, initial=self.global_steps, desc="Training Progress")
# we start from step 1
self.global_steps += 1
last_val_metrics = None
self.max_steps_duration = 0
prev_step_profile = False
curr_step_profile = (
self.global_steps in self.config.global_profiler.steps
if self.config.global_profiler.steps is not None
else False
)
next_step_profile = False
for epoch in range(current_epoch, self.config.trainer.total_epochs):
for batch_dict in self.train_dataloader:
if hasattr(self.actor_rollout_wg, "async_calls_finalize_fn_exec"):
self.actor_rollout_wg.async_calls_finalize_fn_exec(blocking=False)
metrics = {}
timing_raw = {}
with marked_timer("start_profile", timing_raw):
self._start_profiling(
not prev_step_profile and curr_step_profile
if self.config.global_profiler.profile_continuous_steps
else curr_step_profile
)
batch: DataProto = DataProto.from_single_dict(batch_dict)
batch.meta_info["temperature"] = self.config.actor_rollout_ref.rollout.temperature
# add uid to batch
batch.non_tensor_batch["uid"] = np.array(
[str(uuid.uuid4()) for _ in range(len(batch.batch))], dtype=object
)
gen_batch = self._get_gen_batch(batch)
# pass global_steps to trace
gen_batch.meta_info["global_steps"] = self.global_steps
gen_batch_output = gen_batch.repeat(
repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True
)
is_last_step = self.global_steps >= self.total_training_steps
with marked_timer("step", timing_raw):
# generate a batch
with marked_timer("gen", timing_raw, color="red"):
if curr_step_profile:
self.async_rollout_manager.start_profile()
gen_batch_output = self.async_rollout_manager.generate_sequences(gen_batch_output)
self.checkpoint_manager.sleep_replicas()
if curr_step_profile:
self.async_rollout_manager.stop_profile()
timing_raw.update(gen_batch_output.meta_info["timing"])
gen_batch_output.meta_info.pop("timing", None)
if self.config.algorithm.adv_estimator == AdvantageEstimator.REMAX:
with marked_timer("gen_max", timing_raw, color="purple"):
gen_baseline_batch = deepcopy(gen_batch)
gen_baseline_batch.meta_info["do_sample"] = False
if curr_step_profile:
self.async_rollout_manager.start_profile()
gen_baseline_output = self.async_rollout_manager.generate_sequences(gen_baseline_batch)
self.checkpoint_manager.sleep_replicas()
if curr_step_profile:
self.async_rollout_manager.stop_profile()
batch = batch.union(gen_baseline_output)
# compute reward model score on batch
rm_scores = None
if self.use_rm and "rm_scores" not in batch.batch.keys():
batch_reward = self._compute_reward_colocate(batch)
batch = batch.union(batch_reward)
# Compute or extract reward for REMAX baseline
reward_baseline_tensor = batch.batch["rm_scores"].sum(dim=-1)
keys_to_pop = set(gen_baseline_output.batch.keys())
if rm_scores is not None:
keys_to_pop.update(rm_scores.batch.keys())
batch.pop(batch_keys=list(keys_to_pop))
batch.batch["reward_baselines"] = reward_baseline_tensor
del rm_scores, gen_baseline_batch, gen_baseline_output
# repeat to align with repeated responses in rollout
batch = batch.repeat(repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True)
batch = batch.union(gen_batch_output)
if "response_mask" not in batch.batch.keys():
batch.batch["response_mask"] = compute_response_mask(batch)
# Balance the number of valid tokens across DP ranks.
# NOTE: This usually changes the order of data in the `batch`,
# which won't affect the advantage calculation (since it's based on uid),
# but might affect the loss calculation (due to the change of mini-batching).
if self.config.trainer.balance_batch:
self._balance_batch(batch, metrics=metrics)
# compute global_valid tokens
batch.meta_info["global_token_num"] = torch.sum(batch.batch["attention_mask"], dim=-1).tolist()
# get images_seqlens
images_seqlens_all = []
for multi_modal_input in batch.non_tensor_batch["multi_modal_inputs"]:
if "image_grid_thw" not in multi_modal_input.keys():
continue
images_seqlens_all.extend(multi_modal_input["images_seqlens"].tolist())
batch.meta_info["images_seqlens"] = images_seqlens_all
with marked_timer("reward", timing_raw, color="yellow"):
# compute reward model score
if self.use_rm and "rm_scores" not in batch.batch.keys():
batch_reward = self._compute_reward_colocate(batch)
batch = batch.union(batch_reward)
# extract reward_tensor and reward_extra_infos_dict for training
reward_tensor, reward_extra_infos_dict = extract_reward(batch)
# Operating Mode Selection:
# - Bypass mode: Sets old_log_probs = rollout_log_probs (2 policies: π_rollout, π_θ)
# - Decoupled mode: Recomputes old_log_probs as proximal anchor (3 policies: π_rollout, π_old, π_θ)
# Note: π_old computed once per data batch, serves as stable reference during mini-batch updates
rollout_corr_config = self.config.algorithm.get("rollout_correction", None)
bypass_recomputing_logprobs = rollout_corr_config and rollout_corr_config.get("bypass_mode", False)
if bypass_recomputing_logprobs: # Use `rollout_log_probs`
from verl.trainer.ppo.rollout_corr_helper import apply_bypass_mode
apply_bypass_mode(
batch=batch,
rollout_corr_config=rollout_corr_config,
policy_loss_config=self.config.actor_rollout_ref.actor.policy_loss,
)
else: # Recompute old_log_probs
with marked_timer("old_log_prob", timing_raw, color="blue"):
old_log_prob, old_log_prob_mfu = self._compute_old_log_prob(batch)
entropys = old_log_prob.batch["entropys"]
response_masks = batch.batch["response_mask"]
actor_config = self.config.actor_rollout_ref.actor
entropy_agg = agg_loss(
loss_mat=entropys,
loss_mask=response_masks,
loss_agg_mode=actor_config.loss_agg_mode,
loss_scale_factor=actor_config.loss_scale_factor,
)
old_log_prob_metrics = {
"actor/entropy": entropy_agg.detach().item(),
"perf/mfu/actor_infer": old_log_prob_mfu,
}
metrics.update(old_log_prob_metrics)
old_log_prob.batch.pop("entropys")
if "routed_experts" in batch.batch and "routed_experts" in old_log_prob.batch:
router_mode = getattr(
self.config.actor_rollout_ref.actor.router_replay, "mode", "disabled"
)
if router_mode == "R2":
batch.batch.pop("routed_experts")
else:
old_log_prob.batch.pop("routed_experts")
batch = batch.union(old_log_prob)
if "rollout_log_probs" in batch.batch.keys():
# TODO: we may want to add diff of probs too.
from verl.utils.debug.metrics import calculate_debug_metrics
metrics.update(calculate_debug_metrics(batch))
assert "old_log_probs" in batch.batch, f'"old_log_prob" not in {batch.batch.keys()=}'
if self.use_reference_policy:
# compute reference log_prob
with marked_timer(str(Role.RefPolicy), timing_raw, color="olive"):
ref_log_prob = self._compute_ref_log_prob(batch)
batch = batch.union(ref_log_prob)
# compute values
if self.use_critic:
with marked_timer("values", timing_raw, color="cyan"):
values = self._compute_values(batch)
batch = batch.union(values)
with marked_timer("adv", timing_raw, color="brown"):
# we combine with rule-based rm
reward_extra_infos_dict: dict[str, list]
batch.batch["token_level_scores"] = reward_tensor
if reward_extra_infos_dict:
batch.non_tensor_batch.update({k: np.array(v) for k, v in reward_extra_infos_dict.items()})
# compute rewards. apply_kl_penalty if available
if self.config.algorithm.use_kl_in_reward:
batch, kl_metrics = apply_kl_penalty(
batch, kl_ctrl=self.kl_ctrl_in_reward, kl_penalty=self.config.algorithm.kl_penalty
)
metrics.update(kl_metrics)
else:
batch.batch["token_level_rewards"] = batch.batch["token_level_scores"]
# Compute rollout correction: IS weights, rejection sampling, and metrics
# Only runs in decoupled mode (computes once per batch using stable π_old)
# In bypass mode, this is skipped - actor computes metrics from evolving π_θ vs π_rollout
if (
rollout_corr_config is not None
and "rollout_log_probs" in batch.batch
and not bypass_recomputing_logprobs # Only in decoupled mode
):
from verl.trainer.ppo.rollout_corr_helper import compute_rollout_correction_and_add_to_batch
# Compute IS weights, apply rejection sampling, compute metrics
batch, is_metrics = compute_rollout_correction_and_add_to_batch(batch, rollout_corr_config)
# IS and off-policy metrics already have rollout_corr/ prefix
metrics.update(is_metrics)
# compute advantages, executed on the driver process
norm_adv_by_std_in_grpo = self.config.algorithm.get(
"norm_adv_by_std_in_grpo", True
) # GRPO adv normalization factor
batch = compute_advantage(
batch,
adv_estimator=self.config.algorithm.adv_estimator,
gamma=self.config.algorithm.gamma,
lam=self.config.algorithm.lam,
num_repeat=self.config.actor_rollout_ref.rollout.n,
norm_adv_by_std_in_grpo=norm_adv_by_std_in_grpo,
config=self.config.algorithm,
)
# update critic
if self.use_critic:
with marked_timer("update_critic", timing_raw, color="pink"):
critic_output = self._update_critic(batch)
critic_output_metrics = reduce_metrics(critic_output.meta_info["metrics"])
metrics.update(critic_output_metrics)
# implement critic warmup
if self.config.trainer.critic_warmup <= self.global_steps:
# update actor
with marked_timer("update_actor", timing_raw, color="red"):
actor_output = self._update_actor(batch)
# Check if the ESI (Elastic Server Instance)/training plan is close to expiration.
esi_close_to_expiration = should_save_ckpt_esi(
max_steps_duration=self.max_steps_duration,
redundant_time=self.config.trainer.esi_redundant_time,
)
# Check if the conditions for saving a checkpoint are met.
# The conditions include a mandatory condition (1) and
# one of the following optional conditions (2/3/4):
# 1. The save frequency is set to a positive value.
# 2. It's the last training step.
# 3. The current step number is a multiple of the save frequency.
# 4. The ESI(Elastic Server Instance)/training plan is close to expiration.
if self.config.trainer.save_freq > 0 and (
is_last_step
or self.global_steps % self.config.trainer.save_freq == 0
or esi_close_to_expiration
):
if esi_close_to_expiration:
print("Force saving checkpoint: ESI instance expiration approaching.")
with marked_timer("save_checkpoint", timing_raw, color="green"):
self._save_checkpoint()
# update weights from trainer to rollout
with marked_timer("update_weights", timing_raw, color="red"):
self.checkpoint_manager.update_weights()
actor_output_metrics = reduce_metrics(actor_output.meta_info["metrics"])
metrics.update(actor_output_metrics)
# Log rollout generations if enabled
rollout_data_dir = self.config.trainer.get("rollout_data_dir", None)
if rollout_data_dir:
self._log_rollout_data(batch, reward_extra_infos_dict, timing_raw, rollout_data_dir)
# validate
if self.config.trainer.test_freq > 0 and (
is_last_step or self.global_steps % self.config.trainer.test_freq == 0
):
with marked_timer("testing", timing_raw, color="green"):
val_metrics: dict = self._validate()
if is_last_step:
last_val_metrics = val_metrics
metrics.update(val_metrics)
with marked_timer("stop_profile", timing_raw):
next_step_profile = (
self.global_steps + 1 in self.config.global_profiler.steps
if self.config.global_profiler.steps is not None
else False
)
self._stop_profiling(
curr_step_profile and not next_step_profile
if self.config.global_profiler.profile_continuous_steps
else curr_step_profile
)
prev_step_profile = curr_step_profile
curr_step_profile = next_step_profile
steps_duration = timing_raw["step"]
self.max_steps_duration = max(self.max_steps_duration, steps_duration)
# training metrics
metrics.update(
{
"training/global_step": self.global_steps,
"training/epoch": epoch,
}
)
# collect metrics
metrics.update(compute_data_metrics(batch=batch, use_critic=self.use_critic))
metrics.update(compute_timing_metrics(batch=batch, timing_raw=timing_raw))
# TODO: implement actual tflpo and theoretical tflpo
n_gpus = self.resource_pool_manager.get_n_gpus()
metrics.update(compute_throughout_metrics(batch=batch, timing_raw=timing_raw, n_gpus=n_gpus))
# compute variance proxy metrics
gradient_norm = metrics.get("actor/grad_norm", None)
metrics.update(compute_variance_proxy_metrics(batch=batch, gradient_norm=gradient_norm))
# Note: mismatch metrics (KL, PPL, etc.) are collected after advantage computation
# this is experimental and may be changed/removed in the future in favor of a general-purpose one
if isinstance(self.train_dataloader.sampler, AbstractCurriculumSampler):
self.train_dataloader.sampler.update(batch=batch)
# TODO: make a canonical logger that supports various backend
logger.log(data=metrics, step=self.global_steps)
progress_bar.update(1)
self.global_steps += 1
if (
hasattr(self.config.actor_rollout_ref.actor, "profiler")
and self.config.actor_rollout_ref.actor.profiler.tool == "torch_memory"
):
self.actor_rollout_wg.dump_memory_snapshot(
tag=f"post_update_step{self.global_steps}", sub_dir=f"step{self.global_steps}"
)
if is_last_step:
if hasattr(self.actor_rollout_wg, "async_calls_finalize_fn_exec"):
self.actor_rollout_wg.async_calls_finalize_fn_exec(blocking=True)
pprint(f"Final validation metrics: {last_val_metrics}")
progress_bar.close()
return
# this is experimental and may be changed/removed in the future
# in favor of a general-purpose data buffer pool
if hasattr(self.train_dataset, "on_batch_end"):
# The dataset may be changed after each training batch
self.train_dataset.on_batch_end(batch=batch)
|
verl__trainer__sft_trainer.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from functools import partial
from tensordict.tensorclass import NonTensorData
os.environ["NCCL_DEBUG"] = "WARN"
os.environ["TOKENIZERS_PARALLELISM"] = "true"
import logging
import hydra
import torch
import torch.distributed
from omegaconf import OmegaConf
from torch.utils.data import DistributedSampler
from torchdata.stateful_dataloader import StatefulDataLoader
from tqdm import tqdm
from verl.utils import tensordict_utils as tu
from verl.utils.checkpoint import CheckpointHandler
from verl.utils.dataset.dataset_utils import SFTTensorCollator
from verl.utils.dataset.multiturn_sft_dataset import MultiTurnSFTDataset
from verl.utils.device import auto_set_device, get_device_name
from verl.utils.distributed import destroy_global_process_group
from verl.utils.logger import log_with_rank
from verl.utils.memory_utils import aggressive_empty_cache
from verl.utils.profiler import log_gpu_memory_usage
from verl.utils.tracking import Tracking
from verl.workers.engine_workers import TrainingWorker
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_SFT_LOGGING_LEVEL", "WARN"))
class SFTTrainer:
def __init__(
self,
config,
):
self.config = config
log_gpu_memory_usage(f"rank {torch.distributed.get_rank()}: Before SFTTrainer init", logger=logger)
self.rank = torch.distributed.get_rank()
self._build_config()
self._build_dataset()
self._build_engine()
self._build_dataloader()
self._init_engine()
self._build_ckpt_handler()
# Initialize resume-related variables
self.resume_global_step = self.ckpt_handler.load_checkpoint()
self.device_name = self.config.trainer.device
if self.rank == 0:
print(self.config)
log_gpu_memory_usage(f"rank {self.rank}: After SFTTrainer init", logger=logger)
def _build_ckpt_handler(self):
resume_mode = getattr(self.config.trainer, "resume_mode", "auto")
resume_from_path = getattr(self.config.trainer, "resume_from_path", None)
max_ckpt_to_keep = getattr(self.config.trainer, "max_ckpt_to_keep", None)
default_hdfs_dir = getattr(self.config.trainer, "default_hdfs_dir", None)
self.ckpt_handler = CheckpointHandler(
engine=self.engine,
train_dataloader=self.train_dataloader,
default_local_dir=self.config.trainer.default_local_dir,
max_ckpt_to_keep=max_ckpt_to_keep,
default_hdfs_dir=default_hdfs_dir,
resume_mode=resume_mode,
resume_from_path=resume_from_path,
)
def _build_config(self):
from verl.utils.config import omega_conf_to_dataclass
self.model_config = omega_conf_to_dataclass(self.config.model)
self.engine_config = omega_conf_to_dataclass(self.config.engine)
self.optimizer_config = omega_conf_to_dataclass(self.config.optim)
self.checkpoint_config = omega_conf_to_dataclass(self.config.checkpoint)
self.profiler_config = omega_conf_to_dataclass(self.config.profiler)
# check profile interval
self.profiler_interval = self.config.trainer.profile_interval
self._validate_profiler_interval()
def _validate_profiler_interval(self):
assert len(self.profiler_interval) == 2
self.start_profile_step = self.profiler_interval[0]
self.end_profile_step = self.profiler_interval[1]
assert self.end_profile_step >= self.start_profile_step
if self.start_profile_step < 0:
assert self.end_profile_step < 0
def _build_engine(self):
from verl.workers.engine_workers import TrainingWorkerConfig
from verl.workers.utils.losses import sft_loss
self.loss_fn = partial(sft_loss, config=None)
config = TrainingWorkerConfig(
model_type="language_model",
model_config=self.model_config,
engine_config=self.engine_config,
optimizer_config=self.optimizer_config,
checkpoint_config=self.checkpoint_config,
profiler_config=self.profiler_config,
)
self.training_client = TrainingWorker(config=config)
self.training_client.set_loss_fn(loss_fn=self.loss_fn)
# Note that in the SPMD setting, this abstraction has to be broken
self.engine = self.training_client.engine
def _init_engine(self):
# patch optimizer config
if self.config.trainer.total_training_steps is not None:
self.total_training_steps = self.config.trainer.total_training_steps
else:
self.total_training_steps = len(self.train_dataloader) * self.config.trainer.total_epochs
self.optimizer_config.total_training_steps = self.total_training_steps
self.steps_per_epoch = len(self.train_dataloader)
# manage save and test frequency
self.save_freq = self.config.trainer.save_freq
if self.save_freq == "after_each_epoch":
self.save_freq = self.steps_per_epoch
self.test_freq = self.config.trainer.test_freq
if self.test_freq == "after_each_epoch":
self.test_freq = self.steps_per_epoch
self.training_client.reset()
def _build_dataset(self):
config = self.config
tokenizer = self.model_config.tokenizer
processor = self.model_config.processor
train_dataset = create_sft_dataset(
config.data.train_files,
config.data,
tokenizer,
processor,
max_samples=config.data.get("train_max_samples", -1),
)
if config.data.val_files:
val_dataset = create_sft_dataset(
config.data.val_files,
config.data,
tokenizer,
processor,
max_samples=config.data.get("val_max_samples", -1),
)
else:
val_dataset = None
self.train_dataset, self.val_dataset = train_dataset, val_dataset
def _build_dataloader(self):
# build dataset
config = self.config
# build dataloader
# Use data parallel rank and size instead of global rank and world size
# Set pin_memory_device when pin_memory is enabled.
device_name = get_device_name()
dp_rank = self.engine.get_data_parallel_rank()
dp_size = self.engine.get_data_parallel_size()
self.train_sampler = DistributedSampler(
self.train_dataset, shuffle=True, num_replicas=dp_size, rank=dp_rank, drop_last=True
)
self.global_batch_size = config.data.train_batch_size
self.train_batch_size_per_dp = self.global_batch_size // dp_size
self.collate_fn = SFTTensorCollator(config.data.pad_mode)
self.train_dataloader = StatefulDataLoader(
dataset=self.train_dataset,
batch_size=self.train_batch_size_per_dp,
sampler=self.train_sampler,
collate_fn=self.collate_fn,
num_workers=self.config.data.num_workers,
pin_memory=False,
drop_last=True,
pin_memory_device=device_name,
)
if self.val_dataset:
self.val_sampler = DistributedSampler(
self.val_dataset, shuffle=False, num_replicas=dp_size, rank=dp_rank, drop_last=True
)
self.val_dataloader = StatefulDataLoader(
dataset=self.val_dataset,
batch_size=self.train_batch_size_per_dp,
sampler=self.val_sampler,
collate_fn=self.collate_fn,
num_workers=self.config.data.num_workers,
pin_memory=False,
drop_last=True,
pin_memory_device=device_name,
)
else:
self.val_dataloader = None
def _get_batch_seqlens(self, data):
# mean over dp group
is_nested = data["input_ids"].is_nested
if is_nested:
batch_seqlens: torch.Tensor = data["input_ids"].offsets().diff()
else:
batch_seqlens: torch.Tensor = data["attention_mask"].sum(dim=-1)
batch_seqlens = batch_seqlens.to(self.device_name) # (global_bsz // dp)
output_tensor = torch.empty(
(batch_seqlens.shape[0] * self.engine.get_data_parallel_size(),),
dtype=batch_seqlens.dtype,
device=self.device_name,
) # (global_bsz,)
torch.distributed.all_gather_into_tensor(
output_tensor=output_tensor,
input_tensor=batch_seqlens,
group=self.engine.get_data_parallel_group(),
)
batch_seqlens = output_tensor.tolist()
return batch_seqlens
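# --- Hedged illustration (comment only): all_gather_into_tensor concatenates the per-rank
# length vectors in rank order, so with dp_size == 2 and two samples per rank, e.g.
#   rank 0 holds [128, 256] and rank 1 holds [64, 512]
# every rank ends up with the same flat list [128, 256, 64, 512] of global_bsz entries.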
def fit(self):
is_logging = self.engine.is_mp_src_rank_with_outputs() and self.engine.get_data_parallel_rank() == 0
# TODO: add a unified tracking
if is_logging:
tracking = Tracking(
project_name=self.config.trainer.project_name,
experiment_name=self.config.trainer.experiment_name,
default_backend=self.config.trainer.logger,
config=OmegaConf.to_container(self.config, resolve=True),
)
global_step = self.resume_global_step # Start from resumed step
last_valid_metric = None
log_with_rank(
f"Total training steps: {self.total_training_steps},",
logger=logger,
rank=0,
log_only_rank_0=True,
)
# With StatefulDataLoader, we don't need to manually calculate epochs and steps
# The dataloader will automatically resume from where it left off
if global_step > 0:
log_with_rank(
f"StatefulDataLoader will automatically resume from global step: {global_step}",
logger=logger,
rank=0,
log_only_rank_0=True,
)
# Calculate which epoch we're starting from for sampler.set_epoch()
start_epoch = global_step // self.steps_per_epoch
meta_info = {
"use_remove_padding": self.config.model.use_remove_padding,
"use_dynamic_bsz": self.config.data.use_dynamic_bsz,
"max_token_len_per_gpu": self.config.data.max_token_len_per_gpu,
"micro_batch_size_per_gpu": self.config.data.micro_batch_size_per_gpu,
"temperature": 1.0,
"global_batch_size": self.global_batch_size,
"pad_mode": self.config.data.pad_mode,
"pad_token_id": self.model_config.tokenizer.pad_token_id,
}
train_time = 0
total_tokens = 0
for epoch in range(start_epoch, self.config.trainer.total_epochs):
self.train_sampler.set_epoch(epoch=epoch)
aggressive_empty_cache(force_sync=True)
log_gpu_memory_usage(f"rank {self.rank}: At start of epoch {epoch}", logger=logger)
for step_in_epoch, data in enumerate(
tqdm(
self.train_dataloader,
initial=global_step % self.steps_per_epoch if epoch == start_epoch else 0,
total=self.steps_per_epoch,
desc=f"Epoch {epoch + 1}/{self.config.trainer.total_epochs}",
disable=not is_logging,
)
):
global_step += 1
# construct tensordict
data = tu.get_tensordict(tensor_dict=data, non_tensor_dict=meta_info)
batch_seqlens = self._get_batch_seqlens(data=data)
# this is necessary. Otherwise, it is interpreted as NonTensorStack
batch_seqlens_ntd = NonTensorData(batch_seqlens)
tu.assign_non_tensor(data, update_lr_scheduler=True, global_token_num=batch_seqlens_ntd)
# start profile in SPMD mode
if global_step == self.start_profile_step:
self.training_client.start_profile()
# train on one batch
output = self.training_client.train_batch(data=data)
if global_step == self.end_profile_step:
self.training_client.stop_profile()
if self.engine.is_mp_src_rank_with_outputs():
metrics = tu.get(output, "metrics")
# TODO: we could accumulate metrics for N steps and then aggregate them
for k in ["loss", "grad_norm", "lr", "mfu"]:
if k in metrics.keys():
value = metrics.pop(k)
metrics[f"train/{k}"] = value
metrics["train/global_tokens"] = torch.sum(
torch.tensor(batch_seqlens, device=self.device_name)
).item()
total_tokens += metrics["train/global_tokens"]
metrics["train/total_tokens(B)"] = total_tokens / 1e9
if self.engine.get_data_parallel_rank() == 0:
tracking.log(data=metrics, step=global_step)
is_last_step = global_step >= self.total_training_steps
is_valid_step = global_step % self.test_freq == 0
is_save_step = global_step % self.save_freq == 0
# early exit or validation step
if (is_last_step and self.val_dataloader is not None) or (self.test_freq > 0 and is_valid_step):
# Perform validation
val_losses = []
for val_data in self.val_dataloader:
val_data = tu.get_tensordict(tensor_dict=val_data, non_tensor_dict=meta_info)
output = self.training_client.infer_batch(val_data)
if self.engine.is_mp_src_rank_with_outputs():
metrics = tu.get(output, "metrics")
val_losses.append(metrics["loss"])
if self.engine.is_mp_src_rank_with_outputs():
val_loss = torch.mean(torch.tensor(val_losses, device=self.device_name))
# average over data parallel group
torch.distributed.all_reduce(
val_loss, op=torch.distributed.ReduceOp.AVG, group=self.engine.get_data_parallel_group()
)
if is_logging:
metric = {"val/loss": val_loss.detach().item()}
tracking.log(data=metric, step=global_step)
last_valid_metric = metric
torch.distributed.barrier()
if is_last_step or (self.save_freq > 0 and is_save_step):
aggressive_empty_cache(force_sync=True)
self.ckpt_handler.save_checkpoint(step=global_step)
if is_last_step:
if is_logging:
print(f"Total time for train steps: {train_time:.2f}s")
print(f"Final validation metrics: {last_valid_metric}")
return
def run_sft(config):
from verl.utils.distributed import initialize_global_process_group
initialize_global_process_group()
trainer = SFTTrainer(config=config)
trainer.fit()
destroy_global_process_group()
@hydra.main(config_path="config", config_name="sft_trainer_engine", version_base=None)
def main(config):
# Automatically set `config.trainer.device = npu` when running on Ascend NPU.
auto_set_device(config)
run_sft(config)
def create_sft_dataset(data_paths, data_config, tokenizer, processor, max_samples=-1):
"""Create a dataset."""
# build dataset
# First check if a custom dataset class is specified
if data_config.custom_cls.get("path", None):
from verl.utils.import_utils import load_extern_object
dataset_cls = load_extern_object(data_config.custom_cls.path, data_config.custom_cls.name)
else:
# Default to multi-turn dataset
dataset_cls = MultiTurnSFTDataset
# Create datasets based on the selected class
dataset = dataset_cls(
parquet_files=data_paths, tokenizer=tokenizer, config=data_config, processor=processor, max_samples=max_samples
)
return dataset
if __name__ == "__main__":
main()
|
verl__utils__activation_offload.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2022-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functionality for CPU offloading of tensors saved for backward pass."""
from __future__ import annotations
import functools
import logging
import os
from typing import Any, Optional
import torch
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from verl.utils.device import get_torch_device
from verl.utils.fsdp_utils import FSDPModule as FSDP2
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
def _get_unique_tensor_key(tensor):
key = (tensor.untyped_storage().data_ptr() + tensor.storage_offset(), tensor.dtype)
return key
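# Illustration (comment only): two views that alias the same slice of storage map to the
# same key, so AsyncDoubleBufferGroupOffloadHandler offloads the shared buffer only once.
#   base = torch.arange(8, dtype=torch.float32)
#   _get_unique_tensor_key(base[2:6]) == _get_unique_tensor_key(base[2:6])  # same slice -> same key
#   _get_unique_tensor_key(base) != _get_unique_tensor_key(base[2:6])       # different offsets -> different keys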
class FSDPParameterFilter:
def __init__(self):
self.model_parameters_storage = set()
def __call__(self, tensor):
return tensor.untyped_storage().data_ptr() not in self.model_parameters_storage
def update_model_parameters(self, model):
new_storage = set()
for p in model.parameters():
new_storage.add(p.data.untyped_storage().data_ptr())
self.model_parameters_storage = new_storage
class CpuOffloadHookWithOffloadHandler:
"""Context-manager that offloads/recovers tensors through an offload hander.
The hook just offloads/recovers the tensor object to the handler through `tensor_push`
and `tensor_pop` interface. How the offload-handler manages the offloading, recovering
or prefetching timing is transparent to this hook.
"""
def __init__(
self,
offload_handler: OffloadHandler,
handler_extra_kwargs: Optional[dict[str, Any]] = None,
) -> None:
if handler_extra_kwargs is None:
handler_extra_kwargs = {}
self.offload_handler: OffloadHandler = offload_handler
self.handler_extra_kwargs: dict[str, Any] = handler_extra_kwargs
self.inside_context = False
def __enter__(self):
self.inside_context = True
torch._C._autograd._push_saved_tensors_default_hooks(self.on_save_for_backward, self.on_get_saved_tensor)
def __exit__(self, *args: Any):
self.inside_context = False
torch._C._autograd._pop_saved_tensors_default_hooks()
def on_save_for_backward(self, tensor: torch.Tensor) -> Any:
retrieve_identifier = self.offload_handler.tensor_push(tensor, **self.handler_extra_kwargs)
return retrieve_identifier
def on_get_saved_tensor(self, saved_state: Any) -> torch.Tensor:
tensor = self.offload_handler.tensor_pop(saved_state, **self.handler_extra_kwargs)
return tensor
class OffloadHandler:
"""A base class for CPU offload-handler."""
def __init__(self) -> None:
pass
def tensor_push(self, tensor: torch.Tensor, **kwargs) -> Any:
"""Tensor push."""
raise NotImplementedError(
"`tensor_push` is not implemented in OffloadHandler class. Inherit this class and implement your "
"custom tensor_push."
)
def tensor_pop(self, tensor_tag: Any, **kwargs):
"""Tensor pop."""
raise NotImplementedError(
"`tensor_pop` is not implemented in OffloadHandler class. Inherit this class and implement your "
"custom tensor_pop."
)
class GroupCommitFunction(torch.autograd.Function):
"""this is a dummy op with output identical to input.
However, it is necessary for marking a timepoint for offload handler to
accomplish all synchronizations. Implementing it as a function is necessary
because we need to actions in both forward and backward.
"""
@staticmethod
def forward(ctx, tensor, cpu_offload_handler):
# pylint: disable=missing-function-docstring
cpu_offload_handler.on_group_commit_forward()
ctx.cpu_offload_handler = cpu_offload_handler
# return the identical tensor
return tensor
@staticmethod
def backward(ctx, grad_output):
# pylint: disable=missing-function-docstring
cpu_offload_handler = ctx.cpu_offload_handler
cpu_offload_handler.on_group_commit_backward()
return grad_output, None
group_prefetch_offload_commit = GroupCommitFunction.apply
class SynchronizedGroupOffloadHandler(OffloadHandler):
"""Offload Handler that offloads/reloads in a synchronized way.
The device-to-host and host-to-device copying happen in the same stream
as the computation kernels, thus the copying will block computation.
"""
def __init__(self, num_offload_group, tensor_need_offloading_checker=(lambda _: True)) -> None:
super().__init__()
self.num_offload_group = num_offload_group
self.tensor_need_offloading_checker = tensor_need_offloading_checker
self.groupid_reset()
def groupid_reset(self):
"""Groupid reset."""
# Data structures to label saved tensors and book-keep their cpu copies.
# Currently, on push, create a new cpu tensor and copies; on pop, copies
# the tensor back to gpu and deletes the cpu tensor.
# These will increment whenever `group_commit()` is invoked
self.current_group, self.tensor_count_current_group = (0, 0)
self.torch_tensor_count = 0
self.tensor_tag_to_state = {}
def on_group_commit_forward(self):
"""On group commit forward."""
# finishing up with updating current group and tensor count
self.current_group += 1 # increment
self.tensor_count_current_group = 0 # reset
def on_group_commit_backward(self):
"""On group commit backward."""
self.current_group -= 1
assert self.current_group >= 0
@staticmethod
def offload(src_tensor, pin_memory=True):
"""Offload."""
cpu_backup = torch.empty(
src_tensor.size(),
dtype=src_tensor.dtype,
layout=src_tensor.layout,
device="cpu",
pin_memory=pin_memory,
)
cpu_backup.copy_(src_tensor, non_blocking=True)
state = (src_tensor.device, cpu_backup)
return state
@staticmethod
def reload(state, non_blocking=None):
"""Reload."""
dev, cpu_backup = state
if non_blocking is None:
non_blocking = cpu_backup.is_pinned()
return cpu_backup.to(dev, non_blocking=non_blocking)
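# Hedged usage sketch (comment only; assumes a CUDA device is available):
#   src = torch.randn(4, device="cuda")
#   state = SynchronizedGroupOffloadHandler.offload(src)      # async D2H copy into pinned memory
#   torch.cuda.synchronize()                                   # the copy is non_blocking; sync before reuse
#   restored = SynchronizedGroupOffloadHandler.reload(state)   # H2D copy back to the original device
#   assert torch.equal(restored.cpu(), src.cpu())
# The async handler below replaces the explicit synchronize with dedicated D2H/H2D streams.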
def tensor_push(self, tensor: torch.Tensor, **kwargs):
"""Tensor push."""
# obtain a unique tensor tag
tensor_tag = (self.current_group, self.tensor_count_current_group)
self.tensor_count_current_group += 1
assert tensor_tag not in self.tensor_tag_to_state
if self.current_group < self.num_offload_group and self.tensor_need_offloading_checker(tensor):
state = SynchronizedGroupOffloadHandler.offload(tensor)
self.tensor_tag_to_state[tensor_tag] = state
else:
# will be offloaded together after group commit
self.tensor_tag_to_state[tensor_tag] = tensor
return tensor_tag
def tensor_pop(self, tensor_tag, **kwargs):
"""Tensor pop."""
assert tensor_tag in self.tensor_tag_to_state
state = self.tensor_tag_to_state.pop(tensor_tag)
if isinstance(state, tuple):
tensor = SynchronizedGroupOffloadHandler.reload(state)
else:
tensor = state
return tensor
class AsyncDoubleBufferGroupOffloadHandler(SynchronizedGroupOffloadHandler):
"""Compared to synchronize, this uses more memory because of the buffer but
achieves better performance due to the overlapping. D2h and h2d copying are
completely hidden behind computation if computation time of a layer is longer
than host-device communication time. Bulk offloading with delay and bulk reloading
with prefetch are implemented."""
def __init__(
self,
num_offload_group, # must be <= actual number of groups (number of commits)
num_model_group,
tensor_need_offloading_checker=(lambda t: True),
) -> None:
super().__init__(
num_offload_group=num_offload_group,
tensor_need_offloading_checker=tensor_need_offloading_checker,
)
# Number of layers in the model
self.num_layers = num_model_group
# Data Structure to maintain reference to activation tensors
self.tensor_tag_to_buf = {}
# Tracking the number of layers offloaded
self.offloaded_group_count = 0
# Core data structure that decides the window for offloading
self.layer_window_map = {}
self.group_offload_mapping = {}
# Logic to make offloading load balance across computation
# for optimal CPU/GPU interconnect usage
constant = 0
for i in range(self.num_offload_group):
self.layer_window_map[i] = ((self.num_layers // self.num_offload_group) * (i + 1)) - 1
if i < (self.num_layers % self.num_offload_group):
self.layer_window_map[i] += i + 1
constant = i + 1
else:
self.layer_window_map[i] += constant
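# Worked example (comment only): with num_model_group=10 layers and num_offload_group=3,
# the loop above yields layer_window_map = {0: 3, 1: 6, 2: 9}; i.e. group 0 is synchronized
# and freed after layer 3, group 1 after layer 6 and group 2 after layer 9, spreading the
# D2H traffic roughly evenly across the forward pass.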
# allocate streams and events for synchronization
self.d2h_stream = get_torch_device().Stream()
self.h2d_stream = get_torch_device().Stream()
def tensor_push(self, tensor: torch.Tensor, **kwargs) -> Any:
torch_stray_tensor = isinstance(
tensor,
torch._subclasses.fake_tensor.FakeTensor | torch._subclasses.functional_tensor.FunctionalTensor,
)
need_offload = not torch_stray_tensor
need_offload = need_offload and self.tensor_need_offloading_checker(tensor)
if need_offload:
# obtain a unique tensor tag
tensor_tag = (self.current_group, self.tensor_count_current_group)
self.tensor_count_current_group += 1
assert tensor_tag not in self.tensor_tag_to_state
self.tensor_tag_to_state[tensor_tag] = tensor
if self.current_group < self.num_offload_group:
self.tensor_tag_to_buf[tensor_tag] = tensor
else:
tensor_tag = tensor
return tensor_tag
def tensor_pop(self, tensor_tag, **kwargs):
"""Tensor pop."""
if isinstance(tensor_tag, torch.Tensor):
return tensor_tag
assert tensor_tag in self.tensor_tag_to_state
tensor = self.tensor_tag_to_state.pop(tensor_tag)
self.tensor_tag_to_buf.pop(tensor_tag, None)
# the tensor should have been copied back in on_group_commit_backward()
# which invokes bulk_reload_group.
assert not isinstance(tensor, tuple)
return tensor
def bulk_offload_group(self, group_to_offload):
"""Bulk offload group."""
offload_mapping = {}
offload_size = 0
with get_torch_device().stream(self.d2h_stream):
for tensor_tag, state in self.tensor_tag_to_state.items():
group_id, _ = tensor_tag
if group_id == group_to_offload:
assert not isinstance(state, tuple)
key = _get_unique_tensor_key(state)
if key not in offload_mapping:
offload_mapping[key] = state
# store (key, shape) so tensor_pop can recover the tensor from the shared CPU copy after reload
self.tensor_tag_to_state[tensor_tag] = (key, state.shape)
for key, tensor in offload_mapping.items():
state = SynchronizedGroupOffloadHandler.offload(tensor)
offload_size += tensor.numel() * tensor.element_size()
offload_mapping[key] = state
self.group_offload_mapping[group_to_offload] = offload_mapping
def synchronize_on_group_commit_forward(self, current_group):
"""Synchronize on group commit forward."""
# For the first group, kickstart the offload after we have
# the first compute completion
if current_group == 0:
self.d2h_stream.wait_stream(get_torch_device().current_stream())
self.bulk_offload_group(current_group)
# Window map data structure helps us synchronize based on number
# of layers offloaded
if self.layer_window_map[self.offloaded_group_count] == current_group:
# Stream synchronization both ways
self.d2h_stream.wait_stream(get_torch_device().current_stream())
get_torch_device().current_stream().wait_stream(self.d2h_stream)
# Time to free the activation memory after usage
for tensor_tag, _ in self.tensor_tag_to_buf.items():
if tensor_tag[0] == self.offloaded_group_count:
self.tensor_tag_to_buf[tensor_tag] = None
# Time to offload the next group
if self.offloaded_group_count < (self.num_offload_group - 1):
self.bulk_offload_group(self.offloaded_group_count + 1)
# Increment the offload group count to keep track
self.offloaded_group_count += 1
def on_group_commit_forward(self):
"""This function will cause host device synchronization"""
# handle synchronization events
self.synchronize_on_group_commit_forward(self.current_group)
super().on_group_commit_forward()
@torch.no_grad
def bulk_reload_group(self, group_to_reload):
"""Bulk reload group."""
assert group_to_reload < self.num_offload_group
with get_torch_device().stream(self.h2d_stream):
# move back tensors
offload_mapping = self.group_offload_mapping.pop(group_to_reload)
assert offload_mapping is not None
for key, state in offload_mapping.items():
offload_mapping[key] = SynchronizedGroupOffloadHandler.reload(state)
for tensor_label, state in self.tensor_tag_to_state.items():
group_id, _ = tensor_label
if group_id == group_to_reload and not isinstance(state, torch.Tensor):
assert isinstance(state, tuple), f"{group_id} {state}"
key, shape = state
recovered_tensor = offload_mapping[key].view(shape)
self.tensor_tag_to_state[tensor_label] = recovered_tensor
def on_group_commit_backward(self):
# First decrement the current group.
# Each commit in forward increments the group counter and each commit in backward decrements it,
# so by the end of backward it should be back to 0.
self.current_group -= 1
assert self.current_group >= 0
# Layer window data structure helps us to reload at right times
if self.layer_window_map[self.offloaded_group_count - 1] == self.current_group:
# Stream synchronization both ways
self.h2d_stream.wait_stream(get_torch_device().current_stream())
get_torch_device().current_stream().wait_stream(self.h2d_stream)
# Time to reload the next group
self.bulk_reload_group(self.offloaded_group_count - 1)
# Decrease the offloading group counter
self.offloaded_group_count -= 1 if self.offloaded_group_count > 1 else 0
# Last group computation needs to wait till all the reloads complete
if self.current_group == 0:
get_torch_device().current_stream().wait_stream(self.h2d_stream)
self.offloaded_group_count = 0
def get_activation_offload_context(
num_layers: int = 1, model_layers: int = 1, tensor_need_offloading_checker=(lambda t: True)
):
cpu_offload_handler = AsyncDoubleBufferGroupOffloadHandler(
num_offload_group=num_layers,
num_model_group=model_layers,
tensor_need_offloading_checker=tensor_need_offloading_checker,
)
def group_prefetch_offload_commit_async(tensor):
return group_prefetch_offload_commit(tensor, cpu_offload_handler)
return (
CpuOffloadHookWithOffloadHandler(offload_handler=cpu_offload_handler),
group_prefetch_offload_commit_async,
)
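# --- Hedged usage sketch (comment only; `layer_0`, `layer_1` and `loss` are hypothetical). ---
# Wrap the forward pass in the returned context and mark each layer boundary with the commit
# function so the handler knows when a group of activations may be offloaded:
#   ctx, commit = get_activation_offload_context(num_layers=3, model_layers=4)
#   with ctx:
#       hidden = layer_0(hidden)
#       hidden = commit(hidden)   # group 0 committed -> its activations can start moving to CPU
#       hidden = layer_1(hidden)
#       hidden = commit(hidden)
#       ...
#   loss.backward()               # groups are prefetched back to GPU during backward
# ActivationHandler below automates exactly this wrapping for FSDP-wrapped layers.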
class ActivationHandler:
def __init__(self, offload_ctx, sync_func, tensor_filter, enable_ckpt):
self._offload_ctx = offload_ctx
self._sync_func = sync_func
self._enable_ckpt = enable_ckpt
self._tensor_filter = tensor_filter
if enable_ckpt:
self.checkpoint_fn = functools.partial(
torch.utils.checkpoint.checkpoint,
use_reentrant=True,
)
def pre_forward(self, module):
if module.training:
self._offload_ctx.__enter__()
self._tensor_filter.update_model_parameters(module)
def post_forward(self, module):
if module.training:
self._offload_ctx.__exit__(None, None, None)
def _pack_kwargs(self, *args, **kwargs):
kwarg_keys = []
flat_args = list(args)
for k, v in kwargs.items():
kwarg_keys.append(k)
flat_args.append(v)
return tuple(flat_args), tuple(kwarg_keys)
def _unpack_kwargs(self, flat_args, kwarg_keys):
assert len(kwarg_keys) <= len(flat_args), f"too many keys {len(kwarg_keys)} vs. {len(flat_args)}"
if len(kwarg_keys) == 0:
return flat_args, {}
args = flat_args[: -len(kwarg_keys)]
kwargs = dict(zip(kwarg_keys, flat_args[-len(kwarg_keys) :], strict=True))
return args, kwargs
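# Illustration (comment only) of the pack/unpack round trip:
#   flat, keys = self._pack_kwargs(1, 2, a=3, b=4)   # -> (1, 2, 3, 4), ("a", "b")
#   args, kwargs = self._unpack_kwargs(flat, keys)   # -> (1, 2), {"a": 3, "b": 4}
# This lets keyword arguments ride through torch.utils.checkpoint.checkpoint, which only
# forwards positional arguments to the checkpointed function.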
def _ckpt_forward(self, forward_method, *args, **kwargs):
flat_args, kwarg_keys = self._pack_kwargs(*args, **kwargs)
def my_function(*inputs):
# unpack back into args and kwargs
nonlocal forward_method, kwarg_keys
unpacked_args, unpacked_kwargs = self._unpack_kwargs(inputs, kwarg_keys)
# run original module
return forward_method(*unpacked_args, **unpacked_kwargs)
return self.checkpoint_fn(
my_function,
*flat_args,
)
def forward(self, module, forward_method, *args, **kwargs):
if not module.training:
return forward_method(*args, **kwargs)
if not self._enable_ckpt:
ret = forward_method(*args, **kwargs)
else:
ret = self._ckpt_forward(forward_method, *args, **kwargs)
binded_tensor = ret
if isinstance(ret, tuple):
binded_tensor = ret[0]
binded_tensor = self._sync_func(binded_tensor)
final_ret = binded_tensor
if isinstance(ret, tuple):
final_ret = (final_ret,) + ret[1:]
return final_ret
def wrap_module_forward_method(self, module):
orig_method = module.forward
handler = self
@functools.wraps(orig_method)
def wrapped_method(model_self, *args, **kwargs):
nonlocal handler
handler.pre_forward(model_self)
out = handler.forward(model_self, orig_method, *args, **kwargs)
handler.post_forward(model_self)
return out
module.forward = wrapped_method.__get__(module, type(module))
def enable_activation_offloading(model, strategy, enable_ckpt=False):
"""
Enable activation offloading for the model. It groups activations by TransformerLayer and offloads activation
groups asynchronously. This means that the offloading of the i-th activation group and the computation of the i+1-th
activation group happen at the same time, and there are at most two activation groups in GPU memory.
Args:
model: the model to enable activation offloading
strategy: the training strategy of the model, such as "fsdp"
enable_ckpt: whether activation checkpointing (also called gradient checkpointing) has been enabled for the model
Note:
For best efficiency, activation offloading is usually combined with activation checkpointing. However, this
implementation of activation offloading conflicts with the activation checkpointing implementation of
some training strategies. This function resolves that conflict, and therefore requires the "strategy" and
"enable_ckpt" arguments.
"""
assert strategy == "fsdp" or strategy == "fsdp2", "activation offloading only supports fsdp strategy"
layers = []
def get_layers(module):
for name, child in module.named_children():
if not isinstance(child, FSDP | FSDP2):
get_layers(child)
else:
wrapped_module = child
if isinstance(child, FSDP):
wrapped_module = child._fsdp_wrapped_module
# In some cases, torch.nn.Embedding is wrapped with FSDP alone. However, the activation
# size of torch.nn.Embedding is small, so it's not necessary to offload it.
if not isinstance(wrapped_module, torch.nn.Embedding):
layers.append(child)
get_layers(model)
if len(layers) < 3:
logger.warning(f"Find only {len(layers)} fsdp layers, not necessary to enable async activation offloading")
return
tensor_filter = FSDPParameterFilter()
context, sync_func = get_activation_offload_context(len(layers) - 1, len(layers), tensor_filter)
if enable_ckpt:
# The activation checkpointing implementation in the transformers library is incompatible with
# activation offloading, so it is disabled here. The ActivationHandler below provides its own
# checkpointing path, so both features can still be enabled at the same time.
for module in model.modules():
if hasattr(module, "gradient_checkpointing_disable"):
module.gradient_checkpointing_disable()
handler = ActivationHandler(context, sync_func, tensor_filter, enable_ckpt)
for layer in layers:
module = layer
if isinstance(layer, FSDP):
module = module._fsdp_wrapped_module
handler.wrap_module_forward_method(module)
|
verl__utils__attention_utils.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable
_index_first_axis, _pad_input, _rearrange, _unpad_input = None, None, None, None
def _get_attention_functions() -> tuple[Callable, Callable, Callable, Callable]:
"""Dynamically import attention functions based on available hardware."""
from verl.utils.device import is_torch_npu_available
global _index_first_axis, _pad_input, _rearrange, _unpad_input
if is_torch_npu_available(check_device=False):
from verl.utils.npu_flash_attn_utils import index_first_axis, pad_input, rearrange, unpad_input
else:
from flash_attn.bert_padding import index_first_axis, pad_input, rearrange, unpad_input
_index_first_axis, _pad_input, _rearrange, _unpad_input = index_first_axis, pad_input, rearrange, unpad_input
return _index_first_axis, _pad_input, _rearrange, _unpad_input
def index_first_axis(*args, **kwargs):
"""
Unified entry point for `index_first_axis` across CUDA and NPU backends.
Dynamically dispatches to the appropriate device-specific implementation:
- On CUDA: `flash_attn.bert_padding.index_first_axis`
- On NPU: `transformers.integrations.npu_flash_attention.index_first_axis`
(falls back to `transformers.modeling_flash_attention_utils._index_first_axis`
in newer versions of transformers).
Users can call this function directly without worrying about the underlying device.
"""
func, *_ = _get_attention_functions()
return func(*args, **kwargs)
def pad_input(*args, **kwargs):
"""
Unified entry point for `pad_input` across CUDA and NPU backends.
Dynamically dispatches to the appropriate device-specific implementation:
- On CUDA: `flash_attn.bert_padding.pad_input`
- On NPU: `transformers.integrations.npu_flash_attention.pad_input`
(falls back to `transformers.modeling_flash_attention_utils._pad_input`
in newer versions of transformers).
Users can call this function directly without worrying about the underlying device.
"""
_, func, *_ = _get_attention_functions()
return func(*args, **kwargs)
def rearrange(*args, **kwargs):
"""
Unified entry point for `rearrange` across CUDA and NPU backends.
Dynamically dispatches to the appropriate device-specific implementation:
- On CUDA: `flash_attn.bert_padding.rearrange`
- On NPU: `transformers.integrations.npu_flash_attention.rearrange`
(falls back to `einops.rearrange` if no dedicated NPU implementation exists).
Users can call this function directly without worrying about the underlying device.
"""
*_, func, _ = _get_attention_functions()
return func(*args, **kwargs)
def unpad_input(*args, **kwargs):
"""
Unified entry point for `unpad_input` across CUDA and NPU backends.
Dynamically dispatches to the appropriate device-specific implementation:
- On CUDA: `flash_attn.bert_padding.unpad_input`
- On NPU: `transformers.integrations.npu_flash_attention.unpad_input`
(falls back to `transformers.modeling_flash_attention_utils._unpad_input`
in newer versions of transformers).
Users can call this function directly without worrying about the underlying device.
"""
*_, func = _get_attention_functions()
return func(*args, **kwargs)
__all__ = ["index_first_axis", "pad_input", "rearrange", "unpad_input"]
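# ---------------------------------------------------------------------------------------------
# Hedged usage sketch (assumes a CUDA host with flash-attn installed, so the dispatch above
# resolves to flash_attn.bert_padding). Padding is removed before a varlen attention kernel
# would run and the original (batch, seqlen, hidden) layout is restored afterwards. Newer
# flash-attn versions return an extra value from unpad_input, which the trailing ``*_`` absorbs.
if __name__ == "__main__":
    import torch

    hidden = torch.randn(2, 4, 8)                      # (batch, seqlen, hidden)
    mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]])  # (batch, seqlen)
    unpadded, indices, cu_seqlens, max_seqlen, *_ = unpad_input(hidden, mask)
    assert unpadded.shape[0] == int(mask.sum())        # only the 5 real tokens remain
    # ... a varlen attention kernel would consume (unpadded, cu_seqlens, max_seqlen) here ...
    restored = pad_input(unpadded, indices, 2, 4)
    assert restored.shape == hidden.shape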
|
verl__utils__chat_template.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
import logging
import os
logger = logging.getLogger(__name__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
def initialize_system_prompt(tokenizer, **apply_chat_template_kwargs) -> list[int]:
"""
Initialize system prompt tokens for chat templates that support them.
Args:
tokenizer: The tokenizer with a chat template
**apply_chat_template_kwargs: Additional arguments for apply_chat_template
Returns:
List of token IDs for the system prompt, or empty list if not supported
"""
token1 = tokenizer.apply_chat_template(
[{"role": "user", "content": ""}], add_generation_prompt=False, tokenize=True
)
token2 = tokenizer.apply_chat_template(
[{"role": "user", "content": ""}] * 2, add_generation_prompt=False, tokenize=True
)
# get system prompt tokens
system_prompt = token1[: -(len(token2) - len(token1))]
return system_prompt
def extract_system_prompt_and_generation(tokenizer):
token1 = tokenizer.apply_chat_template(
[{"role": "user", "content": ""}], add_generation_prompt=False, tokenize=True
)
token2 = tokenizer.apply_chat_template(
[{"role": "user", "content": ""}] * 2, add_generation_prompt=False, tokenize=True
)
# get system prompt tokens
system_prompt = token1[: -(len(token2) - len(token1))]
# get generate prompt tokens
token3 = tokenizer.apply_chat_template([{"role": "user", "content": ""}], add_generation_prompt=True, tokenize=True)
generate_prompt = token3[len(token1) :]
return system_prompt, generate_prompt
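# ---------------------------------------------------------------------------------------------
# Hedged, self-contained sketch of the length-difference trick used above. ``_FakeTokenizer``
# is a hypothetical stand-in for a HF tokenizer whose chat template prepends a fixed system
# prompt once and appends a fixed suffix per message (plus a generation prompt on request).
if __name__ == "__main__":
    class _FakeTokenizer:
        SYSTEM = [101, 102]  # pretend system-prompt token ids
        TURN = [7, 8, 9]     # pretend per-message token ids
        GEN = [42]           # pretend generation-prompt token ids

        def apply_chat_template(self, messages, add_generation_prompt=False, tokenize=True):
            ids = list(self.SYSTEM) + list(self.TURN) * len(messages)
            if add_generation_prompt:
                ids += self.GEN
            return ids

    tok = _FakeTokenizer()
    assert initialize_system_prompt(tok) == _FakeTokenizer.SYSTEM
    system, gen = extract_system_prompt_and_generation(tok)
    assert system == _FakeTokenizer.SYSTEM and gen == _FakeTokenizer.GEN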
|
verl__utils__checkpoint__checkpoint_handler.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: add unit tests
import logging
import os
import re
from enum import Enum
import torch
import verl.utils.hdfs_io as hdfs_io
from verl.single_controller import WorkerGroup
from verl.utils.checkpoint.checkpoint_manager import find_latest_ckpt_path, get_checkpoint_tracker_filename
from verl.utils.logger import log_with_rank
from verl.workers.engine import BaseEngine
def extract_step(path):
match = re.search(r"global_step_(\d+)", path)
if match:
return int(match.group(1))
return None
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_SFT_LOGGING_LEVEL", "WARN"))
class OrchestrationMode(Enum):
SPMD = 0
RAY = 1
class CheckpointHandler:
"""
    CheckpointHandler manages the path and global-step bookkeeping of a checkpoint folder.
    Currently it only works with a single model; it can be extended to support multiple models.
    It is expected to be used in SPMD style (e.g., launched with torchrun).
"""
def __init__(
self,
engine: BaseEngine | WorkerGroup,
train_dataloader,
*,
default_local_dir,
max_ckpt_to_keep=None,
default_hdfs_dir=None,
resume_mode="auto",
resume_from_path=None,
mode=OrchestrationMode.SPMD,
):
self.default_local_dir = default_local_dir
self.max_ckpt_to_keep = max_ckpt_to_keep
self.default_hdfs_dir = default_hdfs_dir
self.resume_mode = resume_mode
self.resume_from_path = resume_from_path
self.engine = engine
self.train_dataloader = train_dataloader
self.mode = mode
if self.mode == OrchestrationMode.SPMD:
self.rank = torch.distributed.get_rank()
self.is_mp_src_rank_with_outputs = self.engine.is_mp_src_rank_with_outputs()
self.dp_rank = self.engine.get_data_parallel_rank()
elif self.mode == OrchestrationMode.RAY:
self.rank = 0
self.is_mp_src_rank_with_outputs = True
self.dp_rank = 0
else:
raise ValueError(f"Unknown {self.mode=}")
def save_checkpoint(self, step):
"""Save checkpoint using FSDPCheckpointManager with improved tracking"""
from verl.utils.fs import local_mkdir_safe
# Determine checkpoint path
local_global_step_folder = os.path.join(self.default_local_dir, f"global_step_{step}")
if self.rank == 0:
print(f"Saving checkpoint to: {local_global_step_folder}")
# Get max checkpoints to keep
max_ckpt_to_keep = self.max_ckpt_to_keep
# Use checkpoint manager to save
self.engine.save_checkpoint(
local_path=local_global_step_folder, global_step=step, max_ckpt_to_keep=max_ckpt_to_keep
)
# Save dataloader state. Note that we only save the iterator in the train_dataloader.
# So it's identical in each dp rank.
if self.is_mp_src_rank_with_outputs:
dp_rank = self.dp_rank
local_mkdir_safe(local_global_step_folder)
dataloader_local_path = os.path.join(local_global_step_folder, f"data_{dp_rank}.pt")
# Use StatefulDataLoader's built-in state dict functionality
dataloader_state_dict = self.train_dataloader.state_dict()
torch.save(dataloader_state_dict, dataloader_local_path)
print(f"Saved dataloader state to: {dataloader_local_path}")
if self.rank == 0:
# Update latest checkpoint tracker (atomic write)
tracker_file = get_checkpoint_tracker_filename(self.default_local_dir)
temp_tracker_file = tracker_file + ".tmp"
with open(temp_tracker_file, "w") as f:
f.write(str(step))
os.rename(temp_tracker_file, tracker_file)
print(f"Updated checkpoint tracker: {tracker_file}")
# Copy to HDFS if configured
if self.rank == 0 and self.default_hdfs_dir:
hdfs_io.makedirs(self.default_hdfs_dir, exist_ok=True)
hdfs_io.copy(src=local_global_step_folder, dst=self.default_hdfs_dir, dirs_exist_ok=True)
if self.mode == OrchestrationMode.SPMD:
torch.distributed.barrier()
def load_checkpoint(self):
# Determine resume path based on configuration
checkpoint_path = self._determine_resume_path()
if checkpoint_path is None:
return 0
# extract resume step from checkpoint path
resume_step = extract_step(checkpoint_path)
if resume_step is None:
log_with_rank(
f"Warning: Could not extract step number from {checkpoint_path}, starting from step 0",
logger=logger,
rank=self.rank,
level=logging.WARNING,
log_only_rank_0=True,
)
return 0
self.resume_global_step = resume_step
# Use checkpoint manager to load model state
self.engine.load_checkpoint(checkpoint_path)
# Always load dataloader state for StatefulDataLoader
self._load_dataloader_state(checkpoint_path)
return resume_step
def _load_dataloader_state(self, checkpoint_path: str):
"""Load dataloader state from checkpoint"""
dp_rank = self.dp_rank
dataloader_path = os.path.join(checkpoint_path, f"data_{dp_rank}.pt")
if os.path.exists(dataloader_path):
# Use StatefulDataLoader's built-in state dict functionality
dataloader_state_dict = torch.load(dataloader_path, map_location="cpu", weights_only=False)
self.train_dataloader.load_state_dict(dataloader_state_dict)
log_with_rank(
f"Successfully loaded dataloader state from {dataloader_path}",
logger=logger,
rank=self.rank,
log_only_rank_0=True,
)
else:
log_with_rank(
f"Warning: No dataloader state found at {dataloader_path}, will start from scratch",
logger=logger,
rank=self.rank,
level=logging.WARNING,
log_only_rank_0=True,
)
def _determine_resume_path(self):
"""Determine the path to resume from based on resume_mode configuration"""
resume_mode = self.resume_mode
resume_from_path = self.resume_from_path
if resume_mode == "disable":
return None
elif resume_mode == "auto":
if resume_from_path is not None:
assert os.path.exists(resume_from_path), (
"resume_from_path must be null or an existing path when resume_mode is 'auto'"
)
assert "global_step_" in resume_from_path, "resume_from_path must specify the global_steps"
return resume_from_path
# Try to find the latest checkpoint in the default directory
return self._find_latest_checkpoint()
elif resume_mode == "resume_path":
assert os.path.exists(resume_from_path), (
"resume_from_path must be an existing path when resume_mode is 'resume_path'"
)
assert "global_step_" in resume_from_path, "resume_from_path must specify the global_steps"
return resume_from_path
else:
raise ValueError(f"Invalid resume_mode: {resume_mode}. Must be 'auto', 'disable', or 'resume_path'")
def _find_latest_checkpoint(self):
"""Find the latest checkpoint in the default local directory"""
checkpoint_dir = self.default_local_dir
if not os.path.exists(checkpoint_dir):
return None
latest_checkpoint = find_latest_ckpt_path(checkpoint_dir)
if latest_checkpoint and self.rank == 0:
step_num = extract_step(latest_checkpoint)
print(f"Found latest checkpoint: {latest_checkpoint} (step {step_num})")
return latest_checkpoint
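# ---------------------------------------------------------------------------------------------
# Hedged sketch (assumes this module's own imports resolve; no engine, worker group, or
# dataloader is constructed). It exercises the two path conventions the handler relies on:
# step extraction from "global_step_<N>" folders and the atomic tracker-file write performed
# in save_checkpoint().
if __name__ == "__main__":
    import tempfile

    assert extract_step("/ckpts/global_step_250") == 250
    assert extract_step("/ckpts/not_a_step_dir") is None
    with tempfile.TemporaryDirectory() as root:
        tracker_file = get_checkpoint_tracker_filename(root)
        temp_tracker_file = tracker_file + ".tmp"
        with open(temp_tracker_file, "w") as f:
            f.write("250")
        os.rename(temp_tracker_file, tracker_file)  # atomic replace, as in save_checkpoint()
        with open(tracker_file) as f:
            assert int(f.read()) == 250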
|
verl__utils__checkpoint__checkpoint_manager.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import shutil
import numpy as np
import torch
import torch.distributed
from omegaconf import DictConfig
from transformers import PreTrainedTokenizer, ProcessorMixin
from verl.trainer.config import CheckpointConfig
from verl.utils.device import get_device_name, get_torch_device
class BaseCheckpointManager:
"""
    A checkpoint manager that saves and loads the following states in an SPMD way:
- model
- optimizer
- lr_scheduler
- extra_states
We save
- sharded model states and optimizer states
- full lr_scheduler states
- huggingface tokenizer and config for ckpt merge
"""
def __init__(
self,
model,
optimizer: torch.optim.Optimizer,
lr_scheduler: torch.optim.lr_scheduler.LRScheduler = None,
processing_class: PreTrainedTokenizer | ProcessorMixin = None,
checkpoint_config: DictConfig | CheckpointConfig = None,
):
self.checkpoint_config = checkpoint_config
checkpoint_load_contents = checkpoint_config.get("load_contents", None) if checkpoint_config else None
checkpoint_save_contents = checkpoint_config.get("save_contents", None) if checkpoint_config else None
if checkpoint_load_contents is None:
checkpoint_load_contents = ["model", "optimizer", "extra"]
if checkpoint_save_contents is None:
checkpoint_save_contents = ["model", "optimizer", "extra"]
self.previous_global_step = None
self.previous_saved_paths = []
self.model = model
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
self.processing_class = processing_class
self.checkpoint_load_contents = checkpoint_load_contents
self.checkpoint_save_contents = checkpoint_save_contents
self.rank = torch.distributed.get_rank()
self.world_size = torch.distributed.get_world_size()
@property
def should_save_model(self) -> bool:
"""
Returns True if 'model' is in checkpoint_save_contents, indicating the model state should be saved.
"""
return "model" in self.checkpoint_save_contents
@property
def should_save_optimizer(self) -> bool:
"""
Returns True if 'optimizer' is in checkpoint_save_contents, indicating the optimizer state should be saved.
"""
return "optimizer" in self.checkpoint_save_contents
@property
def should_save_extra(self) -> bool:
"""
Returns True if 'extra' is in checkpoint_save_contents, indicating the extra state should be saved.
"""
return "extra" in self.checkpoint_save_contents
@property
def should_save_hf_model(self) -> bool:
"""
Returns True if 'hf_model' is in checkpoint_save_contents, indicating the model should be converted to hf
model and saved.
"""
return "hf_model" in self.checkpoint_save_contents
@property
def should_load_model(self) -> bool:
"""
Returns True if 'model' is in checkpoint_load_contents, indicating the model state should be loaded.
"""
return "model" in self.checkpoint_load_contents
@property
def should_load_optimizer(self) -> bool:
"""
Returns True if 'optimizer' is in checkpoint_load_contents, indicating the optimizer state should be loaded.
"""
return "optimizer" in self.checkpoint_load_contents
@property
def should_load_extra(self) -> bool:
"""
Returns True if 'extra' is in checkpoint_load_contents, indicating the extra state should be loaded.
"""
return "extra" in self.checkpoint_load_contents
def load_checkpoint(self, local_path: str, hdfs_path: str = None, del_local_after_load: bool = False):
raise NotImplementedError
def save_checkpoint(
self, local_path: str, hdfs_path: str = None, global_step: int = 0, max_ckpt_to_keep: int = None
):
raise NotImplementedError
@staticmethod
def checkpath(local_path: str, hdfs_path: str):
assert local_path is not None or hdfs_path is not None, "local_path and hdfs_path cannot be both None"
return local_path is not None, local_path if local_path is not None else hdfs_path
def remove_previous_save_local_path(self, path):
if isinstance(path, str):
path = [path]
for p in path:
abs_path = os.path.abspath(p)
print(f"Checkpoint manager remove previous save local path: {abs_path}")
if not os.path.exists(abs_path):
continue
shutil.rmtree(abs_path, ignore_errors=True)
def ensure_checkpoint_capacity(self, max_ckpt_to_keep: int):
"""
Remove old checkpoints to make room for a new one, keeping a safety buffer.
With max_ckpt_to_keep=1, this does nothing - we keep the existing checkpoint
until the new save completes successfully (handled by register_checkpoint).
For max_ckpt_to_keep >= 2, we keep (max_ckpt_to_keep - 1) checkpoints before save.
"""
if not (max_ckpt_to_keep and isinstance(max_ckpt_to_keep, int) and max_ckpt_to_keep > 1):
return
if len(self.previous_saved_paths) >= max_ckpt_to_keep:
keep_start = len(self.previous_saved_paths) - max_ckpt_to_keep + 1
self.remove_previous_save_local_path(self.previous_saved_paths[:keep_start])
self.previous_saved_paths = self.previous_saved_paths[keep_start:]
def register_checkpoint(self, new_path: str, max_ckpt_to_keep: int):
"""
Register a successfully saved checkpoint and enforce retention limit.
Adds the new checkpoint path to tracking and removes excess old
checkpoints beyond max_ckpt_to_keep.
"""
self.previous_saved_paths.append(new_path)
if not (max_ckpt_to_keep and isinstance(max_ckpt_to_keep, int) and max_ckpt_to_keep > 0):
return
if len(self.previous_saved_paths) > max_ckpt_to_keep:
keep_start = len(self.previous_saved_paths) - max_ckpt_to_keep
self.remove_previous_save_local_path(self.previous_saved_paths[:keep_start])
self.previous_saved_paths = self.previous_saved_paths[keep_start:]
@staticmethod
def get_rng_state():
rng_state = {
"cpu": torch.get_rng_state(),
"numpy": np.random.get_state(),
"random": random.getstate(),
}
if get_device_name() != "cpu":
rng_state[get_device_name()] = get_torch_device().get_rng_state()
return rng_state
@staticmethod
def load_rng_state(rng_state):
torch.set_rng_state(rng_state["cpu"])
np.random.set_state(rng_state["numpy"])
random.setstate(rng_state["random"])
if get_device_name() != "cpu":
get_torch_device().set_rng_state(rng_state[get_device_name()])
def find_latest_ckpt_path(path, directory_format="global_step_{}"):
"""
Return the most recent checkpoint directory based on a tracker file.
Args:
path (str): Base directory containing the checkpoint tracker.
directory_format (str): Template for checkpoint subfolders with one
placeholder for the iteration number (default "global_step_{}").
Returns:
str or None: Full path to the latest checkpoint directory, or
None if the tracker or checkpoint folder is missing.
"""
if path is None:
return None
tracker_file = get_checkpoint_tracker_filename(path)
if not os.path.exists(tracker_file):
if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:
print(f"Checkpoint tracker file does not exist: {tracker_file}")
return None
with open(tracker_file, "rb") as f:
iteration = int(f.read().decode())
ckpt_path = os.path.join(path, directory_format.format(iteration))
if not os.path.exists(ckpt_path):
        print(f"Checkpoint does not exist: {ckpt_path}")
return None
    print(f"Found checkpoint: {ckpt_path}")
return ckpt_path
def get_checkpoint_tracker_filename(root_path: str):
"""
    Tracker file records the latest checkpoint saved during training, so that training can restart from it.
"""
return os.path.join(root_path, "latest_checkpointed_iteration.txt")
def should_save_ckpt_esi(max_steps_duration: float, save_ckpt_duration: float = 60, redundant_time: float = 0) -> bool:
"""
    Determine whether a checkpoint should be saved before the current capacity block (ESI) expires.
Args:
max_steps_duration: Max estimated time (seconds) required to complete one training step
save_ckpt_duration: Estimated time (seconds) required to save checkpoint (default: 60)
redundant_time: Additional buffer time (seconds) for unexpected delays (default: 0)
"""
exp_ts_mlp = os.getenv("MLP_CURRENT_CAPACITY_BLOCK_EXPIRATION_TIMESTAMP") # vemlp
exp_ts_aws = os.getenv("SAGEMAKER_CURRENT_CAPACITY_BLOCK_EXPIRATION_TIMESTAMP") # aws
if exp_ts_mlp:
try:
import time
remaining = float(exp_ts_mlp) - time.time()
except ValueError:
return False
return (
remaining > 0
and max_steps_duration > 0
and remaining <= save_ckpt_duration + max_steps_duration + redundant_time
)
elif exp_ts_aws:
from datetime import datetime, timedelta
expiration_time = datetime.fromtimestamp(int(exp_ts_aws))
time_difference = expiration_time - datetime.now()
threshold_minutes = (save_ckpt_duration + max_steps_duration + redundant_time) / 60
return time_difference < timedelta(minutes=threshold_minutes)
else:
return False
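# ---------------------------------------------------------------------------------------------
# Hedged sketch (assumes this module's imports resolve; torch.distributed need not be
# initialized). First the tracker-file round trip behind find_latest_ckpt_path, then
# should_save_ckpt_esi flipping to True once the remaining capacity-block time drops below
# one training step plus the estimated save time.
if __name__ == "__main__":
    import tempfile
    import time

    with tempfile.TemporaryDirectory() as root:
        step_dir = os.path.join(root, "global_step_42")
        os.makedirs(step_dir)
        with open(get_checkpoint_tracker_filename(root), "w") as f:
            f.write("42")
        assert find_latest_ckpt_path(root) == step_dir
    os.environ["MLP_CURRENT_CAPACITY_BLOCK_EXPIRATION_TIMESTAMP"] = str(time.time() + 90)
    assert should_save_ckpt_esi(max_steps_duration=60, save_ckpt_duration=60)
    del os.environ["MLP_CURRENT_CAPACITY_BLOCK_EXPIRATION_TIMESTAMP"]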
|
verl__utils__checkpoint__fsdp_checkpoint_manager.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import warnings
from dataclasses import asdict, dataclass
from typing import Optional
import torch
import torch.distributed
from accelerate import init_empty_weights
from omegaconf import DictConfig
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp import ShardedOptimStateDictConfig, ShardedStateDictConfig, StateDictType
from transformers import GenerationConfig, PreTrainedTokenizer, ProcessorMixin
from transformers.dynamic_module_utils import custom_object_save
from verl.utils.device import is_cuda_available
from verl.utils.fs import copy_to_local, is_non_local, local_mkdir_safe
from verl.utils.fsdp_utils import fsdp_version, get_fsdp_full_state_dict, get_fsdp_state_ctx
from verl.utils.logger import log_with_rank
from .checkpoint_manager import BaseCheckpointManager
# Setup logging
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "INFO"))
@dataclass
class FSDPConfig:
"""Configuration for FSDP checkpointing.
Args:
FSDP_version (int): Version of FSDP being used.
world_size (int): Number of processes in the distributed training setup.
"""
FSDP_version: int
world_size: int
class FSDPCheckpointManager(BaseCheckpointManager):
"""
Manage FSDP checkpointing in SPMD training.
- Saves/loads per-rank sharded model & optimizer states
- Persists full lr_scheduler and RNG state
- Stores HF tokenizer/processor and model/config for unified restore
Args:
model (FSDP): Wrapped model instance.
optimizer (Optimizer): Training optimizer.
lr_scheduler (LRScheduler): Learning-rate scheduler.
processing_class (PreTrainedTokenizer or ProcessorMixin, optional):
Pre-/post-processing artifact handler.
        checkpoint_config (DictConfig): Configuration for checkpoint contents.
- 'load': Components to load; must contain 'model'. Defaults to ['model', 'optimizer', 'extra'].
- 'save': Components to save; must contain 'model'. Defaults to ['model', 'optimizer', 'extra'].
trust_remote_code: Whether to trust_remote_code when loading the model configuration
"""
def __init__(
self,
model: FSDP,
optimizer: Optional[torch.optim.Optimizer] = None,
lr_scheduler: Optional[torch.optim.lr_scheduler.LRScheduler] = None,
processing_class: PreTrainedTokenizer | ProcessorMixin = None,
checkpoint_config: DictConfig = None,
trust_remote_code: bool = False,
**kwargs,
):
if processing_class is None and "tokenizer" in kwargs:
warnings.warn(
"`tokenizer` is deprecated. use `processing_class` instead.", DeprecationWarning, stacklevel=2
)
processing_class = kwargs.pop("tokenizer")
super().__init__(
model,
optimizer,
lr_scheduler=lr_scheduler,
processing_class=processing_class,
checkpoint_config=checkpoint_config,
)
self.trust_remote_code = trust_remote_code
def load_checkpoint(self, local_path: str, hdfs_path: str = None, del_local_after_load=False):
"""
Load an FSDP checkpoint for this rank.
Downloads and loads:
- model and optimizer shards
- extra state dict (scheduler + RNG)
Args:
local_path: Directory with per-rank checkpoint files.
hdfs_path: Unused (for API compatibility).
del_local_after_load: Remove local files after loading.
"""
if local_path is None:
return
# check if the checkpoint_load_contents is valid
if self.should_load_model:
assert self.model is not None, "model must be provided when checkpoint_contents.load includes ['model']"
if self.should_load_optimizer:
assert self.optimizer is not None, (
"optimizer must be provided when checkpoint_contents.load includes ['optimizer']"
)
        # every rank downloads its own checkpoint
        state_dict_cfg = (
            ShardedStateDictConfig(offload_to_cpu=is_cuda_available) if self.should_load_model else None
        )
        optim_cfg = (
            ShardedOptimStateDictConfig(offload_to_cpu=is_cuda_available) if self.should_load_optimizer else None
        )
with get_fsdp_state_ctx(self.model, StateDictType.SHARDED_STATE_DICT, state_dict_cfg, optim_cfg):
if self.should_load_model:
remote_model_path = os.path.join(local_path, f"model_world_size_{self.world_size}_rank_{self.rank}.pt")
local_model_path = copy_to_local(remote_model_path)
model_state_dict = torch.load(local_model_path, weights_only=False)
self.model.load_state_dict(model_state_dict)
log_with_rank(f"Loaded model from {remote_model_path}", rank=self.rank, logger=logger)
if self.should_load_optimizer:
remote_optim_path = os.path.join(local_path, f"optim_world_size_{self.world_size}_rank_{self.rank}.pt")
local_optim_path = copy_to_local(remote_optim_path)
optimizer_state_dict = torch.load(local_optim_path, weights_only=False)
self.optimizer.load_state_dict(optimizer_state_dict)
log_with_rank(f"Loaded optimizer from {remote_optim_path}", rank=self.rank, logger=logger)
if self.should_load_extra:
remote_extra_state_path = os.path.join(
local_path, f"extra_state_world_size_{self.world_size}_rank_{self.rank}.pt"
)
local_extra_state_path = copy_to_local(remote_extra_state_path)
extra_state_dict = torch.load(local_extra_state_path, weights_only=False)
# recover random state
if "rng" in extra_state_dict:
# 'rng' may not exist for backward compatibility
self.load_rng_state(extra_state_dict["rng"])
log_with_rank(f"Loaded rng from {remote_extra_state_path}", rank=self.rank, logger=logger)
lr_scheduler_state_dict = extra_state_dict["lr_scheduler"]
if lr_scheduler_state_dict is not None and self.lr_scheduler is not None:
self.lr_scheduler.load_state_dict(lr_scheduler_state_dict)
log_with_rank(f"Loaded lr_scheduler from {remote_extra_state_path}", rank=self.rank, logger=logger)
if self.rank == 0 and del_local_after_load:
try:
os.remove(local_model_path) if is_non_local(local_model_path) else None
os.remove(local_optim_path) if is_non_local(local_optim_path) else None
os.remove(local_extra_state_path) if is_non_local(local_extra_state_path) else None
except Exception as e:
log_with_rank(
f"remove local resume ckpt file after loading failed, exception {e} will be ignored",
rank=self.rank,
logger=logger,
)
# wait for everyone to load checkpoints
torch.distributed.barrier()
def save_checkpoint(self, local_path: str, hdfs_path: str = None, global_step: int = 0, max_ckpt_to_keep=None):
"""
Save an FSDP checkpoint for this rank.
Writes:
- model & optimizer shard files
- extra state dict (scheduler + RNG)
- HF tokenizer/processor and model/config on rank 0
- optional full HF model under 'huggingface/' if requested
Rotates old checkpoints, keeping at most `max_ckpt_to_keep`.
Args:
local_path: Target directory for checkpoint files.
hdfs_path: Unused (for API compatibility).
global_step: Current training step (used for bookkeeping).
max_ckpt_to_keep: Number of recent checkpoints to retain.
"""
if local_path is None:
return
# record the previous global step
self.previous_global_step = global_step
if self.rank == 0:
self.ensure_checkpoint_capacity(max_ckpt_to_keep)
local_path = local_mkdir_safe(local_path)
torch.distributed.barrier()
# check if the checkpoint_save_contents is valid
if self.should_save_model:
assert self.model is not None, "model must be provided when checkpoint_contents.save includes ['model']"
if self.should_save_optimizer:
assert self.optimizer is not None, (
"optimizer must be provided when checkpoint_contents.save includes ['optimizer']"
)
# every rank will save its own model and optim shard
        state_dict_cfg = ShardedStateDictConfig(offload_to_cpu=is_cuda_available)
        optim_cfg = ShardedOptimStateDictConfig(offload_to_cpu=is_cuda_available)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
with get_fsdp_state_ctx(self.model, StateDictType.SHARDED_STATE_DICT, state_dict_cfg, optim_cfg):
model_path = os.path.join(local_path, f"model_world_size_{self.world_size}_rank_{self.rank}.pt")
optim_path = os.path.join(local_path, f"optim_world_size_{self.world_size}_rank_{self.rank}.pt")
extra_path = os.path.join(local_path, f"extra_state_world_size_{self.world_size}_rank_{self.rank}.pt")
if self.should_save_model:
model_state_dict = self.model.state_dict()
torch.save(model_state_dict, model_path)
log_with_rank(f"Saved model to {os.path.abspath(model_path)}", rank=self.rank, logger=logger)
if self.should_save_optimizer:
optimizer_state_dict = self.optimizer.state_dict()
torch.save(optimizer_state_dict, optim_path)
log_with_rank(f"Saved optim to {os.path.abspath(optim_path)}", rank=self.rank, logger=logger)
if self.should_save_extra:
lr_scheduler_state_dict = self.lr_scheduler.state_dict() if self.lr_scheduler is not None else None
extra_state_dict = {
"lr_scheduler": lr_scheduler_state_dict,
"rng": self.get_rng_state(),
}
torch.save(extra_state_dict, extra_path)
log_with_rank(f"Saved extra_state to {os.path.abspath(extra_path)}", rank=self.rank, logger=logger)
if self.rank == 0:
# Save HF tokenizer/processor and model config on rank 0 to huggingface/ directory, no matter whether
# huggingface model is requested to be saved or not.
if fsdp_version(self.model) == 1:
unwrap_model = self.model._fsdp_wrapped_module
else:
unwrap_model = self.model
hf_config_tokenizer_path = os.path.join(local_path, "huggingface")
local_mkdir_safe(hf_config_tokenizer_path)
model_config = unwrap_model.config
generation_config = None
if unwrap_model.can_generate() and hasattr(model_config, "name_or_path") and model_config.name_or_path:
try:
                    # Some models' name_or_path is empty when they were not initialized from a
                    # pretrained checkpoint; in that case we don't save a generation config.
generation_config = GenerationConfig.from_pretrained(model_config.name_or_path)
generation_config.save_pretrained(hf_config_tokenizer_path)
except Exception:
# if the generation config isn't available, we don't save it
pass
if hasattr(model_config, "auto_map") and None in model_config.auto_map:
model_config.auto_map = {k: v for k, v in model_config.auto_map.items() if k is not None}
model_config.save_pretrained(hf_config_tokenizer_path)
if self.processing_class is not None:
self.processing_class.save_pretrained(hf_config_tokenizer_path)
log_with_rank(
f"Saved model config and tokenizer class to {os.path.abspath(hf_config_tokenizer_path)}",
rank=self.rank,
logger=logger,
log_only_rank_0=True,
)
# If we have a custom model, we copy the file defining it in the folder and set the attributes so it can be
# loaded from the Hub.
if hasattr(model_config, "auto_map"):
custom_object_save(unwrap_model, hf_config_tokenizer_path, config=model_config)
# Also save runtime FSDP config
fsdp_config_path = os.path.join(local_path, "fsdp_config.json")
fsdp_config = FSDPConfig(
FSDP_version=fsdp_version(self.model),
world_size=self.world_size,
)
with open(fsdp_config_path, "w") as f:
json.dump(asdict(fsdp_config), f, indent=4)
# wait for everyone to dump to local
torch.distributed.barrier()
if self.should_save_hf_model:
            # Only rank 0 saves the hf model; offload to CPU so LLMs that are too large
            # to fit on a single GPU can still be saved.
state_dict = get_fsdp_full_state_dict(self.model, offload_to_cpu=True, rank0_only=True)
if self.rank == 0:
hf_local_path = os.path.join(local_path, "huggingface")
os.makedirs(hf_local_path, exist_ok=True)
if "ForTokenClassification" in model_config.architectures[0]:
from transformers import AutoModelForTokenClassification
auto_model_cls = AutoModelForTokenClassification
elif "ForCausalLM" in model_config.architectures[0]:
from transformers import AutoModelForCausalLM
auto_model_cls = AutoModelForCausalLM
elif "ForConditionalGeneration" in model_config.architectures[0]:
# Handle different transformers versions for Vision2Seq models
import transformers
from packaging import version
if version.parse(transformers.__version__) >= version.parse("4.54.0"):
# transformers >= 4.54.0 uses AutoModelForImageTextToText
from transformers import AutoModelForImageTextToText
auto_model_cls = AutoModelForImageTextToText
else:
# transformers < 4.54.0 uses AutoModelForVision2Seq
from transformers import AutoModelForVision2Seq
auto_model_cls = AutoModelForVision2Seq
else:
                    raise NotImplementedError(f"Unknown architecture {model_config.architectures}")
with init_empty_weights():
save_model = auto_model_cls.from_config(
model_config, torch_dtype=torch.bfloat16, trust_remote_code=self.trust_remote_code
)
save_model.to_empty(device="cpu")
if save_model.can_generate():
if generation_config is not None:
save_model.generation_config = generation_config
else:
                        print(
                            f"Warning: {self.__class__.__name__}.save_checkpoint: generation config not found, "
                            f"using a generation config created from the model config when saving hf_model."
                        )
save_model.save_pretrained(hf_local_path, state_dict=state_dict)
log_with_rank(
f"Saved hf_model to {os.path.abspath(hf_local_path)}",
rank=self.rank,
logger=logger,
log_only_rank_0=True,
)
del state_dict
del save_model
# wait for rank0 to dump hf_model to local
torch.distributed.barrier()
if self.rank == 0:
self.register_checkpoint(local_path, max_ckpt_to_keep)
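# ---------------------------------------------------------------------------------------------
# Hedged, single-process sketch of the per-rank shard layout written above
# (model_/optim_/extra_state_world_size_{W}_rank_{r}.pt). A plain nn.Linear stands in for the
# FSDP-wrapped model, so no process group or sharded state-dict context is needed; only the
# file naming and the extra-state payload mirror save_checkpoint().
if __name__ == "__main__":
    import tempfile

    import torch.nn as nn

    world_size, rank = 1, 0
    model = nn.Linear(4, 4)
    optimizer = torch.optim.AdamW(model.parameters())
    with tempfile.TemporaryDirectory() as ckpt_dir:
        torch.save(model.state_dict(), os.path.join(ckpt_dir, f"model_world_size_{world_size}_rank_{rank}.pt"))
        torch.save(optimizer.state_dict(), os.path.join(ckpt_dir, f"optim_world_size_{world_size}_rank_{rank}.pt"))
        extra_state = {"lr_scheduler": None, "rng": BaseCheckpointManager.get_rng_state()}
        torch.save(extra_state, os.path.join(ckpt_dir, f"extra_state_world_size_{world_size}_rank_{rank}.pt"))
        print(sorted(os.listdir(ckpt_dir)))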
|
verl__utils__checkpoint__megatron_checkpoint_manager.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import json
import logging
import os
import random
from collections.abc import Callable
from dataclasses import asdict
import megatron.core
import numpy as np
import torch
import torch.distributed
from megatron.core import dist_checkpointing, mpu, tensor_parallel
from megatron.core.dist_checkpointing.mapping import ShardedObject
from megatron.core.transformer.enums import AttnBackend
from packaging import version
from transformers import GenerationConfig
from verl.models.weight_loader_registry import get_weight_saver
from verl.utils.device import get_device_name, get_torch_device
from verl.utils.fs import is_non_local, local_mkdir_safe
from verl.utils.logger import log_with_rank
from verl.utils.megatron.dist_checkpointing import load_dist_checkpointing, save_dist_checkpointing
from verl.utils.megatron_utils import (
get_dist_checkpoint_path,
get_hf_model_checkpoint_path,
get_transformer_config_checkpoint_path,
)
from .checkpoint_manager import BaseCheckpointManager
# Setup logging
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "INFO"))
mcore_ge_014 = version.parse(megatron.core.__version__) >= version.parse("0.14.0")
if not mcore_ge_014:
logger.warning(
"Detected megatron.core %s, recommend upgrading to >= 0.14.0 for better checkpoint compatibility",
megatron.core.__version__,
)
class MegatronCheckpointManager(BaseCheckpointManager):
"""
Checkpoint manager for Megatron-LM distributed training.
This class manages the saving and loading of model checkpoints in a Megatron-LM
distributed training environment. It handles various aspects of checkpointing
including model states, optimizer states, learning rate schedulers, and random
number generator states, ensuring compatibility with HuggingFace formats.
Key features:
- Distributed checkpoint saving and loading using Megatron's dist_checkpointing
- Support for tensor parallel, pipeline parallel, and data parallel configurations
- Automatic handling of model state dictionaries across multiple pipeline stages
- Integration with HuggingFace model configurations and tokenizers
- Random number generator state management for reproducibility
- Support for both synchronous and asynchronous checkpoint operations
The manager automatically handles:
- Directory structure creation based on global steps and process ranks
- Model configuration and tokenizer saving in HuggingFace format
- Optimizer and scheduler state persistence
- CUDA RNG state management for deterministic training
- Checkpoint cleanup and retention policies
Args:
model: The Megatron model instance to checkpoint
optimizer: The optimizer instance (optional)
lr_scheduler: The learning rate scheduler instance (optional)
Attributes:
model: Reference to the Megatron model being checkpointed
optimizer: Reference to the optimizer (if provided)
lr_scheduler: Reference to the learning rate scheduler (if provided)
rank: Current process rank in the distributed setup
Example:
```python
checkpoint_manager = MegatronCheckpointManager(
model=megatron_model,
optimizer=optimizer,
lr_scheduler=scheduler
)
checkpoint_manager.save_checkpoint(
local_path="checkpoints/step_1000",
global_step=1000
)
checkpoint_manager.load_checkpoint(
local_path="checkpoints/step_1000"
)
```
"""
def __init__(
self,
config,
checkpoint_config,
model_config,
transformer_config,
role,
model: torch.nn.ModuleList,
arch: str,
hf_config,
param_dtype: torch.dtype,
share_embeddings_and_output_weights: bool,
processing_class,
optimizer,
optimizer_scheduler,
use_distributed_optimizer: bool,
use_checkpoint_opt_param_scheduler: bool = False,
use_dist_checkpointing: bool = True,
bridge=None,
provider=None,
peft_cls=None,
**kwargs,
):
super().__init__(
model,
optimizer=optimizer,
lr_scheduler=optimizer_scheduler,
processing_class=processing_class,
checkpoint_config=checkpoint_config,
)
self.arch = arch
self.config = config
self.transformer_config = transformer_config
self.role = role
self.is_value_model = False
if self.role in ["reward", "critic"]:
self.is_value_model = True
self.model_config = model_config
self.hf_config = hf_config
self.param_dtype = param_dtype
self.share_embeddings_and_output_weights = share_embeddings_and_output_weights
self.model_path = self.config.model.path
self.use_distributed_optimizer = use_distributed_optimizer
self.use_checkpoint_opt_param_scheduler = use_checkpoint_opt_param_scheduler
self.bridge = bridge
self.provider = provider
self.vanilla_bridge = self.provider is None
self.peft_cls = peft_cls
self.rank = torch.distributed.get_rank()
        # Megatron-Bridge can also load/save HF checkpoints for value models
self.use_dist_checkpointing = (
use_dist_checkpointing or not self.bridge or (self.vanilla_bridge and self.is_value_model)
)
self.use_hf_checkpoint = not self.use_dist_checkpointing
self.weight_saver = None
if self.bridge is None:
self.weight_saver = get_weight_saver(self.arch)
def get_rng_state(self, use_dist_ckpt: bool = True, data_parallel_random_init: bool = False):
"""collect rng state across data parallel ranks"""
rng_state = {
"random_rng_state": random.getstate(),
"np_rng_state": np.random.get_state(),
"torch_rng_state": torch.get_rng_state(),
"rng_tracker_states": tensor_parallel.get_cuda_rng_tracker().get_states(),
}
if get_device_name() != "cpu":
rng_state[f"{get_device_name()}_rng_state"] = get_torch_device().get_rng_state()
rng_state_list = None
if torch.distributed.is_initialized() and mpu.get_data_parallel_world_size() > 1 and data_parallel_random_init:
rng_state_list = [None for i in range(mpu.get_data_parallel_world_size())]
torch.distributed.all_gather_object(rng_state_list, rng_state, group=mpu.get_data_parallel_group())
else:
rng_state_list = [rng_state]
if use_dist_ckpt:
pp_rank = mpu.get_pipeline_model_parallel_rank()
pp_size = mpu.get_pipeline_model_parallel_world_size()
tp_rank = mpu.get_tensor_model_parallel_rank()
tp_size = mpu.get_tensor_model_parallel_world_size()
rng_state_list = ShardedObject(
"rng_state",
rng_state_list,
(pp_size, tp_size),
(pp_rank, tp_rank),
replica_id=mpu.get_data_parallel_rank(with_context_parallel=True),
)
return rng_state_list
def get_checkpoint_name(
self,
checkpoints_path,
pipeline_parallel=None,
tensor_rank=None,
pipeline_rank=None,
cp_rank=None,
expert_parallel=None,
expert_rank=None,
return_base_dir=True,
basename="model.pt",
):
"""Determine the directory name for this rank's checkpoint."""
# Use both the tensor and pipeline MP rank.
if pipeline_parallel is None:
pipeline_parallel = mpu.get_pipeline_model_parallel_world_size() > 1
if tensor_rank is None:
tensor_rank = mpu.get_tensor_model_parallel_rank()
if pipeline_rank is None:
pipeline_rank = mpu.get_pipeline_model_parallel_rank()
if cp_rank is None:
cp_rank = mpu.get_context_parallel_rank()
if expert_parallel is None:
expert_parallel = mpu.get_expert_model_parallel_world_size() > 1
if expert_rank is None:
expert_rank = mpu.get_expert_model_parallel_rank()
# Use both the tensor and pipeline MP rank. If using the distributed
# optimizer, then the optimizer's path must additionally include the
# data parallel rank.
# due to the fact that models are identical across cp ranks, cp rank is not used in the checkpoint path
if not pipeline_parallel:
common_path = os.path.join(checkpoints_path, f"mp_rank_{tensor_rank:02d}")
else:
common_path = os.path.join(checkpoints_path, f"mp_rank_{tensor_rank:02d}_{pipeline_rank:03d}")
if expert_parallel:
common_path = common_path + f"_{expert_rank:03d}"
os.makedirs(common_path, exist_ok=True)
if return_base_dir:
return common_path
return os.path.join(common_path, basename)
def generate_state_dict(
self,
generate_model: bool = True,
generate_optimizer: bool = True,
generate_extra: bool = True,
is_loading: bool = False,
metadata: dict | None = None,
):
# For save dist checkpointing
state_dict = {}
base_metadata = metadata or self._build_sharded_state_dict_metadata()
# Should always generate model state dict
        # All ranks save the model (each rank saves its own shard) to reduce memory pressure
        # Get sharded state dict; note that it is collected across DP groups, which adds memory pressure
for vpp_rank, model in enumerate(self.model):
if len(self.model) > 1:
mpu.set_virtual_pipeline_model_parallel_rank(vpp_rank)
key = f"model{vpp_rank}" if len(self.model) > 1 else "model"
else:
key = "model"
if hasattr(model, "module"):
model = model.module
# GPTModel's sharded_state_dict function when having mtp requires metadata['dp_cp_group']
model_metadata = dict(base_metadata)
model_metadata["dp_cp_group"] = mpu.get_data_parallel_group(with_context_parallel=True)
kwargs = {"metadata": model_metadata}
state_dict[key] = model.sharded_state_dict(**kwargs)
# Optimizer State Dict
if generate_optimizer:
torch.distributed.barrier()
sharded_state_dict_kwargs = {"is_loading": is_loading}
if base_metadata is not None:
# https://github.com/NVIDIA/Megatron-LM/blob/core_v0.14.0/megatron/core/optimizer/distrib_optimizer.py#L1109-L1123
if mcore_ge_014:
sharded_state_dict_kwargs["metadata"] = base_metadata
optimizer_sharded_states = self.optimizer.sharded_state_dict(state_dict, **sharded_state_dict_kwargs)
state_dict["optimizer"] = optimizer_sharded_states
if self.lr_scheduler is not None:
lr_state_dict = self.lr_scheduler.state_dict()
state_dict["lr_scheduler"] = lr_state_dict
if not generate_model:
state_dict.pop("model", None)
# RNG States State Dict
if generate_extra:
torch.distributed.barrier()
rng_state = self.get_rng_state()
state_dict["rng_state"] = rng_state
return state_dict
def _build_sharded_state_dict_metadata(self) -> dict:
"""Builds metadata used for sharded_state_dict versioning.
        The whole content metadata is passed to the model and optimizer ``sharded_state_dict`` methods
and therefore affects only the logic behind sharded_state_dict creation.
The content metadata should be minimalistic, ideally flat (or with a single nesting level)
and with semantically meaningful flag names (e.g. `distrib_optim_sharding_type`).
In particular, a simple integer (or SemVer) versioning flag (e.g. `metadata['version'] = 3.4`)
is discouraged, because the metadata serves for all models and optimizers and it's practically
impossible to enforce a linearly increasing versioning for this whole space.
"""
metadata: dict = {}
if not mcore_ge_014:
# For backward compatibility with Megatron core < v0.14.0
if self.use_distributed_optimizer:
metadata["distrib_optim_sharding_type"] = "fully_sharded_model_space"
return metadata
if self.use_distributed_optimizer:
megatron_config = getattr(self.config, self.role, self.config).megatron
dist_ckpt_optim_fully_reshardable = megatron_config.dist_ckpt_optim_fully_reshardable
distrib_optim_fully_reshardable_mem_efficient = (
megatron_config.distrib_optim_fully_reshardable_mem_efficient
)
if dist_ckpt_optim_fully_reshardable:
metadata["distrib_optim_sharding_type"] = "fully_reshardable"
metadata["distrib_optim_fully_reshardable_mem_efficient"] = (
distrib_optim_fully_reshardable_mem_efficient
)
else:
metadata["distrib_optim_sharding_type"] = "dp_reshardable"
metadata["singleton_local_shards"] = False
metadata["chained_optim_avoid_prefix"] = True
return metadata
def load_rng_states(self, rng_states, data_parallel_random_init=False, use_dist_ckpt=True):
# access rng_state for data parallel rank
if data_parallel_random_init:
rng_states = rng_states[mpu.get_data_parallel_rank()]
else:
rng_states = rng_states[0]
random.setstate(rng_states["random_rng_state"])
np.random.set_state(rng_states["np_rng_state"])
torch.set_rng_state(rng_states["torch_rng_state"])
if get_device_name() != "cpu":
get_torch_device().set_rng_state(rng_states[f"{get_device_name()}_rng_state"])
# Check for empty states array
if not rng_states["rng_tracker_states"]:
            raise KeyError("rng_tracker_states is empty in the loaded RNG state")
tensor_parallel.get_cuda_rng_tracker().set_states(rng_states["rng_tracker_states"])
def load_checkpoint(self, local_path: str, hdfs_path: str = None, del_local_after_load=False):
if local_path is not None:
assert os.path.exists(local_path), f"Checkpoint path {local_path} does not exist."
# For load optimizer dist_ckpt
try:
import transformer_engine
torch.serialization.add_safe_globals([torch.optim.AdamW])
torch.serialization.add_safe_globals([transformer_engine.pytorch.optimizers.fused_adam.FusedAdam])
except Exception:
pass
dist_checkpoint_path = get_dist_checkpoint_path(local_path)
load_content_metadata = getattr(dist_checkpointing, "load_content_metadata", None)
if load_content_metadata is None:
# For backward compatibility
sharded_sd_metadata = None
else:
sharded_sd_metadata = load_content_metadata(checkpoint_dir=dist_checkpoint_path)
if sharded_sd_metadata is None:
if self.use_distributed_optimizer:
# Backward-compatibility with old checkpoints which don't have content versioning
# Can be removed after ending support for MLM optimizer checkpoints with MCore < v0.13
# (for MCore v0.13+ checkpoints `sharded_sd_metadata is not None`)
sharded_sd_metadata = {
"distrib_optim_sharding_type": "fully_sharded_model_space",
}
else:
sharded_sd_metadata = self._build_sharded_state_dict_metadata()
# Get State Dict for loading
sharded_state_dict = self.generate_state_dict(
self.should_load_model and self.use_dist_checkpointing,
self.should_load_optimizer,
self.should_load_extra,
is_loading=True,
metadata=sharded_sd_metadata,
)
log_with_rank(f"Generated state dict for loading: {sharded_state_dict.keys()}", rank=self.rank, logger=logger)
# Load Dist Checkpointing
state_dict = load_dist_checkpointing(
sharded_state_dict=sharded_state_dict,
ckpt_dir=dist_checkpoint_path,
)
if self.should_load_model and self.use_dist_checkpointing:
assert "model" in state_dict or any(
f"model{vpp_rank}" in state_dict for vpp_rank in range(len(self.model))
), f"Model state dict not found in {state_dict.keys()}. Please check the checkpoint file {local_path}."
for vpp_rank, model in enumerate(self.model):
if len(self.model) == 1:
model_state_dict = state_dict["model"]
else:
assert f"model{vpp_rank}" in state_dict, f"model{vpp_rank} not found in state_dict"
model_state_dict = state_dict[f"model{vpp_rank}"]
mpu.set_virtual_pipeline_model_parallel_rank(vpp_rank)
self.model[vpp_rank].load_state_dict(model_state_dict)
log_with_rank(f"Loaded sharded model checkpoint from {local_path}", rank=self.rank, logger=logger)
# Skip HF checkpoint loading if PEFT is used
elif self.should_load_model and self.use_hf_checkpoint and self.peft_cls is None:
hf_model_path = get_hf_model_checkpoint_path(local_path)
if self.vanilla_bridge:
self.bridge.load_weights(self.model, hf_model_path)
else:
self.bridge.load_hf_weights(self.model, hf_model_path)
log_with_rank(f"Loaded HF model checkpoint from {hf_model_path} with bridge", rank=self.rank, logger=logger)
# Load PEFT adapter checkpoint if available
if self.should_load_model and self.peft_cls is not None:
adapter_ckpt_path = os.path.join(local_path, "adapter_checkpoint")
if os.path.exists(adapter_ckpt_path):
from verl.utils.megatron_peft_utils import load_adapter_checkpoint
                # TODO: a better format for adapter checkpoints, waiting for megatron-bridge support
load_adapter_checkpoint(
self.model,
adapter_ckpt_path,
)
log_with_rank(
f"Loaded adapter checkpoint from {adapter_ckpt_path}",
rank=self.rank,
logger=logger,
)
else:
log_with_rank(
f"PEFT config is set but no adapter checkpoint found at {adapter_ckpt_path}",
rank=self.rank,
logger=logger,
)
if self.should_load_optimizer:
assert "optimizer" in state_dict, (
f"Optimizer state dict not found in {state_dict.keys()}. Please check the checkpoint file {local_path}."
)
optimizer_state_dict = state_dict["optimizer"]
self.optimizer.load_state_dict(optimizer_state_dict)
log_with_rank(f"Loaded optimizer checkpoint from {local_path}", rank=self.rank, logger=logger)
if self.use_checkpoint_opt_param_scheduler:
assert "lr_scheduler" in state_dict, (
f"LR scheduler state dict not found in {state_dict.keys()}. Please check the checkpoint file "
f"{local_path}."
)
lr_scheduler_state_dict = state_dict["lr_scheduler"]
if self.lr_scheduler is not None:
self.lr_scheduler.load_state_dict(lr_scheduler_state_dict)
log_with_rank(f"Loaded LR scheduler checkpoint from {local_path}", rank=self.rank, logger=logger)
if self.should_load_extra:
assert "rng_state" in state_dict, (
f"RNG state dict not found in {state_dict.keys()}. Please check the checkpoint file {local_path}."
)
rng_state = state_dict["rng_state"]
self.load_rng_states(rng_state)
log_with_rank(f"Loaded RNG states from {local_path}", rank=self.rank, logger=logger)
if del_local_after_load:
try:
os.remove(local_path) if is_non_local(local_path) else None
except Exception as e:
log_with_rank(
f"remove local resume ckpt file after loading failed, exception {e} will be ignored",
rank=self.rank,
logger=logger,
)
def save_checkpoint(self, local_path: str, hdfs_path: str = None, global_step: int = 0, max_ckpt_to_keep=None):
# record the previous global step
self.previous_global_step = global_step
if not self.checkpoint_config.async_save:
self.ensure_checkpoint_capacity(max_ckpt_to_keep)
local_path = local_mkdir_safe(local_path)
dist_checkpoint_path = get_dist_checkpoint_path(local_path)
# Note that model weights, optimizer states, and extra states are generated
        # together in a single state dict, so we save them in one pass
if self.use_dist_checkpointing:
# Generate state dict for saving
sharded_sd_metadata = self._build_sharded_state_dict_metadata()
state_dict = self.generate_state_dict(
self.should_save_model,
self.should_save_optimizer,
self.should_save_extra,
metadata=sharded_sd_metadata,
)
log_with_rank(f"Generated state dict for saving: {state_dict.keys()}", rank=self.rank, logger=logger)
for vpp_rank, model in enumerate(self.model):
if len(self.model) > 1:
model_i_keys = state_dict[f"model{vpp_rank}"].keys()
log_with_rank(f"Generated state dict for saving: {model_i_keys}", rank=self.rank, logger=logger)
else:
log_with_rank(
f"Generated state dict for saving: {state_dict['model'].keys()}", rank=self.rank, logger=logger
)
# Start Async save if enabled
async_save_request = save_dist_checkpointing(
sharded_state_dict=state_dict,
ckpt_path=dist_checkpoint_path,
async_save=self.checkpoint_config.async_save,
content_metadata=sharded_sd_metadata,
)
# Synchronize all async save requests
if not self.checkpoint_config.async_save:
assert async_save_request is None, "Async save request should be None when not using async save."
torch.distributed.barrier()
else:
assert self.use_hf_checkpoint, "When not using distributed checkpointing, use_hf_checkpoint should be True."
            # Generate optimizer and extra state dicts
sharded_sd_metadata = self._build_sharded_state_dict_metadata()
state_dict = self.generate_state_dict(
generate_model=False,
generate_optimizer=self.should_save_optimizer,
generate_extra=self.should_save_extra,
metadata=sharded_sd_metadata,
)
# Save optimizer and extra states to local path
# Start Async save if enabled
async_save_request = save_dist_checkpointing(
sharded_state_dict=state_dict,
ckpt_path=dist_checkpoint_path,
async_save=self.checkpoint_config.async_save,
content_metadata=sharded_sd_metadata,
)
# Synchronize all async save requests
if not self.checkpoint_config.async_save:
assert async_save_request is None, "Async save request should be None when not using async save."
torch.distributed.barrier()
if self.should_save_model:
# Save adapter-only checkpoint if PEFT is enabled
if self.peft_cls is not None:
from verl.utils.megatron_peft_utils import save_adapter_checkpoint
adapter_ckpt_path = os.path.join(local_path, "adapter_checkpoint")
# Save adapter weights only (much smaller than full model)
save_adapter_checkpoint(
self.model,
adapter_ckpt_path,
self.rank,
)
log_with_rank(
f"Saved adapter-only checkpoint to {adapter_ckpt_path}",
rank=self.rank,
logger=logger,
log_only_rank_0=True,
)
elif self.use_hf_checkpoint:
# Use mbridge to save HF model checkpoint
log_with_rank(f"Saving HF model checkpoint to {local_path} with bridge", rank=self.rank, logger=logger)
hf_ckpt_path = get_hf_model_checkpoint_path(local_path)
if self.vanilla_bridge:
extended_args = {}
mbridge_config = getattr(self.checkpoint_config, "mbridge_config", None) or {}
for sig in inspect.signature(self.bridge.save_weights).parameters:
if sig == "weights_path" or sig == "models":
continue
if sig in mbridge_config:
extended_args[sig] = mbridge_config[sig]
self.bridge.save_weights(self.model, hf_ckpt_path, **extended_args)
else:
self.bridge.save_hf_weights(self.model, hf_ckpt_path)
log_with_rank(f"Saved bridge checkpoint to {hf_ckpt_path}", rank=self.rank, logger=logger)
# Only rank 0 saves the hf config and tokenizer to huggingface path
# No matter whether we save hf model or not
if self.rank == 0:
# Save tokenizer
hf_config_tokenizer_path = get_hf_model_checkpoint_path(local_path)
if self.processing_class is not None:
self.processing_class.save_pretrained(hf_config_tokenizer_path)
# Save huggingface config
self.hf_config.save_pretrained(hf_config_tokenizer_path)
if hasattr(self.hf_config, "name_or_path") and self.hf_config.name_or_path:
try:
generation_config = GenerationConfig.from_pretrained(self.hf_config.name_or_path)
generation_config.save_pretrained(hf_config_tokenizer_path)
except Exception:
# if the generation config isn't available, we don't save it
pass
log_with_rank(
f"Saved Huggingface config and tokenizer to {hf_config_tokenizer_path}",
rank=self.rank,
logger=logger,
log_only_rank_0=True,
)
if self.should_save_extra:
if self.rank == 0:
# Save transformer config
print(self.transformer_config)
bypass_keys = [
"finalize_model_grads_func",
"grad_scale_func",
"no_sync_func",
"grad_sync_func",
"param_sync_func",
"generation_config",
"_pg_collection",
]
backup = {}
for k in bypass_keys:
if hasattr(self.transformer_config, k):
backup[k] = getattr(self.transformer_config, k, None)
delattr(self.transformer_config, k)
transformer_config_dict = asdict(self.transformer_config)
for k in backup:
setattr(self.transformer_config, k, backup[k])
to_convert_types = {torch.dtype: str, AttnBackend: str}
ignore_types = [Callable]
pop_keys = []
for key, value in transformer_config_dict.items():
if type(value) in to_convert_types:
transformer_config_dict[key] = to_convert_types[type(value)](value)
if type(value) in ignore_types:
pop_keys.append(key)
if callable(value):
pop_keys.append(key)
for key in pop_keys:
transformer_config_dict.pop(key)
transformer_config_path = get_transformer_config_checkpoint_path(local_path)
with open(transformer_config_path, "w") as f:
json.dump(transformer_config_dict, f, indent=2)
if self.should_save_hf_model and not self.use_hf_checkpoint:
# wait for everyone to dump to local
if self.bridge is not None:
hf_model_ckpt_path = get_hf_model_checkpoint_path(local_path)
if self.vanilla_bridge:
extended_args = {}
mbridge_config = getattr(self.checkpoint_config, "mbridge_config", None) or {}
for sig in inspect.signature(self.bridge.save_weights).parameters:
if sig == "weights_path" or sig == "models":
continue
if sig in mbridge_config:
extended_args[sig] = mbridge_config[sig]
self.bridge.save_weights(self.model, hf_model_ckpt_path, **extended_args)
else:
self.bridge.save_hf_weights(self.model, hf_model_ckpt_path)
else:
state_dict = self.weight_saver(
self.model,
self.hf_config,
dtype=self.param_dtype,
is_value_model=self.is_value_model,
tie_word_embeddings=self.share_embeddings_and_output_weights,
)
torch.distributed.barrier()
if self.rank == 0:
hf_model_ckpt_path = get_hf_model_checkpoint_path(local_path)
import warnings
from accelerate import init_empty_weights
with init_empty_weights(), warnings.catch_warnings():
warnings.simplefilter("ignore")
if "mistral7b-rm" in self.config.model.path:
from transformers import MistralForSequenceClassification
model = MistralForSequenceClassification.from_pretrained(
self.config.model.path
) # use score head instead of lm_head
state_dict["score.weight"] = state_dict["score.weight"]
else:
from transformers import AutoModelForCausalLM
model = AutoModelForCausalLM.from_pretrained(self.config.model.path, torch_dtype="auto")
model.save_pretrained(hf_model_ckpt_path, state_dict=state_dict)
log_with_rank(
f"Saved Huggingface config and tokenizer to {hf_model_ckpt_path}",
rank=self.rank,
logger=logger,
log_only_rank_0=True,
)
if hdfs_path is not None:
log_with_rank(
f"Uploading checkpoint to {hdfs_path}", rank=self.rank, logger=logger, log_only_rank_0=True
)
from verl.utils import hdfs_io
hdfs_io.makedirs(hdfs_path, exist_ok=True)
hdfs_io.copy(src=hf_model_ckpt_path, dst=hdfs_path, dirs_exist_ok=True)
log_with_rank(
f"HDFS checkpoint uploaded to {hdfs_path}",
rank=self.rank,
logger=logger,
log_only_rank_0=True,
)
def finalize_save_fn():
# Rank 0 uploads checkpoint to HDFS if hdfs_path is provided
log_with_rank(
f"Dist checkpointing save completed for {dist_checkpoint_path}", rank=self.rank, logger=logger
)
if self.rank == 0:
if hdfs_path is not None:
log_with_rank(f"Uploading checkpoint to {hdfs_path}", rank=self.rank, logger=logger)
from verl.utils import hdfs_io
hdfs_io.makedirs(hdfs_path, exist_ok=True)
hdfs_io.copy(src=dist_checkpoint_path, dst=hdfs_path, dirs_exist_ok=True)
hdfs_io.copy(src=hf_config_tokenizer_path, dst=hdfs_path, dirs_exist_ok=True)
# update latest_checkpointed_iteration.txt when async_save is True
if self.checkpoint_config.async_save and self.rank == 0:
log_with_rank(
f"Update latest_checkpointed_iteration.txt to step {global_step}",
rank=self.rank,
logger=logger,
)
local_latest_checkpointed_iteration = os.path.join(
os.path.dirname(os.path.dirname(local_path)), "latest_checkpointed_iteration.txt"
)
with open(local_latest_checkpointed_iteration, "w") as f:
f.write(str(global_step))
self.register_checkpoint(local_path, max_ckpt_to_keep)
if self.checkpoint_config.async_save:
assert async_save_request is not None, "Async save request should not be None when using async save."
async_save_request.add_finalize_fn(finalize_save_fn)
from megatron.core.dist_checkpointing.strategies.base import async_calls
async_calls.schedule_async_request(async_save_request)
else:
finalize_save_fn()
|
verl__utils__dataset__dataset_utils.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
from typing import Any
import torch
from tensordict.tensorclass import NonTensorData
class DatasetPadMode(str, Enum):
"""Padding mode for dataset"""
RIGHT = "right"
LEFT_RIGHT = "left_right"
NO_PADDING = "no_padding"
class SFTTensorCollator:
"""
A custom collate_fn that handles batching of sequences.
1. for variable-length sequences, convert them into NestedTensors.
2. for fixed-length sequences, use default_collate.
"""
def __init__(self, pad_mode: DatasetPadMode = DatasetPadMode.LEFT_RIGHT):
self.pad_mode = pad_mode
def __call__(self, batch: list[dict[str, Any]]) -> dict[str, Any]:
if self.pad_mode == DatasetPadMode.NO_PADDING:
return self.collate_variable_batch(batch)
elif self.pad_mode in [DatasetPadMode.RIGHT, DatasetPadMode.LEFT_RIGHT]:
from torch.utils.data import default_collate
return default_collate(batch)
else:
raise NotImplementedError(f"pad_mode {self.pad_mode} not implemented")
def collate_variable_batch(self, batch: list[dict[str, Any]]) -> dict[str, Any]:
"""
Collates a list of samples into a single batch.
Args:
batch: A list of dictionary samples from the dataset.
Returns:
A dictionary representing the batched data, with variable-length
sequences converted to NestedTensors.
"""
final_batch = {}
tensor_keys = set().union(*(d.keys() for d in batch))
# Handle tensor values by creating a NestedTensor.
for key in tensor_keys:
if isinstance(batch[0][key], torch.Tensor):
tensors = [item[key] for item in batch]
final_batch[key] = torch.nested.as_nested_tensor(tensors, layout=torch.jagged)
else:
tensors = [NonTensorData(item.get(key)) for item in batch]
final_batch[key] = torch.stack(tensors, dim=0)
return final_batch
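if __name__ == "__main__":
    # Minimal usage sketch of SFTTensorCollator in NO_PADDING mode: two toy
    # samples of different lengths are collated into jagged NestedTensors.
    # The values below are illustrative only.
    collator = SFTTensorCollator(pad_mode=DatasetPadMode.NO_PADDING)
    batch = [
        {"input_ids": torch.tensor([1, 2, 3]), "loss_mask": torch.tensor([0, 1, 1])},
        {"input_ids": torch.tensor([4, 5]), "loss_mask": torch.tensor([1, 1])},
    ]
    out = collator(batch)
    print(out["input_ids"].is_nested)  # True: per-sample lengths 3 and 2 are preserved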
|
verl__utils__dataset__multiturn_sft_dataset.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2025 ModelBest Inc. and/or its affiliates
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Multi-turn SFT dataset that supports training on conversation data with multiple turns
"""
import logging
import os
import re
from functools import wraps
from typing import Any, Optional
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from omegaconf import DictConfig, ListConfig
from torch.utils.data import Dataset
from transformers import PreTrainedTokenizer, ProcessorMixin
from verl.models.transformers.qwen2_vl import get_rope_index
from verl.utils import hf_tokenizer
from verl.utils.chat_template import extract_system_prompt_and_generation
from verl.utils.dataset.dataset_utils import DatasetPadMode
from verl.utils.dataset.vision_utils import process_image, process_video
from verl.utils.fs import copy_local_path_from_hdfs
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
def once(func):
"""Decorator to ensure a function runs only once. Subsequent calls do nothing."""
@wraps(func)
def wrapper(*args, **kwargs):
if not hasattr(wrapper, "called"):
wrapper.called = True
return func(*args, **kwargs)
return wrapper
@once
def print_assembled_message(tokenizer, message_list, input_ids, loss_mask, attn_mask, tools):
"""
Print the message after applying the chat template
"""
tokenized = tokenizer.apply_chat_template(message_list, add_generation_prompt=False, tokenize=False, tools=tools)
sep = "\n\n"
str = f"tokenized entire message:\n{tokenized}"
str += sep
str += f"tokenized seperately :\n{tokenizer.decode(input_ids)}"
logger.debug(str)
def convert_nested_value_to_list_recursive(data_item):
if isinstance(data_item, dict):
return {k: convert_nested_value_to_list_recursive(v) for k, v in data_item.items()}
elif isinstance(data_item, list):
return [convert_nested_value_to_list_recursive(elem) for elem in data_item]
elif isinstance(data_item, np.ndarray):
# Convert to list, then recursively process the elements of the new list
return convert_nested_value_to_list_recursive(data_item.tolist())
else:
# Base case: item is already a primitive type (int, str, float, bool, etc.)
return data_item
class MultiTurnSFTDataset(Dataset):
"""
Dataset for multi-turn conversations where each assistant response should be trained
Args:
parquet_files (str or list): Path(s) to Parquet file(s).
tokenizer (PreTrainedTokenizer): For the tokenization of text to token IDs.
config (DictConfig): Options like cache_dir, prompt_key, max_prompt_length, truncation, etc.
processor (ProcessorMixin, optional): Multimodal preprocessor for images/videos.
max_samples (int, optional): Limit the number of samples. Defaults to -1 (use all).
"""
def __init__(
self,
parquet_files: str | list[str],
tokenizer: PreTrainedTokenizer,
config: DictConfig,
processor: Optional[ProcessorMixin] = None,
max_samples: int = -1,
):
# Set defaults and extract parameters from config if provided
config = config or {}
self.pad_mode = config.get("pad_mode", "right")
assert self.pad_mode in ["right", "no_padding"], (
f"Expect pad_mode to be 'right' or 'no_padding'. Got {self.pad_mode}"
)
self.truncation = config.get("truncation", "error")
# for right padding
self.max_length = config.get("max_length", 1024)
# Get messages_key from the new multiturn config structure
self.messages_key = config.get("messages_key", "messages")
self.image_key = config.get("image_key", "images")
self.video_key = config.get("video_key", "videos")
self.image_patch_size = config.get(
"image_patch_size", processor.image_processor.patch_size if processor else None
)
self.tools_key = config.get("tools_key", "tools")
self.enable_thinking_key = config.get("enable_thinking_key", "enable_thinking")
self.enable_thinking_default = config.get("enable_thinking_default", None)
self.apply_chat_template_kwargs = config.get("apply_chat_template_kwargs", {})
self.shuffle = config.get("shuffle", False)
self.seed = config.get("seed")
self.max_samples = max_samples
self.ignore_input_ids_mismatch = config.get("ignore_input_ids_mismatch", False)
assert self.truncation in ["error", "left", "right"]
if not isinstance(parquet_files, list | ListConfig):
parquet_files = [parquet_files]
self.parquet_files = parquet_files
if isinstance(tokenizer, str):
tokenizer = hf_tokenizer(tokenizer)
self.tokenizer: PreTrainedTokenizer = tokenizer
self.processor = processor
self._download()
self._read_files_and_process()
def _download(self):
for i, parquet_file in enumerate(self.parquet_files):
self.parquet_files[i] = copy_local_path_from_hdfs(parquet_file, verbose=True)
def _read_files_and_process(self):
def series_to_item(ls):
import numpy
import pandas
while isinstance(ls, pandas.core.series.Series | numpy.ndarray) and len(ls) == 1:
ls = ls[0]
return ls
dataframes = []
for parquet_file in self.parquet_files:
# default loader loads some list as np.ndarray, which fails the tokenizer
dataframe = pd.read_parquet(parquet_file, dtype_backend="pyarrow")
dataframes.append(dataframe)
self.dataframe = pd.concat(dataframes)
total = len(self.dataframe)
print(f"dataset len: {len(self.dataframe)}")
if self.max_samples > 0 and self.max_samples < total:
if self.shuffle:
rngs_args = (self.seed,) if self.seed is not None else ()
rng = np.random.default_rng(*rngs_args)
indices = rng.choice(total, size=self.max_samples, replace=False)
else:
indices = np.arange(self.max_samples)
self.dataframe = self.dataframe.iloc[indices.tolist()]
print(f"selected {self.max_samples} random samples out of {total}")
# Extract messages list from dataframe
self.messages = self.dataframe[self.messages_key].apply(convert_nested_value_to_list_recursive).tolist()
# Extract tools list from dataframe
if self.tools_key in self.dataframe.columns:
self.tools = self.dataframe[self.tools_key].apply(convert_nested_value_to_list_recursive).tolist()
else:
self.tools = None
# Extract enable_thinking list from dataframe
if self.enable_thinking_key in self.dataframe.columns:
self.enable_thinking = self.dataframe[self.enable_thinking_key].tolist()
else:
self.enable_thinking = None
# system prompt: <|im_start|>system\nYou are a helpful assistant.<|im_end|>\n
# generation prompt: <|im_start|>assistant\n
self.system_prompt, self.generation_prompt = extract_system_prompt_and_generation(self.tokenizer)
def __len__(self):
return len(self.messages)
def _process_single_message(
self,
index: int,
message: dict[str, Any],
full_message: list,
tools: Optional[list[dict[str, Any]]] = None,
enable_thinking: Optional[bool] = None,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, dict[str, Any]]:
"""
Process a single message and return its tokenized representation.
Args:
index: turn index in the conversation
message: A single message dictionary
full_message: The full list of messages in the conversation
tools: List of tools to be used
enable_thinking: Whether to enable thinking mode
Returns:
Tuple of (input_ids, loss_mask, attention_mask, dict[str, torch.Tensor])
"""
processor = self.processor if self.processor is not None else self.tokenizer
apply_chat_template_kwargs = {**self.apply_chat_template_kwargs}
if enable_thinking is not None:
apply_chat_template_kwargs["enable_thinking"] = enable_thinking
inputs = processor.apply_chat_template(
[message],
tools=tools,
add_generation_prompt=False,
tokenize=True,
return_dict=True,
return_tensors="pt",
**apply_chat_template_kwargs,
)
inputs = dict(inputs)
input_ids = inputs.pop("input_ids")[0]
attention_mask = inputs.pop("attention_mask")[0]
# strip the default system prompt that apply_chat_template prepends to each single non-system turn (kept only for the first turn)
if index != 0 and message["role"] != "system":
input_ids = input_ids[len(self.system_prompt) :]
attention_mask = attention_mask[len(self.system_prompt) :]
if message["role"] == "assistant":
loss_mask = torch.ones_like(attention_mask)
# mask out generation prompt if assistant message
loss_mask[: len(self.generation_prompt)] = 0
else:
loss_mask = torch.zeros_like(attention_mask)
return input_ids, loss_mask, attention_mask, inputs
def _build_messages(self, example: dict):
"""Replace <image> and <video> placeholder in messages with corresponding image and video
which is required by processor.apply_chat_template.
- <image>: {"type": "image", "image": image}
- <video>: {"type": "video", "video": video}
Args:
example: Row dictionary from dataframe.
Returns:
messages: List of messages with replaced placeholder.
"""
messages: list = example[self.messages_key]
images = example[self.image_key] if self.image_key in example else []
videos = example[self.video_key] if self.video_key in example else []
image_offset, video_offset = 0, 0
for message in messages:
if self.image_key not in example and self.video_key not in example:
continue
assert self.processor is not None, "processor is needed to process image and video"
content = message["content"]
if not isinstance(content, str):
continue
content_list = []
segments = re.split("(<image>|<video>)", content)
segments = [item for item in segments if item != ""]
for segment in segments:
if segment == "<image>":
image = process_image(images[image_offset], image_patch_size=self.image_patch_size)
content_list.append({"type": "image", "image": image})
image_offset += 1
elif segment == "<video>":
video = process_video(videos[video_offset], image_patch_size=self.image_patch_size)
content_list.append({"type": "video", "video": video})
video_offset += 1
else:
content_list.append({"type": "text", "text": segment})
message["content"] = content_list
assert image_offset == len(images), f"image_offset {image_offset} != len(images) {len(images)}"
assert video_offset == len(videos), f"video_offset {video_offset} != len(videos) {len(videos)}"
return messages
def __getitem__(self, item):
row_dict: dict = self.dataframe.iloc[item].to_dict()
messages = self._build_messages(row_dict)
tools = self.tools[item] if self.tools is not None else None
enable_thinking = (
self.enable_thinking[item] if self.enable_thinking is not None else self.enable_thinking_default
)
# 1. tokenize each message
input_ids, loss_mask, attention_mask, multi_modal_inputs = [], [], [], {}
for i, message in enumerate(messages):
_input_ids, _loss_mask, _attention_mask, _inputs = self._process_single_message(
index=i,
message=message,
full_message=messages,
tools=tools if i == 0 else None,
enable_thinking=enable_thinking,
)
input_ids.append(_input_ids)
loss_mask.append(_loss_mask)
attention_mask.append(_attention_mask)
for k, v in _inputs.items():
multi_modal_inputs.setdefault(k, []).append(v)
input_ids = torch.cat(input_ids, dim=0)
loss_mask = torch.cat(loss_mask, dim=0)
attention_mask = torch.cat(attention_mask, dim=0)
assert input_ids.shape == loss_mask.shape == attention_mask.shape, (
f"Shape mismatch: {input_ids.shape}, {loss_mask.shape}, {attention_mask.shape}"
)
print_assembled_message(self.tokenizer, messages, input_ids, loss_mask, attention_mask, tools)
self.sanity_check(input_ids, messages, tools, enable_thinking)
# Since the tokenizer may return user-customized results, we need to filter out inconsistent tensor shapes
keys_to_remove = []
for k, v in multi_modal_inputs.items():
if len(v) > 0 and v[0] is not None and isinstance(v[0], torch.Tensor):
# Check if all tensors in the list have the same shape
first_shape = v[0].shape[1:]
if not all(tensor.shape[1:] == first_shape for tensor in v):
keys_to_remove.append(k)
for k in keys_to_remove:
del multi_modal_inputs[k]
for k, v in multi_modal_inputs.items():
multi_modal_inputs[k] = torch.concat(v, dim=0)
# 2. handle position_ids for Qwen-VL series models
if self.processor is not None and "Qwen2VLImageProcessor" in self.processor.image_processor.__class__.__name__:
image_grid_thw = multi_modal_inputs.get("image_grid_thw", None)
video_grid_thw = multi_modal_inputs.get("video_grid_thw", None)
second_per_grid_ts = multi_modal_inputs.get("second_per_grid_ts", None)
vision_position_ids = get_rope_index(
self.processor,
input_ids=input_ids,
image_grid_thw=image_grid_thw,
video_grid_thw=video_grid_thw,
second_per_grid_ts=second_per_grid_ts,
attention_mask=attention_mask,
) # (3, seq_len)
text_position_ids = torch.arange(input_ids.shape[0], dtype=torch.long).unsqueeze(0) # (1, seq_len)
position_ids = torch.cat((text_position_ids, vision_position_ids), dim=0) # (4, seq_length)
else:
position_ids = torch.arange(input_ids.shape[0], dtype=torch.long) # (seq_len,)
# 3. handle padding
sequence_length = input_ids.shape[0]
# Handle sequence length
if self.pad_mode == DatasetPadMode.RIGHT:
if sequence_length < self.max_length:
# Pad sequences
pad_token_id = self.tokenizer.pad_token_id if self.tokenizer.pad_token_id is not None else 0
padded_input_ids = torch.full((self.max_length - sequence_length,), pad_token_id, dtype=input_ids.dtype)
padded_attention_mask = torch.zeros((self.max_length - sequence_length,), dtype=attention_mask.dtype)
padded_loss_mask = torch.zeros((self.max_length - sequence_length,), dtype=loss_mask.dtype)
input_ids = torch.cat((input_ids, padded_input_ids))
attention_mask = torch.cat((attention_mask, padded_attention_mask))
loss_mask = torch.cat((loss_mask, padded_loss_mask))
position_ids = F.pad(position_ids, (0, self.max_length - sequence_length), value=0)
elif sequence_length > self.max_length:
if self.truncation == "left":
input_ids = input_ids[-self.max_length :]
attention_mask = attention_mask[-self.max_length :]
loss_mask = loss_mask[-self.max_length :]
position_ids = position_ids[..., -self.max_length :]
elif self.truncation == "right":
input_ids = input_ids[: self.max_length]
attention_mask = attention_mask[: self.max_length]
loss_mask = loss_mask[: self.max_length]
position_ids = position_ids[..., : self.max_length]
elif self.truncation == "error":
raise ValueError(f"{sequence_length=} is larger than {self.max_length=}")
else:
raise ValueError(f"Unknown truncation method {self.truncation}")
res = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"position_ids": position_ids,
"loss_mask": loss_mask,
}
if len(multi_modal_inputs) > 0:
res["multi_modal_inputs"] = multi_modal_inputs
return res
elif self.pad_mode == DatasetPadMode.NO_PADDING:
# truncate input_ids if it is longer than max_length
if len(input_ids) > self.max_length:
input_ids = input_ids[: self.max_length]
loss_mask = loss_mask[: self.max_length]
position_ids = position_ids[..., : self.max_length]
# return unpadded tensors; the collate_fn converts them into nested tensors
res = {
"input_ids": input_ids,
"position_ids": position_ids,
"loss_mask": loss_mask,
}
if len(multi_modal_inputs) > 0:
res["multi_modal_inputs"] = multi_modal_inputs
return res
else:
raise ValueError(f"Unknown pad mode {self.pad_mode}")
def sanity_check(self, input_ids: torch.Tensor, messages: list[dict], tools: list[dict], enable_thinking: bool):
"""Check concatenated input_ids of apply_chat_template to each turn equals
apply_chat_template to whole messages.
"""
processor = self.processor if self.processor is not None else self.tokenizer
apply_chat_template_kwargs = {**self.apply_chat_template_kwargs}
if enable_thinking is not None:
apply_chat_template_kwargs["enable_thinking"] = enable_thinking
inputs = processor.apply_chat_template(
messages,
tools=tools,
add_generation_prompt=False,
tokenize=True,
return_dict=True,
return_tensors="pt",
**apply_chat_template_kwargs,
)
error_message = (
"MultiTurnSFTDataset apply_chat_template to each turn separately and concat `input_ids` "
"as a whole sequence, which may not equal to apply_chat_template to whole messages at once.\n"
"For example, Qwen Thinking series models add <think></think> tags to last turn, please check "
"your tokenizer chat template settings.\n"
"Set `ignore_input_ids_mismatch=True` to ignore input_ids mismatch and use the concatenated "
"input_ids as the final input_ids. "
)
if not torch.equal(input_ids, inputs["input_ids"].squeeze(0)):
if self.ignore_input_ids_mismatch:
logger.warning(error_message)
else:
raise AssertionError(error_message)
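if __name__ == "__main__":
    # Minimal illustrative sketch of convert_nested_value_to_list_recursive:
    # parquet rows often carry nested numpy arrays, which must become plain
    # lists before tokenization. The toy row below is made up.
    row = {"messages": np.array([{"role": "user", "content": "hi"}], dtype=object)}
    print(convert_nested_value_to_list_recursive(row))
    # {'messages': [{'role': 'user', 'content': 'hi'}]}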
|
verl__utils__dataset__rl_dataset.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023-2024 SGLang Team
# Copyright 2025 ModelBest Inc. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import logging
import os
import re
import traceback
from collections import defaultdict
from io import BytesIO
from typing import Optional
import datasets
import numpy as np
import torch
from omegaconf import DictConfig, ListConfig
from PIL import Image
from torch.utils.data import Dataset
from transformers import PreTrainedTokenizer, ProcessorMixin
from verl.utils.import_utils import load_extern_object
logger = logging.getLogger(__name__)
def collate_fn(data_list: list[dict]) -> dict:
"""
Collate a batch of sample dicts into batched tensors and arrays.
Args:
data_list: List of dicts mapping feature names to torch.Tensor or other values.
Returns:
Dict where tensor entries are stacked into a torch.Tensor of shape
(batch_size, \\*dims) and non-tensor entries are converted to
np.ndarray of dtype object with shape (batch_size,).
"""
tensors = defaultdict(list)
non_tensors = defaultdict(list)
for data in data_list:
for key, val in data.items():
if isinstance(val, torch.Tensor):
tensors[key].append(val)
else:
non_tensors[key].append(val)
for key, val in tensors.items():
tensors[key] = torch.stack(val, dim=0)
for key, val in non_tensors.items():
non_tensors[key] = np.fromiter(val, dtype=object, count=len(val))
return {**tensors, **non_tensors}
class RLHFDataset(Dataset):
"""
Load and preprocess RLHF data from Parquet files.
- Caches files locally.
- Reads into a HuggingFace Dataset and tokenizes prompts.
- Optionally handles images/videos via a ProcessorMixin.
- Filters prompts over a max length.
- Supports resuming from checkpoints.
Args:
data_files (str or list): Path(s) to Parquet file(s).
tokenizer (PreTrainedTokenizer): For the tokenization of text to token IDs.
config (DictConfig): Options like cache_dir, prompt_key, max_prompt_length, truncation, etc.
processor (ProcessorMixin, optional): Multimodal preprocessor for images/videos.
"""
def __init__(
self,
data_files: str | list[str],
tokenizer: PreTrainedTokenizer,
config: DictConfig,
processor: Optional[ProcessorMixin] = None,
max_samples: int = -1,
):
if not isinstance(data_files, list | ListConfig):
data_files = [data_files]
self.data_files = copy.deepcopy(data_files)
self.original_data_files = copy.deepcopy(data_files) # use for resume
self.tokenizer = tokenizer
self.processor = processor
self.max_samples = max_samples
self.config = config
self.cache_dir = os.path.expanduser(config.get("cache_dir", "~/.cache/verl/rlhf"))
self.prompt_key = config.get("prompt_key", "prompt")
self.image_key = config.get("image_key", "images")
self.video_key = config.get("video_key", "videos")
self.image_patch_size = config.get("image_patch_size", 14)
self.max_prompt_length = config.get("max_prompt_length", 1024)
self.return_raw_chat = config.get("return_raw_chat", False)
self.return_full_prompt = config.get("return_full_prompt", False)
self.truncation = config.get("truncation", "error")
self.filter_overlong_prompts = config.get("filter_overlong_prompts", True)
self.apply_chat_template_kwargs = config.get("apply_chat_template_kwargs", {})
self.tool_config_path = config.get("tool_config_path", None)
self.tool_schemas = None
if self.tool_config_path:
try:
from verl.tools.utils.tool_registry import initialize_tools_from_config
tool_list = initialize_tools_from_config(self.tool_config_path)
# match ToolAgentLoop behaviour: model_dump to plain dicts
self.tool_schemas = [
tool.tool_schema.model_dump(exclude_unset=True, exclude_none=True) for tool in tool_list
]
except Exception as e:
logger.warning("Failed to initialize tools from %s: %s", self.tool_config_path, e)
self.tool_schemas = None
self.num_workers = config.get("filter_overlong_prompts_workers", max(1, os.cpu_count() // 4))
self.num_workers = min(self.num_workers, os.cpu_count()) if self.num_workers is not None else None
self.use_shm = config.get("use_shm", False)
self.chat_template_func = config.get("chat_template_func", None)
self.need_tools_kwargs = config.get("need_tools_kwargs", False)
self.filter_prompts = config.get("filter_prompts", True)
self.serialize_dataset = False
self.return_multi_modal_inputs = config.get("return_multi_modal_inputs", True)
self.shuffle = config.get("shuffle", False)
self.seed = config.get("seed")
self._download()
self._read_files_and_tokenize()
def _download(self, use_origin_parquet=False):
from verl.utils.fs import copy_to_local
data_files = self.data_files if not use_origin_parquet else self.original_data_files
for i, parquet_file in enumerate(data_files):
self.data_files[i] = copy_to_local(src=parquet_file, cache_dir=self.cache_dir, use_shm=self.use_shm)
def _read_files_and_tokenize(self):
dataframes = []
for parquet_file in self.data_files:
# read files and cache
if parquet_file.endswith(".parquet"):
dataframe = datasets.load_dataset("parquet", data_files=parquet_file)["train"]
elif parquet_file.endswith(".json"):
dataframe = datasets.load_dataset("json", data_files=parquet_file)["train"]
else:
raise ValueError(f"Unsupported file format: {parquet_file}")
dataframes.append(dataframe)
self.dataframe: datasets.Dataset = datasets.concatenate_datasets(dataframes)
total = len(self.dataframe)
print(f"dataset len: {len(self.dataframe)}")
if self.max_samples > 0 and self.max_samples < total:
if self.shuffle:
rngs_args = (self.seed,) if self.seed is not None else ()
rng = np.random.default_rng(*rngs_args)
indices = rng.choice(total, size=self.max_samples, replace=False)
else:
indices = np.arange(self.max_samples)
self.dataframe = self.dataframe.select(indices.tolist())
print(f"selected {self.max_samples} random samples out of {total}")
self.dataframe = self.maybe_filter_out_long_prompts(self.dataframe)
def maybe_filter_out_long_prompts(self, dataframe: datasets.Dataset = None):
# filter out too long prompts
if self.filter_overlong_prompts:
tokenizer = self.tokenizer
processor = self.processor
prompt_key = self.prompt_key
image_key = self.image_key
video_key = self.video_key
if processor is not None:
from verl.utils.dataset.vision_utils import process_image, process_video
def doc2len(doc) -> int:
try:
messages = self._build_messages(doc)
# pass tool schemas if available so the processor can format prompts
apply_kwargs = dict(**self.apply_chat_template_kwargs)
if self.tool_schemas is not None:
apply_kwargs["tools"] = self.tool_schemas
raw_prompt = self.processor.apply_chat_template(
messages, add_generation_prompt=True, tokenize=False, **apply_kwargs
)
if image_key in doc and doc[image_key]:
images = [
process_image(image, image_patch_size=self.image_patch_size) for image in doc[image_key]
]
else:
images = None
if video_key in doc and doc[video_key]:
videos, video_metadata = zip(
*[
process_video(
video, image_patch_size=self.image_patch_size, return_video_metadata=True
)
for video in doc[video_key]
],
strict=True,
)
videos = list(videos)
video_metadata = list(video_metadata)
videos_kwargs = {"video_metadata": video_metadata, "do_sample_frames": False}
else:
videos = None
videos_kwargs = {}
return len(
processor(text=[raw_prompt], images=images, videos=videos, videos_kwargs=videos_kwargs)[
"input_ids"
][0]
)
except Exception:
print("Error processing one of the samples, skipping...")
traceback.print_exc()
return self.max_prompt_length + 1
else:
def doc2len(doc) -> int:
try:
apply_kwargs = dict(**self.apply_chat_template_kwargs)
if self.tool_schemas is not None:
apply_kwargs["tools"] = self.tool_schemas
return len(
tokenizer.apply_chat_template(doc[prompt_key], add_generation_prompt=True, **apply_kwargs)
)
except Exception:
print("Error processing one of the samples, skipping...")
traceback.print_exc()
return self.max_prompt_length + 1
dataframe = dataframe.filter(
lambda doc: doc2len(doc) <= self.max_prompt_length,
num_proc=self.num_workers,
desc=f"Filtering prompts longer than {self.max_prompt_length} tokens",
)
print(f"filter dataset len: {len(dataframe)}")
return dataframe
def resume_dataset_state(self):
self.serialize_dataset = not hasattr(self, "original_data_files")
# resume the dataframe if it's not serialized in data.pt
if not self.serialize_dataset:
self._download(use_origin_parquet=True) # download and resume from original parquet files
self._read_files_and_tokenize()
else:
print(r"old dataloader ckpt file is used, please train from scratch for better ckpt performance")
def __getstate__(self):
if not self.serialize_dataset:
state = self.__dict__.copy()
if "dataframe" in state:
del state["dataframe"]
return state
return self.__dict__.copy()
def __len__(self):
return len(self.dataframe)
def _build_messages(self, example: dict):
"""Replace <image> and <video> placeholder in messages with corresponding image and video
which is required by processor.apply_chat_template.
- <image>: {"type": "image", **image}
- <video>: {"type": "video", **video}
Args:
example: Row dictionary from dataframe.
Returns:
messages: List of messages with replaced placeholder.
"""
messages: list = example[self.prompt_key]
# When concatenating image and video datasets, pop will return None for image or video sample
images = example.pop(self.image_key, None) or []
videos = example.pop(self.video_key, None) or []
image_offset, video_offset = 0, 0
for message in messages:
if not images and not videos:
continue
assert self.processor is not None, "processor is needed to process image and video"
content = message["content"]
if not isinstance(content, str):
continue
content_list = []
segments = re.split("(<image>|<video>)", content)
segments = [item for item in segments if item != ""]
for segment in segments:
if segment == "<image>":
assert image_offset < len(images), f"image_offset {image_offset} >= len(images) {len(images)}"
image = images[image_offset]
if isinstance(image, Image.Image):
image = image.convert("RGB")
content_list.append({"type": "image", "image": image})
elif isinstance(image, dict):
if "bytes" in image:
image["image"] = Image.open(BytesIO(image["bytes"]))
content_list.append({"type": "image", **image})
else:
raise TypeError(f"image must be dict or PIL.Image, unsupported image type: {type(image)}")
image_offset += 1
elif segment == "<video>":
assert video_offset < len(videos), f"video_offset {video_offset} >= len(videos) {len(videos)}"
content_list.append({"type": "video", **videos[video_offset]})
video_offset += 1
else:
content_list.append({"type": "text", "text": segment})
message["content"] = content_list
assert image_offset == len(images), f"image_offset {image_offset} != len(images) {len(images)}"
assert video_offset == len(videos), f"video_offset {video_offset} != len(videos) {len(videos)}"
return messages
def __getitem__(self, item):
"""For rollout, apply_chat_template has been moved to AgentLoop, so we only return raw_prompt here."""
row_dict: dict = self.dataframe[item]
row_dict["raw_prompt"] = self._build_messages(row_dict)
# TODO(wuxibin): We still need a dummy tensor to make sure DataProto.batch is not empty.
# Remove this after deprecate DataProto by TensorDict.
row_dict["dummy_tensor"] = torch.tensor([0], dtype=torch.uint8)
# add index for each prompt
if "extra_info" not in row_dict or row_dict["extra_info"] is None:
row_dict["extra_info"] = dict()
index = row_dict.get("extra_info", {}).get("index", 0)
tools_kwargs = row_dict.get("extra_info", {}).get("tools_kwargs", {})
interaction_kwargs = row_dict.get("extra_info", {}).get("interaction_kwargs", {})
need_tools_kwargs = row_dict.get("extra_info", {}).get("need_tools_kwargs", self.need_tools_kwargs)
if need_tools_kwargs and not tools_kwargs:
logger.warning("tools_kwargs is empty for index {}, data source: {}", index, row_dict["data_source"])
row_dict["index"] = index
row_dict["tools_kwargs"] = tools_kwargs
row_dict["interaction_kwargs"] = interaction_kwargs
return row_dict
@classmethod
async def process_vision_info(
cls,
messages: list[dict],
image_patch_size,
config: DictConfig,
) -> tuple[list[Image.Image], list[tuple[torch.Tensor, dict]]]:
"""Extract images and videos from messages.
This method is called by AgentLoop (e.g SingleTurnAgentLoop) before apply_chat_template to
the `raw_prompt` from dataset. User may customize RLHFDataset and override this method to
support custom vision extraction.
>>> messages = kwargs["raw_prompt"]
>>> images, videos = await RLHFDataset.process_vision_info(messages, image_patch_size, config)
>>> videos, video_metadatas = zip(*videos)
>>> raw_prompt = processor.apply_chat_template(messages, tokenize=False)
>>> inputs = processor(text=[raw_prompt], images=images, videos=videos,
... video_metadata=video_metadatas, do_sample_frames=False)
Args:
messages: List of messages from dataset `raw_prompt`.
image_patch_size: Image patch size for processor.
config: Config for dataset.
Returns:
images: List of images.
videos: List of videos, each video is a tuple of (video_tensor, video_metadata).
"""
from qwen_vl_utils import process_vision_info
images, videos = process_vision_info(messages, image_patch_size=image_patch_size, return_video_metadata=True)
return images, videos
def split(self, num_splits: int):
"""
split the dataset into num_splits sub-datasets
Args:
num_splits: specified number of splits
Returns:
List[RLHFDataset]: list of RLHFDataset splits
Raises:
ValueError: if num_splits is not a positive integer
"""
if not isinstance(num_splits, int) or num_splits <= 0:
raise ValueError(f"num_splits must be a positive integer, got {num_splits}")
if not hasattr(self, "dataframe"):
raise AttributeError(
"dataframe not found in RLHFDataset\n"
"reason: _read_files_and_tokenize() not called or Parquet file loading failed"
)
if self.dataframe is None:
raise ValueError("RLHFDataset dataframe 为 None!")
total_samples = len(self.dataframe)
print(f"total_samples: {total_samples}")
if total_samples == 0:
raise ValueError("Cannot split an empty dataset")
if total_samples % num_splits != 0:
raise ValueError(f"Cannot split dataset size {total_samples} into {num_splits} splits")
split_size = total_samples // num_splits
splits = []
for i in range(num_splits):
start_idx = i * split_size
end_idx = (i + 1) * split_size if i < num_splits - 1 else total_samples
split_dataframe = self.dataframe.select(range(start_idx, end_idx))
split_dataset = RLHFDataset(
data_files=self.data_files,
tokenizer=self.tokenizer,
config=self.config,
processor=self.processor,
max_samples=self.max_samples,
)
split_dataset.dataframe = split_dataframe
split_dataset.serialize_dataset = self.serialize_dataset
split_dataset.original_data_files = self.original_data_files
splits.append(split_dataset)
return splits
def get_dataset_class(data_config: DictConfig):
"""Get RLHF dataset class.
Args:
data_config: The data config.
Returns:
dataset_cls: The dataset class.
"""
# Check if a custom dataset class is specified in the data configuration
# and if the path to the custom class is provided
if "custom_cls" in data_config and data_config.custom_cls.get("path", None) is not None:
# Dynamically load the custom dataset class
dataset_cls = load_extern_object(data_config.custom_cls.path, data_config.custom_cls.name)
# Verify that the custom dataset class inherits from torch.utils.data.Dataset
if not issubclass(dataset_cls, Dataset):
raise TypeError(
f"The custom dataset class '{data_config.custom_cls.name}' from "
f"'{data_config.custom_cls.path}' must inherit from torch.utils.data.Dataset"
)
else:
# Use the default RLHFDataset class if no custom class is specified
dataset_cls = RLHFDataset
print(f"Using dataset class: {dataset_cls.__name__}")
return dataset_cls
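if __name__ == "__main__":
    # Minimal illustrative sketch of collate_fn: tensor entries are stacked
    # along a new batch dimension, while non-tensor entries become object
    # ndarrays of shape (batch_size,). The samples below are toy values.
    samples = [
        {"dummy_tensor": torch.zeros(1, dtype=torch.uint8), "raw_prompt": [{"role": "user", "content": "a"}]},
        {"dummy_tensor": torch.zeros(1, dtype=torch.uint8), "raw_prompt": [{"role": "user", "content": "b"}]},
    ]
    batch = collate_fn(samples)
    print(batch["dummy_tensor"].shape)  # torch.Size([2, 1])
    print(batch["raw_prompt"].dtype, batch["raw_prompt"].shape)  # object (2,)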
|
verl__utils__dataset__rm_dataset.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Optional
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset
from verl.utils import hf_tokenizer
def download_files_distributed(download_fn):
import torch.distributed
if torch.distributed.is_initialized():
if torch.distributed.get_rank() == 0:
# download files
download_fn()
torch.distributed.barrier()
else:
# download anyway
download_fn()
class RMDataset(Dataset):
def __init__(
self,
parquet_files: str | list[str],
tokenizer,
prompt_key="prompt",
chosen_key="chosen",
rejected_key="rejected",
max_length=1024,
add_eos=True,
cache_dir="~/.cache/verl/rm",
max_samples: int = -1,
shuffle: bool = False,
seed: Optional[int] = None,
):
if not isinstance(parquet_files, list):
parquet_files = [parquet_files]
self.parquet_files = parquet_files
self.max_samples = max_samples
self.shuffle = shuffle
self.seed = seed
self.cache_dir = os.path.expanduser(cache_dir)
if isinstance(tokenizer, str):
tokenizer = hf_tokenizer(tokenizer)
self.tokenizer = tokenizer
self.prompt_key = prompt_key
self.chosen_key = chosen_key
self.rejected_key = rejected_key
self.add_eos = add_eos
self.max_length = max_length
self._download()
self._read_files_and_tokenize()
def _download(self):
def _download_files():
from verl.utils.fs import copy, is_non_local
os.makedirs(self.cache_dir, exist_ok=True)
assert os.path.exists(self.cache_dir)
for i, parquet_file in enumerate(self.parquet_files):
if is_non_local(parquet_file):
dst = os.path.join(self.cache_dir, os.path.basename(parquet_file))
if not os.path.exists(dst):
copy(src=parquet_file, dst=dst)
self.parquet_files[i] = dst
download_files_distributed(_download_files)
def _read_files_and_tokenize(self):
dataframes = []
for parquet_file in self.parquet_files:
# read parquet files and cache
dataframe = pd.read_parquet(parquet_file)
dataframes.append(dataframe)
self.dataframe = pd.concat(dataframes)
total = len(self.dataframe)
print(f"dataset len: {len(self.dataframe)}")
if self.max_samples > 0 and self.max_samples < total:
if self.shuffle:
rngs_args = (self.seed,) if self.seed is not None else ()
rng = np.random.default_rng(*rngs_args)
indices = rng.choice(total, size=self.max_samples, replace=False)
else:
indices = np.arange(self.max_samples)
self.dataframe = self.dataframe.iloc[indices.tolist()]
print(f"selected {self.max_samples} random samples out of {total}")
self.prompts = self.dataframe[self.prompt_key].tolist()
self.chosen_responses = self.dataframe[self.chosen_key].tolist()
self.rejected_responses = self.dataframe[self.rejected_key].tolist()
def __len__(self):
return len(self.prompts)
def _pad_to_length(self, input_ids, attention_mask):
curr_length = input_ids.shape[-1]
if curr_length < self.max_length:
input_ids = torch.cat(
(input_ids, torch.zeros(size=(self.max_length - curr_length,), dtype=input_ids.dtype)), dim=-1
)
attention_mask = torch.cat(
(attention_mask, torch.zeros(size=(self.max_length - curr_length,), dtype=attention_mask.dtype)), dim=-1
)
elif curr_length > self.max_length:
input_ids = input_ids[: self.max_length]
attention_mask = attention_mask[: self.max_length]
return input_ids, attention_mask
def __getitem__(self, item):
prompt = self.prompts[item]
chosen_response = self.chosen_responses[item]
rejected_response = self.rejected_responses[item]
prompt_ids = self.tokenizer(prompt, return_tensors="pt")["input_ids"][0]
chosen_response_ids = self.tokenizer(chosen_response, return_tensors="pt")["input_ids"][0]
rejected_response_ids = self.tokenizer(rejected_response, return_tensors="pt")["input_ids"][0]
if self.add_eos:
chosen_response_ids = torch.cat((chosen_response_ids, torch.tensor([self.tokenizer.eos_token_id])), dim=-1)
rejected_response_ids = torch.cat(
(rejected_response_ids, torch.tensor([self.tokenizer.eos_token_id])), dim=-1
)
chosen_input_ids = torch.cat((prompt_ids, chosen_response_ids), dim=-1)
chosen_attention_mask = torch.ones_like(chosen_input_ids)
rejected_input_ids = torch.cat((prompt_ids, rejected_response_ids), dim=-1)
rejected_attention_mask = torch.ones_like(rejected_input_ids)
chosen_input_ids, chosen_attention_mask = self._pad_to_length(chosen_input_ids, chosen_attention_mask)
rejected_input_ids, rejected_attention_mask = self._pad_to_length(rejected_input_ids, rejected_attention_mask)
input_ids = torch.stack((chosen_input_ids, rejected_input_ids), dim=0)
attention_mask = torch.stack((chosen_attention_mask, rejected_attention_mask), dim=0)
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
}
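if __name__ == "__main__":
    # Minimal illustrative sketch of the padding/truncation helper. A bare
    # instance is created only to exercise _pad_to_length with a hypothetical
    # max_length; real usage goes through the parquet-backed constructor.
    ds = RMDataset.__new__(RMDataset)
    ds.max_length = 5
    ids, mask = ds._pad_to_length(torch.tensor([1, 2, 3]), torch.tensor([1, 1, 1]))
    print(ids.tolist(), mask.tolist())  # [1, 2, 3, 0, 0] [1, 1, 1, 0, 0]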
|
verl__utils__dataset__sft_dataset.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
SFT dataset
- We assume the user passes a single parquet file (or a list of parquet files).
- We load all the data into memory.
Each parquet file contains prompt and response columns, selected via `prompt_key` and `response_key`.
"""
import numpy as np
import pandas as pd
import torch
from omegaconf.listconfig import ListConfig
from torch.utils.data import Dataset
from transformers import PreTrainedTokenizer
from verl.utils import hf_tokenizer
from verl.utils.fs import copy_to_local
from verl.utils.model import compute_position_id_with_mask
class SFTDataset(Dataset):
"""
This is an in-memory SFTDataset
Arguments:
parquet_files (str or ListConfig): path(s) to the parquet file(s)
tokenizer (str or PreTrainedTokenizer): tokenizer instance, or a name/path used to build one
config (OmegaConf): the data config
max_samples (int): limit on the number of samples; -1 means use all
"""
def __init__(self, parquet_files: str | ListConfig, tokenizer, config, max_samples: int = -1):
prompt_key = config.get("prompt_key", "prompt")
prompt_dict_keys = config.get("prompt_dict_keys", None)
response_key = config.get("response_key", "response")
response_dict_keys = config.get("response_dict_keys", None)
max_length = config.get("max_length", 1024)
truncation = config.get("truncation", "error")
use_shm = config.get("use_shm", False)
self.shuffle = config.get("shuffle", False)
self.seed = config.get("seed")
self.apply_chat_template_kwargs = config.get("apply_chat_template_kwargs", {})
assert truncation in ["error", "left", "right"]
self.truncation = truncation
self.use_shm = use_shm
if not isinstance(parquet_files, ListConfig):
parquet_files = [parquet_files]
self.parquet_files = parquet_files
self.max_samples = max_samples
if isinstance(tokenizer, str):
tokenizer = hf_tokenizer(tokenizer)
self.tokenizer: PreTrainedTokenizer = tokenizer
self.prompt_key = prompt_key if isinstance(prompt_key, tuple | list) else [prompt_key]
self.response_key = response_key if isinstance(response_key, tuple | list) else [response_key]
self.prompt_dict_keys = prompt_dict_keys if prompt_dict_keys else []
self.response_dict_keys = response_dict_keys if response_dict_keys else []
self.max_length = max_length
self._download()
self._read_files_and_tokenize()
def _download(self):
for i, parquet_file in enumerate(self.parquet_files):
self.parquet_files[i] = copy_to_local(parquet_file, verbose=True, use_shm=self.use_shm)
def _read_files_and_tokenize(self):
def series_to_item(ls):
import numpy
import pandas
while isinstance(ls, pandas.core.series.Series | numpy.ndarray) and len(ls) == 1:
ls = ls[0]
return ls
dataframes = []
for parquet_file in self.parquet_files:
# read parquet files and cache
dataframe = pd.read_parquet(parquet_file)
dataframes.append(dataframe)
self.dataframe = pd.concat(dataframes)
total = len(self.dataframe)
print(f"dataset len: {len(self.dataframe)}")
if self.max_samples > 0 and self.max_samples < total:
if self.shuffle:
rngs_args = (self.seed,) if self.seed is not None else ()
rng = np.random.default_rng(*rngs_args)
indices = rng.choice(total, size=self.max_samples, replace=False)
else:
indices = np.arange(self.max_samples)
self.dataframe = self.dataframe.iloc[indices.tolist()]
print(f"selected {self.max_samples} random samples out of {total}")
self.prompts = self.dataframe[self.prompt_key]
for key in self.prompt_dict_keys:
# type(x): pandas.core.series.Series
# type(x[0]): numpy.ndarray
# type(x[0][0]): dict
try:
self.prompts = self.prompts.apply(lambda x: series_to_item(x)[key], axis=1) # noqa: B023
except Exception:
print(f"self.prompts={self.prompts}")
raise
if isinstance(self.prompts, pd.DataFrame):
self.prompts = self.prompts.squeeze()
self.prompts = self.prompts.tolist()
self.responses = self.dataframe[self.response_key]
for key in self.response_dict_keys:
try:
self.responses = self.responses.apply(lambda x: series_to_item(x)[key], axis=1) # noqa: B023
except Exception:
print(f"self.responses={self.responses}")
raise
if isinstance(self.responses, pd.DataFrame):
self.responses = self.responses.squeeze()
self.responses = self.responses.tolist()
def __len__(self):
return len(self.prompts)
def __getitem__(self, item):
tokenizer = self.tokenizer
prompt = self.prompts[item]
response = self.responses[item]
# apply chat template
prompt_chat = [{"role": "user", "content": prompt}]
# string
prompt_chat_str = tokenizer.apply_chat_template(
prompt_chat, add_generation_prompt=True, tokenize=False, **self.apply_chat_template_kwargs
)
response_chat_str = response + tokenizer.eos_token
# tokenize
prompt_ids_output = tokenizer(prompt_chat_str, return_tensors="pt", add_special_tokens=False)
prompt_ids = prompt_ids_output["input_ids"][0]
prompt_attention_mask = prompt_ids_output["attention_mask"][0]
response_ids_output = tokenizer(response_chat_str, return_tensors="pt", add_special_tokens=False)
response_ids = response_ids_output["input_ids"][0]
response_attention_mask = response_ids_output["attention_mask"][0]
prompt_length = prompt_ids.shape[0]
response_length = response_ids.shape[0]
input_ids = torch.cat((prompt_ids, response_ids), dim=-1)
attention_mask = torch.cat((prompt_attention_mask, response_attention_mask), dim=-1)
# padding to max length
sequence_length = input_ids.shape[0]
if sequence_length < self.max_length:
padded_input_ids = (
torch.ones(size=(self.max_length - sequence_length,), dtype=input_ids.dtype)
* self.tokenizer.pad_token_id
)
padded_attention_mask = torch.zeros(size=(self.max_length - sequence_length,), dtype=attention_mask.dtype)
input_ids = torch.cat((input_ids, padded_input_ids))
attention_mask = torch.cat((attention_mask, padded_attention_mask))
elif sequence_length > self.max_length:
if self.truncation == "left":
# actually, left truncation may not be reasonable
input_ids = input_ids[-self.max_length :]
attention_mask = attention_mask[-self.max_length :]
elif self.truncation == "right":
input_ids = input_ids[: self.max_length]
attention_mask = attention_mask[: self.max_length]
elif self.truncation == "error":
raise NotImplementedError(f"{sequence_length=} is larger than {self.max_length=}")
else:
raise NotImplementedError(f"Unknown truncation method {self.truncation}")
position_ids = compute_position_id_with_mask(attention_mask)
loss_mask = attention_mask.clone()
if prompt_length > 1:
# mask out prompt for SFT.
loss_mask[: min(prompt_length, loss_mask.size(0)) - 1] = 0
# mask out the last token in response
loss_mask[min(prompt_length + response_length, loss_mask.size(0)) - 1] = 0
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"position_ids": position_ids,
"loss_mask": loss_mask,
}
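if __name__ == "__main__":
    # Minimal illustrative sketch of how the SFT loss mask is derived from
    # prompt/response lengths, mirroring the logic in __getitem__ above.
    # The lengths below are toy values.
    prompt_length, response_length, max_length = 3, 4, 10
    attention_mask = torch.tensor([1] * (prompt_length + response_length) + [0] * (max_length - prompt_length - response_length))
    loss_mask = attention_mask.clone()
    loss_mask[: prompt_length - 1] = 0  # keep the last prompt token so it predicts the first response token
    loss_mask[prompt_length + response_length - 1] = 0  # the final response token has no next-token target
    print(loss_mask.tolist())  # [0, 0, 1, 1, 1, 1, 0, 0, 0, 0]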
|
verl__utils__dataset__vision_utils.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from io import BytesIO
from typing import Optional
import torch
from PIL import Image
def process_image(image: dict | Image.Image, image_patch_size: int = 14) -> Image.Image:
from qwen_vl_utils import fetch_image
if isinstance(image, Image.Image):
return image.convert("RGB")
if "bytes" in image:
assert "image" not in image, "Cannot have both `bytes` and `image`"
image["image"] = Image.open(BytesIO(image["bytes"]))
try:
ans = fetch_image(image, image_patch_size=image_patch_size)
except Exception:
ans = fetch_image(image)
return ans
VIDEO_FORMAT_HELP = """Currently, we only support the video formats introduced in qwen2-vl.
Refer to https://github.com/QwenLM/Qwen2.5-VL?tab=readme-ov-file#using---transformers-to-chat.
eg.
{
"type": "video",
"video": [
"file:///path/to/frame1.jpg",
"file:///path/to/frame2.jpg"
]
}
{
"type": "video",
"video": "file:///path/to/video.mp4"
}
# Defaults to fps=2, min_frames=4, max_frames=768
{
"type": "video",
"video": "file:///path/to/video.mp4",
"fps": 2,
"min_frames": 1,
"max_frames": 32
}
"""
def process_video(
video: dict,
image_patch_size: int = 14,
nframes: Optional[int] = None,
fps: Optional[float] = None,
fps_min_frames: Optional[int] = None,
fps_max_frames: Optional[int] = None,
return_video_sample_fps: bool = False,
return_video_metadata: bool = False,
) -> torch.Tensor:
"""Converts a video dict into a [n_frames, 3, H, W] tensor
Add video sample FPS in a future MR
"""
from qwen_vl_utils import fetch_video
if not isinstance(video, dict) or "video" not in video:
raise NotImplementedError(VIDEO_FORMAT_HELP)
assert nframes is None or fps is None, "Can't use both `nframes` and `fps`"
# Shallow copy... since we might want to add some keys
video = dict(video)
contains_sampling_rules = "nframes" in video or "fps" in video
if not contains_sampling_rules:
if nframes is not None:
video["nframes"] = nframes
elif fps is not None:
video["fps"] = fps
if fps_min_frames is not None:
video["min_frames"] = fps_min_frames
if fps_max_frames is not None:
video["max_frames"] = fps_max_frames
return fetch_video(
video,
image_patch_size=image_patch_size,
return_video_sample_fps=return_video_sample_fps,
return_video_metadata=return_video_metadata,
)
def process_multi_modal_inputs_for_minicpmo(input_ids, attention_mask, position_ids, cu_seqlens, multi_modal_inputs):
# Adjust image bounds based on left padding and cumulative sequence lengths
# This is necessary for MiniCPM-o's vision-language alignment
left_padding_length = torch.argmax(attention_mask, dim=1)
image_bounds = []
for i in range(len(multi_modal_inputs["image_bound"])):
image_bound = (
multi_modal_inputs["image_bound"][i].to(left_padding_length.device) - left_padding_length[i] + cu_seqlens[i]
)
image_bounds.append(image_bound)
# Flatten pixel values list for MiniCPM-o processing
pixel_values = []
for i in range(len(multi_modal_inputs["pixel_values"])):
pixel_values.extend([p for p in multi_modal_inputs["pixel_values"][i]])
multi_modal_inputs["pixel_values"] = [pixel_values]
multi_modal_inputs["image_bound"] = [torch.vstack(image_bounds)]
multi_modal_inputs["tgt_sizes"] = [torch.vstack(multi_modal_inputs["tgt_sizes"])]
multi_modal_inputs["input_ids"] = input_ids
multi_modal_inputs["attention_mask"] = attention_mask
multi_modal_inputs["position_ids"] = position_ids
return {"data": multi_modal_inputs}
|
verl__utils__debug__metrics.py
|
# Copyright 2025 Individual Contributor: TomQunChaoA
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import torch
from verl.protocol import DataProto
logger = logging.getLogger(__file__)
def calculate_token_list_diff(tensor1: torch.Tensor, tensor2: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
# verify inputs
if tensor1.numel() == 0 or tensor2.numel() == 0:
return torch.zeros(tensor1.shape[0], dtype=torch.long, device=tensor1.device)
if tensor1.shape != tensor2.shape or mask.shape != tensor1.shape or mask.shape != tensor2.shape:
print(
f"<WARN> dim of tensor1, tensor2, mask is not equal, {(tensor1.shape)=},{(tensor2.shape)=}, {(mask.shape)=}"
)
return torch.ones_like(tensor1)
# transfer to same device
if tensor2.device != tensor1.device:
tensor2 = tensor2.to(tensor1.device)
if mask.device != tensor1.device:
mask = mask.to(tensor1.device)
# calculate diff
diff_mask = tensor1 != tensor2
valid_diff_mask = diff_mask & (mask == 1)
diff_counts = valid_diff_mask.sum(dim=1)
return diff_counts
def pearson_correlation_coefficient(tensor1: torch.Tensor, tensor2: torch.Tensor, mask: torch.Tensor) -> float:
# implementation of https://arxiv.org/pdf/2506.13585
if tensor1.shape != tensor2.shape or mask.shape != tensor1.shape or mask.shape != tensor2.shape:
return 0
mt1 = torch.masked_select(tensor1, mask)
mt2 = torch.masked_select(tensor2, mask)
result = torch.corrcoef(torch.stack([mt1, mt2], dim=0))
return result[0][1].detach().item()
def calculate_log_prob_diff(log_probs1: torch.Tensor, log_probs2: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
full_diff = torch.abs(log_probs1 - log_probs2)
return torch.masked_select(full_diff, mask)
def calculate_debug_metrics(data: DataProto) -> dict:
"""
calculate rollout vs actor logprobs diff, for debugging purpose
Args:
data: DataProto
the data batch to calculate
rollout_log_probs: log_probs record when rollout forward tokens
old_log_probs(actor log probs): log_probs record when actor forward tokens
loss_mask or attention_mask: to mask unrelated token
responses: the response tokens, for calculating size
Returns:
dict: metrics
"training/rollout_probs_diff_valid": 1->input is valid, 0->input is invalid
"training/rollout_probs_diff_max": max value of logprob diff of rollout vs. actor
"training/rollout_probs_diff_mean": mean value of logprob diff of rollout vs. actor
"training/rollout_probs_diff_std": std value of logprob diff of rollout vs. actor
"training/rollout_actor_probs_pearson_corr": logprob's pearson corrcoef of rollout vs. actor, reference to https://arxiv.org/pdf/2506.13585
"""
rollout_old_log_probs = data.batch["rollout_log_probs"]
actor_old_log_probs = data.batch["old_log_probs"]
if "response_mask" in data.batch:
logger.debug("response mask found, use it to mask log probs")
log_prob_mask = data.batch["response_mask"]
elif "attention_mask" in data.batch:
log_prob_mask = data.batch["attention_mask"]
else:
logger.warning(f"no mask info found, use all log probs, {(data.batch.keys())=}")
log_prob_mask = torch.ones_like(rollout_old_log_probs)
responses = data.batch["responses"]
response_length = responses.size(1)
response_mask = log_prob_mask[:, -response_length:]
# calculate pearson corrcoef
actor_probs = torch.exp(actor_old_log_probs)
rollout_probs = torch.exp(rollout_old_log_probs)
response_mask_bool = response_mask.bool()
pearson_corrcoef = pearson_correlation_coefficient(actor_probs, rollout_probs, response_mask_bool)
rollout_probs_diff = calculate_log_prob_diff(actor_probs, rollout_probs, response_mask_bool)
return {
"training/rollout_probs_diff_valid": 1,
"training/rollout_probs_diff_max": torch.max(rollout_probs_diff).detach().item(),
"training/rollout_probs_diff_mean": torch.mean(rollout_probs_diff).detach().item(),
"training/rollout_probs_diff_std": torch.std(rollout_probs_diff).detach().item(),
"training/rollout_actor_probs_pearson_corr": pearson_corrcoef,
}
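if __name__ == "__main__":
    # Minimal illustrative sketch of the low-level diff helpers with toy
    # tensors; calculate_debug_metrics itself expects a full DataProto batch.
    token_a = torch.tensor([[1, 2, 3, 0]])
    token_b = torch.tensor([[1, 9, 3, 0]])
    mask = torch.tensor([[1, 1, 1, 0]])
    print(calculate_token_list_diff(token_a, token_b, mask))  # tensor([1])
    lp_a = torch.tensor([[-0.10, -0.20, -0.30, 0.0]])
    lp_b = torch.tensor([[-0.10, -0.25, -0.30, 0.0]])
    print(calculate_log_prob_diff(lp_a, lp_b, mask.bool()))  # abs diffs at the 3 valid positions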
|
verl__utils__debug__trajectory_tracker.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Trajectory tracker can be inserted into code to save the intermediate results.
The results will be dumped to HDFS for offline comparison.
Each process will have a client that first moves all the tensors to CPU.
"""
import io
import os
import tempfile
from collections import deque
import ray
import torch
from verl.utils.hdfs_io import copy, makedirs
remote_copy = ray.remote(copy)
@ray.remote
def save_to_hdfs(data: io.BytesIO, name, hdfs_dir, verbose):
filename = name + ".pth"
with tempfile.TemporaryDirectory() as tmpdirname:
local_filepath = os.path.join(tmpdirname, filename)
with open(local_filepath, "wb") as f:
f.write(data.getbuffer())
# upload to hdfs
if verbose:
print(f"Saving {local_filepath} to {hdfs_dir}")
try:
copy(local_filepath, hdfs_dir)
except Exception as e:
print(e)
@ray.remote
class TrajectoryTracker:
def __init__(self, hdfs_dir, verbose) -> None:
self.hdfs_dir = hdfs_dir
makedirs(hdfs_dir)
self.verbose = verbose
self.handle = deque()
def dump(self, data: io.BytesIO, name):
# get a temp file and write to it
self.handle.append(save_to_hdfs.remote(data, name, self.hdfs_dir, self.verbose))
def wait_for_hdfs(self):
while len(self.handle) != 0:
future = self.handle.popleft()
ray.get(future)
def dump_data(data, name):
enable = os.getenv("VERL_ENABLE_TRACKER", "0") == "1"
if not enable:
return
buffer = io.BytesIO()
torch.save(data, buffer)
tracker = get_trajectory_tracker()
ray.get(tracker.dump.remote(buffer, name))
def get_trajectory_tracker():
hdfs_dir = os.getenv("VERL_TRACKER_HDFS_DIR", default=None)
verbose = os.getenv("VERL_TRACKER_VERBOSE", default="0") == "1"
assert hdfs_dir is not None
tracker = TrajectoryTracker.options(name="global_tracker", get_if_exists=True, lifetime="detached").remote(
hdfs_dir, verbose
)
return tracker
if __name__ == "__main__":
# testing
os.environ["VERL_ENABLE_TRACKER"] = "1"
os.environ["VERL_TRACKER_HDFS_DIR"] = "~/debug/test"
@ray.remote
def process(iter):
data = {"obs": torch.randn(10, 20)}
dump_data(data, f"process_{iter}_obs")
ray.init()
output_lst = []
for i in range(10):
output_lst.append(process.remote(i))
out = ray.get(output_lst)
tracker = get_trajectory_tracker()
ray.get(tracker.wait_for_hdfs.remote())
|
verl__utils__device.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# This code is inspired by the torchtune.
# https://github.com/pytorch/torchtune/blob/main/torchtune/utils/_device.py
#
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license in https://github.com/pytorch/torchtune/blob/main/LICENSE
import logging
import os
import platform
import subprocess
import torch
from packaging import version
logger = logging.getLogger(__name__)
def is_torch_npu_available(check_device=True) -> bool:
"""Check if Ascend NPU is available for PyTorch operations.
Attempts to detect NPU availability by checking for the torch.npu module
and its is_available() function.
Args:
check_device : only check torch_npu package or strictly check if NPU device is available
Returns:
bool: True if NPU is available, False otherwise.
"""
try:
if not hasattr(torch, "npu"):
return False
if check_device:
return torch.npu.is_available()
else:
return True
except ImportError:
return False
is_cuda_available = torch.cuda.is_available()
is_npu_available = is_torch_npu_available()
def get_resource_name() -> str:
"""Function that return ray resource name based on the device type.
Returns:
ray resource name string, either "GPU" or "NPU".
"""
return "GPU" if is_cuda_available else "NPU"
def get_visible_devices_keyword() -> str:
"""Get the environment variable name for visible device selection.
Returns the appropriate environment variable name based on the available
accelerator type (CUDA or Ascend NPU).
Returns:
str: 'CUDA_VISIBLE_DEVICES' if CUDA is available,
'ASCEND_RT_VISIBLE_DEVICES' otherwise.
"""
return "CUDA_VISIBLE_DEVICES" if not is_torch_npu_available(check_device=False) else "ASCEND_RT_VISIBLE_DEVICES"
def get_device_name() -> str:
"""Get the device type string based on available accelerators.
Detects the available accelerator and returns the corresponding PyTorch
device type string. Currently supports CUDA, Ascend NPU, and CPU.
Returns:
str: Device type string ('cuda', 'npu', or 'cpu').
"""
if is_cuda_available:
device = "cuda"
elif is_npu_available:
device = "npu"
else:
device = "cpu"
return device
def get_torch_device():
"""Get the PyTorch device module for the current accelerator.
Returns the torch device namespace (e.g., torch.cuda, torch.npu) based on
the detected accelerator type. Falls back to torch.cuda if the namespace
is not found.
Returns:
module: The PyTorch device module (torch.cuda, torch.npu, etc.).
"""
device_name = get_device_name()
try:
return getattr(torch, device_name)
except AttributeError:
logger.warning(f"Device namespace '{device_name}' not found in torch, try to load torch.cuda.")
return torch.cuda
def get_device_id() -> int:
"""Get the index of the current accelerator device.
Returns:
int: The current device index (e.g., 0 for 'cuda:0').
"""
return get_torch_device().current_device()
def get_nccl_backend() -> str:
"""Get the distributed communication backend based on device type.
Returns the appropriate collective communication backend for the
detected accelerator (HCCL for Ascend NPU, NCCL for CUDA).
Returns:
str: Backend name ('hccl' for NPU, 'nccl' for CUDA/default).
"""
if is_npu_available:
return "hccl"
else:
# default to nccl
return "nccl"
def set_expandable_segments(enable: bool) -> None:
"""Configure CUDA memory allocator expandable segments setting.
Expandable segments can help avoid out-of-memory (OOM) errors by allowing
the memory allocator to expand existing memory segments rather than
allocating new ones.
Args:
enable: If True, enable expandable segments. If False, disable them.
Note:
This function only has an effect when CUDA is available.
"""
if is_cuda_available:
torch.cuda.memory._set_allocator_settings(f"expandable_segments:{enable}")
def auto_set_device(config) -> None:
"""Automatically configure device name for different accelerators.
For example, on Ascend NPU, this function defaults the trainer device to "npu"
unless explicitly set to "cpu".
Args:
config: Configuration object with trainer.device attribute.
"""
if config and hasattr(config, "trainer") and hasattr(config.trainer, "device"):
if is_torch_npu_available():
if config.trainer.device not in ["cpu", "npu"]:
logger.warning(
f"Detect setting config.trainer.device to {config.trainer.device} for Ascend NPU, maybe"
f"from default value in config file, automatically set to `npu` instead."
)
config.trainer.device = "npu"
# Other cases: set device to "cuda" via config file, no need to change.
def get_device_capability(device_id: int = 0) -> tuple[int | None, int | None]:
"""Get the compute capability of a CUDA device.
Args:
device_id: The CUDA device index to query. Defaults to 0.
Returns:
tuple: A tuple of (major, minor) compute capability version,
or (None, None) if CUDA is not available.
"""
major, minor = None, None
if is_cuda_available:
major, minor = torch.cuda.get_device_capability(device_id)
return major, minor
def get_npu_versions() -> tuple[str, str]:
"""Get the software version and CANN toolkit version for NPU devices.
Returns:
tuple[str, str]: A tuple of (software_version, cann_version)
Raises:
RuntimeError: If unable to retrieve version information
"""
# Check npu-smi software version
result = subprocess.run(["npu-smi", "info", "-t", "board", "-i", "1"], capture_output=True, text=True, check=True)
# Parse software version from output
software_version = None
for line in result.stdout.split("\n"):
if "Software Version" in line:
# Extract version from line like: "Software Version : 25.3.rc1.2"
parts = line.split(":")
if len(parts) > 1:
software_version = parts[1].strip().lower()
break
if not software_version:
raise RuntimeError("Could not find Software Version in npu-smi output")
# Check CANN toolkit version
arch = platform.machine()
if arch not in ["arm64", "aarch64", "x86_64"]:
raise RuntimeError(f"Unsupported architecture: {arch}")
ascend_home = os.environ.get("ASCEND_HOME_PATH", "/usr/local/Ascend/ascend-toolkit/latest")
cann_path = os.path.join(ascend_home, f"{arch}-linux")
if not os.path.exists(cann_path):
raise RuntimeError(f"CANN toolkit path does not exist: {cann_path}")
info_file = os.path.join(cann_path, "ascend_toolkit_install.info")
if not os.path.exists(info_file):
raise RuntimeError(f"CANN toolkit info file does not exist: {info_file}")
# Parse version from info file
cann_version = None
with open(info_file) as f:
for line in f:
if line.startswith("version="):
cann_version = line.split("=", 1)[1].strip().lower()
break
if not cann_version:
raise RuntimeError("Could not find version in CANN toolkit info file")
return software_version, cann_version
def check_ipc_version_support(software_version: str, cann_version: str) -> bool:
"""Check if the given software and CANN versions support IPC.
Compares the software version and CANN toolkit version against minimum
required versions for IPC support:
- Software Version should be >= 25.3.rc1
- CANN version should be >= 8.3.rc1
Args:
software_version: The software version string (e.g., "25.5.0", "25.3.rc1.2", "25.5.t3.b001")
cann_version: The CANN toolkit version string (e.g., "8.3.0", "8.3.rc1")
Returns:
bool: True if IPC is supported, False otherwise.
Raises:
RuntimeError: If version format is invalid
"""
# For software_version like "25.3.rc1.2", "25.5.0", or "25.5.t3.b001",
# we need to extract the base version
# Use regex to extract version with the following rules:
# - Standard version: 25.5.0 -> 25.5.0
# - RC version: 25.3.rc1.2 -> 25.3.rc1
# - t suffix version: 25.5.t3.b001 -> 25.5 (only first 2 parts if third part is lowercase t)
# - RC version: 25.3.rc1 -> 25.3.rc1
# For versions with more than 3 parts (e.g., 25.3.rc1.2), only match the first 3 parts
import re
# Match version with optional rc part or lowercase t suffix:
# - If version has lowercase t (e.g., 25.5.t3.b001), only match first 2 parts
# - Otherwise, match up to 3 parts (e.g., 25.5.0, 25.3.rc1.2)
ascend_version_pattern = r"(\d+\.\d+(?=\.t))|(\d+\.\d+(?:\.(?:rc\d+|\d+))?)"
software_match = re.match(ascend_version_pattern, software_version)
if not software_match:
raise RuntimeError(f"Invalid software version format: {software_version}")
# Select the matched group (either first 2 parts or up to 3 parts)
software_base = software_match.group(1) if software_match.group(1) else software_match.group(2)
cann_match = re.match(ascend_version_pattern, cann_version)
if not cann_match:
raise RuntimeError(f"Invalid CANN version format: {cann_version}")
else:
# Select the matched group (either first 2 parts or up to 3 parts)
cann_base = cann_match.group(1) if cann_match.group(1) else cann_match.group(2)
if version.parse(software_base) >= version.parse("25.3.rc1"):
if version.parse(cann_base) >= version.parse("8.3.rc1"):
return True
else:
logger.info(f"CANN version {cann_version} is below 8.3.RC1")
else:
logger.info(f"Software version {software_version} is below 25.3.rc1")
return False
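# Illustrative sketch (not part of the original file): feeding the example
# version strings from the docstring above through the checker.
def _demo_ipc_version_check():
    # "25.3.rc1.2" normalizes to base "25.3.rc1", which together with CANN
    # "8.3.rc1" meets the minimum supported combination -> True.
    supported = check_ipc_version_support("25.3.rc1.2", "8.3.rc1")
    # An older CANN toolkit fails the second check -> False.
    unsupported = check_ipc_version_support("25.3.rc1.2", "8.2.0")
    return supported, unsupported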
def is_support_ipc() -> bool:
"""Check if the device supports IPC (Inter-Process Communication).
For GPU devices, always returns True.
For NPU devices, checks the software version and CANN toolkit version
to determine if IPC is supported.
Returns:
bool: True if IPC is supported, False otherwise.
"""
# If CUDA is available, it's a GPU device
if is_cuda_available:
return True
# For NPU devices, check the software version and CANN toolkit version
if is_npu_available:
try:
software_version, cann_version = get_npu_versions()
return check_ipc_version_support(software_version, cann_version)
except subprocess.CalledProcessError as e:
raise RuntimeError(f"Failed to execute npu-smi command: {e}") from e
except Exception as e:
raise RuntimeError(f"Error checking IPC support: {e}") from e
# For other devices (CPU), return False
return False
|
verl__utils__flops_counter.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import torch
from transformers import PretrainedConfig
from verl.utils.device import get_torch_device
_DEVICE_FLOPS = {
"CPU": 448e9,
"GB200": 2.5e15,
"B200": 2.25e15,
"MI300X": 1336e12,
"H100": 989e12,
"H800": 989e12,
"H200": 989e12,
"A100": 312e12,
"A800": 312e12,
"L40S": 362.05e12,
"L40": 181.05e12,
"A40": 149.7e12,
"L20": 119.5e12,
"H20": 148e12,
"910B": 354e12,
"Ascend910": 354e12,
"RTX 3070 Ti": 21.75e12,
}
def get_device_flops(unit="T", device_name=None):
"""Get the theoretical FLOPS (Floating Point Operations Per Second) capacity of the current device.
Args:
unit (str): The unit to return the FLOPS in. Supported values are:
"B" - base unit, raw FLOPS (1e0)
"K" - Kilo (1e3)
"M" - Mega (1e6)
"G" - Giga (1e9)
"T" - Tera (1e12, default)
"P" - Peta (1e15)
device_name (str, optional): Override the detected device name; intended for testing only.
Returns:
float: The theoretical FLOPS capacity of the current device in the specified unit.
Returns float('inf') for unknown GPU types.
"""
def unit_convert(number, level):
units = ["B", "K", "M", "G", "T", "P"]
if number <= 0:
return number
ptr = 0
while ptr < len(units) and units[ptr] != level:
number /= 1000
ptr += 1
return number
# passing device_name is for testing purposes only
if device_name is None:
device = get_torch_device()
if device == torch.cpu:
device_name = "CPU"
else:
device_name = get_torch_device().get_device_name()
flops = float("inf") # INF flops for unkown gpu type
for key, value in sorted(_DEVICE_FLOPS.items(), reverse=True):
if key in device_name:
flops = value
break
flops_unit = unit_convert(flops, unit)
return flops_unit
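# Illustrative sketch (not part of the original file): unit handling of
# get_device_flops; passing device_name is the testing hook mentioned above.
def _demo_device_flops():
    # "H100" maps to 989e12 FLOPS in _DEVICE_FLOPS, so:
    tera = get_device_flops(unit="T", device_name="H100")  # -> 989.0
    peta = get_device_flops(unit="P", device_name="H100")  # -> 0.989
    return tera, peta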
def _estimate_qwen2_flops(config, tokens_sum, batch_seqlens, delta_time):
hidden_size = config.hidden_size
vocab_size = config.vocab_size
num_hidden_layers = config.num_hidden_layers
num_key_value_heads = config.num_key_value_heads
num_attention_heads = config.num_attention_heads
intermediate_size = config.intermediate_size
head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
q_size = num_attention_heads * head_dim
k_size = num_key_value_heads * head_dim
v_size = num_key_value_heads * head_dim
# non-attn per-layer params
# Qwen2/Llama use SwiGLU: gate, up, and down linear layers in the MLP
mlp_N = hidden_size * intermediate_size * 3
attn_linear_N = hidden_size * (q_size + k_size + v_size + num_attention_heads * head_dim)
emd_and_lm_head_N = vocab_size * hidden_size * 2
# non-attn all_layer params
dense_N = (mlp_N + attn_linear_N) * num_hidden_layers + emd_and_lm_head_N
# non-attn all_layer & all_token fwd & bwd flops
dense_N_flops = 6 * dense_N * tokens_sum
# attn all_layer & all_token fwd & bwd flops
seqlen_square_sum = 0
for seqlen in batch_seqlens:
seqlen_square_sum += seqlen * seqlen
attn_qkv_flops = 6 * seqlen_square_sum * head_dim * num_attention_heads * num_hidden_layers
# all_layer & all_token fwd & bwd flops
flops_all_token = dense_N_flops + attn_qkv_flops
flops_achieved = flops_all_token * (1.0 / delta_time) / 1e12
return flops_achieved
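# Illustrative sketch (not part of the original file): driving the dense + attention
# FLOPs split above with a toy dense config. types.SimpleNamespace stands in for a
# transformers PretrainedConfig; all sizes below are made up.
def _demo_qwen2_flops_estimate():
    from types import SimpleNamespace
    toy_cfg = SimpleNamespace(
        hidden_size=1024,
        vocab_size=32000,
        num_hidden_layers=4,
        num_key_value_heads=8,
        num_attention_heads=16,
        intermediate_size=4096,
    )
    batch_seqlens = [512, 1024]
    # Achieved TFLOPS if this batch took 1 second of wall-clock time.
    return _estimate_qwen2_flops(toy_cfg, sum(batch_seqlens), batch_seqlens, delta_time=1.0)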
def _estimate_qwen3_vl_flops(config, tokens_sum, batch_seqlens, delta_time, **kargs):
# qwen3_vl uses text_config and vision_config to distinguish configs of different parts.
hidden_size = config.text_config.hidden_size
vocab_size = config.text_config.vocab_size
num_hidden_layers = config.text_config.num_hidden_layers
num_key_value_heads = config.text_config.num_key_value_heads
num_attention_heads = config.text_config.num_attention_heads
intermediate_size = config.text_config.intermediate_size
head_dim = hidden_size // num_attention_heads
q_size = num_attention_heads * head_dim
k_size = num_key_value_heads * head_dim
v_size = num_key_value_heads * head_dim
# non-attn per-layer params
mlp_N = hidden_size * intermediate_size * 3
attn_linear_N = hidden_size * (q_size + k_size + v_size + num_attention_heads * head_dim)
emd_and_lm_head_N = vocab_size * hidden_size * 2
# non-attn all_layer params
dense_N = (mlp_N + attn_linear_N) * num_hidden_layers + emd_and_lm_head_N
# non-attn all_layer & all_token fwd & bwd flops
dense_N_flops = 6 * dense_N * tokens_sum
# qwen3_vl uses deepstack to merge visual and text embeds, but that step adds no extra tensor operations to count here.
# attn all_layer & all_token fwd & bwd flops
seqlen_square_sum = 0
for seqlen in batch_seqlens:
seqlen_square_sum += seqlen * seqlen
attn_qkv_flops = 6 * seqlen_square_sum * head_dim * num_attention_heads * num_hidden_layers
# vit flops
images_seqlens = kargs.get("images_seqlens", None)
if images_seqlens is not None:
vit_flops = _estimate_qwen3_vit_flop(images_seqlens, config.vision_config)
else:
vit_flops = 0
# all_layer & all_token fwd & bwd flops
flops_all_token = dense_N_flops + attn_qkv_flops + vit_flops
flops_achieved = flops_all_token * (1.0 / delta_time) / 1e12
return flops_achieved
def _estimate_qwen3_vl_moe_flops(config, tokens_sum, batch_seqlens, delta_time, **kargs):
# qwen3_vl uses text_config and vision_config to distinguish configs of different parts.
hidden_size = config.text_config.hidden_size
vocab_size = config.text_config.vocab_size
num_hidden_layers = config.text_config.num_hidden_layers
num_key_value_heads = config.text_config.num_key_value_heads
num_attention_heads = config.text_config.num_attention_heads
moe_intermediate_size = config.text_config.moe_intermediate_size
moe_num_expert = config.text_config.num_experts
moe_topk = config.text_config.num_experts_per_tok
head_dim = getattr(
config.text_config, "head_dim", config.text_config.hidden_size // config.text_config.num_attention_heads
)
q_size = num_attention_heads * head_dim
k_size = num_key_value_heads * head_dim
v_size = num_key_value_heads * head_dim
# non-attn per-layer params
moe_gata_N = hidden_size * moe_num_expert
# each routed expert MLP has gate_proj, up_proj and down_proj (SwiGLU)
moe_expertmlp_N = hidden_size * moe_intermediate_size * (moe_topk) * 3
attn_linear_N = hidden_size * (q_size + k_size + v_size + num_attention_heads * head_dim)
emd_and_lm_head_N = vocab_size * hidden_size * 2
# non-attn all_layer params
moe_N = (moe_gata_N + moe_expertmlp_N + attn_linear_N) * (num_hidden_layers) + emd_and_lm_head_N
# non-attn all_layer & all_token fwd & bwd flops
dense_N_flops = 6 * moe_N * tokens_sum
# attn all_layer & all_token fwd & bwd flops
seqlen_square_sum = 0
for seqlen in batch_seqlens:
seqlen_square_sum += seqlen * seqlen
attn_qkv_flops = 6 * seqlen_square_sum * head_dim * num_attention_heads * num_hidden_layers
# vit flops
images_seqlens = kargs.get("images_seqlens", None)
if images_seqlens is not None:
vit_flops = _estimate_qwen3_vit_flop(images_seqlens, config.vision_config)
else:
vit_flops = 0
# all_layer & all_token fwd & bwd flops
flops_all_token = dense_N_flops + attn_qkv_flops + vit_flops
flops_achieved = flops_all_token * (1.0 / delta_time) / 1e12
return flops_achieved
def _estimate_qwen3_vit_flop(images_seqlens, config):
"""
Estimate the FLOPS of the vision encoder for Qwen3-VL
"""
if config is None:
return 0
tokens_sum = sum(images_seqlens)
num_heads = config.num_heads
depth = config.depth
dim = config.hidden_size
mlp_hidden_dim = config.intermediate_size
out_hidden_size = config.out_hidden_size
spatial_merge_size = config.spatial_merge_size
head_dim = dim // num_heads
# every vision token's patch_embed comes from a conv of (C, T, H, W) -> (dim,)
patch_embed_N = dim * config.in_channels * config.temporal_patch_size * config.patch_size * config.patch_size
# The Qwen3 VL vision MLP does not use GLU, thus 2 matrices.
mlp_N = dim * mlp_hidden_dim * 2
attn_linear_N = dim * (4 * dim) # qkv and output proj
merger_N = (out_hidden_size + (dim * (spatial_merge_size**2))) * (dim * (spatial_merge_size**2))
# Qwen3 VL uses deep stack, one merger for every deepstack layer
deepstack_merger_N = merger_N * len(config.deepstack_visual_indexes)
# non-attn all_layer params
dense_N = patch_embed_N + (mlp_N + attn_linear_N) * depth + deepstack_merger_N + merger_N
# non-attn all_layer & all_token fwd & bwd flops
dense_N_flops = 6 * dense_N * tokens_sum
# In Qwen3 VL, full attention is used in all vision layers.
full_attn_layer_num = depth
# full attn layer & all_token fwd & bwd flops
seqlen_square_sum = 0
for seqlen in images_seqlens:
seqlen_square_sum += seqlen * seqlen
attn_qkv_flops = 12 * seqlen_square_sum * head_dim * num_heads * full_attn_layer_num
vit_flops = dense_N_flops + attn_qkv_flops
return vit_flops
def _estimate_deepseek_v3_flops(config, tokens_sum, batch_seqlens, delta_time):
hidden_size = config.hidden_size
vocab_size = config.vocab_size
moe_intermediate_size = config.moe_intermediate_size
num_hidden_layers = config.num_hidden_layers
first_k_dense_replace = config.first_k_dense_replace
num_query_heads = config.num_attention_heads
moe_num_expert = config.n_routed_experts
moe_topk = config.num_experts_per_tok
share_expert_num = config.n_shared_experts
# non-attn per-layer params
moe_gata_N = hidden_size * moe_num_expert
# moe has fc1_1, fc1_2 and fc2 using SwiGLU in ExpertMlp layer & shared experts
moe_expertmlp_N = hidden_size * moe_intermediate_size * (moe_topk + share_expert_num) * 3
# MLA attn
attn_linear_N = 0
q_head_dim = config.qk_nope_head_dim + config.qk_rope_head_dim
if config.q_lora_rank is None:
attn_linear_N += hidden_size * num_query_heads * q_head_dim
else:
attn_linear_N += hidden_size * config.q_lora_rank
attn_linear_N += num_query_heads * q_head_dim * config.q_lora_rank
attn_linear_N += hidden_size * (config.kv_lora_rank + config.qk_rope_head_dim)
attn_linear_N += num_query_heads * (q_head_dim - config.qk_rope_head_dim + config.v_head_dim) * config.kv_lora_rank
attn_linear_N += num_query_heads * config.v_head_dim * hidden_size
emd_and_lm_head_N = vocab_size * hidden_size * 2
# non-attn all_layer params
moe_N = (
(moe_gata_N + moe_expertmlp_N + attn_linear_N) * (num_hidden_layers - first_k_dense_replace)
+ (hidden_size * config.intermediate_size * 3 + attn_linear_N) * first_k_dense_replace
+ emd_and_lm_head_N
)
# non-attn all_layer & all_token fwd & bwd flops
dense_N_flops = 6 * moe_N * tokens_sum
# attn all_layer & all_token fwd & bwd flops
seqlen_square_sum = 0
for seqlen in batch_seqlens:
seqlen_square_sum += seqlen * seqlen * num_hidden_layers
# Core attention FLOPS for MLA with causal mask:
# Q @ K^T: 3 * 2 * seq^2 * q_head_dim * num_heads / 2 (causal)
# attn @ V: 3 * 2 * seq^2 * v_head_dim * num_heads / 2 (causal)
attn_qkv_flops = 3 * seqlen_square_sum * (q_head_dim + config.v_head_dim) * num_query_heads
# all_layer & all_token fwd & bwd flops
flops_all_token = dense_N_flops + attn_qkv_flops
flops_achieved = flops_all_token * (1.0 / delta_time) / 1e12
return flops_achieved
def _estimate_qwen2_moe_flops(config, tokens_sum, batch_seqlens, delta_time):
hidden_size = config.hidden_size
vocab_size = config.vocab_size
num_hidden_layers = config.num_hidden_layers
num_key_value_heads = config.num_key_value_heads
num_attention_heads = config.num_attention_heads
moe_intermediate_size = config.moe_intermediate_size
moe_topk = config.num_experts_per_tok
num_experts = config.num_experts
head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
q_size = num_attention_heads * head_dim
k_size = num_key_value_heads * head_dim
v_size = num_key_value_heads * head_dim
# non-attn per-layer params
# gate + MoE experts
moe_mlp_N = hidden_size * moe_topk * moe_intermediate_size * 3 + hidden_size * num_experts
attn_linear_N = hidden_size * (q_size + k_size + v_size + num_attention_heads * head_dim)
emd_and_lm_head_N = vocab_size * hidden_size * 2
# non-attn all_layer params
dense_N = (moe_mlp_N + attn_linear_N) * num_hidden_layers + emd_and_lm_head_N
# non-attn all_layer & all_token fwd & bwd flops
dense_N_flops = 6 * dense_N * tokens_sum
# attn all_layer & all_token fwd & bwd flops
seqlen_square_sum = 0
for seqlen in batch_seqlens:
seqlen_square_sum += seqlen * seqlen
attn_qkv_flops = 6 * seqlen_square_sum * head_dim * num_attention_heads * num_hidden_layers
# all_layer & all_token fwd & bwd flops
flops_all_token = dense_N_flops + attn_qkv_flops
flops_achieved = flops_all_token * (1.0 / delta_time) / 1e12
return flops_achieved
def _estimate_gemma3_flops(config, tokens_sum, batch_seqlens, delta_time):
hidden_size = config.hidden_size
vocab_size = config.vocab_size
num_hidden_layers = config.num_hidden_layers
num_key_value_heads = config.num_key_value_heads
num_attention_heads = config.num_attention_heads
intermediate_size = config.intermediate_size
head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
q_size = num_attention_heads * head_dim
k_size = num_key_value_heads * head_dim
v_size = num_key_value_heads * head_dim
# non-attn per-layer params
# Gemma3 uses GeGLU (gelu_pytorch_tanh), having 3 matrices in MLP (inherited from Gemma2MLP)
mlp_N = hidden_size * intermediate_size * 3
attn_linear_N = hidden_size * (q_size + k_size + v_size + num_attention_heads * head_dim)
emd_and_lm_head_N = vocab_size * hidden_size * 2
# non-attn all_layer params
dense_N = (mlp_N + attn_linear_N) * num_hidden_layers + emd_and_lm_head_N
# non-attn all_layer & all_token fwd & bwd flops
dense_N_flops = 6 * dense_N * tokens_sum
# attn all_layer & all_token fwd & bwd flops
# Gemma3 alternates between full and sliding window attention based on layer_types
seqlen_square_sum = 0
layer_types = getattr(config, "layer_types", None)
sliding_window = getattr(config, "sliding_window", 1024) # default 1024
# default pattern: every 6th layer is full
sliding_window_pattern = getattr(config, "sliding_window_pattern", 6)
# If layer_types is not provided, generate it based on sliding_window_pattern
if layer_types is None and sliding_window is not None and sliding_window_pattern is not None:
layer_types = [
"sliding_attention" if bool((i + 1) % sliding_window_pattern) else "full_attention"
for i in range(num_hidden_layers)
]
if layer_types:
# Calculate attention flops per layer based on attention type
for layer_idx in range(num_hidden_layers):
is_sliding = False
if layer_types and layer_idx < len(layer_types):
is_sliding = layer_types[layer_idx] == "sliding_attention"
for seqlen in batch_seqlens:
if is_sliding and sliding_window:
# Sliding window limits each token to attend to at most window_size tokens
effective_seqlen = min(seqlen, sliding_window)
seqlen_square_sum += seqlen * effective_seqlen
else:
# Full attention
seqlen_square_sum += seqlen * seqlen
else:
# If no layer_types config, assume all layers use full attention
for seqlen in batch_seqlens:
seqlen_square_sum += seqlen * seqlen
seqlen_square_sum *= num_hidden_layers
attn_qkv_flops = 6 * seqlen_square_sum * head_dim * num_attention_heads
# all_layer & all_token fwd & bwd flops
flops_all_token = dense_N_flops + attn_qkv_flops
flops_achieved = flops_all_token * (1.0 / delta_time) / 1e12
return flops_achieved
def _estimate_apertus_flops(config, tokens_sum, batch_seqlens, delta_time):
hidden_size = config.hidden_size
vocab_size = config.vocab_size
num_hidden_layers = config.num_hidden_layers
num_key_value_heads = config.num_key_value_heads
num_attention_heads = config.num_attention_heads
intermediate_size = config.intermediate_size
head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
q_size = num_attention_heads * head_dim
k_size = num_key_value_heads * head_dim
v_size = num_key_value_heads * head_dim
# Apertus MLP with XIELU activation uses only 2 linear layers (up_proj, down_proj)
# No gate_proj for XIELU, unlike SwiGLU which has 3 layers
mlp_N = hidden_size * intermediate_size * 2
attn_linear_N = hidden_size * (q_size + k_size + v_size + num_attention_heads * head_dim)
# ApertusConfig has qk_norm defaulting to True.
# This adds params for q_norm (on H) and k_norm (on num_kv_heads * head_dim)
qk_norm_params_per_layer = hidden_size + num_key_value_heads * head_dim # q_norm + k_norm
emd_and_lm_head_N = vocab_size * hidden_size * 2
# non-attn all_layer params
dense_N = (mlp_N + attn_linear_N + qk_norm_params_per_layer) * num_hidden_layers + emd_and_lm_head_N
# non-attn all_layer & all_token fwd & bwd flops
dense_N_flops = 6 * dense_N * tokens_sum
# attn all_layer & all_token fwd & bwd flops
seqlen_square_sum = 0
for seqlen in batch_seqlens:
seqlen_square_sum += seqlen * seqlen
attn_qkv_flops = 6 * seqlen_square_sum * head_dim * num_attention_heads * num_hidden_layers
# all_layer & all_token fwd & bwd flops
flops_all_token = dense_N_flops + attn_qkv_flops
flops_achieved = flops_all_token * (1.0 / delta_time) / 1e12
return flops_achieved
def _estimate_gpt_oss_flops(config, tokens_sum, batch_seqlens, delta_time):
hidden_size = config.hidden_size
vocab_size = config.vocab_size
num_hidden_layers = config.num_hidden_layers
num_key_value_heads = config.num_key_value_heads
num_attention_heads = config.num_attention_heads
# MoE params
moe_intermediate_size = config.intermediate_size
num_experts = config.num_local_experts
num_experts_per_tok = config.num_experts_per_tok
mlp_matrices = 3
# Head dim
head_dim = getattr(config, "head_dim", hidden_size // num_attention_heads)
q_size = num_attention_heads * head_dim
k_size = num_key_value_heads * head_dim
v_size = num_key_value_heads * head_dim
# 1. Attention Block (GQA)
attn_linear_N = hidden_size * (q_size + k_size + v_size + num_attention_heads * head_dim)
# 2. MLP / MoE Block
# Gate network
moe_gate_N = hidden_size * num_experts
# Expert forward calculation, Active parameters: mlp_matrices * H * I * num_experts_per_tok
moe_expert_N = hidden_size * moe_intermediate_size * mlp_matrices * num_experts_per_tok
moe_mlp_N = moe_gate_N + moe_expert_N
emd_and_lm_head_N = vocab_size * hidden_size * 2
# Total non-attn params per layer * layers + embeddings
# (moe_mlp_N + attn_linear_N) * layers
dense_N = (moe_mlp_N + attn_linear_N) * num_hidden_layers + emd_and_lm_head_N
# FLOPs for dense part (fwd + bwd = 6 * N)
dense_N_flops = 6 * dense_N * tokens_sum
# 3. Attention Matrix FLOPs
seqlen_square_sum = 0
# Handle sliding window attention
layer_types = getattr(config, "layer_types", None)
sliding_window = getattr(config, "sliding_window", 128)
if layer_types:
for layer_type in layer_types:
is_sliding = layer_type == "sliding_attention"
for seqlen in batch_seqlens:
if is_sliding and sliding_window:
# Sliding window limits each token to attend to at most window_size tokens
effective_seqlen = min(seqlen, sliding_window)
seqlen_square_sum += seqlen * effective_seqlen
else:
# Full attention
seqlen_square_sum += seqlen * seqlen
else:
# Default to full attention for all layers
for seqlen in batch_seqlens:
seqlen_square_sum += seqlen * seqlen
seqlen_square_sum *= num_hidden_layers
attn_qkv_flops = 6 * seqlen_square_sum * head_dim * num_attention_heads
# Total FLOPs
flops_all_token = dense_N_flops + attn_qkv_flops
flops_achieved = flops_all_token * (1.0 / delta_time) / 1e12
return flops_achieved
def _estimate_unknown_flops(config, tokens_sum, batch_seqlens, delta_time):
return 0
ESTIMATE_FUNC = {
"qwen2": _estimate_qwen2_flops,
"llama": _estimate_qwen2_flops,
"qwen2_moe": _estimate_qwen2_moe_flops,
"qwen2_vl": _estimate_qwen2_flops,
"qwen2_5_vl": _estimate_qwen2_flops,
"qwen3": _estimate_qwen2_flops,
"qwen3_moe": _estimate_qwen2_moe_flops,
"qwen3_vl": _estimate_qwen3_vl_flops,
"qwen3_vl_moe": _estimate_qwen3_vl_moe_flops,
"deepseek_v3": _estimate_deepseek_v3_flops,
"minicpmv": _estimate_qwen2_flops,
"minicpmo": _estimate_qwen2_flops,
"mistral": _estimate_qwen2_flops,
"gemma3_text": _estimate_gemma3_flops,
"seed_oss": _estimate_qwen2_flops,
"apertus": _estimate_apertus_flops,
"glm4v": _estimate_qwen2_flops,
"gpt_oss": _estimate_gpt_oss_flops,
"mimo": _estimate_qwen2_flops,
}
class FlopsCounter:
"""
Used to compute MFU (model FLOPs utilization) during the training loop
Example:
flops_counter = FlopsCounter(config)
flops_achieved, flops_promised = flops_counter.estimate_flops(tokens_list, delta_time)
"""
def __init__(self, config: PretrainedConfig):
VALID_CONFIG_TYPE = ESTIMATE_FUNC.keys()
if config.model_type not in VALID_CONFIG_TYPE:
print(
f"Only support config type of {VALID_CONFIG_TYPE}, but got {config.model_type}. MFU will always be "
f"zero."
)
self.config = config
# TODO: actually we can make this a static method
def estimate_flops(self, batch_seqlens, delta_time, **kargs):
"""
Estimate the FLOPS based on the number of valid tokens in the current batch and the time taken.
Args:
batch_seqlens (List[int]): A list where each element represents the number of valid tokens in the
current batch.
delta_time (float): The time taken to process the batch, in seconds.
Returns:
estimated_flops (float): The estimated FLOPS based on the input tokens and time.
promised_flops (float): The expected FLOPS of the current device.
"""
tokens_sum = sum(batch_seqlens)
func = ESTIMATE_FUNC.get(self.config.model_type, _estimate_unknown_flops)
sig = inspect.signature(func)
if any(p.kind == inspect.Parameter.VAR_KEYWORD for p in sig.parameters.values()):
estimated_flops = func(self.config, tokens_sum, batch_seqlens, delta_time, **kargs)
else:
estimated_flops = func(self.config, tokens_sum, batch_seqlens, delta_time)
promised_flops = get_device_flops()
return estimated_flops, promised_flops
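# Illustrative sketch (not part of the original file): typical FlopsCounter usage
# around a training step. transformers.Qwen2Config() is used here only because its
# model_type ("qwen2") is registered in ESTIMATE_FUNC; the sequence lengths are made up.
def _demo_flops_counter():
    import time
    from transformers import Qwen2Config
    counter = FlopsCounter(Qwen2Config())
    start = time.time()
    # ... run a forward/backward step over sequences of these lengths ...
    batch_seqlens = [512, 768, 1024]
    delta_time = max(time.time() - start, 1e-6)
    estimated, promised = counter.estimate_flops(batch_seqlens, delta_time)
    # MFU is the ratio of achieved to theoretical FLOPS (0 when the device is unknown).
    return estimated / promised if promised and promised != float("inf") else 0.0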
|
verl__utils__fsdp_utils.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import itertools
import json
import math
import os
from abc import ABC
from collections import OrderedDict
from contextlib import contextmanager, nullcontext
from typing import cast
import torch
import torch.distributed as dist
import torch.nn as nn
from packaging import version
from torch.distributed import DeviceMesh
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp._runtime_utils import _lazy_init
from torch.distributed.fsdp.wrap import size_based_auto_wrap_policy, transformer_auto_wrap_policy
from transformers.trainer_pt_utils import get_module_class_from_name
from verl.utils.device import get_device_id, get_device_name, get_torch_device
from verl.utils.model import check_exclude_modules, check_target_modules
if version.parse(torch.__version__) >= version.parse("2.6"):
from torch.distributed.fsdp import CPUOffloadPolicy, FSDPModule, MixedPrecisionPolicy, fully_shard
from torch.distributed.fsdp._fully_shard._fsdp_init import _get_post_forward_mesh_info
from torch.distributed.tensor import Shard
fully_shard_module = torch.distributed.fsdp._fully_shard._fully_shard
elif version.parse(torch.__version__) >= version.parse("2.4"):
from torch.distributed._composable.fsdp import CPUOffloadPolicy, FSDPModule, MixedPrecisionPolicy, fully_shard
fully_shard_module = torch.distributed._composable.fsdp
else:
fully_shard, MixedPrecisionPolicy, FSDPModule, CPUOffloadPolicy, fully_shard_module = None, None, None, None, None
def init_fn(x: torch.nn.Module):
if torch.distributed.get_rank() != 0:
x = x.to_empty(device=get_device_id(), recurse=False)
get_torch_device().empty_cache()
return x
def get_init_weight_context_manager(use_meta_tensor=True, mesh: DeviceMesh = None):
from accelerate import init_empty_weights
cpu_init_weights = lambda: torch.device("cpu")
if use_meta_tensor:
if mesh is None:
init_context = init_empty_weights if torch.distributed.get_rank() != 0 else cpu_init_weights
else:
init_context = init_empty_weights if mesh.get_coordinate()[-1] != 0 else cpu_init_weights
else:
init_context = cpu_init_weights
return init_context
# Copyright 2020-present the HuggingFace Inc. team.
# Adapted from https://github.com/huggingface/transformers/src/transformers/trainer.py
def get_fsdp_wrap_policy(module, config=None, is_lora=False):
"""Get FSDP wrap policy for the module.
Args:
module: The module to get wrap policy for
config: Configuration for wrap policy
is_lora: Whether to enable lambda policy for LoRA modules
"""
if config is None:
config = {}
# NOTE: This is a temporary workaround to stay compatible with both OmegaConf and dataclass configs. We will remove this
# once we have migrated all configs in verl from OmegaConf to dataclasses.
def _get_attr(attr_name, default_value=None):
if hasattr(config, "get"):
return config.get(attr_name, default_value)
else:
return config.__getattribute__(attr_name)
if _get_attr("disable", False):
return None
default_transformer_cls_names_to_wrap = getattr(module, "_no_split_modules", None)
fsdp_transformer_layer_cls_to_wrap = _get_attr(
"transformer_layer_cls_to_wrap", default_transformer_cls_names_to_wrap
)
min_num_params = _get_attr("min_num_params", 0)
auto_wrap_policy = None
policies = []
from torch.distributed.fsdp.wrap import _or_policy, lambda_auto_wrap_policy
# Add lambda policy for LoRA modules if is_lora is True
if is_lora:
def lambda_policy_fn(module):
return bool(
len(list(module.named_children())) == 0
and getattr(module, "weight", None) is not None
and module.weight.requires_grad
)
lambda_policy = functools.partial(lambda_auto_wrap_policy, lambda_fn=lambda_policy_fn)
policies.append(lambda_policy)
if min_num_params > 0:
size_policy = functools.partial(size_based_auto_wrap_policy, min_num_params=min_num_params)
policies.append(size_policy)
elif fsdp_transformer_layer_cls_to_wrap is not None:
transformer_cls_to_wrap = set()
for layer_class in fsdp_transformer_layer_cls_to_wrap:
transformer_cls = get_module_class_from_name(module, layer_class)
if transformer_cls is None:
raise Exception("Could not find the transformer layer class to wrap in the model.")
else:
transformer_cls_to_wrap.add(transformer_cls)
transformer_policy = functools.partial(
transformer_auto_wrap_policy,
transformer_layer_cls=transformer_cls_to_wrap,
)
policies.append(transformer_policy)
if len(policies) > 0:
auto_wrap_policy = functools.partial(_or_policy, policies=policies)
return auto_wrap_policy
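# Illustrative sketch (not part of the original file): obtaining a wrap policy for an
# HF causal-LM module and wrapping it with FSDP1. Assumes torch.distributed is already
# initialized; the plain-dict config works because it exposes .get() like OmegaConf.
def _demo_fsdp_wrap_policy(hf_model: nn.Module):
    wrap_config = {"min_num_params": 0}  # fall back to hf_model._no_split_modules
    policy = get_fsdp_wrap_policy(hf_model, config=wrap_config, is_lora=False)
    return FSDP(hf_model, auto_wrap_policy=policy, device_id=get_device_id())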
@torch.no_grad()
def offload_fsdp_model_to_cpu(model: FSDP, empty_cache: bool = True):
if fsdp_version(model) == 2:
offload_fsdp2_model_to_cpu(model, empty_cache)
return
assert isinstance(model, FSDP)
# lazy init FSDP model
_lazy_init(model, model)
assert model._is_root, "Only support root model offloading to CPU"
for handle in model._all_handles:
if handle._offload_params:
continue
flat_param = handle.flat_param
assert (
flat_param.data.data_ptr() == flat_param._local_shard.data_ptr()
and id(flat_param.data) != id(flat_param._local_shard)
and flat_param.data.size() == flat_param._local_shard.size()
)
handle.flat_param_to(torch.device("cpu"), non_blocking=True)
# the following still keeps id(._local_shard) != id(.data)
flat_param._local_shard = flat_param.data
assert id(flat_param._local_shard) != id(flat_param.data)
if empty_cache:
get_torch_device().empty_cache()
@torch.no_grad()
def offload_fsdp2_model_to_cpu(model, empty_cache: bool = True):
model.cpu()
if empty_cache:
get_torch_device().empty_cache()
@torch.no_grad()
def load_fsdp_model_to_gpu(model: FSDP):
if fsdp_version(model) == 2:
load_fsdp2_model_to_gpu(model)
return
assert isinstance(model, FSDP)
# lazy init FSDP model
_lazy_init(model, model)
assert model._is_root, "Only support root model loading to GPU"
device_id = get_device_id()
for handle in model._all_handles:
if handle._offload_params:
continue
flat_param = handle.flat_param
handle.flat_param_to(torch.device(f"{get_device_name()}:{device_id}"), non_blocking=True)
# the following still keeps id(._local_shard) != id(.data)
flat_param._local_shard = flat_param.data
@torch.no_grad()
def load_fsdp2_model_to_gpu(model):
device = get_device_id()
model.to(device)
@torch.no_grad()
def offload_fsdp_optimizer(optimizer):
if not optimizer.state:
return
for param_group in optimizer.param_groups:
for param in param_group["params"]:
state = optimizer.state[param]
for key, value in state.items():
if isinstance(value, torch.Tensor):
state[key] = value.to("cpu", non_blocking=True)
@torch.no_grad()
def load_fsdp_optimizer(optimizer, device_id):
if not optimizer.state:
return
for param_group in optimizer.param_groups:
for param in param_group["params"]:
state = optimizer.state[param]
for key, value in state.items():
if isinstance(value, torch.Tensor):
state[key] = value.to(device_id, non_blocking=True)
@contextmanager
def meta_device_init():
"""
Create model parameters with meta device.
Note that buffers in the model will still be initialized on the default device (e.g., CPU),
since buffers can be non-persistent and filled with expected values that can
NOT be captured on the meta device.
"""
device = torch.device("meta")
old_register_parameter = nn.Module.register_parameter
registered = set()
def register_empty_parameter(module, name, param):
old_register_parameter(module, name, param)
# skip re-registering shared parameters, since they
# were already registered previously
if param is not None and param not in registered:
param_cls = type(module._parameters[name])
kwargs = module._parameters[name].__dict__
kwargs["requires_grad"] = param.requires_grad
module._parameters[name] = param_cls(module._parameters[name].to(device), **kwargs)
registered.add(module._parameters[name])
try:
nn.Module.register_parameter = register_empty_parameter
yield
finally:
registered.clear()
nn.Module.register_parameter = old_register_parameter
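# Illustrative sketch (not part of the original file): constructing a module whose
# parameters live on the meta device, so no real memory is allocated until they are
# materialized later (e.g. by the init_fn returned from parallel_init_module_fn below).
def _demo_meta_init():
    with meta_device_init():
        layer = nn.Linear(4096, 4096, bias=False)
    assert layer.weight.is_meta  # parameters are placeholders without storage
    return layer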
def parallel_load_safetensors(filepath):
"""
Parallel load safetensors from huggingface checkpoint
Huggingface checkpoint contains:
- config.json: a json file for model configuration
- model.safetensors.index.json: a json file for the safetensors (parameters & buffers) index
- model-000x-of-00xx.safetensors: binary files for safetensors (parameters & buffers) chunks
Or (when model is small),
- model.safetensors: a binary file for all parameters and buffers
Each rank will own a part of model chunks and load them directly into GPU memory.
"""
from safetensors.torch import load_file
safetensors2param = {}
index_file = os.path.join(filepath, "model.safetensors.index.json")
if os.path.exists(index_file):
index = json.load(open(index_file, "rb"))
for param_name, filename in index["weight_map"].items():
safetensors2param.setdefault(filename, []).append(param_name)
else:
# in this case, the model is small and we can load it all at once
param_file = os.path.join(filepath, "model.safetensors")
assert os.path.exists(param_file), f"Cannot find {param_file}"
states = load_file(param_file)
for param_name in states:
safetensors2param.setdefault("model.safetensors", []).append(param_name)
del states
total_files = len(safetensors2param)
ckpt_chunks = sorted(safetensors2param.keys())
world_size = dist.get_world_size()
size = int(math.ceil(total_files / world_size))
ckpt_chunks = [ckpt_chunks[rank * size : rank * size + size] for rank in range(world_size)]
shard_states = {}
device = get_device_id()
for rank, files in enumerate(ckpt_chunks):
if rank == dist.get_rank():
for file in files:
file = os.path.join(filepath, file)
states = load_file(file, device=device)
# print(f"rank {rank} loading {file}...")
shard_states.update(states)
else:
for file in files:
for param_name in safetensors2param[file]:
shard_states[param_name] = rank
return shard_states
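# Illustrative sketch (not part of the original file): the file-to-rank assignment used
# above, replayed on a toy 5-file / 2-rank layout with hypothetical file names.
def _demo_chunk_assignment(total_files: int = 5, world_size: int = 2):
    ckpt_chunks = sorted(f"model-{i:05d}-of-{total_files:05d}.safetensors" for i in range(total_files))
    size = int(math.ceil(total_files / world_size))  # 3 files per rank here
    # rank 0 -> files 0..2, rank 1 -> files 3..4 (the last rank may get fewer)
    return [ckpt_chunks[rank * size : rank * size + size] for rank in range(world_size)]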
def parallel_init_module_fn(module: torch.nn.Module, shard_states: dict[str, torch.nn.Parameter]):
"""
Generate a function to initialize sub-modules in the `module` with `shard_states`
from huggingface checkpoint.
Args:
module (torch.nn.Module): the global module to be initialized
shard_states (Dict[str, torch.nn.Parameter]): the shard states from huggingface checkpoint
Returns:
init_fn (Callable): a function to initialize sub-modules in the `module` with `shard_states`
"""
state2fqn = {}
for name, state in itertools.chain(
module.named_parameters(remove_duplicate=False), module.named_buffers(remove_duplicate=False)
):
state2fqn.setdefault(state, []).append(name)
# collect shared parameters and buffers (those referenced by more than one FQN)
shared = {s for s, names in state2fqn.items() if len(names) > 1}
materialized_states = {}
@torch.no_grad()
def create_and_sync_state(param_name, state, is_param):
assert param_name in shard_states, f"{param_name} not loaded"
device = get_device_id()
if is_param:
param = torch.nn.Parameter(torch.empty_like(state.data, device=device), requires_grad=state.requires_grad)
else: # buffer
param = torch.empty_like(state.data, device=device)
loaded = shard_states[param_name]
if isinstance(loaded, torch.nn.Parameter | torch.Tensor):
# NOTE: loaded.dtype can be different with param.dtype
param.data.copy_(loaded.data)
dist.broadcast(param.data, src=dist.get_rank())
else:
assert isinstance(loaded, int) # the rank that holds the state
dist.broadcast(param.data, src=loaded)
shard_states.pop(param_name)
del loaded
return param
def init_fn(sub_mod: torch.nn.Module, recurse: bool = True):
param_and_buffers = tuple(sub_mod.named_parameters(recurse=False)) + tuple(sub_mod.named_buffers(recurse=False))
# param_and_buffers = sorted(sub_mod.named_parameters(recurse=False), key=lambda x: x[0])
for name, state in param_and_buffers:
if not state.is_meta:
continue
is_param = name in sub_mod._parameters
fqn = state2fqn[state].pop(0)
# non-persistent buffers will not be saved in the state dict, so we can safely skip them
if (not is_param) and fqn not in shard_states:
if state.is_meta:
raise RuntimeError(
f"find a non-persistent buffer ({fqn}) initiated with device meta. Such buffer is not saved "
f"in checkpoint and user should guarantee to init in CPU / GPU device."
)
continue
# for shared parameters, we reuse the state from the first time it is created
if state in shared:
if state not in materialized_states:
materialized_states[state] = create_and_sync_state(fqn, state, is_param)
else:
if fqn in shard_states:
shard_states.pop(fqn)
materialize_state = materialized_states[state]
# for non-shared parameters, we create the state directly
else:
materialize_state = create_and_sync_state(fqn, state, is_param)
if is_param:
sub_mod._parameters[name] = materialize_state
else:
sub_mod._buffers[name] = materialize_state
if recurse:
for module in sub_mod.children():
init_fn(module, recurse=True)
# for debug
# if len(shard_states) == 0: print("clear")
return sub_mod
return init_fn
def fsdp_version(model):
if isinstance(model, FSDP):
return 1
elif isinstance(model, FSDPModule):
return 2
else:
return 0
def get_fsdp_state_ctx(model, state_type, state_cfg, optim_cfg):
if fsdp_version(model) == 1:
return FSDP.state_dict_type(model, state_type, state_cfg, optim_cfg)
else:
return nullcontext()
def get_fsdp_full_state_dict(model: torch.nn.Module, offload_to_cpu: bool = True, rank0_only: bool = True):
"""
Get the full state dict from an FSDP model.
Args:
model (torch.nn.Module): The FSDP model to get state dict from
offload_to_cpu (bool, optional): Whether to offload the state dict to CPU. Defaults to True.
rank0_only (bool, optional): Whether to only get state dict on rank 0. Defaults to True.
Returns:
dict: The full state dict of the model
Raises:
NotImplementedError: If the FSDP version is unknown
"""
if fsdp_version(model) == 1:
from torch.distributed.fsdp import FullStateDictConfig, StateDictType
state_dict_config = FullStateDictConfig(offload_to_cpu=offload_to_cpu, rank0_only=rank0_only)
with get_fsdp_state_ctx(
model, state_type=StateDictType.FULL_STATE_DICT, state_cfg=state_dict_config, optim_cfg=None
):
state_dict = model.state_dict()
return state_dict
elif fsdp_version(model) == 2:
from torch.distributed.checkpoint.state_dict import StateDictOptions, get_model_state_dict
state_dict_config = StateDictOptions(
full_state_dict=True, cpu_offload=offload_to_cpu, broadcast_from_rank0=not rank0_only
)
state_dict = get_model_state_dict(model, options=state_dict_config)
return state_dict
else:
raise NotImplementedError(f"Unknown FSDP version {fsdp_version(model)}")
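# Illustrative sketch (not part of the original file): saving a consolidated checkpoint
# from rank 0 only; works for both FSDP1- and FSDP2-wrapped models. The path is hypothetical.
def _demo_save_full_checkpoint(fsdp_model: torch.nn.Module, path: str = "model_full.pt"):
    state_dict = get_fsdp_full_state_dict(fsdp_model, offload_to_cpu=True, rank0_only=True)
    if dist.get_rank() == 0:
        torch.save(state_dict, path)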
def fsdp2_load_full_state_dict(model: torch.nn.Module, full_state: dict, device_mesh=None, cpu_offload=None):
"""
Loads the full state dict (could be only on rank 0) into the sharded model. This is done by broadcasting the
parameters from rank 0 to all other ranks. This function modifies the model in-place.
Args:
model (`torch.nn.Module`): The model to load the state dict into
full_state (`dict`): The full state dict to load, can only be on rank 0
"""
if version.parse(torch.__version__) >= version.parse("2.7.0"):
from torch.distributed.checkpoint.state_dict import StateDictOptions, set_model_state_dict
else:
# official torch 2.6.0 set_model_state_dict API leads to OOM
# use torch 2.7.0 copy from verl/third_party/torch/distributed/checkpoint
from verl.third_party.torch.distributed.checkpoint.state_dict import StateDictOptions, set_model_state_dict
# To broadcast, the model needs to be instantiated on the GPU.
if dist.get_rank() == 0:
model = model.to(device=get_device_id(), non_blocking=True)
else:
model = model.to_empty(device=get_device_id())
cpu_offload = cpu_offload is not None
options = StateDictOptions(full_state_dict=True, cpu_offload=cpu_offload, broadcast_from_rank0=True)
set_model_state_dict(model, full_state, options=options)
# rotary_emb is not in state_dict, so we need to broadcast it manually
for name, buf in model.named_buffers():
dist.broadcast(buf, src=0)
if cpu_offload:
model.to("cpu", non_blocking=True)
for buf in model.buffers():
buf.data = buf.data.to(get_device_id())
@contextmanager
def maybe_patch_fsdp_module(model):
if fully_shard_module is None:
yield
return
orig_fsdp_module = fully_shard_module.FSDPModule
class FSDPModuleABC(ABC, orig_fsdp_module):
pass
try:
if isinstance(model, ABC):
fully_shard_module.FSDPModule = FSDPModuleABC
yield
finally:
fully_shard_module.FSDPModule = orig_fsdp_module
def apply_fsdp2(model, fsdp_kwargs, config):
"""model: AutoModelForCausalLM"""
assert CPUOffloadPolicy is not None, "PyTorch version >= 2.4 is required for using fully_shard API (FSDP2)"
default_transformer_cls_names_to_wrap = getattr(model, "_no_split_modules", None)
fsdp_transformer_layer_cls_to_wrap = config.get("wrap_policy", {}).get(
"transformer_layer_cls_to_wrap", default_transformer_cls_names_to_wrap
)
if isinstance(fsdp_transformer_layer_cls_to_wrap, str):
fsdp_transformer_layer_cls_to_wrap = [fsdp_transformer_layer_cls_to_wrap]
assert len(fsdp_transformer_layer_cls_to_wrap) > 0 and fsdp_transformer_layer_cls_to_wrap[0] is not None
modules = []
for name, module in model.named_modules():
if module.__class__.__name__ in fsdp_transformer_layer_cls_to_wrap or (
isinstance(module, nn.Embedding) and not model.config.tie_word_embeddings
):
modules.append(module)
for idx, module in enumerate(modules):
# if torch.distributed.is_initialized() and torch.distributed.get_rank() == 0:
# print(f"wrap module {module.__class__.__name__}")
with maybe_patch_fsdp_module(module):
fully_shard(module, **fsdp_kwargs)
# if torch.distributed.is_initialized() and torch.distributed.get_rank() == 0:
# print(f"wrap module {model.__class__.__name__}")
with maybe_patch_fsdp_module(model):
fully_shard(model, **fsdp_kwargs) # fsdp2 will not reshard_after_forward for root module
def get_shard_placement_fn(fsdp_size):
"""Choose the dimension that can divide fsdp_size to avoid padding"""
def shard_placement_fn(param):
shape = list(param.shape)
for i in range(len(shape)):
if shape[i] % fsdp_size == 0:
return Shard(i)
return Shard(0)
return shard_placement_fn
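# Illustrative sketch (not part of the original file): with fsdp_size=8, a (7, 4096)
# weight is sharded on dim 1 (4096 % 8 == 0), while a (7, 5) weight falls back to
# Shard(0) and would be padded.
def _demo_shard_placement():
    placement_fn = get_shard_placement_fn(fsdp_size=8)
    even = placement_fn(torch.empty(7, 4096))  # -> Shard(1)
    fallback = placement_fn(torch.empty(7, 5))  # -> Shard(0)
    return even, fallback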
def fsdp2_clip_grad_norm_(parameters, max_norm, norm_type=2.0, error_if_nonfinite=False, foreach=None):
"""torch.nn.utils.clip_grad_norm_ cann't run on cpu parameter DTensor"""
from torch.nn.utils.clip_grad import _clip_grads_with_norm_, _get_total_norm
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
else:
# prevent generators from being exhausted
parameters = list(parameters)
grads = [p.grad for p in parameters if p.grad is not None]
total_norm = _get_total_norm(grads, norm_type, error_if_nonfinite, foreach)
total_norm = total_norm.to(get_device_id(), non_blocking=True)
_clip_grads_with_norm_(parameters, max_norm, total_norm, foreach)
return total_norm
def layered_summon_lora_params(fsdp_module) -> OrderedDict:
from peft.utils.save_and_load import get_peft_model_state_dict
def __prefix_submodules(module, prefix):
for name, submodule in module.named_modules():
if name.startswith(prefix) and "." not in name[len(prefix) :]:
yield name, submodule
lora_params = OrderedDict()
prefix_list = [
# fsdp
"_fsdp_wrapped_module.base_model.model.",
"_fsdp_wrapped_module.base_model.model.model.",
"_fsdp_wrapped_module.base_model.model.model.layers.",
"_fsdp_wrapped_module.base_model.model.model.language_model.layers.",
# fsdp2
"base_model.model.",
"base_model.model.model.",
"base_model.model.model.layers.",
"base_model.model.model.language_model.layers.",
]
peft_model = getattr(fsdp_module, "_fsdp_wrapped_module", fsdp_module)
for prefix in prefix_list:
for name, submodule in __prefix_submodules(fsdp_module, prefix):
prefix = name.replace("_fsdp_wrapped_module.base_model.model.", "base_model.model.")
if name.endswith(".model") or name.endswith(".layers"):
continue
if fsdp_version(submodule) > 0:
with FSDP.summon_full_params(submodule, writeback=False):
sub_lora_params = get_peft_model_state_dict(peft_model, state_dict=submodule.state_dict())
sub_lora_params = {
f"{prefix}.{name}": param.full_tensor().detach().cpu()
if hasattr(param, "full_tensor")
else param.detach().cpu()
for name, param in sub_lora_params.items()
}
lora_params.update(sub_lora_params)
submodule._is_root = False
get_torch_device().empty_cache()
return lora_params
def collect_lora_params(module: FSDP, layered_summon: bool, base_sync_done: bool) -> OrderedDict:
"""
Collect LoRA params, or full params if the base model is not yet loaded in vLLM.
Intended for use when isinstance(self.module._fsdp_wrapped_module, PeftModel).
"""
from peft.utils.save_and_load import get_peft_model_state_dict
lora_params = OrderedDict()
peft_model = getattr(module, "_fsdp_wrapped_module", module)
if fsdp_version(module) > 0:
if layered_summon:
if not base_sync_done:
raise ValueError(
"To use layered_summon, you must make sure base-model is preloaded in vllm, e.g. let "
"rollout.load_format=safetensors"
)
lora_params = layered_summon_lora_params(module)
else:
with FSDP.summon_full_params(module, writeback=False):
if base_sync_done:
lora_params = get_peft_model_state_dict(peft_model)
lora_params = {
name: param.full_tensor().detach().cpu()
if hasattr(param, "full_tensor")
else param.detach().cpu()
for name, param in lora_params.items()
}
else:
model = peft_model.base_model.model
orig_dev = "cpu" if "cpu" in str(next(model.parameters()).device) else get_device_name()
model = model.to("cpu")
for name, param in model.state_dict().items():
if any(x in name for x in ["_flat_param", "lora_"]):
continue
name = name.replace("_fsdp_wrapped_module.", "").replace(".base_layer", "")
lora_params[name] = (
param.full_tensor().detach().cpu()
if hasattr(param, "full_tensor")
else param.detach().cpu()
)
model = model.to(orig_dev)
get_torch_device().empty_cache()
else:
if base_sync_done:
lora_params = get_peft_model_state_dict(peft_model)
else:
model = peft_model.base_model.model
orig_dev = "cpu" if "cpu" in str(next(model.parameters()).device) else get_device_name()
model = model.to("cpu")
for name, param in model.state_dict().items():
if any(x in name for x in ["_flat_param", "lora_"]):
continue
name = name.replace("_fsdp_wrapped_module.", "").replace(".base_layer", "")
lora_params[name] = param.detach().cpu()
model = model.to(orig_dev)
return lora_params
def replace_lora_wrapper(k, peft_config):
"""Replace LoRA parameter keys with base layer equivalents.
Transforms LoRA parameter names to their corresponding base layer
names for proper weight loading in vLLM when base model sync is not done.
Args:
k (str): Original parameter key name.
peft_config: The PEFT config used to check target / excluded modules.
Returns:
str: Transformed parameter key for base layer.
"""
stacked_params = ["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"]
if k.endswith(".weight"):
module_k = k[: -len(".weight")]
if check_exclude_modules(peft_config, module_k):
return k
elif any([module_k.endswith(s) for s in stacked_params]) or check_target_modules(peft_config, module_k):
return f"{module_k}.base_layer.weight"
if k.endswith(".bias"):
module_k = k[: -len(".bias")]
if check_exclude_modules(peft_config, module_k):
return k
elif any([module_k.endswith(s) for s in stacked_params]) or check_target_modules(peft_config, module_k):
return f"{module_k}.base_layer.bias"
return k
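# ---------------------------------------------------------------------------
# Editor-added illustration (not part of the original source). With a typical
# LoRA config whose target modules cover the attention/MLP projections (and
# assuming q_proj is not in the exclude list), a targeted weight is redirected
# to its wrapped base layer while untouched modules keep their original key:
#
#   replace_lora_wrapper("model.layers.0.self_attn.q_proj.weight", peft_config)
#       -> "model.layers.0.self_attn.q_proj.base_layer.weight"
#   replace_lora_wrapper("model.embed_tokens.weight", peft_config)
#       -> "model.embed_tokens.weight"
# ---------------------------------------------------------------------------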
def set_reshard_after_forward(module: FSDPModule, reshard_after_forward: bool, recurse: bool = True) -> None:
"""
Sets if the module should reshard parameters after forward. This can be
used to change the ``reshard_after_forward`` FSDP arg at runtime. For
example, this can be used to set the FSDP root module's value to
``True`` (since it is otherwise specially set to ``False``), or it can
set an FSDP module's value to ``False`` for running evals and set back
to ``True`` for training.
Args:
reshard_after_forward (bool): Whether to reshard parameters after
forward.
recurse (bool): Whether to set for all FSDP submodules or just the
passed-in module.
---
Copied from https://github.com/pytorch/pytorch/blob/main/torch/distributed/fsdp/_fully_shard/_fully_shard.py to
address the absence of the set_reshard_after_forward function in torch versions earlier than 2.8.0.
"""
if not isinstance(reshard_after_forward, bool):
raise ValueError(f"reshard_after_forward should be a bool, got {type(reshard_after_forward)}")
self_module = cast(nn.Module, module)
modules = list(self_module.modules()) if recurse else [self_module]
for module in modules:
if isinstance(module, FSDPModule):
state = module._get_fsdp_state()
state._auto_reshard_after_forward = False
if fsdp_param_group := state._fsdp_param_group:
fsdp_param_group.post_forward_mesh_info = _get_post_forward_mesh_info(
reshard_after_forward, fsdp_param_group.mesh_info
)
def normalize_peft_param_name(params: dict) -> dict:
"""
Converts peft model parameter name to base parameter name
For example,
base_model.model.model.embed_tokens.weight -> model.embed_tokens.weight
base_model.model.model.layers.0.self_attn.q_proj.base_layer.weight -> model.layers.0.self_attn.q_proj.weight
and remove params such as base_model.model.model.layers.0.self_attn.q_proj.lora_A.default.weight,
base_model.model.model.layers.0.self_attn.q_proj.lora_B.default.weight
"""
def _normalize_peft_name(name: str) -> str:
return name.replace("base_model.model.", "").replace("base_model.", "").replace(".base_layer", "")
def _is_lora_key(name: str) -> bool:
# catch typical PEFT keys
return ("lora_" in name) or (".adapter_" in name)
params = [(_normalize_peft_name(k), v) for k, v in params.items()]
# strip any residual LoRA tensors
params = {k: v for k, v in params if not _is_lora_key(k)}
return params
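def _example_normalize_peft_param_name():  # pragma: no cover - editor-added sketch
    """Illustrative sketch (not part of the original source).

    Shows the key renaming performed by ``normalize_peft_param_name``: PEFT
    prefixes and ``.base_layer`` are stripped and LoRA tensors are dropped.
    """
    peft_state = {
        "base_model.model.model.embed_tokens.weight": torch.zeros(1),
        "base_model.model.model.layers.0.self_attn.q_proj.base_layer.weight": torch.zeros(1),
        "base_model.model.model.layers.0.self_attn.q_proj.lora_A.default.weight": torch.zeros(1),
    }
    # -> {"model.embed_tokens.weight": ..., "model.layers.0.self_attn.q_proj.weight": ...}
    return normalize_peft_param_name(peft_state)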
def _merge_or_unmerge_lora_(module, merge: bool):
"""Merge or unmerge LoRA adapters in a module.
Args:
module: The module containing LoRA layers
merge: If True, merge LoRA into base model; if False, unmerge LoRA
"""
from peft.tuners.lora import LoraLayer
with torch.no_grad():
for m in module.modules():
if isinstance(m, LoraLayer):
is_merged = getattr(m, "merged", False)
if merge and not is_merged:
m.merge()
elif (not merge) and is_merged:
m.unmerge()
# merged_adapters
def _clean_merged_lora_(module):
"""Cleans the merged lora adapters"""
from peft.tuners.lora import LoraLayer
with torch.no_grad():
for m in module.modules():
if isinstance(m, LoraLayer):
merged_adapters = getattr(m, "merged_adapters", False)
if merged_adapters:
m.merged_adapters = []
def fsdp_merge_unmerge(module: nn.Module, do_merge: bool):
"""Merge or unmerge LoRA adapters in FSDP module.
For FSDP (v1), it gathers all model parameters to each device, which may cause OOM.
For FSDP2, it gathers model parameters layer-by-layer to reduce memory footprint.
Args:
module: The FSDP module to merge/unmerge LoRA adapters
do_merge: If True, merge LoRA into base model; if False, unmerge LoRA
"""
version = fsdp_version(module)
assert version in [1, 2], f"fsdp_merge_unmerge requires FSDP module, got version {version}"
if version == 1:
# Unshard → merge → Reshard
with FSDP.summon_full_params(module, writeback=True, with_grads=False):
_merge_or_unmerge_lora_(module, merge=do_merge)
else:
# FSDP2: Unshard → merge → Reshard layer-by-layer
for name, submodule in module.named_modules():
if isinstance(submodule, FSDPModule) and name != "": # skip root model
with FSDP.summon_full_params(submodule, writeback=True, with_grads=False):
_merge_or_unmerge_lora_(submodule, merge=do_merge)
def backup_base_model_weights(module):
"""Backup base model weights to CPU with LoRA temporarily disabled.
This function temporarily disables LoRA adapters, backs up the clean base model weights
to CPU, then re-enables the adapters.
Args:
module: The PEFT model with LoRA adapters
Returns:
dict: Dictionary mapping parameter name to CPU tensor backup of base model weights
"""
from peft import PeftModel
backup = {}
with torch.no_grad():
# Check if module is a PEFT model
if isinstance(module, PeftModel):
# Temporarily disable adapters to get clean base model weights
with module.disable_adapter():
# Backup base model weights (excluding lora parameters)
for name, param in module.named_parameters():
if "lora" not in name.lower():
backup[name] = param.data.clone().cpu()
else:
# For non-PEFT models, just backup all parameters
for name, param in module.named_parameters():
backup[name] = param.data.clone().cpu()
return backup
def restore_base_model_weights(module, backup):
"""Restore base model weights from CPU backup.
This function restores the base model weights from the CPU backup, effectively
undoing any LoRA merge operations.
Args:
module: The PEFT model with LoRA adapters
backup: Dictionary mapping parameter name to CPU tensor backup of base model weights
"""
with torch.no_grad():
for name, param in module.named_parameters():
if name in backup:
param.data.copy_(backup[name].to(param.device))
@contextmanager
def merged_lora_context(actor, backup_adapters=False):
"""Context manager to temporarily merge LoRA adapters.
This context manager merges LoRA adapters into the base model weights,
performs operations (like syncing weights to vLLM), then restores the base model
weights from backup.
Args:
actor: The actor module with LoRA adapters to merge
backup_adapters: If True, backup base model weights (with LoRA disabled) before
merging and restore them after. This is more numerically stable than unmerging.
Yields:
None
"""
base_weights_backup = None
if backup_adapters:
# Backup base model weights with LoRA temporarily disabled
base_weights_backup = backup_base_model_weights(actor)
# Merge LoRA adapters into base model
fsdp_merge_unmerge(actor, do_merge=True)
try:
# Do work while merged (sync_to_vllm / generate / etc.)
yield
finally:
if backup_adapters and base_weights_backup is not None:
# Restore base model weights from CPU backup (effectively undoing the merge)
restore_base_model_weights(actor, base_weights_backup)
_clean_merged_lora_(actor)
else:
# Fall back to unmerge if no backup was made
fsdp_merge_unmerge(actor, do_merge=False)
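# ---------------------------------------------------------------------------
# Editor-added usage sketch (not part of the original source). Typical weight
# sync flow for a LoRA actor: merge adapters, push the merged weights to the
# rollout engine, then restore the pristine base weights from the CPU backup.
# `actor_module` and `sync_weights_to_rollout` are placeholder names.
#
#   with merged_lora_context(actor_module, backup_adapters=True):
#       sync_weights_to_rollout(actor_module)
#   # on exit the base weights are restored and the merged_adapters bookkeeping is cleared
# ---------------------------------------------------------------------------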
|
verl__utils__groupwise.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023-2024 SGLang Team
# Copyright 2025 ModelBest Inc. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Group-wise helpers for RL training utilities.
Public API:
- as_torch_index(index, device=None) -> torch.LongTensor
- group_mean_std(scores, gidx, eps=1e-6, device=None) -> (mean_g, std_g, count_g)
Default device policy:
- If `device` is None:
* In pytest (detected by env "PYTEST_CURRENT_TEST"): use CPU.
* Else if CUDA is available: use CUDA.
* Else: use CPU.
- You can override via env "VERL_FORCE_DEVICE" (e.g., "cuda:0" / "cpu").
Notes:
- as_torch_index: canonicalizes arbitrary group labels to a contiguous 1-D torch.long
tensor in range [0..G-1]. Robust to torch/numpy/list/tuple, ints/floats/bools,
numeric strings, UUIDs, mixed object arrays. Near-integer floats (|x-round(x)|<=1e-6)
are rounded; otherwise factorization is applied.
- group_mean_std: pure-PyTorch per-group mean/std with Bessel correction for variance
(denominator max(count-1, 1)). Singleton groups fallback to mean=0, std=1 for
compatibility with common “native” conventions.
"""
from __future__ import annotations
import os
from typing import Any, Optional
import numpy as np
import torch
from verl.utils.device import get_device_name
__all__ = ["as_torch_index", "group_mean_std"]
def _resolve_device(explicit: Optional[torch.device | str]) -> torch.device:
"""
Resolve device according to policy described in the module docstring.
Priority:
1) explicit argument
2) VERL_FORCE_DEVICE env
3) pytest detection -> cpu
4) cuda if available, else cpu
"""
if explicit is not None:
return torch.device(explicit)
forced = os.getenv("VERL_FORCE_DEVICE")
if forced:
return torch.device(forced)
# Heuristic: pytest sets PYTEST_CURRENT_TEST
if "PYTEST_CURRENT_TEST" in os.environ:
return torch.device("cpu")
return torch.device(get_device_name())
def _to_1d_numpy_object_array(x: Any) -> np.ndarray:
"""Best-effort: convert arbitrary input into a 1-D numpy array; fallback to object dtype."""
try:
arr = np.asarray(x)
except Exception:
try:
arr = np.array(list(x), dtype=object)
except Exception:
arr = np.array([x], dtype=object)
if arr.ndim != 1:
arr = arr.reshape(-1)
return arr
def as_torch_index(index: Any, device: torch.device | str | None = None) -> torch.Tensor:
"""
Convert arbitrary group labels to a contiguous 1-D torch.long tensor (0..G-1).
Args:
index: Any iterable of labels or tensor/ndarray.
device: Target device; if None, resolved via _resolve_device().
Returns:
torch.LongTensor with shape (N,)
"""
target = _resolve_device(device)
# ---------- Fast path: torch.Tensor ----------
if isinstance(index, torch.Tensor):
t = index.reshape(-1)
if t.dtype in (
torch.int64,
torch.int32,
torch.int16,
torch.int8,
getattr(torch, "uint8", torch.uint8),
torch.bool,
):
return t.to(device=target, dtype=torch.long)
if t.dtype in (torch.float16, torch.float32, torch.float64, torch.bfloat16):
t64 = t.to(dtype=torch.float64)
rounded = torch.round(t64)
if torch.allclose(t64, rounded, rtol=0.0, atol=1e-6):
return rounded.to(device=target, dtype=torch.long)
arr = np.array([str(x.item()) for x in t], dtype=object)
else:
arr = np.array([str(x.item()) if hasattr(x, "item") else str(x) for x in t], dtype=object)
else:
# ---------- Non-torch: go through numpy ----------
arr = _to_1d_numpy_object_array(index)
# Pure integers (incl. bool)
if arr.dtype != object and np.issubdtype(arr.dtype, np.integer):
return torch.from_numpy(arr.astype(np.int64, copy=False)).to(device=target)
# Floats nearly equal to integers
if arr.dtype != object and np.issubdtype(arr.dtype, np.floating):
arr64 = arr.astype(np.float64, copy=False)
rounded = np.rint(arr64)
if np.allclose(arr64, rounded, rtol=0.0, atol=1e-6):
return torch.from_numpy(rounded.astype(np.int64)).to(device=target)
# fall through
# Try numeric string coercion
try:
coerced = arr.astype(np.int64)
return torch.from_numpy(coerced).to(device=target)
except Exception:
pass
if arr.dtype != object:
arr = arr.astype(object)
# ---------- Factorization (UUIDs / mixed types / arbitrary labels) ----------
try:
_, inv = np.unique(arr, return_inverse=True)
except Exception:
sarr = np.array([str(x) for x in arr], dtype=object)
_, inv = np.unique(sarr, return_inverse=True)
inv = inv.astype(np.int64, copy=False)
return torch.from_numpy(inv).to(device=target)
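# ---------------------------------------------------------------------------
# Editor-added examples (not part of the original source), illustrating the
# canonicalization rules described above; device="cpu" keeps the sketch
# accelerator-free. Note that pure integer labels pass through unchanged.
#
#   as_torch_index([2, 2, 5, 5], device="cpu")                    -> tensor([2, 2, 5, 5])
#   as_torch_index(np.array([1.0, 2.0, 2.0]), device="cpu")       -> tensor([1, 2, 2])
#   as_torch_index(["uuid-a", "uuid-b", "uuid-a"], device="cpu")  -> tensor([0, 1, 0])
# ---------------------------------------------------------------------------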
@torch.no_grad()
def group_mean_std(
scores: torch.Tensor,
gidx: torch.Tensor,
eps: float = 1e-6,
device: torch.device | str | None = None,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Compute per-group mean/std/count in pure PyTorch.
mean_g = sum / count
std_g = sqrt( max( (sum2 - sum^2/count) / max(count-1, 1), eps ) )
Singleton groups fallback to mean=0, std=1.
Args:
scores: (N,) float tensor.
gidx : (N,) long/int tensor with group indices (0..G-1).
eps : Numerical floor for variance.
device: Target device; if None, resolved via _resolve_device().
Returns:
mean_g: (G,) float32
std_g : (G,) float32
count : (G,) float32
"""
target = _resolve_device(device)
scores = scores.reshape(-1).to(device=target, dtype=torch.float32)
gidx = gidx.reshape(-1).to(device=target, dtype=torch.long)
if scores.numel() != gidx.numel():
raise ValueError(f"scores and gidx length mismatch: {scores.numel()} vs {gidx.numel()}")
G = int(torch.max(gidx).item()) + 1 if gidx.numel() > 0 else 0
if G == 0:
# Return empty tensors on the selected device
empty = torch.empty(0, device=target, dtype=torch.float32)
return empty, empty, empty
ones = torch.ones_like(scores, dtype=torch.float32)
count = torch.zeros(G, device=target, dtype=torch.float32).index_add_(0, gidx, ones)
s1 = torch.zeros(G, device=target, dtype=torch.float32).index_add_(0, gidx, scores)
s2 = torch.zeros(G, device=target, dtype=torch.float32).index_add_(0, gidx, scores * scores)
mean = s1 / count.clamp_min(1.0)
var_num = s2 - (s1 * s1) / count.clamp_min(1.0)
denom = (count - 1.0).clamp_min(1.0)
var = var_num / denom
std = torch.sqrt(torch.clamp(var, min=eps))
# Singleton groups: mean=0, std=1
single = count <= 1.0
if torch.any(single):
mean = mean.clone()
std = std.clone()
mean[single] = 0.0
std[single] = 1.0
return mean, std, count
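def _example_group_normalization():  # pragma: no cover - editor-added sketch
    """Illustrative sketch (not part of the original source).

    Composes ``as_torch_index`` and ``group_mean_std`` into the common
    group-wise advantage normalization pattern; device="cpu" keeps the sketch
    independent of any accelerator.
    """
    uids = ["traj-a", "traj-a", "traj-b", "traj-b", "traj-b"]  # arbitrary group labels
    gidx = as_torch_index(uids, device="cpu")  # -> tensor([0, 0, 1, 1, 1])
    scores = torch.tensor([1.0, 3.0, 0.0, 1.0, 2.0])
    mean_g, std_g, _count = group_mean_std(scores, gidx, device="cpu")
    # Gather the per-group statistics back to per-sample advantages.
    return (scores - mean_g[gidx]) / std_g[gidx]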
|
verl__utils__hdfs_io.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import shutil
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_SFT_LOGGING_LEVEL", "WARN"))
_HDFS_PREFIX = "hdfs://"
_HDFS_BIN_PATH = shutil.which("hdfs")
def exists(path: str, **kwargs) -> bool:
r"""Works like os.path.exists() but supports hdfs.
Test whether a path exists. Returns False for broken symbolic links.
Args:
path (str): path to test
Returns:
bool: True if the path exists, False otherwise
"""
if _is_non_local(path):
return _exists(path, **kwargs)
return os.path.exists(path)
def _exists(file_path: str):
"""hdfs capable to check whether a file_path is exists"""
if file_path.startswith("hdfs"):
return _run_cmd(_hdfs_cmd(f"-test -e {file_path}")) == 0
return os.path.exists(file_path)
def makedirs(name, mode=0o777, exist_ok=False, **kwargs) -> None:
r"""Works like os.makedirs() but supports hdfs.
Super-mkdir; create a leaf directory and all intermediate ones. Works like
mkdir, except that any intermediate path segment (not just the rightmost)
will be created if it does not exist. If the target directory already
exists, raise an OSError if exist_ok is False. Otherwise no exception is
raised. This is recursive.
Args:
name (str): directory to create
mode (int): file mode bits
exist_ok (bool): if True, do not raise an exception if the directory already exists
kwargs: keyword arguments for hdfs
"""
if _is_non_local(name):
# TODO(haibin.lin):
# - handle OSError for hdfs(?)
# - support exist_ok for hdfs(?)
_mkdir(name, **kwargs)
else:
os.makedirs(name, mode=mode, exist_ok=exist_ok)
def _mkdir(file_path: str) -> bool:
"""hdfs mkdir"""
if file_path.startswith("hdfs"):
_run_cmd(_hdfs_cmd(f"-mkdir -p {file_path}"))
else:
os.makedirs(file_path, exist_ok=True)
return True
def copy(src: str, dst: str, **kwargs) -> bool:
r"""Works like shutil.copy() for file, and shutil.copytree for dir, and supports hdfs.
Copy data and mode bits ("cp src dst"). Return the file's destination.
The destination may be a directory.
If source and destination are the same file, a SameFileError will be
raised.
    Args:
        src (str): source file path
        dst (str): destination file path
        kwargs: keyword arguments for hdfs copy
    Returns:
        bool | str: True/False for HDFS-involved copies; otherwise the destination path returned by shutil.
"""
if _is_non_local(src) or _is_non_local(dst):
# TODO(haibin.lin):
# - handle SameFileError for hdfs files(?)
# - return file destination for hdfs files
return _copy(src, dst)
else:
if os.path.isdir(src):
return shutil.copytree(src, dst, **kwargs)
else:
return shutil.copy(src, dst, **kwargs)
def _copy(from_path: str, to_path: str, timeout: int = None) -> bool:
if to_path.startswith("hdfs"):
if from_path.startswith("hdfs"):
returncode = _run_cmd(_hdfs_cmd(f"-cp -f {from_path} {to_path}"), timeout=timeout)
else:
returncode = _run_cmd(_hdfs_cmd(f"-put -f {from_path} {to_path}"), timeout=timeout)
else:
if from_path.startswith("hdfs"):
            returncode = _run_cmd(_hdfs_cmd(f"-get {from_path} {to_path}"), timeout=timeout)
else:
try:
shutil.copy(from_path, to_path)
returncode = 0
except shutil.SameFileError:
returncode = 0
except Exception as e:
logger.warning(f"copy {from_path} {to_path} failed: {e}")
returncode = -1
return returncode == 0
def _run_cmd(cmd: str, timeout=None):
return os.system(cmd)
def _hdfs_cmd(cmd: str) -> str:
return f"{_HDFS_BIN_PATH} dfs {cmd}"
def _is_non_local(path: str):
return path.startswith(_HDFS_PREFIX)
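# ---------------------------------------------------------------------------
# Editor-added usage sketch (not part of the original source). All helpers
# dispatch on the "hdfs://" prefix: local paths fall back to os/shutil, while
# HDFS paths shell out to the `hdfs` CLI, which must therefore be on PATH.
# The paths below are placeholders.
#
#   makedirs("/tmp/ckpt_local", exist_ok=True)                           # os.makedirs
#   if exists("hdfs://namenode/ckpt/global_step_100"):                   # hdfs dfs -test -e ...
#       copy("hdfs://namenode/ckpt/global_step_100", "/tmp/ckpt_local")  # hdfs dfs -get ...
# ---------------------------------------------------------------------------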
|
verl__utils__kernel__fp8_kernel.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import torch
logger = logging.getLogger(__name__)
# Check if Triton is available
_TRITON_AVAILABLE = False
try:
import triton
import triton.language as tl
_TRITON_AVAILABLE = True
except ImportError:
logger.debug("Triton not available, FP8 Triton kernels will not be used")
# Environment variable to control Triton FP8 usage (set to "1" to disable)
_DISABLE_TRITON_FP8 = os.environ.get("VERL_DISABLE_TRITON_FP8", "0").lower() in ("1", "true", "yes")
# FP8 constants
FP8_DTYPE = torch.float8_e4m3fn
FP8_MAX = torch.finfo(FP8_DTYPE).max
FP8_MIN = -FP8_MAX
def ceil_div(x: int, y: int) -> int:
"""Perform ceiling division of two integers."""
return (x + y - 1) // y
def is_triton_available() -> bool:
"""Check if Triton is available for FP8 kernels."""
return _TRITON_AVAILABLE
if _TRITON_AVAILABLE:
@triton.jit
def _blockwise_cast_to_fp8_kernel(
X,
Y,
S,
stride_xm,
stride_xn,
stride_ym,
stride_yn,
stride_sm,
stride_sn,
M,
N,
eps,
fp8_min,
fp8_max,
BLOCK_M: tl.constexpr = 128,
BLOCK_N: tl.constexpr = 128,
):
"""Triton kernel for blockwise FP8 quantization.
Each program instance handles one block of size (BLOCK_M, BLOCK_N).
Computes per-block scale and quantizes to FP8 in a single pass.
Refer to https://github.com/THUDM/slime/blob/main/slime/backends/megatron_utils/kernels/fp8_kernel.py
"""
pid_m = tl.cast(tl.program_id(axis=0), tl.int64)
pid_n = tl.cast(tl.program_id(axis=1), tl.int64)
# Compute block offsets
off_m = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
off_n = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
# Create masks for boundary handling
mask_m = off_m < M
mask_n = off_n < N
mask = mask_m[:, None] & mask_n[None, :]
# Load input block and convert to float32 for precision
x = tl.load(X + off_m[:, None] * stride_xm + off_n[None, :] * stride_xn, mask=mask, other=0.0).to(tl.float32)
# Compute block-wise absolute maximum with epsilon for numerical stability
_absmax = tl.maximum(tl.max(tl.abs(x)), eps)
# Compute scale: scale = absmax / fp8_max
x_s = _absmax / fp8_max
# Compute inverse scale for quantization
s_inv = 1.0 / x_s
# Quantize: clamp(x * s_inv, fp8_min, fp8_max)
y_q = tl.clamp(x * s_inv, fp8_min, fp8_max).to(Y.dtype.element_ty)
# Store quantized values and scale
tl.store(Y + off_m[:, None] * stride_ym + off_n[None, :] * stride_yn, y_q, mask=mask)
tl.store(S + pid_m * stride_sm + pid_n * stride_sn, x_s)
def blockwise_cast_to_fp8_triton(
x: torch.Tensor,
weight_block_size: list[int] | tuple[int, int] | None = None,
) -> tuple[torch.Tensor, torch.Tensor]:
"""Quantize a 2D tensor to FP8 using blockwise quantization with Triton.
This function provides high-performance FP8 quantization with minimal memory overhead.
All computations (abs, max, scale, clamp) are performed in a single Triton kernel,
eliminating intermediate tensor allocations.
Args:
x: Input tensor of shape (M, N), must be 2D.
weight_block_size: Block size for quantization as [BLOCK_M, BLOCK_N].
Defaults to [128, 128] if None.
Returns:
Tuple of (quantized_tensor, scale_tensor):
- quantized_tensor: FP8 quantized tensor of shape (M, N)
- scale_tensor: Per-block scale factors of shape (ceil(M/BLOCK_M), ceil(N/BLOCK_N))
This is the inverse scale (multiply to dequantize).
"""
assert x.dim() == 2, f"Expected 2D tensor, got {x.dim()}D"
# Default block size
BLOCK_M, BLOCK_N = 128, 128
if weight_block_size is not None:
BLOCK_M, BLOCK_N = weight_block_size[0], weight_block_size[1]
M, N = x.shape
# Pre-allocate output tensors (only memory allocation in this function)
y = torch.empty(M, N, device=x.device, dtype=FP8_DTYPE)
s = torch.empty(ceil_div(M, BLOCK_M), ceil_div(N, BLOCK_N), dtype=torch.float32, device=x.device)
# Grid: one program per block
def grid(meta):
return (triton.cdiv(M, meta["BLOCK_M"]), triton.cdiv(N, meta["BLOCK_N"]))
# Tune kernel parameters based on memory layout
if x.is_contiguous():
kwargs = {"BLOCK_M": BLOCK_M, "BLOCK_N": BLOCK_N, "num_warps": 8, "num_stages": 2}
else:
kwargs = {"BLOCK_M": BLOCK_M, "BLOCK_N": BLOCK_N, "num_warps": 1, "num_stages": 4}
# Launch kernel
_blockwise_cast_to_fp8_kernel[grid](
x,
y,
s,
*x.stride(),
*y.stride(),
*s.stride(),
M,
N,
1e-10, # eps for numerical stability
FP8_MIN,
FP8_MAX,
**kwargs,
)
return y, s
def scaled_fp8_blockwise_triton(
data_hp: torch.Tensor,
weight_block_size: list[int] | tuple[int, int],
) -> tuple[torch.Tensor, torch.Tensor]:
"""High-performance FP8 blockwise quantization using Triton kernel.
This is the recommended function to use for FP8 quantization when Triton is available.
It handles padding automatically and returns results in the expected format.
Args:
data_hp: Input high-precision tensor of shape (M, N).
weight_block_size: Block size for quantization as [BLOCK_M, BLOCK_N].
Returns:
Tuple of (fp8_data, descale):
- fp8_data: FP8 quantized tensor of original shape
- descale: Per-block descale factors (inverse of scale, for dequantization)
Raises:
RuntimeError: If Triton is not available.
"""
if not _TRITON_AVAILABLE:
raise RuntimeError("Triton is required for scaled_fp8_blockwise_triton but is not available")
block_size0 = weight_block_size[0]
block_size1 = weight_block_size[1]
# Save original shape for potential cropping
original_shape = data_hp.shape
# Pad dimensions to be multiples of block size if needed
pad_dim0 = (block_size0 - data_hp.shape[0] % block_size0) % block_size0
pad_dim1 = (block_size1 - data_hp.shape[1] % block_size1) % block_size1
if pad_dim0 > 0 or pad_dim1 > 0:
logger.debug(
f"Padding weight from {data_hp.shape} to "
f"({data_hp.shape[0] + pad_dim0}, {data_hp.shape[1] + pad_dim1}) "
f"for blockwise FP8 quantization"
)
data_hp = torch.nn.functional.pad(data_hp, (0, pad_dim1, 0, pad_dim0), mode="constant", value=0)
# Call Triton kernel
fp_data, scale = blockwise_cast_to_fp8_triton(data_hp, weight_block_size)
# Remove padding to restore original shape
if pad_dim0 > 0 or pad_dim1 > 0:
fp_data = fp_data[: original_shape[0], : original_shape[1]].contiguous()
# Return scale as descale (the Triton kernel returns scale, we need to return it as-is
# since it's already the inverse scale format expected by vLLM/SGLang)
return fp_data, scale
def _scaled_fp8_blockwise_pytorch(
data_hp: torch.Tensor,
weight_block_size: list[int] | tuple[int, int],
) -> tuple[torch.Tensor, torch.Tensor]:
"""PyTorch implementation of blockwise FP8 quantization.
Memory-optimized implementation that:
- Uses in-place operations where possible
- Explicitly deletes intermediate tensors
- Minimizes peak memory usage during quantization
Args:
data_hp: Input high-precision tensor of shape (M, N).
weight_block_size: Block size for quantization as [BLOCK_M, BLOCK_N].
Returns:
Tuple of (fp8_data, descale):
- fp8_data: FP8 quantized tensor
- descale: Per-block descale factors for dequantization
"""
block_size0 = weight_block_size[0]
block_size1 = weight_block_size[1]
assert block_size0 == block_size1, "Block sizes must be equal"
# Save unpadded shape for later cropping
original_shape = data_hp.shape
# Pad dimensions to be multiples of block size if needed
pad_dim0 = (block_size0 - data_hp.shape[0] % block_size0) % block_size0
pad_dim1 = (block_size1 - data_hp.shape[1] % block_size1) % block_size1
if pad_dim0 > 0 or pad_dim1 > 0:
logger.debug(
f"Padding weight from {data_hp.shape} to "
f"({data_hp.shape[0] + pad_dim0}, {data_hp.shape[1] + pad_dim1}) "
f"for blockwise FP8 quantization"
)
data_hp = torch.nn.functional.pad(data_hp, (0, pad_dim1, 0, pad_dim0), mode="constant", value=0)
# FP8
max_dtype = FP8_MAX
padded_shape = data_hp.shape
blk_m, blk_n = data_hp.shape[0] // block_size0, data_hp.shape[1] // block_size1
# Reshape and permute - these are views, no memory allocation
data_hp = data_hp.reshape(blk_m, block_size0, blk_n, block_size1)
data_hp = data_hp.permute(0, 2, 1, 3).contiguous()
# Flatten to (BLK_M, BLK_N, BLOCK_SIZE_M * BLOCK_SIZE_N) in float32 for precision
data_hp = data_hp.to(torch.float32).flatten(start_dim=2)
# Calculate max absolute value per block - use fused abs+amax
max_abs = data_hp.abs().amax(dim=-1, keepdim=True)
# Compute scale in-place where possible
scale_fp = torch.empty_like(max_abs)
torch.div(max_dtype, max_abs, out=scale_fp)
# Handle edge cases: zero and inf
scale_fp = torch.where(max_abs == 0, torch.ones_like(scale_fp), scale_fp)
scale_fp = torch.where(max_abs == torch.inf, torch.ones_like(scale_fp), scale_fp)
del max_abs # Free max_abs memory
# Compute descale before modifying data
descale_fp = torch.reciprocal(scale_fp)
# Scale and clamp in a memory-efficient way
data_hp.mul_(scale_fp)
del scale_fp # Free scale memory
data_hp.clamp_(min=-max_dtype, max=max_dtype)
# Convert to FP8
fp_data = data_hp.to(FP8_DTYPE)
del data_hp # Free float32 data
# Reshape back to original layout
fp_data = fp_data.reshape(blk_m, blk_n, block_size0, block_size1).permute(0, 2, 1, 3).reshape(padded_shape)
# Remove padding to restore original shape
if original_shape[0] != padded_shape[0] or original_shape[1] != padded_shape[1]:
fp_data = fp_data[: original_shape[0], : original_shape[1]].contiguous()
return fp_data, descale_fp
def scaled_fp8_blockwise(
data_hp: torch.Tensor,
weight_block_size: list[int] | tuple[int, int],
) -> tuple[torch.Tensor, torch.Tensor]:
"""Cast tensor from high precision to FP8 with blockwise quantization.
This function automatically selects the best available implementation:
1. Triton kernel (if available): Highest performance, minimal memory overhead
2. PyTorch fallback: Memory-optimized implementation using in-place operations
To disable Triton and force PyTorch fallback, set environment variable:
VERL_DISABLE_TRITON_FP8=1
Args:
data_hp: Input tensor of shape (M, N) in high precision (bf16/fp16/fp32).
weight_block_size: Block size for quantization as [BLOCK_M, BLOCK_N].
Returns:
Tuple of (fp8_data, descale):
- fp8_data: FP8 quantized tensor
- descale: Per-block descale factors for dequantization
"""
assert len(data_hp.shape) == 2, "Only 2d input tensor is supported"
# Use Triton kernel if available and not disabled
if _TRITON_AVAILABLE and not _DISABLE_TRITON_FP8:
return scaled_fp8_blockwise_triton(data_hp, weight_block_size)
# PyTorch fallback implementation (memory-optimized)
return _scaled_fp8_blockwise_pytorch(data_hp, weight_block_size)
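def _example_fp8_blockwise_roundtrip():  # pragma: no cover - editor-added sketch
    """Illustrative sketch (not part of the original source).

    Quantizes a small matrix with the PyTorch fallback and dequantizes it again
    to inspect the blockwise error. Assumes a torch build with float8_e4m3fn
    support; the shape is chosen as an exact multiple of the 128x128 block size.
    """
    block = 128
    w = torch.randn(256, 384, dtype=torch.float32)
    fp8_w, descale = _scaled_fp8_blockwise_pytorch(w, (block, block))
    blk_m, blk_n = w.shape[0] // block, w.shape[1] // block
    # descale has shape (blk_m, blk_n, 1): broadcast it over each flattened block.
    w_hat = (
        fp8_w.float().reshape(blk_m, block, blk_n, block).permute(0, 2, 1, 3).reshape(blk_m, blk_n, -1) * descale
    )
    w_hat = w_hat.reshape(blk_m, blk_n, block, block).permute(0, 2, 1, 3).reshape(w.shape)
    return (w - w_hat).abs().max()  # bounded by the per-block quantization step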
|
verl__utils__kernel__kernels.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Implementations of the linear cross entropy with token entropy kernel.
"""
import typing
from dataclasses import dataclass
import torch
import torch.distributed as dist
from verl.utils.device import get_device_capability, get_device_name, is_cuda_available
try:
import triton
import triton.language as tl
HAVE_TRITON = True
SUPPORT_CUDA_TMA = is_cuda_available and get_device_capability()[0] >= 9 and hasattr(tl, "make_tensor_descriptor")
except ImportError:
HAVE_TRITON = False
SUPPORT_CUDA_TMA = False
from verl.utils.device import get_torch_device
if not HAVE_TRITON:
from contextlib import contextmanager
from unittest.mock import MagicMock
@contextmanager
def null_decorator(*args, **kwargs):
if len(kwargs) == 0 and len(args) == 1 and callable(args[0]):
return args[0]
else:
def inner(func):
return func
return inner
triton = MagicMock()
triton.jit = null_decorator
triton.autotune = null_decorator
tl = MagicMock()
elif SUPPORT_CUDA_TMA:
# TMA descriptors require a global memory allocation
def alloc_fn(size: int, alignment: int, stream: typing.Optional[int]):
return torch.empty(size, device=get_device_name(), dtype=torch.int8)
# https://github.com/triton-lang/triton/commit/43625fc968b693ab51884ca95adbcf3e43483fd0
# Triton 3.5.0 stores allocators in ContextVar; values do not propagate to new
# threads by default. Some execution paths in verl use thread pools (e.g.,
# concurrent.futures), so we set a ContextVar *default* to avoid falling
# back to NullAllocator in worker threads.
try:
import contextvars
import triton.runtime._allocation as _triton_allocation
if isinstance(getattr(_triton_allocation, "_allocator", None), contextvars.ContextVar):
_triton_allocation._allocator = contextvars.ContextVar(
_triton_allocation._allocator.name,
default=alloc_fn,
)
except (ImportError, AttributeError):
pass
triton.set_allocator(alloc_fn)
@dataclass
class EntropyReductionEnum:
"""
Enum for the reduction method of cross entropy.
"""
_None = 0
_Sum = 1
_Mean = 2
def get_entropy_reduction_enum_number(reduction: str) -> int:
"""
Get the enum number for the reduction method of cross entropy.
"""
_enum = EntropyReductionEnum._None
if reduction == "none":
_enum = EntropyReductionEnum._None
elif reduction == "sum":
_enum = EntropyReductionEnum._Sum
elif reduction == "mean":
_enum = EntropyReductionEnum._Mean
else:
raise ValueError(f"Invalid reduction: {reduction}")
return _enum
def get_entropy_reduction_enum(ce_reduction: int) -> EntropyReductionEnum:
"""
Get the enum for the reduction method of cross entropy.
"""
_enum = EntropyReductionEnum._None
if ce_reduction == 0:
_enum = EntropyReductionEnum._None
elif ce_reduction == 1:
_enum = EntropyReductionEnum._Sum
elif ce_reduction == 2:
_enum = EntropyReductionEnum._Mean
else:
raise ValueError(f"Invalid ce_reduction: {ce_reduction}")
return _enum
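# Editor-added note (not part of the original source): the two helpers map in
# both directions between the string and integer forms, e.g.
#   get_entropy_reduction_enum_number("mean") == 2
#   get_entropy_reduction_enum(2) == EntropyReductionEnum._Mean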
@dataclass
class BackwardEnum:
"""
Enum for the backward method.
"""
_Total_Fuse_MN = (
0 # Fuse d_logits & d_hidden & d_weight, no intermediate storage, requires fp32 for d_hidden & d_weight
)
_Total_Separate = 1 # Store d_logits, no special requirements for d_hidden & d_weight
_Split_Dlogits_N = 2 # split d_logits along its N dimension, aka. vocab_size
_Split_Dlogits_M = 3 # split d_logits along its M dimension, aka. num_tokens
@dataclass
class Config:
"""Configuration for efficient entropy kernel operations.
Args:
_backward (BackwardEnum): Backward computation method. Defaults to BackwardEnum._Split_Dlogits_N.
_use_triton (bool): Whether to use Triton kernels for computation. Defaults to True.
"""
_backward: BackwardEnum = BackwardEnum._Split_Dlogits_N
_use_triton: bool = True
_config = Config()
def set_backward_method(backward_method: BackwardEnum):
"""
Set the backward method.
"""
global _config
_config._backward = backward_method
@triton.autotune(
configs=[triton.Config({"BLOCK_SIZE_M": 128, "BLOCK_SIZE_N": 256, "BLOCK_SIZE_K": 32}, num_stages=3, num_warps=8)],
key=["num_tokens", "hidden_size", "vocab_size"],
)
@triton.jit
def efficient_entropy_kernel_general_mainloop(
rank,
hidden_ptr,
weight_ptr,
labels_ptr,
num_tokens,
hidden_size,
vocab_size,
vocab_per_split,
stride_hidden_m: tl.int64,
stride_hidden_k: tl.int64,
stride_weight_n: tl.int64,
stride_weight_k: tl.int64,
max_ptr,
stride_max_m: tl.int64,
stride_max_n: tl.int64,
accu_ptr,
stride_accu_m: tl.int64,
stride_accu_n: tl.int64,
entropy_b_ptr,
stride_entropy_b_m: tl.int64,
stride_entropy_b_n: tl.int64,
global_logprobs_ptr,
stride_global_logprobs: tl.int64,
global_logprobs_scalar_ptr,
rcp_temperature: tl.float32,
# Meta-parameters
BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
BLOCK_SIZE_K: tl.constexpr,
USE_TMA: tl.constexpr,
):
"""
forward mainloop
"""
pid = tl.program_id(axis=0)
num_splits = (vocab_size + vocab_per_split - 1) // vocab_per_split
num_pid_m = tl.cdiv(num_tokens, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(vocab_per_split, BLOCK_SIZE_N)
pid_m = pid % num_pid_m
pid_n = pid // num_pid_m
if pid_m == 0 and pid_n == 0:
tl.store(global_logprobs_scalar_ptr, 0.0)
# create pointers for the first blocks of hidden
start_offs_am = pid_m * BLOCK_SIZE_M
offs_am = start_offs_am + tl.arange(0, BLOCK_SIZE_M)
offs_k = tl.arange(0, BLOCK_SIZE_K)
if USE_TMA:
# using TMA and device-side descriptor creation
hidden_desc = tl.make_tensor_descriptor(
hidden_ptr,
shape=[num_tokens, hidden_size],
strides=[stride_hidden_m, 1],
block_shape=[BLOCK_SIZE_M, BLOCK_SIZE_K],
)
weight_desc = tl.make_tensor_descriptor(
weight_ptr,
shape=[vocab_size, hidden_size],
strides=[stride_weight_n, 1],
block_shape=[BLOCK_SIZE_N, BLOCK_SIZE_K],
)
else:
hidden_ptrs = hidden_ptr + (offs_am[:, None] * stride_hidden_m + offs_k[None, :] * stride_hidden_k)
# load labels for this block
labels = tl.load(labels_ptr + offs_am, mask=offs_am < num_tokens)
# traverse over N dimension
# _max = tl.zeros((BLOCK_SIZE_M,), dtype=tl.float32)
_max = tl.full((BLOCK_SIZE_M,), -float("inf"), dtype=tl.float32)
_accu = tl.zeros((BLOCK_SIZE_M,), dtype=tl.float32)
_entropy_b = tl.zeros((BLOCK_SIZE_M,), dtype=tl.float32)
_logprobs = tl.zeros((BLOCK_SIZE_M,), dtype=tl.float32)
for n in range(0, num_pid_n):
start_offs_bn = pid_n * vocab_per_split + n * BLOCK_SIZE_N
offs_bn = start_offs_bn + tl.arange(0, BLOCK_SIZE_N)
logits = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
if not USE_TMA:
# weight_ptrs = weight_ptr + (offs_k[:, None] * stride_weight_k + offs_bn[None, :] * stride_weight_n)
weight_ptrs = weight_ptr + (offs_bn[:, None] * stride_weight_n + offs_k[None, :] * stride_weight_k)
# iterate over K dimension
for k in range(0, tl.cdiv(hidden_size, BLOCK_SIZE_K)):
if USE_TMA:
# load the next block of hidden and weight
start_offs_k = k * BLOCK_SIZE_K
_hidden = hidden_desc.load([start_offs_am, start_offs_k])
_weight = weight_desc.load([start_offs_bn, start_offs_k])
else:
# load the next block of hidden and weight
_hidden = tl.load(
hidden_ptrs,
mask=(offs_k[None, :] < hidden_size - k * BLOCK_SIZE_K) & (offs_am[:, None] < num_tokens),
other=0.0,
)
_weight = tl.load(
weight_ptrs,
mask=(offs_k[None, :] < hidden_size - k * BLOCK_SIZE_K)
& (offs_bn[:, None] < (min((pid_n + 1) * vocab_per_split, vocab_size))),
other=0.0,
)
# advance the ptrs to the next K block
hidden_ptrs += BLOCK_SIZE_K * stride_hidden_k
weight_ptrs += BLOCK_SIZE_K * stride_weight_k
# GEMM
logits = tl.dot(_hidden, _weight.trans(), logits)
if not USE_TMA:
# reset hidden_ptrs for next iteration
hidden_ptrs -= hidden_size * stride_hidden_k
# scale logits by temperature
logits *= rcp_temperature
# update global maximum
_max_old = _max
m_pid_n = tl.max(logits, axis=1)
_max = tl.maximum(_max_old, m_pid_n)
exp_logits = tl.exp(logits - _max[:, None])
coeff = tl.exp(_max_old - _max)
_accu = coeff * _accu + tl.sum(exp_logits, axis=1)
_entropy_b = _entropy_b * coeff + tl.sum(logits * exp_logits, axis=1)
label_mask = (offs_bn + rank * vocab_size)[None, :] == labels[:, None]
_logprobs += tl.sum(logits * label_mask, axis=1)
# store maximum
offs_max_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_max_n = pid_n
maximum_ptrs = max_ptr + offs_max_n * stride_max_n + offs_max_m * stride_max_m
tl.store(maximum_ptrs, _max, mask=(offs_max_m < num_tokens) & (offs_max_n < num_splits))
# store entropy
accu_ptrs = accu_ptr + offs_max_n * stride_accu_n + offs_max_m * stride_accu_m
tl.store(accu_ptrs, _accu, mask=(offs_max_m < num_tokens) & (offs_max_n[None] < num_splits))
entropy_b_ptrs = entropy_b_ptr + offs_max_n * stride_entropy_b_n + offs_max_m * stride_entropy_b_m
tl.store(entropy_b_ptrs, _entropy_b, mask=(offs_max_m < num_tokens) & (offs_max_n < num_splits))
# store logprobs
vocab_left_idx = pid_n * vocab_per_split + rank * vocab_size
vocab_right_idx = min((pid_n + 1) * vocab_per_split, vocab_size) + rank * vocab_size
mask = (labels >= vocab_left_idx) & (labels < vocab_right_idx)
mask &= offs_am < num_tokens
global_logprobs_ptrs = global_logprobs_ptr + offs_am * stride_global_logprobs
# tl.atomic_add(global_logprobs_ptrs, _logprobs, mask=mask)
tl.store(global_logprobs_ptrs, _logprobs, mask=mask)
@triton.autotune(configs=[triton.Config({"BLOCK_SIZE_M": 16, "BLOCK_SIZE_N": 64})], key=["num_tokens", "num_splits"])
@triton.jit
def efficient_entropy_triton_kernel_epilogue(
max_ptr,
stride_max_m: tl.int64,
stride_max_n: tl.int64,
num_tokens,
num_splits,
global_max_ptr,
stride_global_max: tl.int64,
accu_ptr,
stride_accu_m: tl.int64,
stride_accu_n: tl.int64,
global_accu_ptr,
stride_global_accu: tl.int64,
entropy_b_ptr,
stride_entropy_b_m: tl.int64,
stride_entropy_b_n: tl.int64,
global_entropy_b_ptr,
stride_global_entropy_b: tl.int64,
global_entropy_ptr,
stride_global_entropy: tl.int64,
global_logprobs_ptr,
stride_global_logprobs: tl.int64,
global_logprobs_scalar_ptr,
reduction: int,
BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
):
"""
    forward epilogue
"""
pid_m = tl.program_id(axis=0)
offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
global_max = tl.zeros((BLOCK_SIZE_M,), dtype=tl.float32)
global_accu = tl.zeros((BLOCK_SIZE_M,), dtype=tl.float32)
global_entropy_b = tl.zeros((BLOCK_SIZE_M,), dtype=tl.float32)
for pid_n in range(0, tl.cdiv(num_splits, BLOCK_SIZE_N)):
offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
max_ptrs = max_ptr + offs_m[:, None] * stride_max_m + offs_n[None, :] * stride_max_n
_max = tl.load(max_ptrs, mask=(offs_m[:, None] < num_tokens) & (offs_n[None, :] < num_splits), other=0.0)
accu_ptrs = accu_ptr + offs_m[:, None] * stride_accu_m + offs_n[None, :] * stride_accu_n
_accu = tl.load(accu_ptrs, mask=(offs_m[:, None] < num_tokens) & (offs_n[None, :] < num_splits), other=0.0)
entropy_b_ptrs = entropy_b_ptr + offs_m[:, None] * stride_entropy_b_m + offs_n[None, :] * stride_entropy_b_n
_entropy_b = tl.load(
entropy_b_ptrs, mask=(offs_m[:, None] < num_tokens) & (offs_n[None, :] < num_splits), other=0.0
)
# local reduction
_max_old = global_max
_local_max = tl.max(_max, axis=1)
global_max = tl.maximum(global_max, _local_max)
_scale = tl.exp(_max - global_max[:, None])
_coeff = tl.exp(_max_old - global_max)
global_accu = _coeff * global_accu + tl.sum(_scale * _accu, axis=1)
global_entropy_b = _coeff * global_entropy_b + tl.sum(_scale * _entropy_b, axis=1)
# store
maximum_ptrs = global_max_ptr + offs_m * stride_global_max
tl.store(maximum_ptrs, global_max, mask=offs_m < num_tokens)
# store entropy_b
global_entropy_b = tl.fdiv(global_entropy_b, global_accu) # entropy_b
tl.store(global_entropy_b_ptr + offs_m * stride_global_entropy_b, global_entropy_b, mask=offs_m < num_tokens)
# store entropy
global_accu_ptrs = global_accu_ptr + offs_m * stride_global_accu
tl.store(global_accu_ptrs, global_accu, mask=offs_m < num_tokens)
global_entropy = tl.log(global_accu) + global_max - global_entropy_b # entropy_a
global_entropy_ptrs = global_entropy_ptr + offs_m * stride_global_entropy
tl.store(global_entropy_ptrs, global_entropy, mask=offs_m < num_tokens)
# update logprobs
global_logprobs_ptrs = global_logprobs_ptr + offs_m * stride_global_logprobs
global_logprobs = tl.load(global_logprobs_ptrs, mask=offs_m < num_tokens)
global_logprobs = global_max + tl.log(global_accu) - global_logprobs
global_logprobs = -1 * global_logprobs
if reduction == 0:
tl.store(global_logprobs_ptrs, global_logprobs, mask=offs_m < num_tokens)
elif reduction == 1:
global_logprobs_scalar = tl.sum(global_logprobs, axis=0)
tl.atomic_add(global_logprobs_scalar_ptr, global_logprobs_scalar)
elif reduction == 2:
global_logprobs_scalar = tl.sum(global_logprobs, axis=0) / num_tokens.to(tl.float32)
tl.atomic_add(global_logprobs_scalar_ptr, global_logprobs_scalar)
@triton.autotune(configs=[triton.Config({"BLOCK_SIZE_M": 16, "BLOCK_SIZE_N": 64})], key=["num_tokens", "num_splits"])
@triton.jit
def efficient_entropy_triton_kernel_epilogue_tp(
num_tokens,
num_splits,
reduced_max_ptr,
stride_reduced_max_m: tl.int64,
stride_reduced_max_n: tl.int64,
original_max_ptr,
stride_original_max_m: tl.int64,
stride_original_max_n: tl.int64,
accu_ptr,
stride_accu_m: tl.int64,
stride_accu_n: tl.int64,
entropy_b_ptr,
stride_entropy_b_m: tl.int64,
stride_entropy_b_n: tl.int64,
global_max_ptr,
stride_global_max: tl.int64,
global_accu_ptr,
stride_global_accu: tl.int64,
global_entropy_b_ptr,
stride_global_entropy_b: tl.int64,
BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
):
pid_m = tl.program_id(axis=0)
offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
global_max = tl.zeros((BLOCK_SIZE_M,), dtype=tl.float32)
global_accu = tl.zeros((BLOCK_SIZE_M,), dtype=tl.float32)
global_entropy_b = tl.zeros((BLOCK_SIZE_M,), dtype=tl.float32)
for pid_n in range(0, tl.cdiv(num_splits, BLOCK_SIZE_N)):
offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
_reduced_max = tl.load(
reduced_max_ptr + offs_m[:, None] * stride_reduced_max_m + offs_n[None, :] * stride_reduced_max_n,
mask=(offs_m[:, None] < num_tokens) & (offs_n[None, :] < num_splits),
other=0.0,
)
_original_max = tl.load(
original_max_ptr + offs_m[:, None] * stride_original_max_m + offs_n[None, :] * stride_original_max_n,
mask=(offs_m[:, None] < num_tokens) & (offs_n[None, :] < num_splits),
other=0.0,
)
_accu = tl.load(
accu_ptr + offs_m[:, None] * stride_accu_m + offs_n[None, :] * stride_accu_n,
mask=(offs_m[:, None] < num_tokens) & (offs_n[None, :] < num_splits),
other=0.0,
)
# local reduce-max
_max_old = global_max
_local_max = tl.max(_reduced_max, axis=1)
global_max = tl.maximum(global_max, _local_max)
# update accumulate
_coeff = tl.exp(_max_old - global_max)
_scale = tl.exp(_original_max - global_max[:, None])
global_accu = _coeff * global_accu + tl.sum(_scale * _accu, axis=1)
# update entropy_b
_entropy_b = tl.load(
entropy_b_ptr + offs_m[:, None] * stride_entropy_b_m + offs_n[None, :] * stride_entropy_b_n,
mask=(offs_m[:, None] < num_tokens) & (offs_n[None, :] < num_splits),
other=0.0,
)
global_entropy_b = _coeff * global_entropy_b + tl.sum(_scale * _entropy_b, axis=1)
# store
tl.store(global_max_ptr + offs_m * stride_global_max, global_max, mask=offs_m < num_tokens)
tl.store(global_accu_ptr + offs_m * stride_global_accu, global_accu, mask=offs_m < num_tokens)
tl.store(global_entropy_b_ptr + offs_m * stride_global_entropy_b, global_entropy_b, mask=offs_m < num_tokens)
@triton.autotune(configs=[triton.Config({"BLOCK_SIZE_M": 16})], key=["num_tokens"])
@triton.jit
def efficient_entropy_triton_epilogue_tp_update(
num_tokens,
logprobs_ptr,
stride_logprobs: tl.int64,
maximum_ptr,
stride_maximum: tl.int64,
accumulate_ptr,
stride_accumulate: tl.int64,
entropy_b_ptr,
stride_entropy_b: tl.int64,
entropy_ptr,
stride_entropy: tl.int64,
logprobs_scalar_ptr,
reduction: int,
BLOCK_SIZE_M: tl.constexpr,
):
pid_m = tl.program_id(axis=0)
offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
maximum = tl.load(maximum_ptr + offs_m * stride_maximum, mask=offs_m < num_tokens)
accumulate = tl.load(accumulate_ptr + offs_m * stride_accumulate, mask=offs_m < num_tokens)
entropy_b = tl.load(entropy_b_ptr + offs_m * stride_entropy_b, mask=offs_m < num_tokens)
entropy_b = tl.fdiv(entropy_b, accumulate)
tl.store(entropy_b_ptr + offs_m * stride_entropy_b, entropy_b, mask=offs_m < num_tokens)
entropy = tl.log(accumulate) + maximum - entropy_b
tl.store(entropy_ptr + offs_m * stride_entropy, entropy, mask=offs_m < num_tokens)
logprobs = tl.load(logprobs_ptr + offs_m * stride_logprobs, mask=offs_m < num_tokens)
logprobs = maximum + tl.log(accumulate) - logprobs
logprobs = -1 * logprobs
if reduction == 0:
tl.store(logprobs_ptr + offs_m * stride_logprobs, logprobs, mask=offs_m < num_tokens)
elif reduction == 1:
logprobs_scalar = tl.sum(logprobs, axis=0)
tl.atomic_add(logprobs_scalar_ptr, logprobs_scalar)
elif reduction == 2:
logprobs_scalar = tl.sum(logprobs, axis=0) / num_tokens.to(tl.float32)
tl.atomic_add(logprobs_scalar_ptr, logprobs_scalar)
_dedicated_stream, _dedicated_events = None, None
def efficient_entropy_forward(
hidden: torch.Tensor,
weight: torch.Tensor,
labels: torch.Tensor,
reduction: typing.Optional[int] = 2,
temperature: typing.Optional[float] = 1.0,
dist_process_group: typing.Optional[dist.ProcessGroup] = None,
) -> list[torch.Tensor]:
"""
forward host function
"""
assert hidden.is_cuda and weight.is_cuda and labels.is_cuda
assert weight.device == hidden.device and labels.device == hidden.device
assert hidden.dim() == 2 and weight.dim() == 2 and labels.dim() == 1
assert hidden.is_contiguous() and weight.is_contiguous() and labels.is_contiguous()
assert hidden.shape[0] == labels.shape[0] and hidden.shape[1] == weight.shape[1]
_rank = 0 if dist_process_group is None else dist.get_rank(dist_process_group)
_world_size = 1 if dist_process_group is None else dist.get_world_size(dist_process_group)
if dist_process_group is not None and not hasattr(efficient_entropy_forward, "_initialized"):
global _dedicated_stream, _dedicated_events
_dedicated_stream = get_torch_device().Stream(hidden.device)
_dedicated_events = [get_torch_device().Event() for _ in range(2)]
efficient_entropy_forward._initialized = True
num_tokens, hidden_size = hidden.shape
num_tokens = labels.shape[0]
vocab_size, hidden_size = weight.shape
assert hidden_size % 128 == 0
REDUCTION = get_entropy_reduction_enum(reduction)
if REDUCTION == EntropyReductionEnum._None:
if dist_process_group is None:
logprobs = torch.empty((num_tokens,), device=hidden.device, dtype=torch.float32)
else:
logprobs = torch.zeros((num_tokens,), device=hidden.device, dtype=torch.float32)
elif REDUCTION in (EntropyReductionEnum._Sum, EntropyReductionEnum._Mean):
logprobs = torch.empty((), device=hidden.device, dtype=torch.float32)
else:
raise ValueError(f"Invalid reduction: {reduction}")
entropy = torch.empty((num_tokens,), device=hidden.device, dtype=torch.float32)
assert logprobs.is_contiguous() and entropy.is_contiguous()
maximum = torch.empty_like(entropy)
accumulate_and_entropy_b = torch.empty((num_tokens * 2,), device=hidden.device, dtype=torch.float32)
accumulate_and_entropy_b_view = accumulate_and_entropy_b.view(2, num_tokens)
accumulate = accumulate_and_entropy_b_view[0, :]
entropy_b = accumulate_and_entropy_b_view[1, :]
assert maximum.is_contiguous() and accumulate.is_contiguous() and entropy_b.is_contiguous()
vocab_per_split = 1024
assert vocab_per_split % 128 == 0
num_splits = (vocab_size + vocab_per_split - 1) // vocab_per_split
_max = torch.empty((num_tokens, num_splits), device=hidden.device, dtype=torch.float32)
_accu = torch.empty((num_tokens, num_splits), device=hidden.device, dtype=torch.float32)
_entropy_b = torch.empty((num_tokens, num_splits), device=hidden.device, dtype=torch.float32)
if REDUCTION == EntropyReductionEnum._None:
_logprobs = logprobs
else:
_logprobs = torch.empty((num_tokens,), device=hidden.device, dtype=torch.float32)
assert _accu.is_contiguous() and _entropy_b.is_contiguous() and _max.is_contiguous()
assert _accu.is_cuda and _entropy_b.is_cuda and _max.is_cuda
if _config._use_triton:
# 1D kernel launch, then split the tile
def mainloop_grid(meta):
return (triton.cdiv(num_tokens, meta["BLOCK_SIZE_M"]) * num_splits,)
efficient_entropy_kernel_general_mainloop[mainloop_grid](
_rank,
hidden,
weight,
labels,
num_tokens,
hidden_size,
vocab_size,
vocab_per_split,
hidden.stride(0),
hidden.stride(1),
weight.stride(0),
weight.stride(1),
_max,
_max.stride(0),
_max.stride(1),
_accu,
_accu.stride(0),
_accu.stride(1),
_entropy_b,
_entropy_b.stride(0),
_entropy_b.stride(1),
_logprobs,
_logprobs.stride(0),
logprobs,
1.0 / temperature,
USE_TMA=SUPPORT_CUDA_TMA and hidden.stride(1) == 1 and weight.stride(1) == 1,
)
else:
raise AssertionError("Triton is required for efficient entropy kernel")
# reduction on maximum and maximum_indices
def epilogue_grid(meta):
return (triton.cdiv(num_tokens, meta["BLOCK_SIZE_M"]),)
if dist_process_group is None:
efficient_entropy_triton_kernel_epilogue[epilogue_grid](
_max,
_max.stride(0),
_max.stride(1),
num_tokens,
num_splits,
maximum,
maximum.stride(0),
_accu,
_accu.stride(0),
_accu.stride(1),
accumulate,
accumulate.stride(0),
_entropy_b,
_entropy_b.stride(0),
_entropy_b.stride(1),
entropy_b,
entropy_b.stride(0),
entropy,
entropy.stride(0),
_logprobs,
_logprobs.stride(0),
logprobs,
REDUCTION,
)
else:
# tensor-parallel
_max_backup = _max.clone()
dist.all_reduce(_max, op=dist.ReduceOp.MAX, group=dist_process_group)
get_torch_device().current_stream().record_event(_dedicated_events[0])
with get_torch_device().stream(_dedicated_stream):
_dedicated_stream.wait_event(_dedicated_events[0])
dist.all_reduce(_logprobs, op=dist.ReduceOp.SUM, group=dist_process_group)
_dedicated_stream.record_event(_dedicated_events[1])
efficient_entropy_triton_kernel_epilogue_tp[epilogue_grid](
num_tokens,
num_splits,
_max,
_max.stride(0),
_max.stride(1),
_max_backup,
_max_backup.stride(0),
_max_backup.stride(1),
_accu,
_accu.stride(0),
_accu.stride(1),
_entropy_b,
_entropy_b.stride(0),
_entropy_b.stride(1),
maximum,
maximum.stride(0),
accumulate,
accumulate.stride(0),
entropy_b,
entropy_b.stride(0),
)
get_torch_device().current_stream().wait_event(_dedicated_events[1])
dist.all_reduce(accumulate_and_entropy_b, op=dist.ReduceOp.SUM, group=dist_process_group)
# update logprobs & entropy
efficient_entropy_triton_epilogue_tp_update[epilogue_grid](
num_tokens,
_logprobs,
_logprobs.stride(0),
maximum,
maximum.stride(0),
accumulate,
accumulate.stride(0),
entropy_b,
entropy_b.stride(0),
entropy,
entropy.stride(0),
logprobs,
REDUCTION,
)
return (logprobs, entropy, maximum, accumulate, entropy_b)
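def _example_efficient_entropy_forward():  # pragma: no cover - editor-added sketch
    """Illustrative sketch (not part of the original source).

    Single-device call without tensor parallelism; requires CUDA and Triton,
    and hidden_size must be a multiple of 128. The sizes below are arbitrary.
    """
    device = get_device_name()
    num_tokens, hidden_size, vocab_size = 512, 1024, 32000
    hidden = torch.randn(num_tokens, hidden_size, device=device, dtype=torch.bfloat16)
    weight = torch.randn(vocab_size, hidden_size, device=device, dtype=torch.bfloat16)
    labels = torch.randint(0, vocab_size, (num_tokens,), device=device)
    # reduction=0 ("none") keeps per-token logprobs; entropy is always per token.
    logprobs, entropy, *_ = efficient_entropy_forward(hidden, weight, labels, reduction=0)
    return logprobs.shape, entropy.shape  # both torch.Size([num_tokens])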
# NOTE: merge d_weight & d_hidden here, split along M & N
@triton.autotune(
configs=[
triton.Config(
{"BLOCK_SIZE_M": 128, "BLOCK_SIZE_N": 128, "BLOCK_SIZE_K": 32, "GROUP_SIZE_M": 16},
num_stages=3,
num_warps=8,
)
],
key=["num_tokens", "hidden_size", "vocab_size"],
)
@triton.jit
def efficient_entropy_backward_kernel_general_mainloop_MN(
num_tokens: int,
hidden_size: int,
vocab_size: int,
rank: int,
hidden_ptr,
stride_hidden_m: tl.int64,
stride_hidden_k: tl.int64,
weight_ptr,
stride_weight_n: tl.int64,
stride_weight_k: tl.int64,
labels_ptr,
stride_labels: tl.int64,
maximum_ptr,
stride_maximum: tl.int64,
accu_ptr,
stride_accu: tl.int64,
d_entropy_ptr,
stride_d_entropy: tl.int64,
d_logprobs_ptr,
stride_d_logprobs: tl.int64,
reduction: int,
entropy_b_ptr,
stride_entropy_b: tl.int64,
d_hidden_ptr,
stride_d_hidden_m: tl.int64,
stride_d_hidden_k: tl.int64,
d_weight_ptr,
stride_d_weight_n: tl.int64,
stride_d_weight_k: tl.int64,
rcp_temperature: tl.float32,
BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
BLOCK_SIZE_K: tl.constexpr,
GROUP_SIZE_M: tl.constexpr,
USE_TMA: tl.constexpr,
):
"""
backward mainloop, where d_logits & d_hidden & d_weight are fused
"""
# block swizzling
# pid = tl.program_id(axis=0)
# num_pid_m = tl.cdiv(num_tokens, BLOCK_SIZE_M)
# pid_m = pid % num_pid_m
# pid_n = pid // num_pid_m
pid = tl.program_id(axis=0)
num_pid_m = tl.cdiv(num_tokens, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(vocab_size, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + ((pid % num_pid_in_group) % group_size_m)
pid_n = (pid % num_pid_in_group) // group_size_m
start_offs_am = pid_m * BLOCK_SIZE_M
offs_am = start_offs_am + tl.arange(0, BLOCK_SIZE_M)
start_offs_bn = pid_n * BLOCK_SIZE_N
offs_bn = start_offs_bn + tl.arange(0, BLOCK_SIZE_N)
offs_k = tl.arange(0, BLOCK_SIZE_K)
if USE_TMA:
# using TMA and device-side descriptor creation
hidden_desc = tl.make_tensor_descriptor(
hidden_ptr,
shape=[num_tokens, hidden_size],
strides=[stride_hidden_m, 1],
block_shape=[BLOCK_SIZE_M, BLOCK_SIZE_K],
)
weight_desc = tl.make_tensor_descriptor(
weight_ptr,
shape=[vocab_size, hidden_size],
strides=[stride_weight_n, 1],
block_shape=[BLOCK_SIZE_N, BLOCK_SIZE_K],
)
maximum_ptrs = maximum_ptr + offs_am * stride_maximum
maximum = tl.load(maximum_ptrs, mask=offs_am < num_tokens, other=0.0)
accu_ptrs = accu_ptr + offs_am * stride_accu
accu = tl.load(accu_ptrs, mask=offs_am < num_tokens, other=1e-6) # epsilon to avoid division by zero
accu_rcp = tl.fdiv(1.0, accu)
d_entropy_ptrs = d_entropy_ptr + offs_am * stride_d_entropy
d_entropy = tl.load(d_entropy_ptrs, mask=offs_am < num_tokens, other=0.0)
if reduction == 0: # none
d_logprobs_ptrs = d_logprobs_ptr + offs_am * stride_d_logprobs
d_logprobs = tl.load(d_logprobs_ptrs, mask=offs_am < num_tokens, other=0.0)
elif reduction == 1: # sum
d_logprobs = tl.load(d_logprobs_ptr)
d_logprobs = tl.broadcast_to(d_logprobs, (BLOCK_SIZE_M,))
else: # mean
d_logprobs = tl.fdiv(tl.load(d_logprobs_ptr), num_tokens.to(tl.float32))
d_logprobs = tl.broadcast_to(d_logprobs, (BLOCK_SIZE_M,))
d_logprobs = -1 * d_logprobs
entropy_b_ptrs = entropy_b_ptr + offs_am * stride_entropy_b
entropy_b = tl.load(entropy_b_ptrs, mask=offs_am < num_tokens, other=0.0)
if not USE_TMA:
hidden_ptrs = hidden_ptr + (offs_am[:, None] * stride_hidden_m + offs_k[None, :] * stride_hidden_k)
# weight_ptrs = weight_ptr + (offs_k[:, None] * stride_weight_k + offs_bn[None, :] * stride_weight_n)
weight_ptrs = weight_ptr + (offs_bn[:, None] * stride_weight_n + offs_k[None, :] * stride_weight_k)
labels_ptrs = labels_ptr + offs_am * stride_labels
labels = tl.load(labels_ptrs, mask=offs_am < num_tokens, other=0)
d_hidden_ptrs = d_hidden_ptr + offs_am[:, None] * stride_d_hidden_m + offs_k[None, :] * stride_d_hidden_k
# d_weight_ptrs = d_weight_ptr + offs_k[:, None] * stride_d_weight_k + offs_bn[None, :] * stride_d_weight_n
d_weight_ptrs = d_weight_ptr + offs_bn[:, None] * stride_d_weight_n + offs_k[None, :] * stride_d_weight_k
logits = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for k in range(0, tl.cdiv(hidden_size, BLOCK_SIZE_K)):
if USE_TMA:
start_offs_k = k * BLOCK_SIZE_K
_hidden = hidden_desc.load([start_offs_am, start_offs_k])
_weight = weight_desc.load([start_offs_bn, start_offs_k])
else:
_hidden = tl.load(
hidden_ptrs,
mask=(offs_k[None, :] < hidden_size - k * BLOCK_SIZE_K) & (offs_am[:, None] < num_tokens),
other=0.0,
)
_weight = tl.load(
weight_ptrs,
mask=(offs_k[None, :] < hidden_size - k * BLOCK_SIZE_K) & (offs_bn[:, None] < vocab_size),
other=0.0,
)
hidden_ptrs += BLOCK_SIZE_K * stride_hidden_k
weight_ptrs += BLOCK_SIZE_K * stride_weight_k
logits = tl.dot(_hidden, _weight.T, logits)
if not USE_TMA:
hidden_ptrs -= hidden_size * stride_hidden_k
weight_ptrs -= hidden_size * stride_weight_k
# scale logits by temperature
logits *= rcp_temperature
exp_logits = tl.exp(logits - maximum[:, None])
mask = (offs_bn + rank * vocab_size)[None, :] == labels[:, None]
d_logits = d_logprobs[:, None] * (exp_logits * accu_rcp[:, None] - mask)
d_logits += d_entropy[:, None] * (-exp_logits * accu_rcp[:, None]) * (logits - entropy_b[:, None])
# scale d_logits by temperature
d_logits *= rcp_temperature
# loop for d_weight & d_hidden
for k in range(0, tl.cdiv(hidden_size, BLOCK_SIZE_K)):
start_offs_k = k * BLOCK_SIZE_K
if USE_TMA:
_hidden = hidden_desc.load([start_offs_am, start_offs_k])
else:
_hidden = tl.load(
hidden_ptrs,
mask=(offs_k[None, :] < hidden_size - k * BLOCK_SIZE_K) & (offs_am[:, None] < num_tokens),
other=0.0,
)
# _d_weight = tl.dot(tl.trans(_hidden).to(tl.float32), d_logits)
# tl.atomic_add(d_weight_ptrs,
# _d_weight,
# mask=(offs_k[:, None] < hidden_size - k * BLOCK_SIZE_K) & (offs_bn[None, :] < vocab_size))
_d_weight = tl.dot(d_logits.trans(), _hidden.to(tl.float32))
tl.atomic_add(
d_weight_ptrs,
_d_weight,
mask=(offs_k[None, :] < hidden_size - k * BLOCK_SIZE_K) & (offs_bn[:, None] < vocab_size),
)
if USE_TMA:
_weight = weight_desc.load([start_offs_bn, start_offs_k])
else:
# _weight = tl.load(
# weight_ptrs,
# mask=(offs_k[:, None] < hidden_size - k * BLOCK_SIZE_K) & (offs_bn[None, :] < vocab_size),
# other=0.0
# )
# _d_hidden = tl.dot(d_logits, tl.trans(_weight).to(tl.float32))
_weight = tl.load(
weight_ptrs,
mask=(offs_k[None, :] < hidden_size - k * BLOCK_SIZE_K) & (offs_bn[:, None] < vocab_size),
other=0.0,
)
_d_hidden = tl.dot(d_logits, _weight.to(tl.float32))
tl.atomic_add(
d_hidden_ptrs,
_d_hidden,
mask=(offs_k[None, :] < hidden_size - k * BLOCK_SIZE_K) & (offs_am[:, None] < num_tokens),
)
if not USE_TMA:
hidden_ptrs += BLOCK_SIZE_K * stride_hidden_k
weight_ptrs += BLOCK_SIZE_K * stride_weight_k
d_hidden_ptrs += BLOCK_SIZE_K * stride_d_hidden_k
d_weight_ptrs += BLOCK_SIZE_K * stride_d_weight_k
@triton.autotune(
configs=[
triton.Config(
{"BLOCK_SIZE_M": 128, "BLOCK_SIZE_N": 128, "BLOCK_SIZE_K": 32, "GROUP_SIZE_M": 16},
num_stages=3,
num_warps=8,
),
],
key=["num_tokens", "hidden_size", "vocab_size"],
)
@triton.jit
def efficient_entropy_backward_kernel_d_hidden(
num_tokens: int,
hidden_size: int,
vocab_size: int,
rank: int,
hidden_ptr,
stride_hidden_m: tl.int64,
stride_hidden_k: tl.int64,
weight_ptr,
stride_weight_n: tl.int64,
stride_weight_k: tl.int64,
labels_ptr,
stride_labels: tl.int64,
maximum_ptr,
stride_maximum: tl.int64,
accu_ptr,
stride_accu: tl.int64,
d_entropy_ptr,
stride_d_entropy: tl.int64,
d_logprobs_ptr,
stride_d_logprobs: tl.int64,
reduction: int,
entropy_b_ptr,
stride_entropy_b: tl.int64,
d_hidden_ptr,
stride_d_hidden_m: tl.int64,
stride_d_hidden_k: tl.int64,
rcp_temperature: tl.float32,
BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
BLOCK_SIZE_K: tl.constexpr,
GROUP_SIZE_M: tl.constexpr,
):
"""
backward d_hidden
"""
pid = tl.program_id(axis=0)
num_pid_m = tl.cdiv(num_tokens, BLOCK_SIZE_M)
pid_m = pid % num_pid_m
pid_k = pid // num_pid_m
offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_k = tl.arange(0, BLOCK_SIZE_K)
result_offs_k = pid_k * BLOCK_SIZE_K + offs_k
maximum = tl.load(maximum_ptr + offs_m * stride_maximum, mask=offs_m < num_tokens, other=0.0)
accu = tl.load(accu_ptr + offs_m * stride_accu, mask=offs_m < num_tokens, other=1e-6)
accu_rcp = tl.fdiv(1.0, accu)
d_entropy = tl.load(d_entropy_ptr + offs_m * stride_d_entropy, mask=offs_m < num_tokens, other=0.0)
if reduction == 0:
d_logprobs = tl.load(d_logprobs_ptr + offs_m * stride_d_logprobs, mask=offs_m < num_tokens, other=0.0)
elif reduction == 1:
d_logprobs = tl.load(d_logprobs_ptr)
d_logprobs = tl.broadcast_to(d_logprobs, (BLOCK_SIZE_M,))
else:
d_logprobs = tl.fdiv(tl.load(d_logprobs_ptr), num_tokens.to(tl.float32))
d_logprobs = tl.broadcast_to(d_logprobs, (BLOCK_SIZE_M,))
d_logprobs = -1 * d_logprobs
entropy_b = tl.load(entropy_b_ptr + offs_m * stride_entropy_b, mask=offs_m < num_tokens, other=0.0)
labels = tl.load(labels_ptr + offs_m * stride_labels, mask=offs_m < num_tokens, other=0)
# iterate over vocab_size
d_hidden = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_K), dtype=tl.float32)
for n in range(0, tl.cdiv(vocab_size, BLOCK_SIZE_N)):
offs_n = n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
hidden_ptrs = hidden_ptr + (offs_m[:, None] * stride_hidden_m + offs_k[None, :] * stride_hidden_k)
weight_ptrs = weight_ptr + (offs_n[:, None] * stride_weight_n + offs_k[None, :] * stride_weight_k)
# iterate over hidden_size to get logits
logits = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for k in range(0, tl.cdiv(hidden_size, BLOCK_SIZE_K)):
_hidden = tl.load(
hidden_ptrs,
mask=(offs_k[None, :] < hidden_size - k * BLOCK_SIZE_K) & (offs_m[:, None] < num_tokens),
other=0.0,
)
_weight = tl.load(
weight_ptrs,
mask=(offs_k[None, :] < hidden_size - k * BLOCK_SIZE_K) & (offs_n[:, None] < vocab_size),
other=0.0,
)
logits = tl.dot(_hidden, _weight.trans(), logits)
hidden_ptrs += BLOCK_SIZE_K * stride_hidden_k
weight_ptrs += BLOCK_SIZE_K * stride_weight_k
# scale logits by temperature
logits *= rcp_temperature
exp_logits = tl.exp(logits - maximum[:, None])
mask = (offs_n + rank * vocab_size)[None, :] == labels[:, None]
d_logits = d_logprobs[:, None] * (exp_logits * accu_rcp[:, None] - mask)
d_logits += d_entropy[:, None] * (-exp_logits * accu_rcp[:, None]) * (logits - entropy_b[:, None])
# scale d_logits
d_logits *= rcp_temperature
# calculate d_hidden
weight_ptrs = weight_ptr + (offs_n[:, None] * stride_weight_n + result_offs_k[None, :] * stride_weight_k)
_weight = tl.load(
weight_ptrs, mask=(result_offs_k[None, :] < hidden_size) & (offs_n[:, None] < vocab_size), other=0.0
)
d_hidden = tl.dot(d_logits.to(weight_ptr.dtype.element_ty), _weight, d_hidden)
# write back
tl.store(
d_hidden_ptr + offs_m[:, None] * stride_d_hidden_m + result_offs_k[None, :] * stride_d_hidden_k,
d_hidden,
mask=(offs_m[:, None] < num_tokens) & (result_offs_k[None, :] < hidden_size),
)
@triton.autotune(
configs=[
triton.Config(
{"BLOCK_SIZE_M": 128, "BLOCK_SIZE_N": 128, "BLOCK_SIZE_K": 32, "GROUP_SIZE_M": 16},
num_stages=3,
num_warps=8,
),
],
key=["num_tokens", "hidden_size", "vocab_size"],
)
@triton.jit
def efficient_entropy_backward_kernel_d_weight(
num_tokens: int,
hidden_size: int,
vocab_size: int,
rank: int,
hidden_ptr,
stride_hidden_m: tl.int64,
stride_hidden_k: tl.int64,
weight_ptr,
stride_weight_n: tl.int64,
stride_weight_k: tl.int64,
labels_ptr,
stride_labels: tl.int64,
maximum_ptr,
stride_maximum: tl.int64,
accu_ptr,
stride_accu: tl.int64,
d_entropy_ptr,
stride_d_entropy: tl.int64,
d_logprobs_ptr,
stride_d_logprobs: tl.int64,
reduction: int,
entropy_b_ptr,
stride_entropy_b: tl.int64,
d_weight_ptr,
stride_d_weight_n: tl.int64,
stride_d_weight_k: tl.int64,
rcp_temperature: tl.float32,
BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
BLOCK_SIZE_K: tl.constexpr,
GROUP_SIZE_M: tl.constexpr,
):
    """
    Backward kernel that computes only d_weight.
    """
    pid = tl.program_id(axis=0)
num_pid_n = tl.cdiv(vocab_size, BLOCK_SIZE_N)
pid_n = pid % num_pid_n
pid_k = pid // num_pid_n
offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
offs_k = tl.arange(0, BLOCK_SIZE_K)
result_offs_k = pid_k * BLOCK_SIZE_K + offs_k
d_weight = tl.zeros((BLOCK_SIZE_N, BLOCK_SIZE_K), dtype=tl.float32)
for m in range(0, tl.cdiv(num_tokens, BLOCK_SIZE_M)):
offs_m = m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
maximum = tl.load(maximum_ptr + offs_m * stride_maximum, mask=offs_m < num_tokens, other=0.0)
accu = tl.load(accu_ptr + offs_m * stride_accu, mask=offs_m < num_tokens, other=1e-6)
accu_rcp = tl.fdiv(1.0, accu)
d_entropy = tl.load(d_entropy_ptr + offs_m * stride_d_entropy, mask=offs_m < num_tokens, other=0.0)
if reduction == 0:
d_logprobs = tl.load(d_logprobs_ptr + offs_m * stride_d_logprobs, mask=offs_m < num_tokens, other=0.0)
elif reduction == 1:
d_logprobs = tl.load(d_logprobs_ptr)
d_logprobs = tl.broadcast_to(d_logprobs, (BLOCK_SIZE_M,))
else:
d_logprobs = tl.fdiv(tl.load(d_logprobs_ptr), num_tokens.to(tl.float32))
d_logprobs = tl.broadcast_to(d_logprobs, (BLOCK_SIZE_M,))
d_logprobs = -1 * d_logprobs
entropy_b = tl.load(entropy_b_ptr + offs_m * stride_entropy_b, mask=offs_m < num_tokens, other=0.0)
labels = tl.load(labels_ptr + offs_m * stride_labels, mask=offs_m < num_tokens, other=0)
hidden_ptrs = hidden_ptr + (offs_m[:, None] * stride_hidden_m + offs_k[None, :] * stride_hidden_k)
weight_ptrs = weight_ptr + (offs_n[:, None] * stride_weight_n + offs_k[None, :] * stride_weight_k)
logits = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for k in range(0, tl.cdiv(hidden_size, BLOCK_SIZE_K)):
_hidden = tl.load(
hidden_ptrs,
mask=(offs_k[None, :] < hidden_size - k * BLOCK_SIZE_K) & (offs_m[:, None] < num_tokens),
other=0.0,
)
_weight = tl.load(
weight_ptrs,
mask=(offs_k[None, :] < hidden_size - k * BLOCK_SIZE_K) & (offs_n[:, None] < vocab_size),
other=0.0,
)
logits = tl.dot(_hidden, _weight.trans(), logits)
hidden_ptrs += BLOCK_SIZE_K * stride_hidden_k
weight_ptrs += BLOCK_SIZE_K * stride_weight_k
logits *= rcp_temperature
exp_logits = tl.exp(logits - maximum[:, None])
mask = (offs_n + rank * vocab_size)[None, :] == labels[:, None]
d_logits = d_logprobs[:, None] * (exp_logits * accu_rcp[:, None] - mask)
d_logits += d_entropy[:, None] * (-exp_logits * accu_rcp[:, None]) * (logits - entropy_b[:, None])
d_logits *= rcp_temperature
hidden_ptrs = hidden_ptr + (offs_m[:, None] * stride_hidden_m + result_offs_k[None, :] * stride_hidden_k)
_hidden = tl.load(
hidden_ptrs, mask=(result_offs_k[None, :] < hidden_size) & (offs_m[:, None] < num_tokens), other=0.0
)
d_weight = tl.dot(d_logits.to(d_weight_ptr.dtype.element_ty).trans(), _hidden, d_weight)
# write back
tl.store(
d_weight_ptr + offs_n[:, None] * stride_d_weight_n + result_offs_k[None, :] * stride_d_weight_k,
d_weight,
mask=(offs_n[:, None] < vocab_size) & (result_offs_k[None, :] < hidden_size),
)
# NOTE: split tiles from the perspective of d_logits
@triton.autotune(
configs=[
triton.Config(
{"BLOCK_SIZE_M": 128, "BLOCK_SIZE_N": 256, "BLOCK_SIZE_K": 32, "GROUP_SIZE_M": 16},
num_stages=3,
num_warps=8,
),
],
key=["num_tokens", "hidden_size", "vocab_size"],
)
@triton.jit
def efficient_entropy_backward_kernel_general_d_logits(
num_tokens: int,
hidden_size: int,
vocab_size: int,
rank: int,
hidden_ptr,
stride_hidden_m: tl.int64,
stride_hidden_k: tl.int64,
weight_ptr,
stride_weight_n: tl.int64,
stride_weight_k: tl.int64,
labels_ptr,
stride_labels: tl.int64,
maximum_ptr,
stride_maximum: tl.int64,
accu_ptr,
stride_accu: tl.int64,
d_entropy_ptr,
stride_d_entropy: tl.int64,
d_logprobs_ptr,
stride_d_logprobs: tl.int64,
reduction: int,
entropy_b_ptr,
stride_entropy_b,
d_logits_ptr,
stride_d_logits_m: tl.int64,
stride_d_logits_n: tl.int64,
rcp_temperature: tl.float32,
BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
BLOCK_SIZE_K: tl.constexpr,
GROUP_SIZE_M: tl.constexpr,
USE_TMA: tl.constexpr,
):
"""
backward d_logits
"""
# block swizzling
# pid = tl.program_id(axis=0)
# num_pid_m = tl.cdiv(num_tokens, BLOCK_SIZE_M)
# pid_m = pid % num_pid_m
# pid_n = pid // num_pid_m
pid = tl.program_id(axis=0)
num_pid_m = tl.cdiv(num_tokens, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(vocab_size, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + ((pid % num_pid_in_group) % group_size_m)
pid_n = (pid % num_pid_in_group) // group_size_m
start_offs_am = pid_m * BLOCK_SIZE_M
offs_am = start_offs_am + tl.arange(0, BLOCK_SIZE_M)
start_offs_bn = pid_n * BLOCK_SIZE_N
offs_bn = start_offs_bn + tl.arange(0, BLOCK_SIZE_N)
offs_k = tl.arange(0, BLOCK_SIZE_K)
maximum_ptrs = maximum_ptr + offs_am * stride_maximum
maximum = tl.load(maximum_ptrs, mask=offs_am < num_tokens, other=0.0)
accu_ptrs = accu_ptr + offs_am * stride_accu
accu = tl.load(accu_ptrs, mask=offs_am < num_tokens, other=1e-6) # epsilon to avoid division by zero
accu_rcp = tl.fdiv(1.0, accu)
d_entropy_ptrs = d_entropy_ptr + offs_am * stride_d_entropy
d_entropy = tl.load(d_entropy_ptrs, mask=offs_am < num_tokens, other=0.0)
if reduction == 0: # none
d_logprobs_ptrs = d_logprobs_ptr + offs_am * stride_d_logprobs
d_logprobs = tl.load(d_logprobs_ptrs, mask=offs_am < num_tokens, other=0.0)
elif reduction == 1: # sum
d_logprobs = tl.load(d_logprobs_ptr)
d_logprobs = tl.broadcast_to(d_logprobs, (BLOCK_SIZE_M,))
else: # mean
d_logprobs = tl.fdiv(tl.load(d_logprobs_ptr), num_tokens.to(tl.float32))
d_logprobs = tl.broadcast_to(d_logprobs, (BLOCK_SIZE_M,))
d_logprobs = -1 * d_logprobs
entropy_b_ptrs = entropy_b_ptr + offs_am * stride_entropy_b
entropy_b = tl.load(entropy_b_ptrs, mask=offs_am < num_tokens, other=0.0)
labels_ptrs = labels_ptr + offs_am * stride_labels
labels = tl.load(labels_ptrs, mask=offs_am < num_tokens, other=0)
logits = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
if USE_TMA:
# using TMA and device-side descriptor creation
hidden_desc = tl.make_tensor_descriptor(
hidden_ptr,
shape=[num_tokens, hidden_size],
strides=[stride_hidden_m, 1],
block_shape=[BLOCK_SIZE_M, BLOCK_SIZE_K],
)
weight_desc = tl.make_tensor_descriptor(
weight_ptr,
shape=[vocab_size, hidden_size],
strides=[stride_weight_n, 1],
block_shape=[BLOCK_SIZE_N, BLOCK_SIZE_K],
)
else:
hidden_ptrs = hidden_ptr + (offs_am[:, None] * stride_hidden_m + offs_k[None, :] * stride_hidden_k)
# weight_ptrs = weight_ptr + (offs_k[:, None] * stride_weight_k + offs_bn[None, :] * stride_weight_n)
weight_ptrs = weight_ptr + (offs_bn[:, None] * stride_weight_n + offs_k[None, :] * stride_weight_k)
for k in range(0, tl.cdiv(hidden_size, BLOCK_SIZE_K)):
if USE_TMA:
start_offs_k = k * BLOCK_SIZE_K
_hidden = hidden_desc.load([start_offs_am, start_offs_k])
_weight = weight_desc.load([start_offs_bn, start_offs_k])
else:
_hidden = tl.load(
hidden_ptrs,
mask=(offs_k[None, :] < hidden_size - k * BLOCK_SIZE_K) & (offs_am[:, None] < num_tokens),
other=0.0,
)
_weight = tl.load(
weight_ptrs,
mask=(offs_k[None, :] < hidden_size - k * BLOCK_SIZE_K) & (offs_bn[:, None] < vocab_size),
other=0.0,
)
hidden_ptrs += BLOCK_SIZE_K * stride_hidden_k
weight_ptrs += BLOCK_SIZE_K * stride_weight_k
logits = tl.dot(_hidden, _weight.T, logits)
if not USE_TMA:
hidden_ptrs -= hidden_size * stride_hidden_k
weight_ptrs -= hidden_size * stride_weight_k
# scale logits by temperature
logits *= rcp_temperature
exp_logits = tl.exp(logits - maximum[:, None])
mask = (offs_bn + rank * vocab_size)[None, :] == labels[:, None]
d_logits = d_logprobs[:, None] * (exp_logits * accu_rcp[:, None] - mask)
d_logits += d_entropy[:, None] * (-exp_logits * accu_rcp[:, None]) * (logits - entropy_b[:, None])
# scale d_logits by temperature
d_logits *= rcp_temperature
# store d_logits
d_logits_ptrs = d_logits_ptr + offs_am[:, None] * stride_d_logits_m + offs_bn[None, :] * stride_d_logits_n
tl.store(
d_logits_ptrs,
d_logits, # will be implicitly converted to d_logits_ptrs.dtype.element_ty
mask=(offs_am[:, None] < num_tokens) & (offs_bn[None, :] < vocab_size),
)
@triton.autotune(
configs=[
triton.Config(
{"BLOCK_SIZE_M": 128, "BLOCK_SIZE_N": 256, "BLOCK_SIZE_K": 32, "GROUP_SIZE_M": 16},
num_stages=3,
num_warps=8,
),
],
key=["num_tokens", "hidden_size", "vocab_size"],
)
@triton.jit
def efficient_entropy_backward_kernel_general_d_logits_split_N(
split_idx: int,
num_tokens: int,
hidden_size: int,
vocab_size: int,
vocab_per_split: int,
rank: int,
hidden_ptr,
stride_hidden_m: tl.int64,
stride_hidden_k: tl.int64,
weight_ptr,
stride_weight_n: tl.int64,
stride_weight_k: tl.int64,
labels_ptr,
stride_labels: tl.int64,
maximum_ptr,
stride_maximum: tl.int64,
accu_ptr,
stride_accu: tl.int64,
d_entropy_ptr,
stride_d_entropy: tl.int64,
d_logprobs_ptr,
stride_d_logprobs: tl.int64,
reduction: int,
entropy_b_ptr,
stride_entropy_b,
d_logits_ptr,
stride_d_logits_m: tl.int64,
stride_d_logits_n: tl.int64,
rcp_temperature: tl.float32,
BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
BLOCK_SIZE_K: tl.constexpr,
GROUP_SIZE_M: tl.constexpr,
USE_TMA: tl.constexpr,
):
    """
    Backward kernel that materializes d_logits for a single vocabulary split of size vocab_per_split.
    """
    pid = tl.program_id(axis=0)
num_pid_m = tl.cdiv(num_tokens, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(vocab_per_split, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + ((pid % num_pid_in_group) % group_size_m)
pid_n = (pid % num_pid_in_group) // group_size_m
start_offs_am = pid_m * BLOCK_SIZE_M
offs_am = start_offs_am + tl.arange(0, BLOCK_SIZE_M)
start_offs_bn = split_idx * vocab_per_split + pid_n * BLOCK_SIZE_N
offs_bn = start_offs_bn + tl.arange(0, BLOCK_SIZE_N)
offs_k = tl.arange(0, BLOCK_SIZE_K)
maximum = tl.load(maximum_ptr + offs_am * stride_maximum, mask=offs_am < num_tokens, other=0.0)
accu = tl.load(accu_ptr + offs_am * stride_accu, mask=offs_am < num_tokens, other=1e-6)
accu_rcp = tl.fdiv(1.0, accu)
d_entropy = tl.load(d_entropy_ptr + offs_am * stride_d_entropy, mask=offs_am < num_tokens, other=0.0)
if reduction == 0:
d_logprobs = tl.load(d_logprobs_ptr + offs_am * stride_d_logprobs, mask=offs_am < num_tokens, other=0.0)
elif reduction == 1:
d_logprobs = tl.load(d_logprobs_ptr)
d_logprobs = tl.broadcast_to(d_logprobs, (BLOCK_SIZE_M,))
else:
d_logprobs = tl.fdiv(tl.load(d_logprobs_ptr), num_tokens.to(tl.float32))
d_logprobs = tl.broadcast_to(d_logprobs, (BLOCK_SIZE_M,))
d_logprobs = -1 * d_logprobs
entropy_b = tl.load(entropy_b_ptr + offs_am * stride_entropy_b, mask=offs_am < num_tokens, other=0.0)
labels = tl.load(labels_ptr + offs_am * stride_labels, mask=offs_am < num_tokens, other=0)
logits = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
if USE_TMA:
# using TMA and device-side descriptor creation
hidden_desc = tl.make_tensor_descriptor(
hidden_ptr,
shape=[num_tokens, hidden_size],
strides=[stride_hidden_m, 1],
block_shape=[BLOCK_SIZE_M, BLOCK_SIZE_K],
)
weight_desc = tl.make_tensor_descriptor(
weight_ptr,
shape=[vocab_size, hidden_size],
strides=[stride_weight_n, 1],
block_shape=[BLOCK_SIZE_N, BLOCK_SIZE_K],
)
else:
hidden_ptrs = hidden_ptr + (offs_am[:, None] * stride_hidden_m + offs_k[None, :] * stride_hidden_k)
weight_ptrs = weight_ptr + (offs_bn[:, None] * stride_weight_n + offs_k[None, :] * stride_weight_k)
vocab_right_bound = min((split_idx + 1) * vocab_per_split, vocab_size)
for k in range(0, tl.cdiv(hidden_size, BLOCK_SIZE_K)):
if USE_TMA:
start_offs_k = k * BLOCK_SIZE_K
_hidden = hidden_desc.load([start_offs_am, start_offs_k])
_weight = weight_desc.load([start_offs_bn, start_offs_k])
else:
_hidden = tl.load(
hidden_ptrs,
mask=(offs_k[None, :] < hidden_size - k * BLOCK_SIZE_K) & (offs_am[:, None] < num_tokens),
other=0.0,
)
_weight = tl.load(
weight_ptrs,
mask=(offs_k[None, :] < hidden_size - k * BLOCK_SIZE_K) & (offs_bn[:, None] < vocab_right_bound),
other=0.0,
)
hidden_ptrs += BLOCK_SIZE_K * stride_hidden_k
weight_ptrs += BLOCK_SIZE_K * stride_weight_k
logits = tl.dot(_hidden, _weight.T, logits)
logits *= rcp_temperature
exp_logits = tl.exp(logits - maximum[:, None])
mask = (offs_bn + rank * vocab_size)[None, :] == labels[:, None]
d_logits = d_logprobs[:, None] * (exp_logits * accu_rcp[:, None] - mask)
d_logits += d_entropy[:, None] * (-exp_logits * accu_rcp[:, None]) * (logits - entropy_b[:, None])
d_logits *= rcp_temperature
# filter d_logits with mask
result_offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
mask = (offs_am[:, None] < num_tokens) & (result_offs_n[None, :] < vocab_per_split)
tl.store(
d_logits_ptr + offs_am[:, None] * stride_d_logits_m + result_offs_n[None, :] * stride_d_logits_n, d_logits, mask
)
def efficient_entropy_backward(
dlogprobs: torch.Tensor,
dentropy: torch.Tensor,
hidden: torch.Tensor,
weight: torch.Tensor,
labels: torch.Tensor,
maximum: torch.Tensor,
acc: torch.Tensor,
entropy_b: torch.Tensor,
reduction: typing.Optional[int] = 2,
should_return_fp32_grad: bool = False,
temperature: typing.Optional[float] = 1.0,
dist_process_group: typing.Optional[dist.ProcessGroup] = None,
) -> list[torch.Tensor]:
"""
backward host function
"""
assert hidden.is_cuda and weight.is_cuda and labels.is_cuda
assert weight.device == hidden.device and labels.device == hidden.device
assert hidden.dim() == 2 and weight.dim() == 2 and labels.dim() == 1
assert hidden.is_contiguous() and weight.is_contiguous() and labels.is_contiguous()
assert hidden.shape[0] == labels.shape[0] and hidden.shape[1] == weight.shape[1]
_rank = 0 if dist_process_group is None else dist.get_rank(dist_process_group)
_world_size = 1 if dist_process_group is None else dist.get_world_size(dist_process_group)
num_tokens, hidden_size = hidden.shape
num_tokens = labels.shape[0]
vocab_size, hidden_size = weight.shape
assert hidden_size % 128 == 0
REDUCTION = get_entropy_reduction_enum(reduction)
if REDUCTION == EntropyReductionEnum._None:
assert dlogprobs.shape == (num_tokens,)
else:
assert dlogprobs.dim() == 0
assert dlogprobs.is_contiguous() and dentropy.is_contiguous()
assert dlogprobs.is_cuda and dentropy.is_cuda
assert dlogprobs.device == hidden.device and dlogprobs.device == dentropy.device
assert dentropy.shape == (num_tokens,)
d_hidden, d_weight = None, None
if _config._backward == BackwardEnum._Total_Fuse_MN or should_return_fp32_grad:
d_hidden = torch.zeros_like(hidden, dtype=torch.float32, device=hidden.device)
d_weight = torch.zeros_like(weight, dtype=torch.float32, device=weight.device)
else:
d_hidden = torch.empty_like(hidden, dtype=hidden.dtype, device=hidden.device)
d_weight = torch.empty_like(weight, dtype=hidden.dtype, device=weight.device)
assert d_hidden.is_contiguous() and d_weight.is_contiguous()
assert maximum.is_contiguous() and acc.is_contiguous()
assert maximum.device == hidden.device and acc.device == hidden.device
assert maximum.shape == labels.shape == acc.shape
assert maximum.is_cuda and acc.is_cuda
vocab_per_split = 1024
assert vocab_per_split % 128 == 0
num_splits = (vocab_size + vocab_per_split - 1) // vocab_per_split
assert entropy_b.is_contiguous() and entropy_b.is_cuda
assert entropy_b.shape == (num_tokens,)
if _config._backward == BackwardEnum._Total_Fuse_MN:
        # --- Fully fused path: d_logits is never materialized; tiles are split from the perspective of d_logits.
def mainloop_grid(meta):
return (triton.cdiv(num_tokens, meta["BLOCK_SIZE_M"]) * triton.cdiv(vocab_size, meta["BLOCK_SIZE_N"]),)
efficient_entropy_backward_kernel_general_mainloop_MN[mainloop_grid](
num_tokens,
hidden_size,
vocab_size,
_rank,
hidden,
hidden.stride(0),
hidden.stride(1),
weight,
weight.stride(0),
weight.stride(1),
labels,
labels.stride(0),
maximum,
maximum.stride(0),
acc,
acc.stride(0),
dentropy,
dentropy.stride(0),
dlogprobs,
dlogprobs.stride(0) if REDUCTION == EntropyReductionEnum._None else 0,
REDUCTION,
entropy_b,
entropy_b.stride(0),
d_hidden,
d_hidden.stride(0),
d_hidden.stride(1),
d_weight,
d_weight.stride(0),
d_weight.stride(1),
1.0 / temperature,
USE_TMA=SUPPORT_CUDA_TMA and hidden.stride(1) == 1 and weight.stride(1) == 1,
)
elif _config._backward == BackwardEnum._Total_Separate:
_d_logits = torch.empty((num_tokens, vocab_size), device=hidden.device, dtype=hidden.dtype).contiguous()
assert _d_logits.is_contiguous()
if _config._use_triton:
def d_logits_grid(meta):
return (triton.cdiv(num_tokens, meta["BLOCK_SIZE_M"]) * triton.cdiv(vocab_size, meta["BLOCK_SIZE_N"]),)
efficient_entropy_backward_kernel_general_d_logits[d_logits_grid](
num_tokens,
hidden_size,
vocab_size,
_rank,
hidden,
hidden.stride(0),
hidden.stride(1),
weight,
weight.stride(0),
weight.stride(1),
labels,
labels.stride(0),
maximum,
maximum.stride(0),
acc,
acc.stride(0),
dentropy,
dentropy.stride(0),
dlogprobs,
dlogprobs.stride(0) if REDUCTION == EntropyReductionEnum._None else 0,
REDUCTION,
entropy_b,
entropy_b.stride(0),
_d_logits,
_d_logits.stride(0),
_d_logits.stride(1),
1.0 / temperature,
USE_TMA=SUPPORT_CUDA_TMA and hidden.stride(1) == 1 and weight.stride(1) == 1,
)
torch.matmul(_d_logits, weight, out=d_hidden)
torch.matmul(_d_logits.T, hidden, out=d_weight)
else:
raise AssertionError("Triton is required for efficient entropy kernel")
elif _config._backward == BackwardEnum._Split_Dlogits_N:
vocab_per_split = 9504
num_splits = (vocab_size + vocab_per_split - 1) // vocab_per_split
_d_logits = torch.empty((num_tokens, vocab_per_split), device=hidden.device, dtype=hidden.dtype).contiguous()
assert _d_logits.is_contiguous()
def d_logits_grid(meta):
return (triton.cdiv(num_tokens, meta["BLOCK_SIZE_M"]) * triton.cdiv(vocab_per_split, meta["BLOCK_SIZE_N"]),)
for split_idx in range(num_splits):
efficient_entropy_backward_kernel_general_d_logits_split_N[d_logits_grid](
split_idx,
num_tokens,
hidden_size,
vocab_size,
vocab_per_split,
_rank,
hidden,
hidden.stride(0),
hidden.stride(1),
weight,
weight.stride(0),
weight.stride(1),
labels,
labels.stride(0),
maximum,
maximum.stride(0),
acc,
acc.stride(0),
dentropy,
dentropy.stride(0),
dlogprobs,
dlogprobs.stride(0) if REDUCTION == EntropyReductionEnum._None else 0,
REDUCTION,
entropy_b,
entropy_b.stride(0),
_d_logits,
_d_logits.stride(0),
_d_logits.stride(1),
1.0 / temperature,
USE_TMA=SUPPORT_CUDA_TMA and hidden.stride(1) == 1 and weight.stride(1) == 1,
)
if split_idx == (num_splits - 1):
vocab_right_bound = min((split_idx + 1) * vocab_per_split, vocab_size) - split_idx * vocab_per_split
_d_logits = _d_logits[:, :vocab_right_bound].contiguous()
if split_idx == 0:
torch.matmul(
_d_logits, weight[split_idx * vocab_per_split : (split_idx + 1) * vocab_per_split, :], out=d_hidden
)
else:
d_hidden += torch.matmul(
_d_logits, weight[split_idx * vocab_per_split : (split_idx + 1) * vocab_per_split, :]
)
torch.matmul(
_d_logits.T, hidden, out=d_weight[split_idx * vocab_per_split : (split_idx + 1) * vocab_per_split, :]
)
elif _config._backward == BackwardEnum._Split_Dlogits_M:
raise NotImplementedError("BackwardEnum._Split_Dlogits_M is not implemented yet")
return d_hidden, d_weight
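# --- Illustrative reference (not part of the original module). A minimal pure-PyTorch
# sketch of the recombination performed by the _Split_Dlogits_N branch above: each vocab
# chunk's d_logits contributes d_logits_chunk @ W_chunk to d_hidden and fills the matching
# rows of d_weight with d_logits_chunk.T @ hidden. The chunk size and fp32 accumulation
# here are assumptions chosen for demonstration only.
def _reference_split_n_recombine(d_logits_full, hidden, weight, vocab_per_split=1024):
    vocab_size = weight.shape[0]
    d_hidden = torch.zeros(hidden.shape, dtype=torch.float32, device=hidden.device)
    d_weight = torch.zeros(weight.shape, dtype=torch.float32, device=weight.device)
    for start in range(0, vocab_size, vocab_per_split):
        end = min(start + vocab_per_split, vocab_size)
        d_logits_chunk = d_logits_full[:, start:end].float()
        d_hidden += d_logits_chunk @ weight[start:end, :].float()  # accumulate over vocab chunks
        d_weight[start:end, :] = d_logits_chunk.t() @ hidden.float()  # each chunk owns its rows
    return d_hidden, d_weight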
|
verl__utils__kernel__linear_cross_entropy.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import typing
import torch
import torch.distributed as dist
class LinearCrossEntropy(torch.autograd.Function):
@staticmethod
def forward(
ctx,
hidden: torch.Tensor,
weight: torch.Tensor,
labels: torch.Tensor,
temperature: typing.Optional[float] = 1.0,
reduction: typing.Optional[str] = "none",
dist_process_group: typing.Optional[dist.ProcessGroup] = None,
) -> list[torch.Tensor]:
"""_summary_
Args:
ctx (_type_): _description_
hidden (torch.Tensor): (batch_size, num_tokens, hidden_size) -> (batch_size * num_tokens, hidden_size)
weight (torch.Tensor): (vocab_size, hidden_size)
labels (torch.Tensor): (batch_size, num_tokens) -> (batch_size * num_tokens, )
temperature (typing.Optional[float], optional): _description_. Defaults to 1.0.
reduction (typing.Optional[str], optional): _description_. Defaults to "none".
dist_process_group (typing.Optional[dist.ProcessGroup], optional): _description_. Defaults to None.
Returns:
typing.List[torch.Tensor]: _description_
"""
assert isinstance(temperature, float), f"temperature must be a float, but got {type(temperature)}"
assert isinstance(reduction, str), f"reduction must be a str, but got {type(reduction)}"
with torch.cuda.nvtx.range("LinearCrossEntropy-forward"):
from . import kernels
REDUCTION = kernels.get_entropy_reduction_enum_number(reduction.lower())
original_hidden_shape = hidden.shape
if len(hidden.shape) != 2:
hidden = hidden.view(-1, hidden.shape[-1]) # (batch_size * num_tokens, hidden_size)
if len(labels.shape) != 1:
labels = labels.view(-1)
logprobs, entropy, _maximum, _accumulate, _entropy_b = kernels.efficient_entropy_forward(
hidden, weight, labels, REDUCTION, temperature, dist_process_group
)
ctx.save_for_backward(hidden, weight, labels, _maximum, _accumulate, _entropy_b)
ctx.original_hidden_shape = original_hidden_shape
ctx.REDUCTION = REDUCTION
ctx.dist_process_group = dist_process_group
ctx.should_return_fp32_grad = False
ctx.temperature = temperature
return logprobs, entropy
@staticmethod
def backward(ctx, dlogprobs: torch.Tensor, dentropy: torch.Tensor) -> list[torch.Tensor]:
from . import kernels
with torch.cuda.nvtx.range("LinearCrossEntropy-backward"):
(hidden, weight, labels, _maximum, _accumulate, _entropy_b) = ctx.saved_tensors
REDUCTION = ctx.REDUCTION
dist_process_group = ctx.dist_process_group
should_return_fp32_grad = ctx.should_return_fp32_grad
temperature = ctx.temperature
d_hidden, d_weight = kernels.efficient_entropy_backward(
dlogprobs,
dentropy,
hidden,
weight,
labels,
_maximum,
_accumulate,
_entropy_b,
REDUCTION,
should_return_fp32_grad,
temperature,
dist_process_group,
)
d_hidden = d_hidden.view(ctx.original_hidden_shape)
return (d_hidden, d_weight, None, None, None, None)
linear_cross_entropy = LinearCrossEntropy.apply
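# --- Illustrative usage (not part of the original module). A minimal sketch of calling
# linear_cross_entropy; the shapes, dtype, and the CUDA device below are assumptions for
# demonstration, and the helper only runs when invoked explicitly.
def _example_linear_cross_entropy():
    batch_size, num_tokens, hidden_size, vocab_size = 2, 128, 1024, 32000
    hidden = torch.randn(batch_size, num_tokens, hidden_size, device="cuda", dtype=torch.bfloat16, requires_grad=True)
    weight = torch.randn(vocab_size, hidden_size, device="cuda", dtype=torch.bfloat16, requires_grad=True)
    labels = torch.randint(0, vocab_size, (batch_size, num_tokens), device="cuda")
    logprobs, entropy = linear_cross_entropy(hidden, weight, labels, 1.0, "none")
    (logprobs.sum() + entropy.sum()).backward()  # populates hidden.grad and weight.grad
    return logprobs, entropy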
|
verl__utils__logger__aggregate_logger.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A Ray logger will receive logging info from different processes.
"""
import datetime
import logging
import numbers
import pprint
import torch
def concat_dict_to_str(dict: dict, step):
output = [f"step:{step}"]
for k, v in dict.items():
if isinstance(v, numbers.Number):
output.append(f"{k}:{pprint.pformat(v)}")
output_str = " - ".join(output)
return output_str
class LocalLogger:
"""
A local logger that logs messages to the console.
Args:
print_to_console (bool): Whether to print to the console.
"""
def __init__(self, print_to_console=True):
self.print_to_console = print_to_console
def flush(self):
pass
def log(self, data, step):
if self.print_to_console:
print(concat_dict_to_str(data, step=step), flush=True)
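# --- Illustrative usage (not part of the original module). A minimal sketch of logging a
# metrics dict with LocalLogger; the metric names and values are assumptions for
# demonstration. Non-numeric values are skipped by concat_dict_to_str.
def _example_local_logger():
    logger = LocalLogger(print_to_console=True)
    metrics = {"actor/loss": 0.42, "critic/loss": 0.17, "tag": "ignored-non-numeric"}
    logger.log(data=metrics, step=10)  # prints: step:10 - actor/loss:0.42 - critic/loss:0.17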
class DecoratorLoggerBase:
"""
Base class for all decorators that log messages.
Args:
role (str): The role (the name) of the logger.
logger (logging.Logger): The logger instance to use for logging.
level (int): The logging level.
rank (int): The rank of the process.
log_only_rank_0 (bool): If True, only log for rank 0.
"""
def __init__(
self, role: str, logger: logging.Logger = None, level=logging.DEBUG, rank: int = 0, log_only_rank_0: bool = True
):
self.role = role
self.logger = logger
self.level = level
self.rank = rank
self.log_only_rank_0 = log_only_rank_0
self.logging_function = self.log_by_logging
if logger is None:
self.logging_function = self.log_by_print
def log_by_print(self, log_str):
if not self.log_only_rank_0 or self.rank == 0:
print(f"{self.role} {log_str}", flush=True)
def log_by_logging(self, log_str):
if self.logger is None:
raise ValueError("Logger is not initialized")
if not self.log_only_rank_0 or self.rank == 0:
self.logger.log(self.level, f"{self.role} {log_str}")
def print_rank_0(message):
"""If distributed is initialized, print only on rank 0."""
if torch.distributed.is_initialized():
if torch.distributed.get_rank() == 0:
print(message, flush=True)
else:
print(message, flush=True)
def print_with_rank(message: str, rank: int = 0, log_only_rank_0: bool = False):
"""_summary_
Print a message with rank information.
This function prints the message only if `log_only_rank_0` is False or if the rank is 0.
Args:
message (str): _description_
rank (int, optional): _description_. Defaults to 0.
log_only_rank_0 (bool, optional): _description_. Defaults to False.
"""
if not log_only_rank_0 or rank == 0:
print(f"[Rank {rank}] {message}", flush=True)
def print_with_rank_and_timer(message: str, rank: int = 0, log_only_rank_0: bool = False):
"""_summary_
Print a message with rank information and a timestamp.
This function prints the message only if `log_only_rank_0` is False or if the rank is 0.
Args:
message (str): _description_
rank (int, optional): _description_. Defaults to 0.
log_only_rank_0 (bool, optional): _description_. Defaults to False.
"""
now = datetime.datetime.now()
message = f"[{now.strftime('%Y-%m-%d %H:%M:%S')}] [Rank {rank}] {message}"
if not log_only_rank_0 or rank == 0:
print(message, flush=True)
def log_with_rank(message: str, rank, logger: logging.Logger, level=logging.INFO, log_only_rank_0: bool = False):
"""_summary_
Log a message with rank information using a logger.
This function logs the message only if `log_only_rank_0` is False or if the rank is 0.
Args:
message (str): The message to log.
rank (int): The rank of the process.
logger (logging.Logger): The logger instance to use for logging.
level (int, optional): The logging level. Defaults to logging.INFO.
log_only_rank_0 (bool, optional): If True, only log for rank 0. Defaults to False.
"""
if not log_only_rank_0 or rank == 0:
logger.log(level, f"[Rank {rank}] {message}")
|
verl__utils__megatron__memory.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from verl.utils.device import get_device_id
class MemoryBuffer:
def __init__(self, numel, numel_padded, dtype):
self.numel = numel
self.numel_padded = numel_padded
self.dtype = dtype
self.data = torch.zeros(self.numel_padded, dtype=self.dtype, device=get_device_id(), requires_grad=False)
def zero(self):
"""Reset the buffer to zero."""
self.data.zero_()
def get(self, shape, start_index):
"""Return a tensor with the input `shape` as a view into the
1-D data starting at `start_index`."""
end_index = start_index + shape.numel()
assert end_index <= self.numel, "requested tensor is out of the buffer range."
buffer_tensor = self.data[start_index:end_index]
buffer_tensor = buffer_tensor.view(shape)
return buffer_tensor
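# --- Illustrative usage (not part of the original module). A minimal sketch of carving
# parameter-shaped views out of a single MemoryBuffer; the shapes and dtype are
# assumptions for demonstration, and an accelerator device is required because the
# buffer is allocated via get_device_id().
def _example_memory_buffer():
    shape_a, shape_b = torch.Size([4, 8]), torch.Size([16])
    total_numel = shape_a.numel() + shape_b.numel()
    buffer = MemoryBuffer(numel=total_numel, numel_padded=total_numel, dtype=torch.float32)
    tensor_a = buffer.get(shape_a, start_index=0)  # view over elements [0, 32)
    tensor_b = buffer.get(shape_b, start_index=shape_a.numel())  # view over elements [32, 48)
    tensor_a.fill_(1.0)  # writes through to buffer.data because tensor_a is a view
    return tensor_a, tensor_b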
|
verl__utils__megatron__pipeline_parallel.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from megatron.core import parallel_state as mpu
from .sequence_parallel import pad_to_sequence_parallel
def compute_transformers_input_shapes(batches, meta_info):
from flash_attn.bert_padding import unpad_input # flash 2 is a must for Megatron
# pre-compute input shapes for each micro-batch at each pp stage
input_shapes = []
for model_inputs in batches:
input_ids = model_inputs["input_ids"]
attention_mask = model_inputs["attention_mask"]
input_ids_rmpad = unpad_input(input_ids.unsqueeze(dim=-1), attention_mask)[0] # (total_nnz, 1)
if meta_info["sequence_parallel"]:
input_ids_rmpad = pad_to_sequence_parallel(input_ids_rmpad)
# compute shapes for model_inputs
input_shapes.append(
torch.Size(
[
input_ids_rmpad.shape[0] // mpu.get_tensor_model_parallel_world_size(),
1,
meta_info["hidden_size"],
]
)
)
else:
# compute shapes for model_inputs
input_shapes.append(torch.Size([input_ids_rmpad.shape[0], 1, meta_info["hidden_size"]]))
return input_shapes
def make_batch_generator(batches, vpp_size):
"""
Creates a batch generator suitable for Megatron pipeline parallelism,
handling virtual pipeline parallelism (VPP).
If VPP is used (vpp_size > 1), it duplicates the batch iterator for each
virtual pipeline stage. Otherwise, it returns a single iterator.
Args:
batches: An iterable (e.g., list) of micro-batches.
vpp_size (int): The virtual pipeline model parallel size.
Returns:
An iterator or a list of iterators over the micro-batches.
"""
if vpp_size > 1:
# has vpp
batch_generator = [batches] * vpp_size # number of vpp chunks
batch_generator = [iter(b) for b in batch_generator]
else:
# no vpp
batch_generator = iter(batches)
return batch_generator
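# --- Illustrative usage (not part of the original module). A minimal sketch of
# make_batch_generator; the micro-batch contents are assumptions for demonstration.
# With vpp_size > 1, every virtual pipeline stage receives its own iterator over the
# same list of micro-batches.
def _example_make_batch_generator():
    micro_batches = [{"input_ids": torch.zeros(2, 8, dtype=torch.long)} for _ in range(4)]
    single_iter = make_batch_generator(micro_batches, vpp_size=1)  # a single iterator
    vpp_iters = make_batch_generator(micro_batches, vpp_size=2)  # a list of two iterators
    return next(single_iter), [next(it) for it in vpp_iters]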
|
verl__utils__megatron__router_replay_patch.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from enum import Enum
import torch
try:
from megatron.core.transformer.moe.moe_utils import (
apply_router_token_dropping,
compute_routing_scores_for_aux_loss,
group_limited_topk,
)
from megatron.core.transformer.moe.token_dispatcher import MoEAlltoAllTokenDispatcher
except ImportError:
warnings.warn("NPU not support router replay for now.", stacklevel=2)
MoEAlltoAllTokenDispatcher = None
from megatron.core.transformer.moe.router import TopKRouter
from megatron.core.transformer.transformer_config import TransformerConfig
# https://github.com/THUDM/slime/blob/main/slime/utils/routing_replay.py
class RouterReplayAction(Enum):
RECORD = "record"
REPLAY_FORWARD = "replay_forward"
REPLAY_BACKWARD = "replay_backward"
class RouterReplay:
"""
A class to manage the recording and replaying of MoE routing decisions.
It holds all router instances and provides static methods to globally
control recording and replaying.
"""
# Static variable to hold all router instances, one per MoE layer.
router_instances = []
@staticmethod
def set_replay_data(all_layers_topk_indices: list):
"""
Distributes the topk indices for all layers to their respective RouterReplay instances.
:param all_layers_topk_indices: A list of tensors, where each tensor contains the
topk indices for a specific layer. The order
must match the instantiation order of the routers.
"""
if len(all_layers_topk_indices) != len(RouterReplay.router_instances):
raise ValueError(
f"The number of replay tensors ({len(all_layers_topk_indices)}) "
f"does not match the number of router instances ({len(RouterReplay.router_instances)})."
)
for i, router_instance in enumerate(RouterReplay.router_instances):
router_instance.set_target_indices(all_layers_topk_indices[i])
@staticmethod
def get_recorded_data() -> list:
"""
Collects the recorded topk indices from all RouterReplay instances.
:return: A list of tensors, each containing the recorded topk indices for a layer.
"""
return [router.get_recorded_indices() for router in RouterReplay.router_instances]
@staticmethod
def clear_global_indices():
"""Clears the recorded and target topk indices in all instances."""
for router in RouterReplay.router_instances:
router.clear_indices()
def __init__(self):
"""Initializes a RouterReplay instance for a specific layer."""
self.target_topk_idx = None # For replay
self.recorded_topk_idx = None # For recording
self.router_replay_action = None # Router replay action for this layer
self.replay_backward_list = [] # List of tensors for backward pass replay
self.layer_number = None # Global layer index if available
RouterReplay.router_instances.append(self)
def set_target_indices(self, topk_indices: torch.Tensor):
"""Sets the target topk indices for replay."""
self.target_topk_idx = topk_indices
self.replay_backward_list.append(topk_indices)
def get_recorded_indices(self):
"""Returns the recorded topk indices."""
return self.recorded_topk_idx
def record_indices(self, topk_indices: torch.Tensor):
"""Records the topk indices."""
self.recorded_topk_idx = topk_indices
def clear_indices(self):
"""Clears the recorded and target topk indices."""
self.recorded_topk_idx = None
self.target_topk_idx = None
self.replay_backward_list = []
def set_router_replay_action(self, router_replay_action: RouterReplayAction):
"""Sets the router replay action for this layer."""
self.router_replay_action = router_replay_action
def clear_router_replay_action(self):
"""Clears the router replay action for this layer."""
self.router_replay_action = None
@staticmethod
def set_global_router_replay_action(router_replay_action: RouterReplayAction):
"""Sets the router replay action for all router instances."""
for router in RouterReplay.router_instances:
router.set_router_replay_action(router_replay_action)
@staticmethod
def clear_global_router_replay_action():
"""Clears the router replay action for all router instances."""
for router in RouterReplay.router_instances:
router.clear_router_replay_action()
def _patched_topk_routing_with_score_function(
logits: torch.Tensor,
topk: int,
use_pre_softmax: bool,
num_groups: int,
group_topk: int,
score_function: str,
expert_bias: torch.Tensor,
fused: bool,
router_replay: RouterReplay,
scaling_factor: float,
):
"""
Patched version of topk_routing_with_score_function that supports router replay.
"""
num_tokens, num_experts = logits.shape
def _compute_topk(scores, topk, num_groups=None, group_topk=None):
if group_topk:
return group_limited_topk(
scores=scores,
topk=topk,
num_tokens=num_tokens,
num_experts=num_experts,
num_groups=num_groups,
group_topk=group_topk,
)
else:
return torch.topk(scores, k=topk, dim=1)
def compute_topk(scores, topk, num_groups=None, group_topk=None):
# Default behavior if no replay is active
routing_action = router_replay.router_replay_action if router_replay is not None else None
if routing_action is None:
return _compute_topk(scores, topk, num_groups=num_groups, group_topk=group_topk)
if routing_action == RouterReplayAction.RECORD:
probs, top_indices = _compute_topk(scores, topk, num_groups=num_groups, group_topk=group_topk)
if router_replay is not None:
router_replay.record_indices(top_indices)
return probs, top_indices
elif routing_action == RouterReplayAction.REPLAY_FORWARD:
if router_replay is None or router_replay.target_topk_idx is None:
# Fallback if replay data is not available
return _compute_topk(scores, topk, num_groups=num_groups, group_topk=group_topk)
# Use the provided indices for replay
top_indices = router_replay.target_topk_idx
# Ensure indices are on the correct device
top_indices = top_indices.to(scores.device)
# Gather the scores for the replayed indices to get the probabilities
probs = scores.gather(1, top_indices)
return probs, top_indices
elif routing_action == RouterReplayAction.REPLAY_BACKWARD:
if router_replay is None or not router_replay.replay_backward_list:
# Fallback if replay data is not available
return _compute_topk(scores, topk, num_groups=num_groups, group_topk=group_topk)
# Use the last recorded indices for backward replay
top_indices = router_replay.replay_backward_list.pop(0)
# Ensure indices are on the correct device
top_indices = top_indices.to(scores.device)
# Gather the scores for the replayed indices to get the probabilities
probs = scores.gather(1, top_indices)
return probs, top_indices
else: # Unknown action, fallback
return _compute_topk(scores, topk, num_groups=num_groups, group_topk=group_topk)
if score_function == "softmax":
if use_pre_softmax:
scores = torch.softmax(logits, dim=-1, dtype=torch.float32).type_as(logits)
probs, top_indices = compute_topk(scores, topk, num_groups, group_topk)
else:
scores, top_indices = compute_topk(logits, topk, num_groups, group_topk)
probs = torch.softmax(scores, dim=-1, dtype=torch.float32).type_as(logits)
elif score_function == "sigmoid":
scores = torch.sigmoid(logits.float()).type_as(logits)
if expert_bias is not None:
scores_for_routing = scores + expert_bias
_, top_indices = compute_topk(scores_for_routing, topk, num_groups, group_topk)
scores = torch.gather(scores, dim=1, index=top_indices).type_as(logits)
else:
scores, top_indices = compute_topk(scores, topk, num_groups, group_topk)
probs = scores / (scores.sum(dim=-1, keepdim=True) + 1e-20) if topk > 1 else scores
else:
raise ValueError(f"Invalid score_function: {score_function}")
if scaling_factor:
probs = probs * scaling_factor
if torch.are_deterministic_algorithms_enabled():
# build [num_tokens, num_experts] from [num_tokens, topk]
routing_probs = torch.zeros_like(logits)
rows = torch.arange(num_tokens, device=logits.device).unsqueeze(1)
routing_probs.index_put_((rows, top_indices), probs, accumulate=False)
routing_map = torch.zeros_like(logits, dtype=logits.dtype)
routing_map.index_put_((rows, top_indices), torch.ones_like(probs, dtype=routing_map.dtype), accumulate=False)
routing_map = routing_map.bool()
else:
# TODO Try using element-wise operations instead of scatter?
routing_probs = torch.zeros_like(logits).scatter(1, top_indices, probs)
routing_map = torch.zeros_like(logits).int().scatter(1, top_indices, 1).bool()
return routing_probs, routing_map
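# --- Illustrative reference (not part of the original module). A minimal sketch of the
# dense routing tensors built above: scatter the per-token top-k probabilities into a
# [num_tokens, num_experts] grid and derive the boolean routing map. The sizes are
# assumptions for demonstration.
def _example_build_routing_map(num_tokens=4, num_experts=8, topk=2):
    logits = torch.randn(num_tokens, num_experts)
    probs, top_indices = torch.topk(torch.softmax(logits, dim=-1), k=topk, dim=1)
    routing_probs = torch.zeros_like(logits).scatter(1, top_indices, probs)
    routing_map = torch.zeros_like(logits).int().scatter(1, top_indices, 1).bool()
    return routing_probs, routing_map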
def patched_routing(self, logits: torch.Tensor, *args, **kwargs):
"""Top-k routing function
Args:
logits (torch.Tensor): Logits tensor after gating.
Returns:
probs (torch.Tensor): The probabilities of token to experts assignment.
routing_map (torch.Tensor): The mapping of token to experts assignment,
with shape [num_tokens, num_experts].
"""
seq_length, bsz = logits.shape[:2]
logits = logits.view(-1, self.config.num_moe_experts)
# Apply Z-Loss
logits = self.apply_z_loss(logits)
# Calculate probs and routing_map for token dispatching
if self.routing_type == "sinkhorn":
probs, routing_map = self.sinkhorn_load_balancing(logits)
else:
probs, routing_map = _patched_topk_routing_with_score_function(
logits=logits,
topk=self.topk,
use_pre_softmax=self.config.moe_router_pre_softmax,
num_groups=self.config.moe_router_num_groups,
group_topk=self.config.moe_router_group_topk,
scaling_factor=self.config.moe_router_topk_scaling_factor,
score_function=self.score_function,
expert_bias=self.expert_bias,
fused=self.config.moe_router_fusion,
router_replay=self.router_replay,
)
# Apply token dropping to probs and routing_map.
if self.config.moe_expert_capacity_factor is not None:
probs, routing_map = apply_router_token_dropping(
probs,
routing_map,
router_topk=self.topk,
capacity_factor=self.config.moe_expert_capacity_factor,
drop_policy=self.config.moe_token_drop_policy,
pad_to_capacity=self.config.moe_pad_expert_input_to_capacity,
)
# Apply each aux loss type and attach aux loss autograd function to probs
if self.training and torch.is_grad_enabled() and self.is_aux_loss_enabled():
# Calculate scores and routing_map for aux loss
routing_map_for_aux_loss, scores_for_aux_loss = compute_routing_scores_for_aux_loss(
logits, self.topk, self.score_function, fused=self.config.moe_router_fusion
)
probs = self._apply_aux_loss(probs, scores_for_aux_loss, routing_map_for_aux_loss)
probs = self._apply_seq_aux_loss(probs, scores_for_aux_loss, routing_map_for_aux_loss, seq_length, bsz)
probs = self._apply_global_aux_loss(probs, scores_for_aux_loss, routing_map_for_aux_loss)
# Update expert bias and tokens_per_expert
# Prevent extra local tokens accumulation on evaluation or activation recomputation
if self.enable_expert_bias and torch.is_grad_enabled():
with torch.no_grad():
self.local_tokens_per_expert += routing_map.sum(dim=0)
return probs, routing_map
def apply_router_replay_patch():
"""
Applies the monkey patch for MoE Router Replay functionality.
This patch dynamically adds the 'enable_routing_replay' attribute to TransformerConfig
and modifies the TopKRouter to support recording and replaying of routing decisions.
"""
print("Applying Router Replay Patch...")
# Clear router instances to avoid state leakage between model initializations.
RouterReplay.router_instances.clear()
# Step 1: Patch TransformerConfig to include the feature flag
if not hasattr(TransformerConfig, "enable_routing_replay"):
# Add class attribute with default value
TransformerConfig.enable_routing_replay = False
# Store original __init__ method
original_tf_config_init = TransformerConfig.__init__
# Define new __init__ method that safely handles enable_routing_replay parameter
def patched_tf_config_init(self, *args, **kwargs):
# Simple solution: remove the unknown parameter before calling original constructor
enable_routing_replay = kwargs.pop("enable_routing_replay", TransformerConfig.enable_routing_replay)
# Call original constructor with remaining kwargs
original_tf_config_init(self, *args, **kwargs)
# Set the instance attribute
self.enable_routing_replay = enable_routing_replay
# Apply the patch
TransformerConfig.__init__ = patched_tf_config_init
# Step 2: Patch TopKRouter only once to ensure idempotency.
if hasattr(TopKRouter, "_router_replay_patched"):
return
original_init = TopKRouter.__init__
original_set_layer_number = TopKRouter.set_layer_number
def patched_set_layer_number(self, layer_number: int):
original_set_layer_number(self, layer_number)
if self.router_replay is not None:
self.router_replay.layer_number = layer_number
# Step 3: Define the new __init__ method
def patched_init(self, *args, **kwargs):
original_init(self, *args, **kwargs)
self.router_replay = None
if self.config.enable_routing_replay:
self.router_replay = RouterReplay()
# Step 4: Patch MoEAlltoAllTokenDispatcher.preprocess to handle router replay
# When router replay is enabled, duplicate indices in top_indices can cause
# routing_map.sum() < num_tokens * topk, leading to split size mismatch in alltoall.
if MoEAlltoAllTokenDispatcher is not None and not hasattr(MoEAlltoAllTokenDispatcher, "_preprocess_patched"):
original_preprocess = MoEAlltoAllTokenDispatcher.preprocess
def patched_preprocess(self, routing_map):
"""Patched preprocess that handles router replay correctly for alltoall dispatcher."""
# Call original preprocess
result = original_preprocess(self, routing_map)
# Fix num_out_tokens when router replay is enabled
if (
getattr(self.config, "enable_routing_replay", False)
and not self.drop_and_pad
and self.config.moe_expert_capacity_factor is None
and not (
getattr(self.config, "moe_router_padding_for_quantization", None)
or getattr(self.config, "moe_router_padding_for_fp8", None)
)
):
# With router replay, duplicate indices can reduce the actual routed
# token count, so derive it from the routing map instead.
self.num_out_tokens = int(routing_map.sum().item())
return result
MoEAlltoAllTokenDispatcher.preprocess = patched_preprocess
MoEAlltoAllTokenDispatcher._preprocess_patched = True
# Step 5: Apply the patches
TopKRouter.__init__ = patched_init
TopKRouter.routing = patched_routing
TopKRouter.set_layer_number = patched_set_layer_number
TopKRouter._router_replay_patched = True
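# --- Illustrative usage (not part of the original module). A minimal sketch of the
# record-then-replay workflow enabled by this patch; `run_forward_pass` is a placeholder
# for the caller's forward/inference step and the exact integration point is an
# assumption for demonstration.
def _example_router_replay_workflow(run_forward_pass):
    apply_router_replay_patch()
    # 1) Record routing decisions during a reference forward pass.
    RouterReplay.set_global_router_replay_action(RouterReplayAction.RECORD)
    run_forward_pass()
    recorded = RouterReplay.get_recorded_data()  # one top-k index tensor per MoE layer
    # 2) Replay the recorded decisions in a later forward pass.
    RouterReplay.set_replay_data(recorded)
    RouterReplay.set_global_router_replay_action(RouterReplayAction.REPLAY_FORWARD)
    run_forward_pass()
    # 3) Clear the global state.
    RouterReplay.clear_global_router_replay_action()
    RouterReplay.clear_global_indices()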
|
verl__utils__megatron__router_replay_utils.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Router Replay Utilities
Utilities for handling router replay functionality in Megatron models.
"""
import warnings
from typing import Optional
import torch
try:
from megatron.core.pipeline_parallel.utils import is_vp_first_stage, is_vp_last_stage
except ImportError:
warnings.warn("NPU not support router replay for now.", stacklevel=2)
pass
from megatron.core import parallel_state as mpu
from megatron.core.pipeline_parallel.schedules import get_schedule_table
from megatron.core.tensor_parallel import gather_from_sequence_parallel_region, scatter_to_sequence_parallel_region
from megatron.core.transformer.transformer_config import TransformerConfig
from megatron.core.transformer.transformer_layer import get_transformer_layer_offset
from verl.models.mcore.util import (
postprocess_packed_seqs,
preprocess_packed_seqs,
preprocess_thd_no_padding,
)
from verl.utils.device import get_device_name
from verl.utils.megatron.router_replay_patch import RouterReplay, RouterReplayAction
device_name = get_device_name()
# from megatron.core.transformer.transformer_block import get_num_layers_to_build
def get_num_layers_to_build(
config: TransformerConfig, vp_stage: Optional[int] = None, pp_rank: Optional[int] = None
) -> int:
"""
Determine the number of transformer layers to build for the current pipeline stage.
Args:
config (TransformerConfig): Configuration object containing transformer model parameters.
vp_stage (Optional[int]): Virtual pipeline stage number.
pp_rank (Optional[int]): Pipeline parallel rank.
Returns:
int: The number of layers to be built for the current pipeline stage.
"""
    # If a custom PP layout is provided, return the number of decoder
    # layers in the layout array directly.
if hasattr(config, "pipeline_model_parallel_layout") and config.pipeline_model_parallel_layout is not None:
from megatron.core.transformer.enums import LayerType
return config.pipeline_model_parallel_layout.get_num_layers_to_build(
layer_type=LayerType.decoder, vp_stage=vp_stage
)
# Fallback for legacy tests.
if pp_rank is None:
pp_rank = mpu.get_pipeline_model_parallel_rank()
is_first_pp_stage = pp_rank == 0
is_last_pp_stage = pp_rank == config.pipeline_model_parallel_size - 1
if config.num_layers_in_first_pipeline_stage is not None or config.num_layers_in_last_pipeline_stage is not None:
        assert not (config.account_for_embedding_in_pipeline_split or config.account_for_loss_in_pipeline_split), (
            "Standalone embedding and loss stages are not supported together with uneven pipeline parallelism"
        )
# Number of layers to distribute over rest of pipeline stages
layers_to_distribute = config.num_layers
# Number of pipeline stages left for distributing transformer layers
pipeline_stages_left = config.pipeline_model_parallel_size
# If the uneven first (last) pipeline stage is enabled, remove the specified number
# of layers to calculate the number of layers on each middle pipeline stage.
if config.num_layers_in_first_pipeline_stage is not None:
layers_to_distribute -= config.num_layers_in_first_pipeline_stage
pipeline_stages_left -= 1
if config.num_layers_in_last_pipeline_stage is not None:
layers_to_distribute -= config.num_layers_in_last_pipeline_stage
pipeline_stages_left -= 1
# If pp_size <= 2, we do not have any intermediate pipeline stages, and we do not
# need to check if the left over layers are divisible by the left over stages.
if pipeline_stages_left > 0:
assert layers_to_distribute % pipeline_stages_left == 0, (
"With uneven pipelineing the left over layers must be divisible by left over stages"
)
num_layers_per_pipeline_rank = layers_to_distribute // pipeline_stages_left
else:
num_layers_per_pipeline_rank = 0
# If the uneven first (last) pipeline stage is enabled, return the specified number
# of layers for all virtual pipeline parallel stages within the first (last) pipeline
# parallel stage.
if is_first_pp_stage and config.num_layers_in_first_pipeline_stage is not None:
num_layers_per_pipeline_rank = config.num_layers_in_first_pipeline_stage
if is_last_pp_stage and config.num_layers_in_last_pipeline_stage is not None:
num_layers_per_pipeline_rank = config.num_layers_in_last_pipeline_stage
else:
# Include the embedding layer and loss layer into pipeline parallelism partition
num_layers = config.num_layers
if config.account_for_embedding_in_pipeline_split:
num_layers += 1
if config.account_for_loss_in_pipeline_split:
num_layers += 1
assert num_layers % config.pipeline_model_parallel_size == 0, (
"num_layers should be divisible by pipeline_model_parallel_size"
)
num_layers_per_pipeline_rank = num_layers // config.pipeline_model_parallel_size
vp_size = config.virtual_pipeline_model_parallel_size
if vp_size is not None and config.pipeline_model_parallel_size > 1:
# Interleaved pipeline parallelism:
# Number of layers in each model chunk is the number of layers in the stage,
# divided by the number of model chunks in a stage.
# With 8 layers, 2 stages, and 4 model chunks, we want an assignment of
# layers to stages like (each list is a model chunk):
# Stage 0: [0] [2] [4] [6]
# Stage 1: [1] [3] [5] [7]
# With 8 layers, 2 stages, and 2 virtual stages, we want an assignment of
# layers to stages like (each list is a model chunk):
# Stage 0: [0, 1] [4, 5]
# Stage 1: [2, 3] [6, 7]
assert num_layers_per_pipeline_rank % vp_size == 0, (
f"num_layers_per_pipeline_rank {num_layers_per_pipeline_rank} \
should be divisible by vp_size {vp_size}"
)
num_layers_per_virtual_stage = num_layers_per_pipeline_rank // vp_size
num_layers_to_build = num_layers_per_virtual_stage
else:
# Non-interleaved pipeline parallelism:
# Each stage gets a contiguous set of layers.
num_layers_to_build = num_layers_per_pipeline_rank
# The embedding (or loss) layer cannot function as a standalone transformer layer
# Reduce the number of layers to construct by 1 on the first (or last) stage if the
# embedding (or loss) layer is included in the pipeline parallelism partition and placement.
if config.account_for_embedding_in_pipeline_split:
if is_vp_first_stage(vp_stage, vp_size) and is_first_pp_stage:
num_layers_to_build -= 1
assert num_layers_to_build >= 0, "Not enough layers in the first virtual pipeline stage"
if config.account_for_loss_in_pipeline_split:
if is_vp_last_stage(vp_stage, vp_size) and is_last_pp_stage:
num_layers_to_build -= 1
assert num_layers_to_build >= 0, "Not enough layers in the last virtual pipeline stage"
return num_layers_to_build
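# Editor's note: the sketch below is illustrative only and not part of the original module.
# It assumes a config object exposing the same attributes that get_num_layers_to_build reads
# above; passing pp_rank explicitly avoids touching Megatron parallel state.
def _sketch_num_layers_to_build():
    """Example: 8 layers, pp_size=2, vp_size=2 -> each virtual stage builds 2 layers."""
    from types import SimpleNamespace
    cfg = SimpleNamespace(
        pipeline_model_parallel_layout=None,
        num_layers=8,
        pipeline_model_parallel_size=2,
        virtual_pipeline_model_parallel_size=2,
        num_layers_in_first_pipeline_stage=None,
        num_layers_in_last_pipeline_stage=None,
        account_for_embedding_in_pipeline_split=False,
        account_for_loss_in_pipeline_split=False,
    )
    # 8 layers / 2 pp stages = 4 per pp rank; 4 / 2 virtual stages = 2 per model chunk.
    assert get_num_layers_to_build(cfg, vp_stage=0, pp_rank=0) == 2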
def merge_router_topk_indices(attention_mask, input_ids, mini_layer_topk_idx_list, tf_config, vp_rank=None):
"""
Merge recorded router top-k indices across sequence-parallel ranks for all router instances,
then pack/unpack them to align with the original (batch, seq_len) layout and append the result.
Args:
attention_mask (torch.Tensor): Attention mask of shape [batch_size, seq_len]. Used to determine
the valid token positions during pack/unpack.
input_ids (torch.Tensor): Input token IDs of shape [batch_size, seq_len]. Used together with
attention_mask for sequence packing/unpacking.
mini_layer_topk_idx_list (list): A Python list to which the merged top-k indices tensor will be appended.
tf_config: Megatron/Transformer engine configuration object. Used to locate router instances for
the current micro-batch.
vp_rank (Optional[int]): Virtual pipeline stage rank override. If None, the current VP rank from
Megatron parallel state will be used.
Returns:
None: The function has side effects only; it appends a tensor of shape
[1, dynamic_bs_all, layer_num, topk] to mini_layer_topk_idx_list.
"""
with torch.no_grad():
router_instances_list = RouterReplayHelper.get_micro_batch_router_list(tf_config, vp_rank)
layers_topk_idx = []
for router in router_instances_list:
layers_topk_idx.append(router.recorded_topk_idx.to(torch.uint8)) # dynamic_bs, topk
# layer_num, dynamic_bs, topk -> dynamic_bs, layer_num, topk
layers_topk_idx = torch.stack(layers_topk_idx).permute(1, 0, 2).to(device_name)
# dynamic_bs, layer_num, topk -> 1, dynamic_bs_all, layer_num, topk
layers_topk_idx = (
gather_from_sequence_parallel_region(layers_topk_idx, tensor_parallel_output_grad=False)
.unsqueeze(0)
.contiguous()
)
batch_size, seq_len = attention_mask.shape[:2]
_, packed_seq_params = preprocess_packed_seqs(input_ids, attention_mask, pre_process=True)
layers_topk_idx = postprocess_packed_seqs(
layers_topk_idx, packed_seq_params, attention_mask, batch_size, seq_len, post_process=True
)
mini_layer_topk_idx_list.append(layers_topk_idx.cpu())
def set_router_replay_data(layers_topk_idx, attention_mask, tf_config, vp_rank=None):
"""
Scatter the packed router top-k indices back to sequence-parallel ranks and update each local
RouterReplay instance with target indices for replay mode.
This function prepares the per-layer, per-sample top-k routing decisions (recorded during an earlier
forward) so that subsequent replay passes can follow exactly the same routing.
Args:
layers_topk_idx (torch.Tensor): Router top-k indices with shape [bs, max_seq_len, layer_num, topk].
This should be the merged output produced by merge_router_topk_indices.
attention_mask (torch.Tensor): Attention mask [batch_size, seq_len] used for pack/unpack alignment.
tf_config: Megatron/Transformer engine configuration object.
vp_rank (Optional[int]): Virtual pipeline stage rank override. If None, the current VP rank from
Megatron parallel state will be used.
Returns:
None: The function updates internal RouterReplay instances in-place.
"""
with torch.no_grad():
if layers_topk_idx.is_nested:
layers_topk_idx_rmpad, _ = preprocess_thd_no_padding(layers_topk_idx, pre_process=True)
else:
layers_topk_idx_rmpad, _ = preprocess_packed_seqs(layers_topk_idx, attention_mask, pre_process=True)
layers_topk_idx_rmpad = layers_topk_idx_rmpad.contiguous() # 1, dynamic_bs_all, layer_num, topk
# 1, dynamic_bs_split, layer_num, topk
layers_topk_idx_rmpad_split = scatter_to_sequence_parallel_region(
layers_topk_idx_rmpad.to(device_name).squeeze(dim=0)
).unsqueeze(dim=0)
# dynamic_bs_split, layer_num, topk -> layer_num, dynamic_bs_split, topk
layers_topk_idx_reshape = layers_topk_idx_rmpad_split.permute(0, 2, 1, 3).squeeze(
dim=0
        )  # layer_num, dynamic_bs_split, topk
num_layers_in_data = layers_topk_idx_reshape.shape[0]
use_global_layer_index = getattr(tf_config, "num_layers", None) == num_layers_in_data
local_rank_info = get_current_rank_layer_info(tf_config, vp_rank)
offset, _ = local_rank_info["start"], local_rank_info["end"]
router_instances_list = RouterReplayHelper.get_micro_batch_router_list(tf_config, vp_rank)
for i, router in enumerate(router_instances_list):
layer_idx = None
if use_global_layer_index:
layer_number = getattr(router, "layer_number", None)
if layer_number is not None:
layer_idx = layer_number - 1
if layer_idx is None:
layer_idx = i + offset
if layer_idx < 0 or layer_idx >= num_layers_in_data:
raise ValueError(
f"router replay layer index {layer_idx} out of range for data with {num_layers_in_data} layers"
)
router.set_target_indices(layers_topk_idx_reshape[layer_idx].to(torch.int64))
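# Editor's note (not part of the original module): a typical cycle pairs the two helpers above.
# merge_router_topk_indices() is called per micro-batch while routers run in RECORD mode to
# collect top-k routing decisions, and set_router_replay_data() is called with the merged tensor
# before a later forward/backward pass so each local RouterReplay instance replays them.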
def reorder_and_merge_vpp_layers(
micro_batch_tensor_list,
num_microbatches: int,
vpp_size: int,
microbatch_group_size_per_vp_stage: int,
) -> torch.Tensor:
"""
Reorder and merge per-VPP layer blocks into a contiguous layer dimension.
    Given a list of per-virtual-microbatch tensors that jointly cover
    [bs*vpp_size, max_token_len, layer_num_per_vpp, topk], this function:
1) Builds the schedule table for virtual microbatches and reorders the first dimension so that entries
belonging to the same model chunk (VPP stage) become contiguous.
2) Reshapes and merges the (vpp_size, layer_num_per_vpp) into a single layer dimension, producing
[bs, max_token_len, layer_num, topk].
Args:
        micro_batch_tensor_list (list): List of per-virtual-microbatch input tensors.
num_microbatches (int): Number of microbatches per pipeline stage (bs).
vpp_size (int): Virtual pipeline parallel size (number of model chunks).
microbatch_group_size_per_vp_stage (int): Number of consecutive microbatches processed per VPP stage.
Returns:
torch.Tensor: Output tensor of shape [bs, max_token_len, layer_num, topk].
Raises:
ValueError: If input tensor dimensionality or expected sizes do not match.
RuntimeError: If the computed output shape is unexpected or the schedule length mismatches.
"""
# 1) Build schedule table: map each virtual_microbatch_id -> (microbatch_id, model_chunk_id)
schedule_table = get_schedule_table(num_microbatches, vpp_size, microbatch_group_size_per_vp_stage)
# 2) Group by model_chunk_id to build reorder indices so entries of the same chunk become contiguous along dim 0
tensor_by_chunk = [[] for _ in range(vpp_size)]
mini_tensor_list = []
for vidx, (_mb, chunk_id) in enumerate(schedule_table):
tensor_by_chunk[chunk_id].append(micro_batch_tensor_list[vidx])
for chunk_id in range(vpp_size):
mini_tensor_list.append(torch.cat(tensor_by_chunk[chunk_id], dim=0))
out = torch.cat(mini_tensor_list, dim=2)
return out
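# Editor's note: illustrative sketch only, not part of the original module. It assumes
# get_schedule_table returns num_microbatches * vpp_size (microbatch_id, model_chunk_id)
# entries, which is what the loop above relies on.
def _sketch_reorder_and_merge_vpp_layers():
    num_microbatches, vpp_size, layers_per_vpp, topk, seq = 2, 2, 3, 4, 8
    # One tensor per virtual microbatch: [1, seq, layers_per_vpp, topk].
    micro_batch_tensor_list = [
        torch.zeros(1, seq, layers_per_vpp, topk) for _ in range(num_microbatches * vpp_size)
    ]
    out = reorder_and_merge_vpp_layers(
        micro_batch_tensor_list,
        num_microbatches=num_microbatches,
        vpp_size=vpp_size,
        microbatch_group_size_per_vp_stage=num_microbatches,
    )
    # Layers from all VPP chunks are merged into one contiguous layer dimension.
    assert out.shape == (num_microbatches, seq, layers_per_vpp * vpp_size, topk)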
def get_current_rank_layer_info(tf_config, vp_rank=None):
    """Return the local transformer layer range and count for the current (pp_rank, vp_stage).
    Args:
        tf_config: Configuration object passed to get_num_layers_to_build and
            get_transformer_layer_offset.
        vp_rank (Optional[int]): Explicit virtual pipeline stage to query. If None, defaults to 0.
    Returns:
        dict: A dict with keys {"start", "end", "count"} describing the local layer range.
"""
if vp_rank is None:
vp_rank = 0
num_layers_to_build = get_num_layers_to_build(tf_config, vp_stage=vp_rank)
offset = get_transformer_layer_offset(tf_config, vp_stage=vp_rank)
local = {}
local["start"] = offset
local["end"] = offset + num_layers_to_build
local["count"] = num_layers_to_build
return local
def pp_gather(local_layers_router_map, tf_config):
# TODO: Consider non-uniform layer allocation cases.
"""
Gather local router maps from all PP ranks into a global router map.
Args:
local_layers_router_map (torch.Tensor): Local router map of shape
[bs, max_seq_len, local_num_layers, topk].
tf_config: Configuration providing pipeline_model_parallel_size.
Returns:
torch.Tensor: Global router map of shape [bs, max_seq_len, num_layers, topk] placed on CPU.
"""
pp_size = tf_config.pipeline_model_parallel_size
if pp_size <= 1:
return local_layers_router_map
pp_group = mpu.get_pipeline_model_parallel_group()
world_size = torch.distributed.get_world_size(pp_group)
local_layers_router_map = local_layers_router_map.to(device_name)
layers_topk_idx_global_list = [
torch.empty(
size=local_layers_router_map.shape,
dtype=local_layers_router_map.dtype,
device=local_layers_router_map.device,
)
for _ in range(world_size)
]
torch.distributed.all_gather(
tensor=local_layers_router_map,
tensor_list=layers_topk_idx_global_list,
group=pp_group,
async_op=False,
)
vp_size = tf_config.virtual_pipeline_model_parallel_size
if vp_size is not None:
vpp_router_map_offset = [[] for _ in range(pp_size)]
for pp_stage in range(pp_size):
vpp_router_map_offset[pp_stage].append(0)
for vp_stage in range(vp_size):
num_layers_to_build = get_num_layers_to_build(tf_config, vp_stage, pp_stage)
vpp_router_map_offset[pp_stage].append(num_layers_to_build + vpp_router_map_offset[pp_stage][-1])
layers_topk_idx_global = []
for vp_stage in range(vp_size):
for pp_stage in range(pp_size):
piece = slice(vpp_router_map_offset[pp_stage][vp_stage], vpp_router_map_offset[pp_stage][vp_stage + 1])
layers_topk_idx_global.append(layers_topk_idx_global_list[pp_stage][:, :, piece, :])
global_router_map = torch.cat(layers_topk_idx_global, dim=2).to("cpu")
else:
global_router_map = torch.cat(layers_topk_idx_global_list, dim=2).to("cpu")
return global_router_map
class RouterReplayHelper:
"""Helper class to query router replay state and locate local RouterReplay instances."""
@staticmethod
def get_micro_batch_router_list(tf_config, vp_rank=None):
"""
Return the list of RouterReplay instances corresponding to the current micro-batch and local
(pp_rank, vp_stage) layer range.
        When virtual pipeline (VPP) is enabled, the offset is computed by summing the layer counts of the
        preceding VP stages, and the slice covers only the layers built for the requested vp_rank. The
        returned slice is taken from the global RouterReplay.router_instances list.
Args:
tf_config: Configuration object used to compute layer assignments.
vp_rank (Optional[int]): Explicit virtual pipeline stage to query. If None, the current VP
rank from Megatron parallel state is used when available.
Returns:
list: A contiguous sublist of RouterReplay.router_instances for the local layer range.
"""
vp_size = tf_config.virtual_pipeline_model_parallel_size
if vp_size is not None:
vp_rank = 0 if vp_rank is None else vp_rank
offset = 0
for pre_vp_stage in range(vp_size):
if pre_vp_stage == vp_rank:
break
num_layers_to_build = get_num_layers_to_build(tf_config, pre_vp_stage)
offset += num_layers_to_build
else:
offset = 0
num_layers_to_build = get_num_layers_to_build(tf_config, vp_rank)
router_instances_list = RouterReplay.router_instances[offset : offset + num_layers_to_build]
return router_instances_list
@staticmethod
def is_r2_record_action(tf_config, vp_rank=None) -> bool:
"""Return True if the current router_replay_action is RECORD (R2) for the local router instances.
This inspects the first local RouterReplay instance's router_replay_action and compares it to
RouterReplayAction.RECORD.
"""
router_instances_list = RouterReplayHelper.get_micro_batch_router_list(tf_config, vp_rank)
        return bool(
            router_instances_list and router_instances_list[0].router_replay_action == RouterReplayAction.RECORD
        )
@staticmethod
def is_replay_forward_action(tf_config, vp_rank=None) -> bool:
"""Return True if the current router_replay_action is REPLAY_FORWARD for the local router instances.
This inspects the first local RouterReplay instance's router_replay_action and compares it to
RouterReplayAction.REPLAY_FORWARD.
"""
router_instances_list = RouterReplayHelper.get_micro_batch_router_list(tf_config, vp_rank)
        return bool(
            router_instances_list
            and router_instances_list[0].router_replay_action == RouterReplayAction.REPLAY_FORWARD
        )
@staticmethod
def is_replay_backward_action(tf_config, vp_rank=None) -> bool:
"""Return True if the current router_replay_action is REPLAY_BACKWARD for the local router instances.
This inspects the first local RouterReplay instance's router_replay_action and compares it to
RouterReplayAction.REPLAY_BACKWARD.
"""
router_instances_list = RouterReplayHelper.get_micro_batch_router_list(tf_config, vp_rank)
        return bool(
            router_instances_list
            and router_instances_list[0].router_replay_action == RouterReplayAction.REPLAY_BACKWARD
        )
|
verl__utils__megatron__sequence_parallel.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn.functional as F
from megatron.core import parallel_state as mpu
def mark_parameter_as_sequence_parallel(parameter):
parameter.sequence_parallel = True
def is_sequence_parallel_param(param):
return hasattr(param, "sequence_parallel") and param.sequence_parallel
def pad_to_sequence_parallel(unpad_tokens: torch.Tensor):
"""pad the tokens such that the total length is a multiple of sp world size
Args:
unpad_tokens: (total_nnz, ...). Tokens after removing padding
Returns:
the padded tokens: (total_nnz + pad_size,...)
"""
total_nnz = unpad_tokens.shape[0]
sp_world_size = mpu.get_tensor_model_parallel_world_size()
pad_size = 0 if total_nnz % sp_world_size == 0 else sp_world_size - total_nnz % sp_world_size
if pad_size > 0:
if unpad_tokens.ndim == 1:
unpad_tokens = F.pad(unpad_tokens, (0, pad_size))
elif unpad_tokens.ndim == 2:
unpad_tokens = F.pad(unpad_tokens, (0, 0, 0, pad_size))
else:
            raise NotImplementedError(f"Padding dim {unpad_tokens.ndim} is not supported")
return unpad_tokens
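# Editor's note: illustrative sketch only, not part of the original module. It reproduces the
# padding arithmetic above with an explicit sp_world_size instead of reading Megatron parallel state.
def _sketch_pad_to_sequence_parallel():
    sp_world_size = 4
    unpad_tokens = torch.arange(10)  # total_nnz = 10
    pad_size = (sp_world_size - unpad_tokens.shape[0] % sp_world_size) % sp_world_size
    padded = F.pad(unpad_tokens, (0, pad_size))
    assert padded.shape[0] == 12  # padded up to the next multiple of sp_world_size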
|
verl__utils__megatron__tensor_parallel.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities for using tensor_parallel in megatron
"""
from typing import TYPE_CHECKING
import torch
import torch.distributed as dist
from megatron.core import parallel_state as mpu
from torch.nn import init
if TYPE_CHECKING:
from megatron.core import ModelParallelConfig
def update_kwargs_with_config(dictionary: dict, config: "ModelParallelConfig"):
dictionary["config"] = config
return dictionary
def get_default_kwargs_for_model_parallel_config():
model_parallel_config_kwargs = {
"params_dtype": torch.float32,
"use_cpu_initialization": False,
"perform_initialization": True,
"gradient_accumulation_fusion": False,
"sequence_parallel": False,
}
return model_parallel_config_kwargs
def get_default_model_parallel_config():
from megatron.core import ModelParallelConfig
return ModelParallelConfig(**get_default_kwargs_for_model_parallel_config())
def get_common_default_kwargs_for_parallel_linear():
default_model_parallel_config = get_default_model_parallel_config()
common_default_kwargs = {
"init_method": init.xavier_normal_,
"stride": 1,
"keep_master_weight_for_test": False,
"config": default_model_parallel_config,
}
return common_default_kwargs
def get_default_kwargs_for_column_parallel_linear():
from megatron.core import ModelParallelConfig
model_parallel_config_kwargs = get_default_kwargs_for_model_parallel_config()
column_parallel_config_kwargs = {
"async_tensor_model_parallel_allreduce": False,
}
model_parallel_config_kwargs.update(column_parallel_config_kwargs)
column_default_kwargs = {
"config": ModelParallelConfig(**model_parallel_config_kwargs),
}
common_default_kwargs = get_common_default_kwargs_for_parallel_linear()
common_default_kwargs.update(column_default_kwargs)
return common_default_kwargs
def get_default_kwargs_for_row_parallel_linear():
common_default_kwargs = get_common_default_kwargs_for_parallel_linear()
return common_default_kwargs
def get_default_kwargs_for_parallel_embedding():
from megatron.core import ModelParallelConfig
model_parallel_config_kwargs = get_default_kwargs_for_model_parallel_config()
embedding_default_kwargs = {
"init_method": init.xavier_normal_,
"config": ModelParallelConfig(**model_parallel_config_kwargs),
}
return embedding_default_kwargs
def is_tensor_parallel_param(param):
return hasattr(param, "tensor_model_parallel") and param.tensor_model_parallel
def get_tensor_parallel_partition_dim(param):
assert is_tensor_parallel_param(param)
return param.partition_dim
def get_tensor_parallel_partition_stride(param):
assert is_tensor_parallel_param(param)
return param.partition_stride
class _VocabParallelEntropy(torch.autograd.Function):
@staticmethod
def forward(ctx, vocab_parallel_logits: torch.Tensor) -> torch.Tensor:
@torch.compile(dynamic=True)
def mul_reduce(a, b):
return (a * b).sum(dim=-1, keepdim=True)
logits_max = vocab_parallel_logits.max(dim=-1, keepdim=True).values
dist.all_reduce(logits_max, op=dist.ReduceOp.MAX, group=mpu.get_tensor_model_parallel_group())
normalized_vocab_parallel_logits = vocab_parallel_logits - logits_max
normalized_exp_logits = normalized_vocab_parallel_logits.exp_()
normalized_sum_exp_logits = normalized_exp_logits.sum(dim=-1, keepdim=True)
dist.all_reduce(normalized_sum_exp_logits, group=mpu.get_tensor_model_parallel_group())
softmax_logits = normalized_exp_logits.div_(normalized_sum_exp_logits)
sum_softmax_times_logits = mul_reduce(softmax_logits, vocab_parallel_logits)
dist.all_reduce(sum_softmax_times_logits, group=mpu.get_tensor_model_parallel_group())
entropy = logits_max + normalized_sum_exp_logits.log() - sum_softmax_times_logits
ctx.save_for_backward(vocab_parallel_logits, softmax_logits, sum_softmax_times_logits)
return entropy.squeeze(dim=-1)
@staticmethod
def backward(ctx, grad_output: torch.Tensor) -> torch.Tensor:
vocab_parallel_logits, softmax_logits, sum_softmax_times_logits = ctx.saved_tensors
# reuse softmax_logits as grad
vocab_parallel_logits.sub_(sum_softmax_times_logits)
softmax_logits.mul_(vocab_parallel_logits)
softmax_logits.mul_(grad_output.unsqueeze(dim=-1))
# recover vocab_parallel_logits
vocab_parallel_logits.add_(sum_softmax_times_logits)
softmax_logits.mul_(-1)
return softmax_logits
def vocab_parallel_entropy(vocab_parallel_logits: torch.Tensor) -> torch.Tensor:
"""Compute entropy when the logits are sharded in tp ranks
Args:
vocab_parallel_logits: (total_nnz, vocab_size // tp_size)
Returns: (total_nnz,)
"""
return _VocabParallelEntropy.apply(vocab_parallel_logits)
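# Editor's note: illustrative sketch only, not part of the original module. On a single rank
# (no vocab sharding) the entropy computed by _VocabParallelEntropy reduces to
# logsumexp(logits) - sum(softmax(logits) * logits), shown here with plain torch ops.
def _sketch_entropy_reference(logits: torch.Tensor) -> torch.Tensor:
    softmax = torch.softmax(logits, dim=-1)
    return torch.logsumexp(logits, dim=-1) - (softmax * logits).sum(dim=-1)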
def vocab_parallel_log_probs_from_logits(logits, labels):
"""TODO(zhangchi.usc1992): We may change the implementation later"""
from megatron.core import tensor_parallel
return -tensor_parallel.vocab_parallel_cross_entropy(vocab_parallel_logits=logits, target=labels)
def vocab_parallel_log_probs_from_logits_response_rmpad(input_ids, attention_mask, logits_rmpad, response_length):
"""Similar to log_probs_from_logits_response_rmpad, but the logits_rmpad is now spliited across tensor parallel
region.
This will further reduce the peak memory usage during training
Args:
input_ids: [batch_size, seqlen]
attention_mask: [batch_size, seqlen]
logits_rmpad: [total_nnz, vocab_size // tp_size]
response_length: int
"""
from flash_attn.bert_padding import pad_input, unpad_input
batch_size, seqlen = input_ids.shape
input_ids_rmpad, indices, *_ = unpad_input(input_ids.unsqueeze(-1), attention_mask=attention_mask)
input_ids_rmpad = input_ids_rmpad.squeeze(-1)
input_ids_rmpad_rolled = torch.roll(input_ids_rmpad, shifts=-1, dims=0)
full_log_probs_rmpad = vocab_parallel_log_probs_from_logits(
logits=logits_rmpad, labels=input_ids_rmpad_rolled
) # (total_nnz,)
full_output = pad_input(
hidden_states=full_log_probs_rmpad.unsqueeze(-1), indices=indices, batch=batch_size, seqlen=seqlen
)
output = full_output.squeeze(-1)[:, -response_length - 1 : -1] # [batch_size, response_length]
return output
|
verl__utils__megatron_peft_utils.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for PEFT (Parameter-Efficient Fine-Tuning) of Megatron in VERL."""
import os
from pathlib import Path
from typing import Iterator
import torch
# Map megatron lora target modules to HF-style module names for vLLM
MEGATRON_TO_HF_MODULES = {
"linear_qkv": ["q_proj", "k_proj", "v_proj"],
"linear_proj": ["o_proj"],
"linear_fc1": ["gate_proj", "up_proj"],
"linear_fc2": ["down_proj"],
"router": ["gate"],
# Canonical LoRA mappings
"linear_q": ["q_proj"],
"linear_k": ["k_proj"],
"linear_v": ["v_proj"],
"linear_fc1_up": ["up_proj"],
"linear_fc1_gate": ["gate_proj"],
# MLA mappings
"linear_kv_down_proj": ["kv_a_proj_with_mqa"],
"linear_kv_up_proj": ["kv_b_proj"],
"linear_q_down_proj": ["q_a_proj"],
"linear_q_up_proj": ["q_b_proj"],
"linear_q_proj": ["q_proj"],
}
# Modules with stacked parameters that need .base_layer suffix in vLLM
STACKED_PARAMS = [
".q_proj.weight",
".q_proj.bias",
".k_proj.weight",
".k_proj.bias",
".v_proj.weight",
".v_proj.bias",
".o_proj.weight",
".o_proj.bias",
".gate_proj.weight",
".up_proj.weight",
".down_proj.weight",
".mlp.gate.weight",
".mlp.gate.bias",
".mlp.gate.e_score_correction_bias",
".kv_a_proj_with_mqa.weight",
".kv_b_proj.weight",
".q_a_proj.weight",
".q_b_proj.weight",
]
def _get_rank_checkpoint_path(base_path: str) -> str:
"""Get rank-specific checkpoint path following Megatron's convention.
    Returns a path like base_path/mp_rank_{tp:02d}, with _{pp:03d} appended when pipeline
    parallelism is enabled and _{ep:03d} appended when expert parallelism is enabled.
Args:
base_path: Base checkpoint directory
Returns:
Rank-specific subdirectory path
"""
from megatron.core import mpu
tensor_rank = mpu.get_tensor_model_parallel_rank()
pipeline_rank = mpu.get_pipeline_model_parallel_rank()
expert_rank = mpu.get_expert_model_parallel_rank()
pipeline_parallel = mpu.get_pipeline_model_parallel_world_size() > 1
expert_parallel = mpu.get_expert_model_parallel_world_size() > 1
if not pipeline_parallel:
rank_path = os.path.join(base_path, f"mp_rank_{tensor_rank:02d}")
else:
rank_path = os.path.join(base_path, f"mp_rank_{tensor_rank:02d}_{pipeline_rank:03d}")
if expert_parallel:
rank_path = rank_path + f"_{expert_rank:03d}"
return rank_path
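# Editor's note (not part of the original module): for example, with tensor_rank=1, pipeline
# parallelism enabled (pipeline_rank=0) and expert parallelism disabled, the helper above returns
# os.path.join(base_path, "mp_rank_01_000"), so each model-parallel rank gets its own subpath.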
def get_adapter_state_dict(model):
"""Extract only adapter parameters from a model.
Args:
model: PyTorch model (possibly wrapped in DDP/Float16Module)
Returns:
Dict of adapter parameter names to tensors
"""
from verl.utils.megatron_utils import unwrap_model
# Unwrap model from DDP/Float16Module
unwrapped = unwrap_model(model)
if isinstance(unwrapped, list):
unwrapped = unwrapped[0]
adapter_state = {}
for name, param in unwrapped.named_parameters():
if ".adapter." in name.lower():
adapter_state[name] = param.data.clone()
return adapter_state
def save_adapter_checkpoint(
model: torch.nn.Module | list[torch.nn.Module],
checkpoint_path: str,
rank: int = 0,
):
"""Save only adapter parameters to checkpoint.
This is much more efficient than saving the full model when using PEFT,
as adapters typically represent <1% of total parameters.
    Follows Megatron's rank-naming convention: each model-parallel rank saves to
    checkpoint_path/mp_rank_{tp:02d}[_{pp:03d}][_{ep:03d}]_adapter.pt.
Args:
model: Model or list of models
checkpoint_path: Base path to save checkpoint (rank-specific subdirs created)
rank: Process rank (used for logging only)
"""
if isinstance(model, list):
models = model
else:
models = [model]
# Get adapter state from first model
adapter_state = get_adapter_state_dict(models[0])
if not adapter_state:
if rank == 0:
print("Warning: No adapter parameters found to save")
return
# Get rank-specific directory path
Path(checkpoint_path).mkdir(parents=True, exist_ok=True)
rank_path = _get_rank_checkpoint_path(checkpoint_path)
adapter_file = rank_path + "_adapter.pt"
torch.save(
{
"adapter_state_dict": adapter_state,
},
adapter_file,
)
if rank == 0:
print(f"Saved {len(adapter_state)} adapter parameters to {checkpoint_path} (distributed)")
def load_adapter_checkpoint(
model: torch.nn.Module | list[torch.nn.Module],
checkpoint_path: str,
strict: bool = True,
):
"""Load adapter parameters from checkpoint.
    Follows Megatron's rank-naming convention: each model-parallel rank reads from
    checkpoint_path/mp_rank_{tp:02d}[_{pp:03d}][_{ep:03d}]_adapter.pt.
Args:
model: Model or list of models
checkpoint_path: Base path to checkpoint directory
strict: Whether to strictly enforce parameter name matching
"""
from megatron.core import mpu
from verl.utils.megatron_utils import unwrap_model
# Get rank-specific path
rank_path = _get_rank_checkpoint_path(checkpoint_path)
adapter_file = rank_path + "_adapter.pt"
if not os.path.isfile(adapter_file):
raise FileNotFoundError(f"Adapter checkpoint not found: {adapter_file}")
checkpoint = torch.load(adapter_file, map_location="cpu")
adapter_state = checkpoint.get("adapter_state_dict", {})
if not adapter_state:
print("Warning: No adapter parameters found in checkpoint")
return
if isinstance(model, list):
models = model
else:
models = [model]
# Load adapter parameters into each model (for VPP, models may have multiple chunks)
loaded_count = 0
for m in models:
unwrapped = unwrap_model(m)
if isinstance(unwrapped, list):
unwrapped = unwrapped[0]
# Load parameters
_, unexpected = unwrapped.load_state_dict(adapter_state, strict=False)
if strict and unexpected:
raise RuntimeError(f"Error loading adapter checkpoint:\nUnexpected keys: {unexpected}")
loaded_count += len(adapter_state)
if (
mpu.get_data_parallel_rank() == 0
and mpu.get_tensor_model_parallel_rank() == 0
and mpu.get_pipeline_model_parallel_rank() == 0
):
print(f"Loaded {len(adapter_state)} adapter parameters from {checkpoint_path}")
def count_adapter_parameters(model):
"""Count the number of trainable adapter parameters.
Args:
model: PyTorch model
Returns:
Tuple of (adapter_params, total_params, percentage)
"""
from verl.utils.megatron_utils import unwrap_model
unwrapped = unwrap_model(model)
if isinstance(unwrapped, list):
unwrapped = unwrapped[0]
adapter_params = 0
total_params = 0
for name, param in unwrapped.named_parameters():
total_params += param.numel()
if "lora" in name.lower() or "adapter" in name.lower():
if param.requires_grad:
adapter_params += param.numel()
percentage = 100 * adapter_params / total_params if total_params > 0 else 0
return adapter_params, total_params, percentage
def print_adapter_info(model):
"""Print information about adapter parameters in the model."""
adapter_params, total_params, percentage = count_adapter_parameters(model)
print(f"\n{'=' * 60}")
print("PEFT Adapter Information:")
print(f" Total parameters: {total_params:,}")
print(f" Adapter parameters: {adapter_params:,}")
print(f" Trainable percentage: {percentage:.2f}%")
print(f"{'=' * 60}\n")
def convert_megatron_to_hf_target_modules(megatron_modules: list[str]) -> list[str]:
"""Convert megatron lora target modules to HF-style module names.
Args:
megatron_modules: List of megatron-style module names.
Returns:
List of HF-style module names with duplicates removed.
"""
hf_target_modules = []
for module in megatron_modules:
if module in MEGATRON_TO_HF_MODULES:
hf_target_modules.extend(MEGATRON_TO_HF_MODULES[module])
else:
hf_target_modules.append(module)
# Remove duplicates while preserving order
return list(dict.fromkeys(hf_target_modules))
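# Editor's note: illustrative sketch only, not part of the original module. "my_custom_module"
# is a made-up placeholder showing that unknown names pass through unchanged.
def _sketch_convert_target_modules():
    converted = convert_megatron_to_hf_target_modules(["linear_qkv", "linear_proj", "my_custom_module"])
    # Known megatron names expand to their HF equivalents; unknown names pass through unchanged.
    assert converted == ["q_proj", "k_proj", "v_proj", "o_proj", "my_custom_module"]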
def build_peft_config_for_vllm(lora_config: dict) -> dict:
"""Build a peft_config dict compatible with vLLM's PEFTHelper from megatron lora config.
Args:
lora_config: Megatron lora configuration dictionary.
Returns:
A dictionary compatible with vLLM's PEFTHelper.from_dict().
"""
from peft import TaskType
target_modules = lora_config.get("target_modules", ["linear_qkv", "linear_proj", "linear_fc1", "linear_fc2"])
exclude_modules = lora_config.get("exclude_modules", [])
hf_target_modules = convert_megatron_to_hf_target_modules(target_modules)
hf_exclude_modules = convert_megatron_to_hf_target_modules(exclude_modules)
return {
"task_type": TaskType.CAUSAL_LM,
"r": lora_config.get("rank", 0),
"lora_alpha": lora_config.get("alpha", 32),
"target_modules": hf_target_modules,
"exclude_modules": hf_exclude_modules,
"bias": "none",
"lora_dropout": lora_config.get("dropout", 0.0),
}
# vLLM needs to target all linear layers, regardless of the specific LoRA config
def add_base_layer_suffix(
params: Iterator[tuple[str, torch.Tensor]],
model_type: str,
) -> Iterator[tuple[str, torch.Tensor]]:
"""Yield param pairs with a base-layer suffix added to the param name.
Args:
params: Iterator of (param_name, tensor)
model_type: The type of the model (e.g., "llama").
"""
stacked_params = STACKED_PARAMS
# TODO: other models may have more special treatment, or integrate this into Megatron-Bridge
if model_type == "llama":
stacked_params = [".embed_tokens.weight", *STACKED_PARAMS]
for name, param in params:
ending_suffix = ""
for suffix in stacked_params:
if name.endswith(suffix):
ending_suffix = suffix
break
if ending_suffix:
suffix = ending_suffix.rsplit(".", 1)[-1]
name = f"{name[: -len(suffix)]}base_layer.{suffix}"
yield name, param
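# Editor's note: illustrative sketch only, not part of the original module. The parameter name and
# the "qwen2" model_type below are arbitrary examples; any model_type other than "llama" takes the
# default stacked-params path.
def _sketch_add_base_layer_suffix():
    params = [("model.layers.0.self_attn.q_proj.weight", torch.zeros(1))]
    out = list(add_base_layer_suffix(iter(params), model_type="qwen2"))
    # Stacked params gain a ".base_layer." segment so vLLM can attach LoRA adapters on top.
    assert out[0][0] == "model.layers.0.self_attn.q_proj.base_layer.weight"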
__all__ = [
"get_adapter_state_dict",
"save_adapter_checkpoint",
"load_adapter_checkpoint",
"count_adapter_parameters",
"print_adapter_info",
"convert_megatron_to_hf_target_modules",
"build_peft_config_for_vllm",
"add_base_layer_suffix",
]
|
verl__utils__megatron_utils.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
# Copyright 2023-2024 SGLang Team
# Copyright 2025 ModelBest Inc. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pretrain utilities."""
import gc
import inspect
import logging
import os
import warnings
from dataclasses import dataclass
from typing import Any
import torch
import torch.nn.functional as F
from megatron.core import ModelParallelConfig, mpu, parallel_state, tensor_parallel
from megatron.core.distributed import DistributedDataParallel as DDP
from megatron.core.distributed import DistributedDataParallelConfig
from megatron.core.enums import ModelType
from megatron.core.optimizer import ChainedOptimizer
from megatron.core.parallel_state import get_global_memory_buffer
from megatron.core.transformer import MLATransformerConfig, TransformerConfig
from megatron.core.transformer.module import Float16Module
from megatron.core.transformer.multi_token_prediction import MTPLossLoggingHelper
from megatron.core.utils import get_attr_wrapped_model
from transformers import PretrainedConfig
import verl.utils.megatron.tensor_parallel as tp_utils
from verl.utils.device import get_device_id, get_device_name, get_torch_device
from verl.utils.fs import local_mkdir_safe
from verl.utils.model import normalize_model_name
from verl.utils.torch_dtypes import PrecisionType
from verl.workers.config import HFModelConfig, McoreEngineConfig
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
def get_model_config(model):
return get_attr_wrapped_model(model, "config", allow_none=False)
def get_model(
model_provider_func,
model_type=ModelType.encoder_or_decoder,
wrap_with_ddp=True,
use_distributed_optimizer=True,
transformer_config=None,
override_ddp_config=None,
):
"""Build the model."""
# Build model.
if (
mpu.get_pipeline_model_parallel_world_size() > 1
and mpu.get_virtual_pipeline_model_parallel_world_size() is not None
):
assert model_type != ModelType.encoder_and_decoder, (
"Interleaved schedule not supported for model with both encoder and decoder"
)
model = []
has_vp_stage = inspect.signature(mpu.is_pipeline_first_stage).parameters.get("vp_stage", None) is not None
for i in range(mpu.get_virtual_pipeline_model_parallel_world_size()):
mpu.set_virtual_pipeline_model_parallel_rank(i)
# Set pre_process and post_process only after virtual rank is set.
extra_kwargs = {} if not has_vp_stage else {"ignore_virtual": False, "vp_stage": i}
pre_process = mpu.is_pipeline_first_stage(**extra_kwargs)
post_process = mpu.is_pipeline_last_stage(**extra_kwargs)
this_model = model_provider_func(pre_process=pre_process, post_process=post_process, vp_stage=i)
this_model.model_type = model_type
model.append(this_model)
mpu.set_virtual_pipeline_model_parallel_rank(0)
else:
pre_process = mpu.is_pipeline_first_stage()
post_process = mpu.is_pipeline_last_stage()
add_encoder = True
add_decoder = True
assert model_type != ModelType.encoder_and_decoder, "Model type encoder_and_decoder is not supported"
if model_type == ModelType.encoder_and_decoder:
if mpu.get_pipeline_model_parallel_world_size() > 1:
assert mpu.get_pipeline_model_parallel_split_rank() is not None, (
"Split rank needs to be specified for model with both encoder and decoder"
)
rank = mpu.get_pipeline_model_parallel_rank()
split_rank = mpu.get_pipeline_model_parallel_split_rank()
world_size = mpu.get_pipeline_model_parallel_world_size()
pre_process = rank == 0 or rank == split_rank
post_process = (rank == (split_rank - 1)) or (rank == (world_size - 1))
add_encoder = mpu.is_pipeline_stage_before_split()
add_decoder = mpu.is_pipeline_stage_after_split()
model = model_provider_func(
pre_process=pre_process, post_process=post_process, add_encoder=add_encoder, add_decoder=add_decoder
)
else:
model = model_provider_func(pre_process=pre_process, post_process=post_process)
model.model_type = model_type
if not isinstance(model, list):
model = [model]
# Set tensor model parallel attributes if not set.
# Only parameters that are already tensor model parallel have these
# attributes set for them. We should make sure the default attributes
# are set for all params so the optimizer can use them.
for model_module in model:
for param in model_module.parameters():
tensor_parallel.set_defaults_if_not_set_tensor_model_parallel_attributes(param)
# Print number of parameters.
if mpu.get_data_parallel_rank() == 0:
print(
" > number of parameters on (tensor, pipeline) model parallel rank ({}, {}): {}".format(
mpu.get_tensor_model_parallel_rank(),
mpu.get_pipeline_model_parallel_rank(),
sum([sum([p.nelement() for p in model_module.parameters()]) for model_module in model]),
),
flush=True,
)
# GPU allocation.
if transformer_config is None or (not transformer_config.use_cpu_initialization):
for model_module in model:
model_module.to(f"{get_device_name()}:{get_device_id()}")
# Fp16 conversion.
config: TransformerConfig = get_model_config(model[0])
config.fp8 = None
tfconfig: TransformerConfig = model[0].config
if config.fp16 or config.bf16: # the ModelParallelConfig in GPTModel
model = [Float16Module(config, model_module) for model_module in model]
if wrap_with_ddp:
ddp_models = []
ddp_config_dict = {
"use_distributed_optimizer": use_distributed_optimizer,
"grad_reduce_in_fp32": True,
"overlap_grad_reduce": False,
}
if override_ddp_config is not None:
ddp_config_dict.update(override_ddp_config)
ddp_config = DistributedDataParallelConfig(**ddp_config_dict)
for model_chunk_idx, model_chunk in enumerate(model):
ddp_model = DDP(
config=tfconfig,
module=model_chunk,
disable_bucketing=(model_chunk_idx > 0),
ddp_config=ddp_config,
)
ddp_models.append(ddp_model)
model = ddp_models
# # Broadcast params from data parallel src rank to other data parallel ranks.
# # if args.data_parallel_random_init:
for model_module in model:
model_module.broadcast_params()
return model
@dataclass
class McoreModuleWrapperConfig:
"""Configuration for Mcore module wrapper."""
is_value_model: bool = False
share_embeddings_and_output_weights: bool = False
wrap_with_ddp: bool = True
use_distributed_optimizer: bool = True
def make_megatron_module(
wrap_config: McoreModuleWrapperConfig,
tf_config: TransformerConfig,
hf_config: PretrainedConfig,
bridge: Any = None,
provider: Any = None,
override_model_config: dict[str, Any] = None,
override_ddp_config: dict[str, Any] = None,
peft_cls: Any = None,
peft_config: Any = None,
):
if override_model_config is None:
override_model_config = {}
if bridge is not None:
if provider is None:
from verl.models.mcore.mbridge import freeze_moe_router, make_value_model
value_model_hook = make_value_model
else:
from verl.models.mcore.bridge import freeze_moe_router, make_value_model
hidden_size = (
hf_config.text_config.hidden_size if hasattr(hf_config, "text_config") else hf_config.hidden_size
)
value_model_hook = make_value_model(hidden_size, provider.sequence_parallel)
post_model_creation_callbacks = []
if wrap_config.is_value_model:
post_model_creation_callbacks.append(value_model_hook)
if override_model_config.get("moe_config", {}).get("freeze_moe_router", False):
post_model_creation_callbacks.append(freeze_moe_router)
if provider is not None:
# When using PEFT with Megatron-Bridge, we must apply PEFT transformation
# BEFORE wrapping the model in DDP. This is required because:
# 1. PEFT freezes base model parameters (requires_grad=False)
# 2. DDP must be aware of which parameters are trainable when building gradient buckets
# 3. The distributed optimizer must only track trainable (adapter) parameters
# See Megatron-Bridge docs: training/peft.md
# Register PEFT transformation as pre-wrap hook if peft_cls is specified
# This must happen BEFORE DDP wrapping to avoid KeyError with frozen parameters
if peft_cls is not None:
from verl.utils.megatron_peft_utils import load_adapter_checkpoint, print_adapter_info
def peft_pre_wrap_hook(model):
"""Pre-wrap hook that applies PEFT transformation."""
# Apply PEFT transformation - this will freeze base model and add adapters
# The PEFT callable handles both freezing and transformation
transformed_model = peft_cls(model, training=True)
# Set parameters to save (adapter-only checkpointing)
peft_cls.set_params_to_save(transformed_model)
# Load adapter weights if adapter_path is specified
adapter_path = getattr(peft_config, "adapter_path", None)
if adapter_path is not None and adapter_path:
print(f"Loading adapter weights from: {adapter_path}")
load_adapter_checkpoint(transformed_model, adapter_path)
# Print PEFT statistics
if torch.distributed.get_rank() == 0:
print_adapter_info(transformed_model)
return transformed_model
provider.register_pre_wrap_hook(peft_pre_wrap_hook)
# Register post-creation callbacks (make_value_model, freeze_moe_router) as pre-wrap hooks
for callback in post_model_creation_callbacks:
provider.register_pre_wrap_hook(callback)
# Create DDP config if needed
ddp_config = None
if wrap_config.wrap_with_ddp:
from megatron.bridge.training.config import DistributedDataParallelConfig
ddp_config_dict = {
"use_distributed_optimizer": wrap_config.use_distributed_optimizer,
}
# Apply any DDP config overrides
if override_ddp_config is not None:
ddp_config_dict.update(override_ddp_config)
ddp_config = DistributedDataParallelConfig(**ddp_config_dict)
ddp_config.finalize()
# Now call provide_distributed_model with all hooks registered
# Hooks will be applied automatically before DDP wrapping
model = provider.provide_distributed_model(
wrap_with_ddp=wrap_config.wrap_with_ddp,
ddp_config=ddp_config,
fp16=provider.fp16,
bf16=provider.bf16,
)
# Extract TransformerConfig from the created model
tf_config = get_model_config(model[0] if isinstance(model, list) else model)
else:
model = bridge.get_model(
post_model_creation_callbacks=post_model_creation_callbacks,
wrap_with_ddp=wrap_config.wrap_with_ddp,
fp16=tf_config.fp16,
bf16=tf_config.bf16,
ddp_config=override_ddp_config,
)
if isinstance(tf_config, MLATransformerConfig):
# Keep the same behavior as hf_to_mcore_config_dpskv3
from verl.models.mcore.patch import apply_patch
apply_patch()
else:
def megatron_model_provider(pre_process, post_process, vp_stage=None):
from verl.models.mcore import init_mcore_model
parallel_model = init_mcore_model(
tf_config,
hf_config,
pre_process,
post_process,
share_embeddings_and_output_weights=wrap_config.share_embeddings_and_output_weights,
value=wrap_config.is_value_model,
freeze_moe_router=override_model_config.get("moe_config", {}).get("freeze_moe_router", False),
vp_stage=vp_stage,
)
parallel_model.to(get_device_name())
return parallel_model
model = get_model(
megatron_model_provider,
wrap_with_ddp=wrap_config.wrap_with_ddp,
use_distributed_optimizer=wrap_config.use_distributed_optimizer,
override_ddp_config=override_ddp_config,
)
return model, tf_config
ALL_MODULE_WRAPPER_CLASSNAMES = (DDP, Float16Module)
def unwrap_model(model, module_instances=ALL_MODULE_WRAPPER_CLASSNAMES):
return_list = True
if not isinstance(model, list):
model = [model]
return_list = False
unwrapped_model = []
for model_module in model:
while isinstance(model_module, module_instances):
model_module = model_module.module
unwrapped_model.append(model_module)
if not return_list:
return unwrapped_model[0]
return unwrapped_model
def convert_config(hf_config: PretrainedConfig, megatron_config) -> TransformerConfig:
"""[Deprecated] convert config
Args:
hf_config (PretrainedConfig): _description_
megatron_config (_type_): _description_
Returns:
TransformerConfig: _description_
"""
warnings.warn("[deprecated] use config converter for more model support", stacklevel=2)
print(f"megatron config {megatron_config}")
dt = PrecisionType.to_dtype(megatron_config.params_dtype)
print(f"pipeline_dtype=megatron_config {dt}")
qkv_bias = True if "Qwen2ForCausalLM" in hf_config.architectures else getattr(hf_config, "attention_bias", False)
overlap_p2p_comm = (
mpu.get_virtual_pipeline_model_parallel_world_size() is not None
and mpu.get_virtual_pipeline_model_parallel_world_size() > 1
)
batch_p2p_comm = False
transformer_config = TransformerConfig(
num_layers=hf_config.num_hidden_layers,
hidden_size=hf_config.hidden_size,
num_attention_heads=hf_config.num_attention_heads,
num_query_groups=hf_config.num_key_value_heads,
ffn_hidden_size=hf_config.intermediate_size,
# max_position_embeddings=hf_config.max_position_embeddings,
activation_func=F.silu,
normalization="RMSNorm",
# rotary_percent=False, # default,
gated_linear_unit=True, # for llama
use_cpu_initialization=True,
        apply_residual_connection_post_layernorm=False,  # TODO: double-check what this flag means here
add_bias_linear=False,
tensor_model_parallel_size=mpu.get_tensor_model_parallel_world_size(),
pipeline_model_parallel_size=mpu.get_pipeline_model_parallel_world_size(),
virtual_pipeline_model_parallel_size=mpu.get_virtual_pipeline_model_parallel_world_size(),
context_parallel_size=mpu.get_context_parallel_world_size(),
overlap_p2p_comm=overlap_p2p_comm,
batch_p2p_comm=batch_p2p_comm,
pipeline_dtype=dt,
params_dtype=dt,
sequence_parallel=mpu.get_tensor_model_parallel_world_size() > 1,
variable_seq_lengths=True,
masked_softmax_fusion=True,
moe_token_dispatcher_type="alltoall",
attention_dropout=hf_config.attention_dropout,
hidden_dropout=getattr(hf_config, "hidden_dropout", 0.0),
add_qkv_bias=qkv_bias,
bf16=dt is torch.bfloat16,
)
return transformer_config
def mcore_model_parallel_config(
sequence_parallel: bool,
params_dtype: torch.dtype,
) -> ModelParallelConfig:
# WARNING: Code should not reach this point. This function is deprecated and will be removed.
# Please use hf_to_mcore_config_dense() from verl.models.mcore.config_converter instead.
warnings.warn(
"Code should not reach this point. This function is deprecated and will be removed. Please use "
"hf_to_mcore_config_dense() from verl.models.mcore.config_converter instead.",
DeprecationWarning,
stacklevel=2,
)
return ModelParallelConfig(
tensor_model_parallel_size=mpu.get_tensor_model_parallel_world_size(),
pipeline_model_parallel_size=mpu.get_pipeline_model_parallel_world_size(),
virtual_pipeline_model_parallel_size=mpu.get_virtual_pipeline_model_parallel_world_size(),
context_parallel_size=mpu.get_context_parallel_world_size(),
sequence_parallel=sequence_parallel,
params_dtype=params_dtype,
pipeline_dtype=params_dtype,
bf16=True,
fp16=False,
timers=None,
)
@torch.no_grad()
def offload_megatron_model_to_cpu(models):
"""
    In Megatron, the model and optimizer storage is laid out as:
- bf16 parameter data chunked in model parallel group
- fp32 grad chunked in model parallel group
- fp32 main_parameter chunked in model and dp group
- fp32 optimizer state chunked in model and dp group
"""
for model_chunk in models:
if isinstance(model_chunk, DDP):
model_chunk_all_buffers = [model_chunk.buffers, model_chunk.expert_parallel_buffers]
for buffers in model_chunk_all_buffers:
for buffer in buffers:
# offload parameters
if buffer.param_data.storage().size() > 0:
buffer.param_data.cpu_data = buffer.param_data.data.cpu().pin_memory()
buffer.param_data_size = buffer.param_data.storage().size()
buffer.param_data.storage().resize_(0)
assert buffer.param_data_size == buffer.param_data.cpu_data.storage().size()
if buffer.grad_data.storage().size() > 0:
# if the grad_data size is already zero, we assume that it is already offloaded
buffer.grad_data_size = buffer.grad_data.storage().size()
buffer.grad_data.storage().resize_(0)
else:
# we need this for ref module
for _, param in model_chunk.named_parameters():
param.data = param.data.to("cpu", non_blocking=True)
if param.grad is not None:
param.grad = param.grad.to("cpu", non_blocking=True)
gc.collect()
get_torch_device().empty_cache()
@torch.no_grad()
def load_megatron_model_to_gpu(models, load_grad=True):
for model_chunk in models:
if isinstance(model_chunk, DDP):
model_chunk_all_buffers = [model_chunk.buffers, model_chunk.expert_parallel_buffers]
for buffers in model_chunk_all_buffers:
for buffer in buffers:
# sometimes, we don't want to load grad for pure inference
if load_grad and hasattr(buffer, "grad_data_size"):
buffer.grad_data.storage().resize_(buffer.grad_data_size)
buffer.grad_data.zero_()
if buffer.param_data.storage().size() == 0:
buffer.param_data.storage().resize_(buffer.param_data_size)
# copy data from cpu to cuda
buffer.param_data.copy_(buffer.param_data.cpu_data, non_blocking=True)
else:
# we need this for ref module
device_id = get_device_id()
for _, param in model_chunk.named_parameters():
param.data = param.data.to(device_id, non_blocking=True)
if param.grad is not None:
param.grad = param.grad.to(device_id, non_blocking=True)
gc.collect()
get_torch_device().empty_cache()
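# Editor's note (not part of the original module): the two helpers above are intended to be used
# as a pair, e.g. offload_megatron_model_to_cpu(models) before handing the device to rollout, then
# load_megatron_model_to_gpu(models, load_grad=True) before the next training step, where `models`
# is the list of (possibly DDP-wrapped) model chunks.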
@torch.no_grad()
def offload_megatron_copy_params(optimizers):
"""
Offload optimizer parameters to CPU. Supports both Megatron optimizers
and `ChainedOptimizer`, which wraps a list of underlying optimizers.
Args:
optimizers: The optimizer or ChainedOptimizer instance.
"""
def _iter_opts(opt):
if isinstance(opt, ChainedOptimizer):
return opt.chained_optimizers
return [opt]
def offload_tensor_to_cpu(tensor):
if tensor is None:
return
tensor.data = tensor.data.to("cpu", non_blocking=True)
def offload_group_to_cpu(group):
if group is None:
return
if isinstance(group, list):
for param_group in group:
if isinstance(param_group, list):
for param in param_group:
offload_tensor_to_cpu(param)
else:
offload_tensor_to_cpu(param_group)
else:
offload_tensor_to_cpu(group)
# Offload all parameter groups to CPU for each underlying optimizer
for _opt in _iter_opts(optimizers):
if hasattr(_opt, "shard_fp32_from_float16_groups"):
offload_group_to_cpu(_opt.shard_fp32_from_float16_groups)
@torch.no_grad()
def load_megatron_copy_params(optimizers):
"""
Load optimizer parameters back to GPU. Handles ChainedOptimizer.
Args:
optimizers: Optimizer or ChainedOptimizer instance.
"""
def _iter_opts(opt):
if isinstance(opt, ChainedOptimizer):
return opt.chained_optimizers
return [opt]
def load_tensor_to_gpu(tensor):
if tensor is None:
return
device_id = get_device_id()
tensor.data = tensor.data.to(device_id, non_blocking=True)
def load_group_to_gpu(group):
if group is None:
return
if isinstance(group, list):
for param_group in group:
if isinstance(param_group, list):
for param in param_group:
load_tensor_to_gpu(param)
else:
load_tensor_to_gpu(param_group)
else:
load_tensor_to_gpu(group)
# Load all parameter groups to GPU for each underlying optimizer
for _opt in _iter_opts(optimizers):
if hasattr(_opt, "shard_fp32_from_float16_groups"):
load_group_to_gpu(_opt.shard_fp32_from_float16_groups)
@torch.no_grad()
def offload_megatron_optimizer(optimizers):
def _iter_opts(opt):
if isinstance(opt, ChainedOptimizer):
return opt.chained_optimizers
return [opt]
for _opt in _iter_opts(optimizers):
offload_megatron_copy_params(_opt)
        # A worker may hold zero parameters when a custom pipeline layout is enabled
if _opt.optimizer is not None:
# HybridDeviceOptimizer: offload all sub-optimizer states to CPU
# TODO: this should be a method in Megatron-LM's HybridDeviceOptimizer
hdo = _opt.optimizer
if all(hasattr(hdo, attr) for attr in ("sub_optimizers", "inner_param_to_orig_param", "state")):
for optimizer in hdo.sub_optimizers:
for param, state in optimizer.state.items():
for k, v in state.items():
if not isinstance(v, torch.Tensor):
continue
orig_param = hdo.inner_param_to_orig_param.get(param, param)
hdo.state[orig_param][k] = state[k] = v.to("cpu")
else:
opt_state_dict_values = _opt.optimizer.state.values()
for v in opt_state_dict_values:
if "exp_avg" in v:
v["exp_avg"] = v["exp_avg"].to("cpu", non_blocking=True)
if "exp_avg_sq" in v:
v["exp_avg_sq"] = v["exp_avg_sq"].to("cpu", non_blocking=True)
try:
# Free TransformerEngine's dummy weight gradients cache
# https://github.com/NVIDIA/TransformerEngine/blob/release_v2.10/transformer_engine/pytorch/module/base.py#L64
from transformer_engine.pytorch.module.base import _dummy_wgrads
_dummy_wgrads.clear()
except ImportError:
pass
# Free Megatron-LM's global memory buffer
get_global_memory_buffer().buffer.clear()
gc.collect()
get_torch_device().empty_cache()
@torch.no_grad()
def load_megatron_optimizer(optimizers):
def _iter_opts(opt):
if isinstance(opt, ChainedOptimizer):
return opt.chained_optimizers
return [opt]
for _opt in _iter_opts(optimizers):
load_megatron_copy_params(_opt)
        # A worker may hold zero parameters when a custom pipeline layout is enabled
if _opt.optimizer is not None:
# if we are using HybridDeviceOptimizer, we need to only move gpu optimizer state to gpu
if hasattr(_opt.optimizer, "_move_new_state_to_right_device"):
_opt.optimizer._move_new_state_to_right_device()
else:
opt_state_dict_values = _opt.optimizer.state.values()
for v in opt_state_dict_values:
if "exp_avg" in v:
v["exp_avg"] = v["exp_avg"].to(get_device_id(), non_blocking=True)
if "exp_avg_sq" in v:
v["exp_avg_sq"] = v["exp_avg_sq"].to(get_device_id(), non_blocking=True)
gc.collect()
get_torch_device().empty_cache()
def get_dist_checkpoint_path(checkpoint_path):
local_mkdir_safe(checkpoint_path)
local_mkdir_safe(os.path.join(checkpoint_path, "dist_ckpt"))
return os.path.join(checkpoint_path, "dist_ckpt")
def get_hf_model_checkpoint_path(checkpoint_path):
local_mkdir_safe(checkpoint_path)
local_mkdir_safe(os.path.join(checkpoint_path, "huggingface"))
return os.path.join(checkpoint_path, "huggingface")
def get_transformer_config_checkpoint_path(checkpoint_path):
os.makedirs(checkpoint_path, exist_ok=True)
return os.path.join(checkpoint_path, "transformer_config.json")
def convert_megatron_model_to_transformers_model(
name,
param,
config: PretrainedConfig,
tp_size: int,
num_query_groups: int,
convert_qkv_gate_up_by_trunk_concat=False,
):
"""Convert megatron model to transformers model."""
new_params = {}
def convert_qkv_shard(full_tensor, q_name, k_name, v_name):
nonlocal config
nonlocal tp_size
nonlocal num_query_groups
q_shard_list = []
k_shard_list = []
v_shard_list = []
hidden_size_per_head = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
if config.num_key_value_heads >= tp_size:
q_size_tp = hidden_size_per_head * config.num_attention_heads // tp_size
kv_size_tp = hidden_size_per_head * config.num_key_value_heads // tp_size
total_size = q_size_tp + 2 * kv_size_tp
for i in range(tp_size):
num_query_groups_per_partition = num_query_groups // tp_size
qkv_part = full_tensor[i * total_size : (i + 1) * total_size]
q_size_chunk = q_size_tp // num_query_groups_per_partition
kv_size_chunk = kv_size_tp // num_query_groups_per_partition
for qkv_part_chunk in qkv_part.chunk(num_query_groups_per_partition):
q_part = qkv_part_chunk[:q_size_chunk]
k_part = qkv_part_chunk[q_size_chunk : q_size_chunk + kv_size_chunk]
v_part = qkv_part_chunk[q_size_chunk + kv_size_chunk :]
q_shard_list.append(q_part)
k_shard_list.append(k_part)
v_shard_list.append(v_part)
else:
q_size_tp = hidden_size_per_head * config.num_attention_heads // tp_size
kv_size_tp = hidden_size_per_head
total_size = q_size_tp + 2 * kv_size_tp
for i in range(tp_size):
num_query_groups_per_partition = num_query_groups // tp_size
qkv_part = full_tensor[i * total_size : (i + 1) * total_size]
q_size_chunk = q_size_tp // num_query_groups_per_partition
kv_size_chunk = kv_size_tp // num_query_groups_per_partition
for qkv_part_chunk in qkv_part.chunk(num_query_groups_per_partition):
q_part = qkv_part_chunk[:q_size_chunk]
k_part = qkv_part_chunk[q_size_chunk : q_size_chunk + kv_size_chunk]
v_part = qkv_part_chunk[q_size_chunk + kv_size_chunk :]
q_shard_list.append(q_part)
if i * config.num_key_value_heads % tp_size == 0:
k_shard_list.append(k_part)
v_shard_list.append(v_part)
new_params[q_name] = torch.cat(q_shard_list, dim=0)
new_params[k_name] = torch.cat(k_shard_list, dim=0)
new_params[v_name] = torch.cat(v_shard_list, dim=0)
def convert_gate_up_shard(full_tensor, gate_name, up_name):
nonlocal config
nonlocal tp_size
intermediate_size_tp = config.intermediate_size // tp_size
gate_weight_list = []
up_weight_list = []
for i in range(tp_size):
gate_up_weight_tp = full_tensor[intermediate_size_tp * 2 * i : intermediate_size_tp * 2 * (i + 1)]
gate_weight_tp = gate_up_weight_tp[:intermediate_size_tp]
up_weight_tp = gate_up_weight_tp[intermediate_size_tp:]
gate_weight_list.append(gate_weight_tp)
up_weight_list.append(up_weight_tp)
new_params[gate_name] = torch.cat(gate_weight_list, dim=0)
new_params[up_name] = torch.cat(up_weight_list, dim=0)
if name == "embedding.word_embeddings.weight":
new_params["model.embed_tokens.weight"] = param
elif "self_attention" in name:
splitted_name = name.split(".")
layer_number = splitted_name[2]
component = splitted_name[4]
param_type = splitted_name[5]
if component == "linear_proj":
new_params[f"model.layers.{layer_number}.self_attn.o_proj.weight"] = param
elif component == "linear_qkv" and not isinstance(param, list):
if param_type == "layer_norm_weight":
new_params[f"model.layers.{layer_number}.input_layernorm.weight"] = param
else:
if convert_qkv_gate_up_by_trunk_concat:
convert_qkv_shard(
param,
f"model.layers.{layer_number}.self_attn.q_proj.{param_type}",
f"model.layers.{layer_number}.self_attn.k_proj.{param_type}",
f"model.layers.{layer_number}.self_attn.v_proj.{param_type}",
)
else:
new_params[f"model.layers.{layer_number}.self_attn.qkv_proj.{param_type}"] = param
elif component == "q_layernorm" or component == "k_layernorm":
hf_component = component.replace("layer", "")
new_params[f"model.layers.{layer_number}.self_attn.{hf_component}.weight"] = param
else:
assert isinstance(param, list) and len(param) == 3
assert param_type == "weight" or param_type == "bias"
new_params[f"model.layers.{layer_number}.self_attn.q_proj.{param_type}"] = param[0]
new_params[f"model.layers.{layer_number}.self_attn.k_proj.{param_type}"] = param[1]
new_params[f"model.layers.{layer_number}.self_attn.v_proj.{param_type}"] = param[2]
elif "mlp" in name:
splitted_name = name.split(".")
layer_number = splitted_name[2]
component = splitted_name[4]
param_type = splitted_name[5]
if component == "linear_fc1" and not isinstance(param, list):
if param_type == "layer_norm_weight":
new_params[f"model.layers.{layer_number}.post_attention_layernorm.weight"] = param
elif param_type == "weight":
if convert_qkv_gate_up_by_trunk_concat:
convert_gate_up_shard(
param,
f"model.layers.{layer_number}.mlp.gate_proj.weight",
f"model.layers.{layer_number}.mlp.up_proj.weight",
)
else:
new_params[f"model.layers.{layer_number}.mlp.gate_up_proj.weight"] = param
elif component == "linear_fc1" and isinstance(param, list):
assert len(param) == 2
assert param_type == "weight" or param_type == "bias"
new_params[f"model.layers.{layer_number}.mlp.gate_proj.weight"] = param[0]
new_params[f"model.layers.{layer_number}.mlp.up_proj.weight"] = param[1]
elif component == "linear_fc2":
new_params[f"model.layers.{layer_number}.mlp.down_proj.weight"] = param
elif name == "decoder.final_layernorm.weight":
new_params["model.norm.weight"] = param
elif name == "output_layer.weight":
new_params["lm_head.weight"] = param
else:
raise ValueError(f"Unknown param name: {name}")
return new_params.keys(), new_params.values()
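# Illustrative name mapping (a sketch, not exhaustive): this function converts Megatron parameter
# names into HF-style names, e.g.
#   embedding.word_embeddings.weight                    -> model.embed_tokens.weight
#   decoder.layers.0.self_attention.linear_proj.weight  -> model.layers.0.self_attn.o_proj.weight
#   decoder.final_layernorm.weight                      -> model.norm.weight
# Fused linear_qkv / linear_fc1 weights stay fused (qkv_proj / gate_up_proj) unless
# convert_qkv_gate_up_by_trunk_concat is set, in which case the convert_*_shard helpers split them
# into per-head q/k/v and gate/up tensors.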
def broadcast_from_megatron_pp(tensor: torch.Tensor):
# tensor is not None only in one of the pp ranks
if tensor is not None:
shape = tensor.shape
dtype = tensor.dtype
tensor_parallel = getattr(tensor, "tensor_model_parallel", None)
partition_dim = getattr(tensor, "partition_dim", None)
tensor_spec = (shape, dtype, tensor_parallel, partition_dim)
else:
tensor_spec = None
tensor_spec_output = [None] * mpu.get_pipeline_model_parallel_world_size()
torch.distributed.all_gather_object(
object_list=tensor_spec_output, obj=tensor_spec, group=mpu.get_pipeline_model_parallel_group()
)
# find the src rank
target_tensor_spec = None
src_rank = None
for rank, tensor_spec in enumerate(tensor_spec_output):
if tensor_spec is not None:
if target_tensor_spec is None:
target_tensor_spec = tensor_spec
else:
raise ValueError("A tensor exists on two pp ranks")
src_rank = rank
assert target_tensor_spec is not None
if tensor is None:
tensor = torch.empty(size=target_tensor_spec[0], dtype=target_tensor_spec[1], device=get_device_id())
if target_tensor_spec[2] is not None:
tensor.tensor_model_parallel = target_tensor_spec[2]
if target_tensor_spec[3] is not None:
tensor.partition_dim = target_tensor_spec[3]
global_rank = torch.distributed.get_global_rank(group=mpu.get_pipeline_model_parallel_group(), group_rank=src_rank)
torch.distributed.broadcast(tensor=tensor, src=global_rank, group=mpu.get_pipeline_model_parallel_group())
return tensor
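# Usage sketch (illustrative; `model` and `mpu` are assumed to be in scope with Megatron parallel
# state initialized). This is a collective call: the owning pp rank passes the tensor, every other
# rank passes None, and all ranks get the same full tensor back.
#   embed_w = broadcast_from_megatron_pp(
#       model.embedding.word_embeddings.weight if mpu.is_pipeline_first_stage() else None
#   )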
def broadcast_str_from_megatron_pp(obj: Any):
obj_output = [None] * mpu.get_pipeline_model_parallel_world_size()
torch.distributed.all_gather_object(object_list=obj_output, obj=obj, group=mpu.get_pipeline_model_parallel_group())
src_rank = None
target_obj = None
for rank, item in enumerate(obj_output):
if item is not None:
if target_obj is not None:
raise ValueError("An object exists on two pp ranks")
target_obj = item
src_rank = rank
assert target_obj is not None, "No valid object found to broadcast."
global_rank = torch.distributed.get_global_rank(group=mpu.get_pipeline_model_parallel_group(), group_rank=src_rank)
obj_output = [None] * torch.distributed.get_world_size(group=mpu.get_pipeline_model_parallel_group())
obj_output[0] = target_obj
torch.distributed.broadcast_object_list(
object_list=obj_output, src=global_rank, group=mpu.get_pipeline_model_parallel_group()
)
return obj_output[0]
def default_tp_concat_fn(
layer_name_mapping,
name,
train_params,
infer_params,
model_config,
hf_config=None,
convert_qkv_gate_up_by_simple_split=False,
):
"""
name: name of the parameter
train_params: training parameters
infer_params (Iterable[torch.Tensor]): a iterator towards list of parameters all-gathered from micro_dp_group
model_config: huggingface model_config
TODO(zhangchi.usc1992): currently, the implementation is adhoc. We can move this function to the model
definition so that it is model-agnostic. If the model doesn't implement this function,
we can throw an error to force user disable TP HybridEngine.
"""
from megatron.core import mpu
train_tp_size = mpu.get_tensor_model_parallel_world_size()
if layer_name_mapping.get("qkv_layer_name") in name and "layer_norm" not in name:
# if the tensor is qkv, for each param on tp, split into q, k, v
# concat q, k, v separately.
q_lst = []
k_lst = []
v_lst = []
num_attention_heads = model_config.num_attention_heads
num_key_value_heads = model_config.num_key_value_heads
if "vision_model" in name:
num_attention_heads = hf_config.vision_config.num_heads
num_key_value_heads = hf_config.vision_config.num_heads
assert num_attention_heads % num_key_value_heads == 0
num_q_per_kv = num_attention_heads // num_key_value_heads
assert infer_params[0].shape[0] % (num_q_per_kv + 2) == 0, (
f"param '{name}' shape '{infer_params[0].shape}' dim0 is not divisible by {num_q_per_kv + 2}"
)
kv_size_per_tp = infer_params[0].shape[0] // (num_q_per_kv + 2)
split_size = [kv_size_per_tp * num_q_per_kv, kv_size_per_tp, kv_size_per_tp]
for infer_param in infer_params:
num_query_groups_per_partition = num_key_value_heads // train_tp_size
for chunk in infer_param.chunk(num_query_groups_per_partition):
split_size = [
kv_size_per_tp * num_q_per_kv // num_query_groups_per_partition,
kv_size_per_tp // num_query_groups_per_partition,
kv_size_per_tp // num_query_groups_per_partition,
]
q, k, v = chunk.split(split_size)
q_lst.append(q)
k_lst.append(k)
v_lst.append(v)
q = torch.cat(q_lst, dim=0)
k = torch.cat(k_lst, dim=0)
v = torch.cat(v_lst, dim=0)
infer_params = torch.cat((q, k, v), dim=0) if not convert_qkv_gate_up_by_simple_split else [q, k, v]
elif (
layer_name_mapping.get("gate_proj_layer_name") in name
and "layer_norm" not in name
and "vision_model.projection" not in name
):
# if the tensor is the fused gate_up projection
gate_lst = []
up_lst = []
for infer_param in infer_params:
gate, up = infer_param.chunk(2)
gate_lst.append(gate)
up_lst.append(up)
gate = torch.cat(gate_lst, dim=0)
up = torch.cat(up_lst, dim=0)
infer_params = torch.cat((gate, up), dim=0) if not convert_qkv_gate_up_by_simple_split else [gate, up]
elif "mlp.experts.linear_fc2.weight" in name: # moe
infer_params = torch.cat(infer_params, dim=1)
else:
# concat tensor
infer_params = torch.cat(infer_params, dim=tp_utils.get_tensor_parallel_partition_dim(train_params))
return infer_params
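# Worked example (a sketch under assumed shapes): for the fused gate_up weight with tp=2, each
# per-rank shard of shape [2 * intermediate_size_tp, hidden] is chunked into (gate_i, up_i); the
# gate halves of all ranks are concatenated first, then the up halves:
#   shard0 = [g0; u0], shard1 = [g1; u1]  ->  gate = cat([g0, g1]), up = cat([u0, u1])
# The result is returned fused as [gate; up], or as the list [gate, up] when
# convert_qkv_gate_up_by_simple_split is True.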
def per_tensor_generator(
actor_module,
model_config,
weight_converter,
transformer_config,
layer_name_mapping,
convert_qkv_gate_up_by_simple_split=True,
):
from megatron.core import parallel_state as mpu
pp_rank = mpu.get_pipeline_model_parallel_rank()
ep_size = mpu.get_expert_model_parallel_world_size()
etp_size = mpu.get_expert_tensor_parallel_world_size()
ep_group = mpu.get_expert_model_parallel_group()
etp_group = mpu.get_expert_tensor_parallel_group()
vpp_size = len(actor_module)
all_gather_group = mpu.get_tensor_model_parallel_group()
all_gather_group_size = torch.distributed.get_world_size(group=all_gather_group)
def tensor_generator():
for scan_vpp_idx in range(vpp_size):
existing_keys = set()
model = unwrap_model(actor_module[scan_vpp_idx])
for name, param in model.named_parameters():
existing_keys.add(name)
yield name, param
# note:
# there is a bug in megatron GPTModel:
# "decoder.layers[n].mlp.router.expert_bias" is not registered in named_parameters(), but it does appear in
# state_dict(). For now we patch it by adding those keys to extra_keys.
extra_keys = [x for x in model.state_dict().keys() if "_extra_state" not in x and x not in existing_keys]
for name in extra_keys:
yield name, model.state_dict()[name].to(get_device_id())
# first, make every rank aware of the full model's parameter layout
meta_info = []
for scan_vpp_idx in range(vpp_size):
existing_keys = set()
model = unwrap_model(actor_module[scan_vpp_idx])
for idx, (name, _) in enumerate(model.named_parameters()):
existing_keys.add(name)
meta_info.append((pp_rank, scan_vpp_idx, idx, name))
extra_keys = [x for x in model.state_dict().keys() if "_extra_state" not in x and x not in existing_keys]
for name in extra_keys:
meta_info.append((pp_rank, scan_vpp_idx, idx, name))
obj_spec_output = [None] * mpu.get_pipeline_model_parallel_world_size()
torch.distributed.all_gather_object(
object_list=obj_spec_output, obj=meta_info, group=mpu.get_pipeline_model_parallel_group()
)
layer_list_meta = [item for sublist in obj_spec_output for item in sublist]
gen_func = tensor_generator()
# lazy load tensor for full model
for cur_pp_rank, scan_vpp_idx, idx, name in layer_list_meta:
if model_config.tie_word_embeddings and ("output_layer" in name):
import warnings
warnings.warn(
"The current model ties the word embedding and output weights; skipping output layer conversion", stacklevel=2
)
continue
if cur_pp_rank == pp_rank:
try:
cur_name, cur_tensor = next(gen_func)
except StopIteration:
cur_name, cur_tensor = None, None
cur_name = normalize_model_name(name, cur_pp_rank, scan_vpp_idx, transformer_config)
else:
cur_tensor, cur_name = None, None
# pp broadcast model tensor and name
cur_name = broadcast_str_from_megatron_pp(cur_name)
broad_pp_tensor = broadcast_from_megatron_pp(cur_tensor)
# (xya): this is a hack to fix the name of the parameters
while cur_name.startswith("module."):
cur_name = cur_name[len("module.") :]
# EP
if ".mlp.experts.linear_fc" in cur_name and ep_size > 1:
num_experts = weight_converter.mcore_config.num_moe_experts
num_experts_per_rank = num_experts // ep_size
infer_params = [torch.empty_like(broad_pp_tensor) for _ in range(ep_size)]
torch.distributed.all_gather(infer_params, broad_pp_tensor, group=ep_group)
name_prefix, local_expert_id = cur_name.split(".weight")
local_expert_id = int(local_expert_id)
global_expert_ids = [num_experts_per_rank * ep_rank + local_expert_id for ep_rank in range(ep_size)]
global_expert_names = [f"{name_prefix}.weight{expert_id}" for expert_id in global_expert_ids]
for name, param in zip(global_expert_names, infer_params, strict=True):
if etp_size > 1:
# gather etp
etp_params = [torch.empty_like(param) for _ in range(etp_size)]
torch.distributed.all_gather(etp_params, param, group=etp_group)
params = etp_params
else:
params = [param]
merge_params = default_tp_concat_fn(
layer_name_mapping,
name,
broad_pp_tensor,
params,
model_config,
weight_converter.hf_config,
convert_qkv_gate_up_by_simple_split,
)
if not isinstance(merge_params, list):
merge_params = [merge_params]
converted_names, converted_params = weight_converter.convert_param(name, merge_params)
yield from zip(converted_names, [param.detach() for param in converted_params], strict=True)
continue
# tp all gather
if tp_utils.is_tensor_parallel_param(broad_pp_tensor):
# allocate a new tensor with proper size
if all_gather_group_size <= 1:
infer_params = [broad_pp_tensor]
else:
infer_params = [torch.empty_like(broad_pp_tensor) for _ in range(all_gather_group_size)]
torch.distributed.all_gather(infer_params, broad_pp_tensor, group=mpu.get_tensor_model_parallel_group())
infer_params = default_tp_concat_fn(
layer_name_mapping,
cur_name,
broad_pp_tensor,
infer_params,
model_config,
weight_converter.hf_config,
convert_qkv_gate_up_by_simple_split,
)
else:
infer_params = broad_pp_tensor
if not isinstance(infer_params, list):
infer_params = [infer_params]
converted_names, converted_params = weight_converter.convert_param(cur_name, infer_params)
yield from zip(converted_names, [param.detach() for param in converted_params], strict=True)
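# Usage sketch (illustrative): every rank iterates the generator in lockstep and receives HF-named,
# fully gathered (TP/PP/EP) tensors one at a time; `rollout_engine.load_weight` below is a
# hypothetical consumer, e.g. an inference-engine weight loader.
#   for hf_name, full_tensor in per_tensor_generator(
#       actor_module, model_config, weight_converter, transformer_config, layer_name_mapping
#   ):
#       rollout_engine.load_weight(hf_name, full_tensor)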
def get_transformer_layer_offset(pipeline_rank, vp_stage, config: TransformerConfig):
"""
Get the index offset of any pipeline stage, given the level of pipelining.
Make pipeline_rank and vp_stage as two arguments to make it more flexible,
which is able to fetch layer offset for any pipeline stage.
The original function only returns the layer offset for current pipeline stage.
Extension to https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/core/transformer/transformer_layer.py::get_transformer_layer_offset
"""
has_vp_stage = (
inspect.signature(parallel_state.is_pipeline_first_stage).parameters.get("vp_stage", None) is not None
)
extra_kwargs = {} if not has_vp_stage else {"ignore_virtual": False, "vp_stage": vp_stage}
if config.pipeline_model_parallel_size > 1:
if hasattr(config, "pipeline_model_parallel_layout") and config.pipeline_model_parallel_layout:
from megatron.core.transformer.enums import LayerType
offset = config.pipeline_model_parallel_layout.get_layer_offset(
layer_type=LayerType.decoder, vp_stage=vp_stage
)
elif (
config.num_layers_in_first_pipeline_stage is not None
or config.num_layers_in_last_pipeline_stage is not None
):
# Calculate number of pipeline stages to distribute the remaining Transformer
# layers after deducting the Transformer layers in the first or the last stages
middle_pipeline_stages = config.pipeline_model_parallel_size
middle_pipeline_stages -= sum(
[
1 if x is not None else 0
for x in (
config.num_layers_in_first_pipeline_stage,
config.num_layers_in_last_pipeline_stage,
)
]
)
# Calculate layers to distribute in each pipeline stage. If the
# num_layers_in_first_pipeline_stage and num_layers_in_last_pipeline_stage
# are not set, we will not enable uneven pipeline. All layers will be treated
# as middle layers.
num_layers_in_first_pipeline_stage = (
0 if config.num_layers_in_first_pipeline_stage is None else config.num_layers_in_first_pipeline_stage
)
num_layers_in_last_pipeline_stage = (
0 if config.num_layers_in_last_pipeline_stage is None else config.num_layers_in_last_pipeline_stage
)
middle_num_layers = (
config.num_layers - num_layers_in_first_pipeline_stage - num_layers_in_last_pipeline_stage
)
if (vp_size := config.virtual_pipeline_model_parallel_size) is not None:
assert vp_stage is not None, "vp_stage must be provided if virtual pipeline model parallel size is set"
# Calculate number of layers in each virtual model chunk
# If the num_layers_in_first_pipeline_stage and
# num_layers_in_last_pipeline_stage are not set, all pipeline stages
# will be treated as middle pipeline stages in the calculation
num_layers_per_virtual_model_chunk_in_first_pipeline_stage = (
0
if config.num_layers_in_first_pipeline_stage is None
else config.num_layers_in_first_pipeline_stage // vp_size
)
num_layers_per_virtual_model_chunk_in_last_pipeline_stage = (
0
if config.num_layers_in_last_pipeline_stage is None
else config.num_layers_in_last_pipeline_stage // vp_size
)
num_layers_per_virtual_model_chunk_in_middle_pipeline_stage = middle_num_layers // vp_size
# First stage + middle stage + last stage
total_virtual_chunks = (
num_layers_per_virtual_model_chunk_in_first_pipeline_stage
+ num_layers_per_virtual_model_chunk_in_middle_pipeline_stage
+ num_layers_per_virtual_model_chunk_in_last_pipeline_stage
)
# Calculate the layer offset with interleaved uneven pipeline parallelism
if pipeline_rank == 0:
offset = vp_stage * total_virtual_chunks
else:
offset = (
vp_stage * total_virtual_chunks
+ num_layers_per_virtual_model_chunk_in_first_pipeline_stage
+ (pipeline_rank - 1)
* (num_layers_per_virtual_model_chunk_in_middle_pipeline_stage // middle_pipeline_stages)
)
else:
if middle_pipeline_stages > 0:
num_layers_per_pipeline_rank = middle_num_layers // middle_pipeline_stages
else:
num_layers_per_pipeline_rank = 0
middle_pipeline_rank = (
pipeline_rank if config.num_layers_in_first_pipeline_stage is None else pipeline_rank - 1
)
if pipeline_rank == 0:
offset = 0
else:
offset = (middle_pipeline_rank * num_layers_per_pipeline_rank) + num_layers_in_first_pipeline_stage
else:
num_layers = config.num_layers
# Increase the number of layers by one if we include the embedding (loss)
# layer into pipeline parallelism partition and placement
if config.account_for_embedding_in_pipeline_split:
num_layers += 1
if config.account_for_loss_in_pipeline_split:
num_layers += 1
num_layers_per_pipeline_rank = num_layers // config.pipeline_model_parallel_size
if (vp_size := config.virtual_pipeline_model_parallel_size) is not None:
assert vp_stage is not None, "vp_stage must be provided if virtual pipeline model parallel size is set"
num_layers_per_virtual_rank = num_layers_per_pipeline_rank // vp_size
total_virtual_chunks = num_layers // vp_size
offset = vp_stage * total_virtual_chunks + (pipeline_rank * num_layers_per_virtual_rank)
# Reduce the offset of embedding layer from the total layer number
if config.account_for_embedding_in_pipeline_split and not parallel_state.is_pipeline_first_stage(
**extra_kwargs
):
offset -= 1
else:
offset = pipeline_rank * num_layers_per_pipeline_rank
# Reduce the offset of embedding layer from the total layer number
if config.account_for_embedding_in_pipeline_split and not parallel_state.is_pipeline_first_stage(
**extra_kwargs
):
offset -= 1
else:
offset = 0
return offset
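# Worked example (assuming no custom pipeline layout, no uneven first/last stages, no virtual
# pipeline, and no embedding/loss accounting): with config.num_layers = 24 and
# pipeline_model_parallel_size = 4, each stage owns 24 // 4 = 6 layers, so
# get_transformer_layer_offset(pipeline_rank=2, vp_stage=None, config=config) returns 2 * 6 = 12,
# i.e. stage 2 holds global layers 12..17.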
def register_megatron_training_hooks(model: list[torch.nn.Module], optimizer):
from megatron.core.distributed import finalize_model_grads
from megatron.core.utils import get_model_config
try:
from megatron.core.distributed.fsdp.mcore_fsdp_adapter import FullyShardedDataParallel as megatron_FSDP
except ImportError:
megatron_FSDP = DDP
# register some callbacks for megatron training, following https://github.com/NVIDIA/Megatron-LM/blob/core_v0.15.0rc7/megatron/training/training.py#L2039-L2057
for one_model in model:
config = get_model_config(one_model)
config.grad_scale_func = optimizer.scale_loss
config.finalize_model_grads_func = finalize_model_grads
overlap_param_gather = getattr(optimizer.config, "overlap_param_gather", False)
overlap_grad_reduce = getattr(one_model.ddp_config, "overlap_grad_reduce", False)
align_grad_reduce = True  # defaults to True; rarely set to False
align_param_gather = getattr(one_model.ddp_config, "align_param_gather", False)
if isinstance(model[0], megatron_FSDP | DDP) and overlap_grad_reduce:
assert config.no_sync_func is None, (
"When overlap_grad_reduce is True, config.no_sync_func must be None; "
"a custom no_sync_func is not supported when overlapping grad-reduce"
)
config.no_sync_func = [model_chunk.no_sync for model_chunk in model]
if len(model) == 1:
config.no_sync_func = config.no_sync_func[0]
if align_grad_reduce:
config.grad_sync_func = [model_chunk.start_grad_sync for model_chunk in model]
if len(model) == 1:
config.grad_sync_func = config.grad_sync_func[0]
if overlap_param_gather and align_param_gather:
config.param_sync_func = [model_chunk.start_param_sync for model_chunk in model]
if len(model) == 1:
config.param_sync_func = config.param_sync_func[0]
def mapping_string_to_attn_backend(args: dict) -> dict:
if "attention_backend" in args and isinstance(args["attention_backend"], str):
from megatron.core.transformer.enums import AttnBackend
args["attention_backend"] = AttnBackend[args["attention_backend"]]
return args
def get_megatron_mtp_loss(n_micro_batch):
# Calculate MTP loss scale similar to Megatron-LM implementation
mtp_loss_scale = 1.0 / n_micro_batch
# Create a dummy total_loss_dict to collect MTP metrics
total_loss_dict = {}
# Track MTP metrics - this will populate total_loss_dict with MTP losses
MTPLossLoggingHelper.track_mtp_metrics(
loss_scale=mtp_loss_scale, iteration=0, writer=None, wandb_writer=None, total_loss_dict=total_loss_dict
)
# Add MTP metrics to losses_reduced if any were collected
# total_loss_dict: {'mtp_1 loss': tensor(value, device='cuda:0')}
output = {}
if total_loss_dict:
for key, value in total_loss_dict.items():
# Convert key to have proper prefix and format
formatted_key = f"mtp_losses/{key.replace(' ', '_')}"
# only added to the 0th micro-batch; the MTP loss is a global value and is identical for every micro-batch
output[formatted_key] = value.cpu().item()
return output
def get_megatron_module_device(models: list[Any]) -> str:
if not models:
return "cpu"
model_chunk = models[0]
if not model_chunk.buffers:
try:
return next(model_chunk.module.parameters()).device.type
except StopIteration:
return "cpu"
buffer = model_chunk.buffers[0]
if buffer.param_data.storage().size() == 0:
return "cpu"
else:
return get_device_name()
def check_mtp_config(model_config: HFModelConfig, engine_config: McoreEngineConfig):
has_mtp = (
model_config.hf_config.num_nextn_predict_layers > 0
if hasattr(model_config.hf_config, "num_nextn_predict_layers")
else False
)
enable_mtp = model_config.mtp.enable
if "mtp_loss_scaling_factor" not in engine_config.override_transformer_config:
engine_config.override_transformer_config["mtp_loss_scaling_factor"] = model_config.mtp.mtp_loss_scaling_factor
if enable_mtp and not model_config.mtp.enable_train:
# disable parameter update by configure the loss scale to 0
engine_config.override_transformer_config["mtp_loss_scaling_factor"] = 0
# Modify the hf_config before initialization, and apply the patch after initialization
if enable_mtp and not has_mtp:
logger.error("MTP is enabled but the model has no MTP layer; ignoring model.mtp.enable")
model_config.mtp.enable = False
model_config.mtp.enable_train = False
elif has_mtp and not enable_mtp:
model_config.hf_config.num_nextn_predict_layers = 0
def patch_engine_mtp(module, model_config):
logger.warning("Applying mtp patch...")
from verl.models.mcore.mtp_patch import patch_mtp_layer_get_embeddings, patch_postprocess
print(module)
if isinstance(module, list):
for m in module:
patch_postprocess(m)
if model_config.mtp.detach_encoder:
patch_mtp_layer_get_embeddings(m)
else:
patch_postprocess(module)
if model_config.mtp.detach_encoder:
patch_mtp_layer_get_embeddings(module)
|
verl__utils__metric__utils.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Metrics utils.
"""
from enum import Enum
from typing import Any, Optional, Union
import numpy as np
import torch
def reduce_metrics(metrics: dict[str, Union["Metric", list[Any]]]) -> dict[str, Any]:
"""
Reduces a dictionary of metric lists (or Metric instances) to a single value per entry.
If a value is a Metric instance, its own aggregation method is used.
Otherwise, the reduce operation is determined by the key name:
- If the key contains "max", np.max is used
- If the key contains "min", np.min is used
- Otherwise, np.mean is used
Args:
metrics: A dictionary mapping metric names to lists of metric values or Metric instances.
Returns:
A dictionary with the same keys but with each list replaced by its reduced value.
Example:
>>> metrics = {
... "loss": [1.0, 2.0, 3.0],
... "accuracy": [0.8, 0.9, 0.7],
... "max_reward": [5.0, 8.0, 6.0],
... "min_error": [0.1, 0.05, 0.2]
... }
>>> reduce_metrics(metrics)
{"loss": 2.0, "accuracy": 0.8, "max_reward": 8.0, "min_error": 0.05}
"""
for key, val in metrics.items():
if isinstance(val, Metric):
metrics[key] = val.aggregate()
elif "max" in key:
metrics[key] = np.max(val)
elif "min" in key:
metrics[key] = np.min(val)
else:
metrics[key] = np.mean(val)
return metrics
class AggregationType(Enum):
MEAN = "mean"
SUM = "sum"
MIN = "min"
MAX = "max"
NumericType = (int, float, torch.Tensor, np.ndarray)
Numeric = int | float | torch.Tensor | np.ndarray
class Metric:
"""
A metric aggregator for collecting and aggregating numeric values.
This class accumulates numeric values (int, float, or scalar tensors) and computes
an aggregate statistic based on the specified aggregation type (MEAN, SUM, MIN, or MAX).
Args:
aggregation: The aggregation method to use. Can be a string ("mean", "sum", "min", "max")
or an AggregationType enum value.
value: Optional initial value(s) to add. Can be a single numeric value or a list of values.
Example:
>>> metric = Metric(aggregation="mean", value=1.0)
>>> metric.append(2.0)
>>> metric.append(3.0)
>>> metric.aggregate()
2.0
"""
def __init__(self, aggregation: str | AggregationType, value: Optional[Numeric | list[Numeric]] = None) -> None:
if isinstance(aggregation, str):
self.aggregation = AggregationType(aggregation)
else:
self.aggregation = aggregation
if not isinstance(self.aggregation, AggregationType):
raise ValueError(f"Unsupported aggregation type: {aggregation}")
self.values = []
if value is not None:
self.append(value)
def append(self, value: Union[Numeric, "Metric"]) -> None:
if isinstance(value, Metric):
self.extend(value)
return
if isinstance(value, torch.Tensor):
if value.numel() != 1:
raise ValueError("Only scalar tensors can be converted to float")
value = value.detach().item()
if not isinstance(value, NumericType):
raise ValueError(f"Unsupported value type: {type(value)}")
self.values.append(value)
def extend(self, values: Union["Metric", list[Numeric]]) -> None:
if isinstance(values, Metric):
if values.aggregation != self.aggregation:
raise ValueError(f"Aggregation type mismatch: {self.aggregation} != {values.aggregation}")
values = values.values
for value in values:
self.append(value)
def aggregate(self) -> float:
return self._aggregate(self.values, self.aggregation)
@classmethod
def _aggregate(cls, values: list[Numeric], aggregation: AggregationType) -> float:
match aggregation:
case AggregationType.MEAN:
return np.mean(values)
case AggregationType.SUM:
return np.sum(values)
case AggregationType.MIN:
return np.min(values)
case AggregationType.MAX:
return np.max(values)
@classmethod
def aggregate_dp(cls, metric_lists: list["Metric"]) -> float:
if not metric_lists:
raise ValueError("Cannot aggregate an empty list of metrics.")
value_lists = [ml.values for ml in metric_lists]
if not all(len(ls) == len(value_lists[0]) for ls in value_lists):
raise ValueError(
f"All Metric instances must have the same number of values "
f"for dp aggregation: {[len(ls) for ls in value_lists]}"
)
value_arrays = np.array(value_lists) # [num_dp, num_grad_accumulation]
aggregation = metric_lists[0].aggregation
match aggregation:
case AggregationType.SUM | AggregationType.MEAN:
return cls._aggregate(
values=np.mean(value_arrays, axis=0), aggregation=aggregation
) # mean over dp ranks
case AggregationType.MIN | AggregationType.MAX:
return cls._aggregate(values=value_arrays.flatten(), aggregation=aggregation) # min/max over all values
@classmethod
def from_dict(cls, data: dict[str, Numeric], aggregation: str | AggregationType) -> dict[str, "Metric"]:
return {key: cls(value=value, aggregation=aggregation) for key, value in data.items()}
def init_list(self) -> "Metric":
return Metric(aggregation=self.aggregation)
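# Worked example (illustrative) of data-parallel aggregation with MEAN: values are first averaged
# across dp ranks per micro-batch, then aggregated over micro-batches.
#   m0 = Metric(aggregation="mean", value=[1.0, 2.0])  # dp rank 0
#   m1 = Metric(aggregation="mean", value=[3.0, 4.0])  # dp rank 1
#   Metric.aggregate_dp([m0, m1])  # -> mean([2.0, 3.0]) == 2.5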
|
verl__utils__model.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities to create common models from huggingface
"""
import json
import os
import re
import warnings
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
from tensordict.tensorclass import NonTensorData
from torch import nn
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForImageTextToText,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelForVision2Seq,
GenerationConfig,
MistralForSequenceClassification,
PretrainedConfig,
PreTrainedModel,
)
from transformers.modeling_outputs import CausalLMOutputWithPast
from verl.models.registry import ModelRegistry
from verl.utils.import_utils import is_trl_available
class LambdaLayer(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, *args, **kwargs):
return self.fn(*args, **kwargs)
def squeeze(x):
return torch.squeeze(x, dim=-1)
def update_model_config(module_config, override_config_kwargs):
"""Update the module config with the override_config_kwargs.
Args:
module_config: The module config from Huggingface Transformers.
override_config_kwargs: The kwargs to override the module config.
"""
for key, val in override_config_kwargs.items():
if isinstance(val, dict):
update_model_config(getattr(module_config, key), val)
else:
setattr(module_config, key, val)
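# Usage sketch (illustrative; the model id and keys are examples only): a nested dict recurses into
# the corresponding sub-config via getattr, while flat keys are set directly with setattr.
#   cfg = AutoConfig.from_pretrained("some/vlm-model")
#   update_model_config(cfg, {"attention_dropout": 0.0, "vision_config": {"image_size": 448}})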
def get_huggingface_actor_config(model_name: str, override_config_kwargs=None, trust_remote_code=False) -> PretrainedConfig:
if override_config_kwargs is None:
override_config_kwargs = {}
assert isinstance(override_config_kwargs, dict), (
f"override_config_kwargs must be a dict, got {type(override_config_kwargs)}"
)
module_config = AutoConfig.from_pretrained(model_name, trust_remote_code=trust_remote_code)
update_model_config(module_config, override_config_kwargs)
return module_config
def get_generation_config(
model: str,
trust_remote_code: bool = False,
) -> Optional[GenerationConfig]:
try:
return GenerationConfig.from_pretrained(model)
except OSError: # Not found
try:
config = get_huggingface_actor_config(
model,
trust_remote_code=trust_remote_code,
)
return GenerationConfig.from_model_config(config)
except OSError: # Not found
return None
def create_huggingface_actor(model_name: str, override_config_kwargs=None, automodel_kwargs=None) -> nn.Module:
"""
Args:
model_name:
override_config_kwargs:
Returns:
"""
if override_config_kwargs is None:
override_config_kwargs = {}
if automodel_kwargs is None:
automodel_kwargs = {}
assert isinstance(override_config_kwargs, dict), (
f"override_config_kwargs must be a dict, got {type(override_config_kwargs)}"
)
module_config = get_huggingface_actor_config(
model_name, override_config_kwargs, trust_remote_code=automodel_kwargs.get("trust_remote_code", False)
)
module: nn.Module = AutoModelForCausalLM.from_config(module_config, **automodel_kwargs)
return module
def create_huggingface_critic(model_name: str, override_config_kwargs=None, automodel_kwargs=None) -> nn.Module:
"""
Args:
model_name:
override_config_kwargs:
Returns:
"""
critic_module: nn.Module = create_huggingface_actor(
model_name, override_config_kwargs=override_config_kwargs, automodel_kwargs=automodel_kwargs
)
if automodel_kwargs is None:
automodel_kwargs = {}
torch_dtype = automodel_kwargs.get("torch_dtype", torch.float32)
critic_module.lm_head = nn.Sequential(
nn.Linear(critic_module.config.hidden_size, 1, dtype=torch_dtype), LambdaLayer(fn=squeeze)
)
return critic_module
def get_model_size(model: nn.Module, scale="auto"):
n_params = sum(p.numel() for p in model.parameters())
if scale == "auto":
if n_params > 1e9:
scale = "B"
elif n_params > 1e6:
scale = "M"
elif n_params > 1e3:
scale = "K"
else:
scale = ""
if scale == "B":
n_params = n_params / 1e9
elif scale == "M":
n_params = n_params / 1e6
elif scale == "K":
n_params = n_params / 1e3
elif scale == "":
pass
else:
raise NotImplementedError(f"Unknown scale {scale}")
return n_params, scale
def print_model_size(model: nn.Module, name: str = None):
n_params, scale = get_model_size(model, scale="auto")
if name is None:
name = model.__class__.__name__
print(f"{name} contains {n_params:.2f}{scale} parameters")
def create_random_mask(
input_ids: torch.Tensor,
max_ratio_of_valid_token: float,
max_ratio_of_left_padding: float,
min_ratio_of_valid_token: float = 0,
):
"""Create a random mask given input_ids. Support left padding and right padding.
Process:
- Sample valid token length
- Sample left_padding length
- Generate padding
Args:
input_ids:
shape (batch_size, seq_len)
Returns:
"""
assert max_ratio_of_valid_token > 0 and max_ratio_of_valid_token <= 1.0
assert max_ratio_of_left_padding >= 0 and max_ratio_of_left_padding < 1.0
assert min_ratio_of_valid_token <= max_ratio_of_valid_token
batch_size, sequence_length = input_ids.shape
max_num_valid_tokens = int(sequence_length * max_ratio_of_valid_token)
min_num_valid_tokens = max(1, int(sequence_length * min_ratio_of_valid_token))
max_left_padding = int(sequence_length * max_ratio_of_left_padding)
assert max_num_valid_tokens + max_left_padding <= sequence_length
assert max_num_valid_tokens > 0 and max_num_valid_tokens <= sequence_length
masks = torch.ones_like(input_ids, dtype=torch.int64)
# TODO: we can make this faster
for i in range(batch_size):
num_left_padding = np.random.randint(low=0, high=max_left_padding + 1, dtype=np.int64)
num_valid = np.random.randint(low=min_num_valid_tokens, high=max_num_valid_tokens + 1, dtype=np.int64)
for index in range(num_left_padding):
masks[i, index] = 0
for index in range(num_left_padding + num_valid, sequence_length):
masks[i, index] = 0
return masks
def compute_position_id_with_mask(mask):
return torch.clip(torch.cumsum(mask, dim=-1) - 1, min=0, max=None)
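# Worked example: for a left-padded mask [0, 0, 1, 1, 1], cumsum - 1 = [-1, -1, 0, 1, 2], which is
# clipped at 0 to give position_ids [0, 0, 0, 1, 2]; the first valid token always gets position 0.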
def convert_weight_keys(state_dict: dict[str, torch.Tensor], model: PreTrainedModel):
# convert state dict keys: https://github.com/huggingface/transformers/pull/38385
if not hasattr(model, "_checkpoint_conversion_mapping"):
return state_dict
reverse_key_mapping = {v: k for k, v in model._checkpoint_conversion_mapping.items()}
original_weights = {}
for key, value in state_dict.items():
for pattern, replacement in reverse_key_mapping.items():
replacement = replacement.lstrip("^") # strip off un-needed chars and patterns
replacement = re.sub(r"\(.*\)", "", replacement)
key, n_replace = re.subn(pattern, replacement, key)
# Early exit of the loop
if n_replace > 0:
break
original_weights[key] = value
return original_weights
def check_exclude_modules(config, key: str) -> bool:
"""
A helper method to check if the passed module's key name matches any of the exclude modules in the adapter_config.
Adapted from https://github.com/huggingface/peft/blob/main/src/peft/tuners/tuners_utils.py
Args:
config (`LoraConfig` | `LycorisConfig`): A config to match exclude modules from
key (`str`): A key to search any matches in config
Returns:
True if the key matches any exclude module in the config, False otherwise
"""
if hasattr(config, "exclude_modules") and config.exclude_modules:
if isinstance(config.exclude_modules, str):
if re.fullmatch(config.exclude_modules, key):
return True
elif key in config.exclude_modules:
return True
elif any(key.endswith(f".{exclude_key}") for exclude_key in config.exclude_modules):
return True
return False
def check_target_modules(config, key: str) -> bool:
"""
A helper method to check if the passed module's key name matches any of the target modules in the adapter_config.
Adapted from https://github.com/huggingface/peft/blob/main/src/peft/tuners/tuners_utils.py
Args:
config (`LoraConfig` | `LycorisConfig`): A config to match target modules from
key (`str`): A key to search any matches in config
Returns:
True (or a regex match object) if the key matches any target module in the config, otherwise False (or None)
"""
if isinstance(config.target_modules, str):
target_module_found = re.fullmatch(config.target_modules, key)
elif key in config.target_modules:
# this module is specified directly in target_modules
target_module_found = True
else:
target_module_found = any(key.endswith(f".{target_key}") for target_key in config.target_modules)
layer_indexes = getattr(config, "layers_to_transform", None)
layers_pattern = getattr(config, "layers_pattern", None)
is_using_layer_indexes = layer_indexes is not None and (
len(layer_indexes) != 0 if isinstance(layer_indexes, list) else True
)
if is_using_layer_indexes and target_module_found:
layer_index = None
# TODO: It's still unclear how empty layers_pattern (None, [], or "") should behave
# For now, empty layers_pattern means any layer pattern is ok
if layers_pattern is None or len(layers_pattern) == 0:
layer_index = re.match(r".*\.[^.]*\.(\d+)\.", key)
else:
layers_pattern = [layers_pattern] if isinstance(layers_pattern, str) else layers_pattern
for pattern in layers_pattern:
layer_index = re.match(rf".*\.{pattern}\.(\d+)\.", key)
if layer_index is not None:
break
if layer_index is None:
target_module_found = False
else:
layer_index = int(layer_index.group(1))
if isinstance(layer_indexes, int):
target_module_found = layer_index == layer_indexes
else:
target_module_found = layer_index in layer_indexes
return target_module_found
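# Usage sketch (illustrative, assuming peft's LoraConfig with no layers_to_transform set):
#   cfg = LoraConfig(target_modules=["q_proj", "v_proj"])
#   check_target_modules(cfg, "model.layers.0.self_attn.q_proj")  # -> True (suffix match)
#   check_target_modules(cfg, "model.layers.0.mlp.gate_proj")     # -> False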
def normalize_model_name(name, pp_rank, vpp_rank, transformer_config, layer_name="layers"):
"""
Transform the model name in each model_chunk in each pp stage into the name in inference engine
"""
from verl.utils.megatron_utils import get_transformer_layer_offset
layer_offset = get_transformer_layer_offset(pp_rank, vpp_rank, transformer_config)
if layer_name in name: # belong to an intermediate layer
split_name = name.split(".")
# find the num next to split_name
for i, name in enumerate(split_name):
if name == layer_name:
break
layer_num_idx = i + 1
# check the name
assert len(split_name) >= layer_num_idx + 1, f"split_name = {split_name}"
assert split_name[layer_num_idx].isdigit(), f"split_name = {split_name}"
# increment layer_num_idx by layer_offset
split_name[layer_num_idx] = str(int(split_name[layer_num_idx]) + layer_offset)
name = ".".join(split_name) # weight name in inference_tp_model
return name
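# Worked example (assuming get_transformer_layer_offset returns 12 for this pp/vpp stage): the
# local name "decoder.layers.3.mlp.linear_fc2.weight" becomes
# "decoder.layers.15.mlp.linear_fc2.weight", i.e. the local layer index is shifted by the stage's
# global layer offset.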
def normalize_pp_vpp_params(params, num_hidden_layers, layer_name="layers"):
"""
Normalize the pp vpp params into a complete named parameters.
This is useful when gather parameters from pp ranks and passed to a model without pp
params: Iterable[List[Dict[str, param]]]
params contains a list of pp, with a list of vpp named_parameters in each vpp chunk.
output: Dict[str, param]
"""
pp_size = len(params)
for pp_rank in range(len(params)):
vpp_size = len(params[pp_rank])
for vpp_rank in range(vpp_size):
for name, param in params[pp_rank][vpp_rank].items():
normalized_name = normalize_model_name(
name, pp_rank, vpp_rank, pp_size, vpp_size, num_hidden_layers, layer_name=layer_name
)
yield normalized_name, param
def get_parallel_model_from_config(
config, megatron_config, pre_process=None, post_process=None, share_embeddings_and_output_weights=False, value=False
):
from megatron.core import ModelParallelConfig
assert isinstance(megatron_config, ModelParallelConfig)
model_class = _get_parallel_model_architecture_from_config(config, value)
model = model_class(
config,
megatron_config,
pre_process=pre_process,
post_process=post_process,
share_embeddings_and_output_weights=share_embeddings_and_output_weights,
)
return model
def _get_parallel_model_architecture_from_config(config: PretrainedConfig, value=False) -> type[nn.Module]:
architectures = getattr(config, "architectures", [])
for arch in architectures:
model_cls = ModelRegistry.load_model_cls(arch, value)
print("after load model cls")
if model_cls is not None:
return model_cls
raise ValueError(
f"Model architectures {architectures} are not supported for now. Supported architectures: "
f"{ModelRegistry.get_supported_archs()}"
)
def _load_hf_model(config, model_config, is_value_model):
"""Helper function containing the loading hf model logic"""
from accelerate import init_empty_weights
from megatron.core import parallel_state as mpu
from verl.models.mcore.saver import _megatron_calc_global_rank
assert hasattr(model_config, "architectures"), "architectures cannot be empty when load weight!"
architectures = getattr(model_config, "architectures", [])
# get auto class
auto_cls = get_hf_auto_model_class(model_config)
if config.model.path.startswith("hdfs:"):
from verl.utils.fs import copy_to_local
print(f"start download from {config.model.path}")
local_model_path = copy_to_local(src=config.model.path, use_shm=config.model.get("use_shm", False))
print("finish download")
else:
local_model_path = config.model.path
print(f"load from local dir {local_model_path}")
src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=0, cp_rank=mpu.get_context_parallel_rank())
cpu_init_weights = lambda: torch.device("cpu")
init_context = init_empty_weights if torch.distributed.get_rank() != src_rank else cpu_init_weights
with init_context(), warnings.catch_warnings():
warnings.simplefilter("ignore")
# TODO: to find a better way to load mistral7b-rm lm_head
if "mistral7b-rm" in config.model.path:
model = MistralForSequenceClassification.from_pretrained(
local_model_path,
torch_dtype="auto",
# device_map="auto", # disable auto device_map, the HF weight is only loaded to CPU in src_rank
# low_cpu_mem_usage=True
) # use score head instead of lm_head
state_dict = model.state_dict()
state_dict["lm_head.weight"] = state_dict["score.weight"]
state_dict["model.embed_tokens.weight"] = state_dict["model.embed_tokens.weight"][
:32000
] # workaround, 32001 -> 32000
is_value_model = True
else:
model = auto_cls.from_pretrained(
local_model_path,
torch_dtype="auto",
# device_map="auto", # disable auto device_map, the HF weight is only loaded to CPU in src_rank
# low_cpu_mem_usage=True
)
state_dict = model.state_dict()
return architectures, model, state_dict, is_value_model
def get_hf_model_path(config):
if config.model.path.startswith("hdfs:"):
from verl.utils.fs import copy_to_local
local_model_path = copy_to_local(src=config.model.path, use_shm=config.model.get("use_shm", False))
else:
local_model_path = config.model.path
return local_model_path
def load_megatron_model_weights(config, model_config, parallel_model, params_dtype, is_value_model=False):
"""Load weights for verl customized model."""
architectures, model, state_dict, is_value_model = _load_hf_model(config, model_config, is_value_model)
from verl.models.weight_loader_registry import get_weight_loader
print(f"before weight loader: architectures = {architectures}...")
for arch in architectures:
print(f"call weight loader arch = {arch}, model config = {model.config}")
weight_loader = get_weight_loader(arch)
weight_loader(
state_dict=state_dict,
wrapped_models=parallel_model,
config=model.config,
params_dtype=params_dtype,
is_value_model=is_value_model,
tie_word_embeddings=model_config.tie_word_embeddings,
)
return model.config
def load_megatron_gptmodel_weights(config, model_config, parallel_model, params_dtype, is_value_model=False):
"""Load weights for mcore GPT model."""
_, model, state_dict, is_value_model = _load_hf_model(config, model_config, is_value_model)
from verl.models.mcore.loader import load_state_dict_to_megatron_gptmodel
load_state_dict_to_megatron_gptmodel(
state_dict=state_dict,
wrapped_models=parallel_model,
config=model.config,
params_dtype=params_dtype,
is_value_model=is_value_model,
)
del state_dict, model
# pad input_ids_rmpad, cu_seqlens and max_seqlen_in_batch to be divisible by tp
def pad_packed_inputs(unpad_tokens: torch.Tensor, cu_seqlens, max_seqlen_in_batch, size):
"""pad the tokens such that the total length is a multiple of size.
This function is useful when applying sequence parallel and context parallel
Args:
unpad_tokens: (total_nnz, ...). Tokens after removing padding
cu_seqlens: (total_nnz + 1,)
max_seqlen_in_batch: int
Returns:
"""
F = nn.functional
total_nnz = unpad_tokens.shape[0]
pad_size = 0 if total_nnz % size == 0 else size - total_nnz % size
# we assume adding a new data in the batch with seqlen pad_size
if pad_size > 0:
if unpad_tokens.ndim == 1:
unpad_tokens = F.pad(unpad_tokens, (0, pad_size))
elif unpad_tokens.ndim == 2:
unpad_tokens = F.pad(unpad_tokens, (0, 0, 0, pad_size))
else:
raise NotImplementedError(f"Padding dim {unpad_tokens.ndim()} is not supported")
cu_seqlens = F.pad(cu_seqlens, (0, 1), value=pad_size + cu_seqlens[-1])
max_seqlen_in_batch = max(max_seqlen_in_batch, pad_size)
return unpad_tokens, cu_seqlens, max_seqlen_in_batch
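# Worked example: with total_nnz = 10 and size = 4 (e.g. a sequence-parallel divisibility
# requirement), pad_size = 4 - 10 % 4 = 2, so 2 pad rows are appended, cu_seqlens gains one extra
# entry 10 + 2 = 12 (treated as a new pseudo-sequence of length 2), and
# max_seqlen_in_batch = max(old_max, 2).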
def load_mcore_dist_weights(parallel_model, dist_weight_path, is_value_model=False, prefix=""):
from megatron.core import dist_checkpointing
from megatron.core.dist_checkpointing.serialization import StrictHandling
from verl.utils.megatron_utils import unwrap_model
# strict = StrictHandling.IGNORE_ALL if is_value_model else StrictHandling.ASSUME_OK_UNEXPECTED
strict = StrictHandling.ASSUME_OK_UNEXPECTED
for model in parallel_model:
ssd = unwrap_model(model).sharded_state_dict(prefix=prefix)
if is_value_model:
for k in list(ssd.keys()):
if "output_layer" in k:
ssd.pop(k)
dist_checkpointing.load(ssd, dist_weight_path, strict=strict)
return
def get_parallel_gptmodel_from_config(
tfconfig, hf_config, pre_process=None, post_process=None, share_embeddings_and_output_weights=False, value=False
):
from megatron.core.models.gpt.gpt_layer_specs import get_gpt_decoder_block_spec
from megatron.core.models.gpt.gpt_model import GPTModel
use_te = True
assert tfconfig.normalization == "RMSNorm", "only RMSNorm is supported for now"
transformer_layer_spec = get_gpt_decoder_block_spec(tfconfig, use_transformer_engine=use_te)
rope_scaling_args = {}
if hf_config.rope_scaling is not None:
assert hf_config.rope_scaling["type"] == "linear", "only linear scaling is supported for now"
rope_scaling_args["seq_len_interpolation_factor"] = hf_config.rope_scaling["factor"]
parallel_model = GPTModel(
config=tfconfig,
transformer_layer_spec=transformer_layer_spec,
vocab_size=hf_config.vocab_size,
max_sequence_length=hf_config.max_position_embeddings,
pre_process=pre_process,
post_process=post_process,
share_embeddings_and_output_weights=share_embeddings_and_output_weights,
position_embedding_type="rope",
rotary_base=hf_config.rope_theta,
**rope_scaling_args,
)
# # for layer in parallel_model.decoder.layers:
# layer.self_attention.core_attention.flash_attention.softmax_scale = None
if post_process and value:
from verl.models.llama.megatron.layers.parallel_linear import LinearForLastLayer
parallel_model.output_layer = LinearForLastLayer(
input_size=tfconfig.hidden_size, output_size=1, config=tfconfig
)
return parallel_model
def patch_valuehead_model(model) -> None:
from types import MethodType
from transformers import PreTrainedModel
from trl import AutoModelForCausalLMWithValueHead
def tie_weights(self: "AutoModelForCausalLMWithValueHead") -> None:
if isinstance(self.pretrained_model, PreTrainedModel):
self.pretrained_model.tie_weights()
def get_input_embeddings(self: "AutoModelForCausalLMWithValueHead") -> torch.nn.Module:
if isinstance(self.pretrained_model, PreTrainedModel):
return self.pretrained_model.get_input_embeddings()
def get_output_embeddings(self: "AutoModelForCausalLMWithValueHead") -> torch.nn.Module:
if isinstance(self.pretrained_model, PreTrainedModel):
return self.pretrained_model.get_output_embeddings()
def can_generate(self):
return False
ignore_modules = [name for name, _ in model.named_parameters() if "pretrained_model" in name]
model._keys_to_ignore_on_save = ignore_modules
model.tie_weights = MethodType(tie_weights, model)
model.get_input_embeddings = MethodType(get_input_embeddings, model)
model.get_output_embeddings = MethodType(get_output_embeddings, model)
model.can_generate = MethodType(can_generate, model)
model._no_split_modules = getattr(model.pretrained_model, "_no_split_modules", [])
def load_valuehead_model(local_path, torch_dtype, model_config, trust_remote_code):
from transformers import AutoModelForCausalLM, AutoModelForTokenClassification, AutoModelForVision2Seq
try:
model = AutoModelForTokenClassification.from_pretrained(
pretrained_model_name_or_path=local_path,
torch_dtype=torch_dtype,
config=model_config,
attn_implementation="flash_attention_2",
trust_remote_code=trust_remote_code,
)
return model
except BaseException as e:
if not is_trl_available():
raise RuntimeError(
f"model({local_path}) is not a value head model, please install trl to make it valid"
) from e
assert is_trl_available()
from trl import AutoModelForCausalLMWithValueHead
if type(model_config) in AutoModelForVision2Seq._model_mapping.keys():
module_class = AutoModelForVision2Seq
else:
module_class = AutoModelForCausalLM
ori_model = module_class.from_pretrained(
pretrained_model_name_or_path=local_path,
torch_dtype=torch_dtype,
config=model_config,
attn_implementation="flash_attention_2",
trust_remote_code=trust_remote_code,
)
model = AutoModelForCausalLMWithValueHead.from_pretrained(ori_model)
patch_valuehead_model(model)
return model
_architecture_to_auto_class = {
"ForCausalLM": AutoModelForCausalLM,
"ForVision2Seq": AutoModelForVision2Seq,
"ForTokenClassification": AutoModelForTokenClassification,
"ForSequenceClassification": AutoModelForSequenceClassification,
}
def get_hf_auto_model_class(hf_config):
has_remote_code = hasattr(hf_config, "auto_map") and any(
hf_config.architectures[0] in val for val in hf_config.auto_map.values()
)
if has_remote_code:
auto_class = next(k for k, v in hf_config.auto_map.items() if hf_config.architectures[0] in v)
match auto_class:
case "AutoModelForVision2Seq":
actor_module_class = AutoModelForVision2Seq
case "AutoModelForCausalLM":
actor_module_class = AutoModelForCausalLM
case "AutoModelForImageTextToText":
actor_module_class = AutoModelForImageTextToText
case _:
actor_module_class = AutoModel
else:
actor_module_class = AutoModel
# For VLM models, we use type to check instead of architecture
if type(hf_config) in AutoModelForImageTextToText._model_mapping.keys():
actor_module_class = AutoModelForImageTextToText
else:
for key, cls in _architecture_to_auto_class.items():
if key in hf_config.architectures[0]:
actor_module_class = cls
break
return actor_module_class
def extract_multi_modal_inputs(
batch_data: list[dict[str, torch.Tensor]],
indices: Optional[list[int]] = None,
) -> dict[str, torch.Tensor | list[torch.Tensor]]:
"""
Extract and process multi-modal inputs from a batch.
Args:
batch_data (list[dict[str, torch.Tensor]]): The batch containing potential multi-modal inputs
indices (Optional[list[int]]): If provided, only extract inputs at these indices
Returns:
dict[str, torch.Tensor | list[torch.Tensor]]: Processed multi-modal inputs ready for model consumption
"""
multi_modal_inputs = {}
multi_modal_inputs_collected = {}
has_image_bound = False
selected_batch_data = batch_data
if indices is not None:
selected_batch_data = [batch_data[i] for i in indices if i < len(batch_data)]
for inputs in selected_batch_data:
inputs = inputs.data if isinstance(inputs, NonTensorData) else inputs
# Mixed pure text and multi-modal dataset.
if inputs is None:
continue
if "image_bound" in inputs:
has_image_bound = True
for key, value in inputs.items():
if value is not None:
if key not in multi_modal_inputs_collected:
multi_modal_inputs_collected[key] = []
multi_modal_inputs_collected[key].append(value)
for key, values in multi_modal_inputs_collected.items():
if has_image_bound: # minicpm-o logic
multi_modal_inputs[key] = values
else:
multi_modal_inputs[key] = torch.cat(values, dim=0)
return multi_modal_inputs
def get_lora_rank_from_adapter(adapter_path: str | os.PathLike) -> int:
"""
Extract LoRA rank from adapter configuration file.
Args:
adapter_path: Path to LoRA adapter directory
Returns:
LoRA rank value from adapter_config.json
Raises:
FileNotFoundError: If adapter path or config file doesn't exist
ValueError: If config file is invalid or missing rank
"""
adapter_path = os.path.abspath(os.path.expanduser(str(adapter_path)))
if not os.path.exists(adapter_path):
raise FileNotFoundError(f"LoRA adapter path not found: {adapter_path}")
config_path = os.path.join(adapter_path, "adapter_config.json")
if not os.path.exists(config_path):
raise FileNotFoundError(f"adapter_config.json not found in {adapter_path}")
try:
with open(config_path, encoding="utf-8") as f:
config = json.load(f)
if "r" not in config:
raise ValueError(f"LoRA rank 'r' not found in {config_path}")
return int(config["r"])
except json.JSONDecodeError as e:
raise ValueError(f"Invalid JSON in {config_path}: {e}") from e
except (KeyError, ValueError) as e:
raise ValueError(f"Cannot parse LoRA rank from {config_path}: {e}") from e
@dataclass
class CausalLMOutputForPPO(CausalLMOutputWithPast):
log_probs: Optional[torch.FloatTensor] = None
entropy: Optional[torch.FloatTensor] = None
|
verl__utils__npu_flash_attn_utils.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn.functional as F
from einops import rearrange, repeat
# Copied from https://github.com/Dao-AILab/flash-attention/blob/main/flash_attn/bert_padding.py
class IndexFirstAxis(torch.autograd.Function):
@staticmethod
def forward(ctx, input, indices):
ctx.save_for_backward(indices)
assert input.ndim >= 2
ctx.first_axis_dim, other_shape = input.shape[0], input.shape[1:]
second_dim = other_shape.numel()
# TD [2022-03-04] For some reason torch.gather is a bit faster than indexing.
# return input[indices]
return torch.gather(rearrange(input, "b ... -> b (...)"), 0, repeat(indices, "z -> z d", d=second_dim)).reshape(
-1, *other_shape
)
@staticmethod
def backward(ctx, grad_output):
(indices,) = ctx.saved_tensors
assert grad_output.ndim >= 2
other_shape = grad_output.shape[1:]
grad_output = rearrange(grad_output, "b ... -> b (...)")
grad_input = torch.zeros(
[ctx.first_axis_dim, grad_output.shape[1]],
device=grad_output.device,
dtype=grad_output.dtype,
)
# TD [2022-03-04] For some reason torch.scatter is a bit faster than indexing.
# grad_input[indices] = grad_output
grad_input.scatter_(0, repeat(indices, "z -> z d", d=grad_output.shape[1]), grad_output)
return grad_input.reshape(ctx.first_axis_dim, *other_shape), None
index_first_axis = IndexFirstAxis.apply
# Copied from https://github.com/Dao-AILab/flash-attention/blob/main/flash_attn/bert_padding.py
class IndexPutFirstAxis(torch.autograd.Function):
@staticmethod
def forward(ctx, values, indices, first_axis_dim):
ctx.save_for_backward(indices)
assert indices.ndim == 1
assert values.ndim >= 2
output = torch.zeros(first_axis_dim, *values.shape[1:], device=values.device, dtype=values.dtype)
# TD [2022-03-04] For some reason torch.scatter is a bit faster than indexing.
output[indices] = values
# output.scatter_(0, repeat(indices, 'z -> z d', d=values.shape[1]), values)
return output
@staticmethod
def backward(ctx, grad_output):
(indices,) = ctx.saved_tensors
# TD [2022-03-04] For some reason torch.gather is a bit faster than indexing.
grad_values = grad_output[indices]
# grad_values = torch.gather(grad_output, 0, repeat(indices, 'z -> z d', d=grad_output.shape[1]))
return grad_values, None, None
index_put_first_axis = IndexPutFirstAxis.apply
# Copied from https://github.com/Dao-AILab/flash-attention/blob/main/flash_attn/bert_padding.py
def pad_input(hidden_states, indices, batch, seqlen):
"""
Arguments:
hidden_states: (total_nnz, ...), where total_nnz = number of tokens selected in attention_mask.
indices: (total_nnz), the indices that represent the non-masked tokens of the original padded input sequence.
batch: int, batch size for the padded sequence.
seqlen: int, maximum sequence length for the padded sequence.
Return:
hidden_states: (batch, seqlen, ...)
"""
# dim = hidden_states.shape[-1]
# output = torch.zeros((batch * seqlen), dim, device=hidden_states.device, dtype=hidden_states.dtype)
# output[indices] = hidden_states
output = index_put_first_axis(hidden_states, indices, batch * seqlen)
return rearrange(output, "(b s) ... -> b s ...", b=batch)
# Copied from https://github.com/Dao-AILab/flash-attention/blob/main/flash_attn/bert_padding.py
def unpad_input(hidden_states, attention_mask, unused_mask=None):
"""
Arguments:
hidden_states: (batch, seqlen, ...)
attention_mask: (batch, seqlen), bool / int, 1 means valid and 0 means not valid.
unused_mask: (batch, seqlen), bool / int, 1 means the element is allocated but unused.
Return:
hidden_states: (total_nnz, ...), where total_nnz = number of tokens selected in attention_mask + unused_mask.
indices: (total_nnz), the indices of the non-padded (valid) tokens in the flattened input sequence.
cu_seqlens: (batch + 1), the cumulative sequence lengths, used to index into hidden_states.
max_seqlen_in_batch: int
seqused: (batch), returns the number of tokens selected in attention_mask + unused_mask.
"""
all_masks = (attention_mask + unused_mask) if unused_mask is not None else attention_mask
seqlens_in_batch = all_masks.sum(dim=-1, dtype=torch.int32)
used_seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
indices = torch.nonzero(all_masks.flatten(), as_tuple=False).flatten()
max_seqlen_in_batch = seqlens_in_batch.max().item()
cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
# TD [2022-03-04] We don't want to index with a bool mask, because Pytorch will expand the
# bool mask, then call nonzero to get the indices, then index with those. The indices is @dim
# times larger than it needs to be, wasting memory. It's faster and more memory-efficient to
# index with integer indices. Moreover, torch's index is a bit slower than it needs to be,
# so we write custom forward and backward to make it a bit faster.
return (
index_first_axis(rearrange(hidden_states, "b s ... -> (b s) ..."), indices),
indices,
cu_seqlens,
max_seqlen_in_batch,
used_seqlens_in_batch,
)
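# A minimal usage sketch (not part of the original module): round-trip a small padded batch
# through unpad_input/pad_input. Shapes and mask values are illustrative.
def _example_unpad_pad_roundtrip() -> torch.Tensor:
    batch, seqlen, hidden = 2, 4, 8
    hidden_states = torch.randn(batch, seqlen, hidden)
    attention_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]], dtype=torch.int32)
    # unpadded has shape (5, hidden): 3 valid tokens from row 0 and 2 from row 1
    unpadded, indices, cu_seqlens, max_seqlen, seqused = unpad_input(hidden_states, attention_mask)
    repadded = pad_input(unpadded, indices, batch, seqlen)
    # masked positions come back as zeros; valid positions match the original values
    assert torch.equal(repadded * attention_mask.unsqueeze(-1), repadded)
    return repadded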
|
verl__utils__profiler__empty_annotations.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, Optional
# No-op fallbacks used when no hardware profiler (NVTX/MSTX) is available; they keep the
# profiler annotation API importable without adding any runtime overhead.
def mark_start_range(
message: Optional[str] = None,
color: Optional[str] = None,
domain: Optional[str] = None,
category: Optional[str] = None,
) -> None:
pass
def mark_end_range(range_id: str) -> None:
pass
def mark_annotate(
message: Optional[str] = None,
color: Optional[str] = None,
domain: Optional[str] = None,
category: Optional[str] = None,
) -> Callable:
def decorator(func):
return func
return decorator
|
verl__utils__profiler__mstx_profile.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Inspired from https://gitee.com/ascend/MindSpeed-RL/blob/master/mindspeed_rl/utils/utils.py
import functools
import logging
import os
from contextlib import contextmanager
from typing import Any, Callable, Optional
import torch_npu
from packaging import version
from torch_npu.npu import mstx
from .config import NPUToolConfig
from .profile import DistProfiler, ProfilerConfig
def mark_start_range(message: Optional[str] = None) -> None:
"""Start a mark range in the profiler.
Args:
message (str, optional):
The message to be displayed in the profiler. Defaults to None.
"""
return mstx.range_start(message=message)
def mark_end_range(range_id: str) -> None:
"""End a mark range in the profiler.
Args:
range_id (str):
The id of the mark range to end.
"""
return mstx.range_end(range_id)
def mark_annotate(message: Optional[str] = None) -> Callable:
"""Decorate a function to annotate a mark range along with the function life cycle.
Args:
message (str, optional):
The message to be displayed in the profiler. Defaults to None.
"""
def decorator(func):
profile_message = message or func.__name__
return mstx.mstx_range(profile_message)(func)
return decorator
@contextmanager
def marked_timer(name: str, timing_raw: dict[str, float], *args: Any, **kwargs: Any) -> None:
"""Context manager for timing with MSTX markers.
This utility function measures the execution time of code within its context,
accumulates the timing information, and adds MSTX markers for profiling.
Args:
name (str): The name/identifier for this timing measurement.
timing_raw (Dict[str, float]): Dictionary to store timing information.
Yields:
None: This is a context manager that yields control back to the code block.
"""
if args:
logging.warning(f"Args are not supported in mstx_profile, but received: {args}")
if kwargs:
logging.warning(f"Kwargs are not supported in mstx_profile, but received: {kwargs}")
mark_range = mark_start_range(message=name)
from .performance import _timer
yield from _timer(name, timing_raw)
mark_end_range(mark_range)
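# A minimal usage sketch (not part of the original module): time a block under an MSTX
# range and accumulate the elapsed seconds in timing_raw under the key "rollout".
def _example_marked_timer() -> dict[str, float]:
    timing_raw: dict[str, float] = {}
    with marked_timer("rollout", timing_raw):
        _ = sum(range(1_000_000))  # stand-in for real NPU work
    return timing_raw  # e.g. {"rollout": 0.012}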
def get_npu_profiler(
contents: list[str],
profile_level: str,
profile_save_path: str,
analysis: bool,
role: Optional[str] = None,
profile_step: Optional[str] = None,
):
"""Generate and return an NPU profiler object.
Args:
contents (list[str]):
A list of options to control the collection content,
such as npu, cpu, memory, shapes, module, stack.
profile_level (str):
The collection level, which can be set to level_none,
level0, level1 and level2.
profile_save_path (str):
The path to save the collected data.
analysis (bool):
Whether to enable automatic data parsing.
role (str, optional):
The role of the current data collection. Defaults to None.
profile_step (str, optional):
The current training step. Defaults to None.
"""
if profile_level == "level_none":
level = torch_npu.profiler.ProfilerLevel.Level_none
elif profile_level == "level0":
level = torch_npu.profiler.ProfilerLevel.Level0
elif profile_level == "level1":
level = torch_npu.profiler.ProfilerLevel.Level1
elif profile_level == "level2":
level = torch_npu.profiler.ProfilerLevel.Level2
else:
raise ValueError(f"level only supports level0, 1, 2, and level_none, but gets {profile_level}")
if profile_step:
profile_save_path = os.path.join(profile_save_path, profile_step)
if role:
profile_save_path = os.path.join(profile_save_path, role)
# The ability to filter communication via mstx_domain_exclude requires torch_npu==2.1 or higher.
if version.parse(torch_npu.__version__) < version.parse("2.1"):
raise RuntimeError("torch_npu==2.1 or higher is required to use mstx_domain_exclude")
experimental_config = torch_npu.profiler._ExperimentalConfig(
profiler_level=level,
export_type=torch_npu.profiler.ExportType.Db,
data_simplification=True,
msprof_tx=True,
mstx_domain_exclude=["communication"],
)
activities = []
if contents is None or "npu" in contents:
activities.append(torch_npu.profiler.ProfilerActivity.NPU)
if contents is None or "cpu" in contents:
activities.append(torch_npu.profiler.ProfilerActivity.CPU)
prof = torch_npu.profiler.profile(
with_modules=contents is None or "module" in contents,
with_stack=contents is None or "stack" in contents,
record_shapes=contents is None or "shapes" in contents,
profile_memory=contents is None or "memory" in contents,
activities=activities,
on_trace_ready=torch_npu.profiler.tensorboard_trace_handler(profile_save_path, analyse_flag=analysis),
experimental_config=experimental_config,
)
return prof
class NPUProfiler(DistProfiler):
"""
NPU profiler. Initialized in a worker to control the NPU profiler.
"""
_define_count = 0
def __init__(self, rank: int, config: ProfilerConfig, tool_config: NPUToolConfig, **kwargs):
"""Initialize the NsightSystemsProfiler.
Args:
rank (int): The rank of the current process.
config (Optional[ProfilerConfig]): Configuration for the profiler. If None, a default configuration is used.
tool_config (NPUToolConfig): The config to control npu profiler behavior.
"""
if not config:
config = ProfilerConfig(ranks=[], enable=False)
if not tool_config:
assert not config.enable, "tool_config must be set when profiler is enabled"
self.discrete: bool = tool_config.discrete
self.profile_npu = None
self.profile_contents = tool_config.contents
self.profile_level = tool_config.level
self.profile_save_path = config.save_path
self.analysis = tool_config.analysis
def start(self, **kwargs):
role = kwargs.get("role", None)
if not self.discrete and NPUProfiler._define_count == 0:
self.profile_npu = get_npu_profiler(
contents=self.profile_contents,
profile_level=self.profile_level,
profile_save_path=self.profile_save_path,
analysis=self.analysis,
role=role,
)
self.profile_npu.start()
NPUProfiler._define_count += 1
def stop(self):
if not self.discrete and NPUProfiler._define_count == 1:
self.profile_npu.step()
self.profile_npu.stop()
NPUProfiler._define_count -= 1
def annotate(self, message: Optional[str] = None, role: Optional[str] = None, **kwargs_outer) -> Callable:
"""Decorate a Worker member function to profile the current rank in the current training step.
Requires the target function to be a member function of a Worker,
which has a member field `profiler` with NPUProfiler type.
Args:
message (str, optional):
The message to be displayed in the profiler. Defaults to None.
role (str, optional):
The role of the current data collection. Defaults to None.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs_inner):
profile_name = message or func.__name__
discrete_mode = self.discrete
if not discrete_mode:
mark_range = mark_start_range(message=profile_name)
else:
profile_npu = get_npu_profiler(
contents=self.profile_contents,
profile_level=self.profile_level,
profile_save_path=self.profile_save_path,
analysis=self.analysis,
role=role,
)
profile_npu.start()
mark_range = mark_start_range(message=profile_name)
result = func(*args, **kwargs_inner)
if not discrete_mode:
mark_end_range(mark_range)
else:
mark_end_range(mark_range)
profile_npu.step()
profile_npu.stop()
return result
return wrapper
return decorator
|
verl__utils__profiler__nvtx_profile.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from contextlib import contextmanager
from typing import Callable, Optional
import nvtx
import torch
from .config import NsightToolConfig
from .profile import DistProfiler, ProfilerConfig
def mark_start_range(
message: Optional[str] = None,
color: Optional[str] = None,
domain: Optional[str] = None,
category: Optional[str] = None,
) -> None:
"""Start a mark range in the profiler.
Args:
message (str, optional):
The message to be displayed in the profiler. Defaults to None.
color (str, optional):
The color of the range. Defaults to None.
domain (str, optional):
The domain of the range. Defaults to None.
category (str, optional):
The category of the range. Defaults to None.
"""
return nvtx.start_range(message=message, color=color, domain=domain, category=category)
def mark_end_range(range_id: str) -> None:
"""End a mark range in the profiler.
Args:
range_id (str):
The id of the mark range to end.
"""
return nvtx.end_range(range_id)
def mark_annotate(
message: Optional[str] = None,
color: Optional[str] = None,
domain: Optional[str] = None,
category: Optional[str] = None,
) -> Callable:
"""Decorate a function to annotate a mark range along with the function life cycle.
Args:
message (str, optional):
The message to be displayed in the profiler. Defaults to None.
color (str, optional):
The color of the range. Defaults to None.
domain (str, optional):
The domain of the range. Defaults to None.
category (str, optional):
The category of the range. Defaults to None.
"""
def decorator(func):
profile_message = message or func.__name__
return nvtx.annotate(profile_message, color=color, domain=domain, category=category)(func)
return decorator
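# A minimal usage sketch (not part of the original module): annotate a function so every
# call shows up as an NVTX range in Nsight Systems. The function name and domain are illustrative.
@mark_annotate(message="compute_advantage", color="green", domain="verl")
def _example_compute_advantage(reward: float, gamma: float = 0.99) -> float:
    return reward * gamma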
@contextmanager
def marked_timer(
name: str,
timing_raw: dict[str, float],
color: str = None,
domain: Optional[str] = None,
category: Optional[str] = None,
):
"""Context manager for timing with NVTX markers.
This utility function measures the execution time of code within its context,
accumulates the timing information, and adds NVTX markers for profiling.
Args:
name (str): The name/identifier for this timing measurement.
timing_raw (Dict[str, float]): Dictionary to store timing information.
color (Optional[str]): Color for the NVTX marker. Defaults to None.
domain (Optional[str]): Domain for the NVTX marker. Defaults to None.
category (Optional[str]): Category for the NVTX marker. Defaults to None.
Yields:
None: This is a context manager that yields control back to the code block.
"""
mark_range = mark_start_range(message=name, color=color, domain=domain, category=category)
from .performance import _timer
yield from _timer(name, timing_raw)
mark_end_range(mark_range)
class NsightSystemsProfiler(DistProfiler):
"""Nsight system profiler. Installed in a worker to control the Nsight system profiler."""
def __init__(self, rank: int, config: Optional[ProfilerConfig], tool_config: Optional[NsightToolConfig], **kwargs):
"""Initialize the NsightSystemsProfiler.
Args:
rank (int): The rank of the current process.
config (Optional[ProfilerConfig]): Configuration for the profiler. If None, a default configuration is used.
tool_config (Optional[NsightToolConfig]): Tool-specific options controlling Nsight behavior (e.g., discrete mode). Must be provided when profiling is enabled.
"""
# If no configuration is provided, create a default ProfilerConfig with an empty list of ranks
if not config:
config = ProfilerConfig(ranks=[])
if not tool_config:
assert not config.enable, "tool_config must be provided when profiler is enabled"
self.discrete: bool = tool_config.discrete
def start(self, **kwargs):
if not self.discrete:
torch.cuda.profiler.start()
def stop(self):
if not self.discrete:
torch.cuda.profiler.stop()
def annotate(
self,
message: Optional[str] = None,
color: Optional[str] = None,
domain: Optional[str] = None,
category: Optional[str] = None,
**kwargs_outer,
) -> Callable:
"""Decorate a Worker member function to profile the current rank in the current training step.
Requires the target function to be a member function of a Worker, which has a member field `profiler` with
NsightSystemsProfiler type.
Args:
message (str, optional):
The message to be displayed in the profiler. Defaults to None.
color (str, optional):
The color of the range. Defaults to None.
domain (str, optional):
The domain of the range. Defaults to None.
category (str, optional):
The category of the range. Defaults to None.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs_inner):
profile_name = message or func.__name__
if self.discrete:
torch.cuda.profiler.start()
mark_range = mark_start_range(message=profile_name, color=color, domain=domain, category=category)
result = func(*args, **kwargs_inner)
mark_end_range(mark_range)
if self.discrete:
torch.cuda.profiler.stop()
return result
return wrapper
return decorator
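# A minimal usage sketch (not part of the original module): wrap a callable with the
# profiler's annotate() decorator so one invocation is recorded as an NVTX range (and, in
# discrete mode, as its own torch.cuda.profiler session). Assumes a CUDA-capable setup.
def _example_profile_one_call(profiler: NsightSystemsProfiler) -> str:
    @profiler.annotate(message="generate_sequences", color="blue")
    def generate_sequences() -> str:
        return "done"  # stand-in for real rollout work
    return generate_sequences()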
|
verl__utils__profiler__performance.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import inspect
import logging
from contextlib import contextmanager
from typing import Any, Optional
import torch
import torch.distributed as dist
from codetiming import Timer
from verl.utils.device import get_device_id, get_torch_device
from verl.utils.logger import DecoratorLoggerBase
def _get_current_mem_info(unit: str = "GB", precision: int = 2) -> tuple[str, str, str, str]:
"""Get current memory usage.
Note that CPU device memory info is always 0.
Args:
unit (str, optional): The unit of memory measurement. Defaults to "GB".
precision (int, optional): The number of decimal places to round memory values. Defaults to 2.
Returns:
tuple[str, str, str, str]: A tuple containing memory allocated, memory reserved, memory used, and memory total
in the specified unit.
"""
assert unit in ["GB", "MB", "KB"]
device = get_torch_device()
# torch.cpu.memory_allocated() does not exist
if device == torch.cpu:
return "0.00", "0.00", "0.00", "0.00"
divisor = 1024**3 if unit == "GB" else 1024**2 if unit == "MB" else 1024
mem_allocated = get_torch_device().memory_allocated()
mem_reserved = get_torch_device().memory_reserved()
# use get_torch_device().mem_get_info to profile device memory
# since vllm's sleep mode works below pytorch
# see https://github.com/vllm-project/vllm/pull/11743#issuecomment-2754338119
mem_free, mem_total = get_torch_device().mem_get_info()
mem_used = mem_total - mem_free
mem_allocated = f"{mem_allocated / divisor:.{precision}f}"
mem_reserved = f"{mem_reserved / divisor:.{precision}f}"
mem_used = f"{mem_used / divisor:.{precision}f}"
mem_total = f"{mem_total / divisor:.{precision}f}"
return mem_allocated, mem_reserved, mem_used, mem_total
def log_gpu_memory_usage(head: str, logger: logging.Logger = None, level=logging.DEBUG, rank: int = 0):
"""Log GPU memory usage information.
Args:
head (str): A descriptive header for the memory usage log message.
logger (logging.Logger, optional): Logger instance to use for logging. If None, prints to stdout.
level: Logging level to use. Defaults to logging.DEBUG.
rank (int): The rank of the process to log memory for. Defaults to 0.
"""
if (not dist.is_initialized()) or (rank is None) or (dist.get_rank() == rank):
mem_allocated, mem_reserved, mem_used, mem_total = _get_current_mem_info()
message = (
f"{head}, memory allocated (GB): {mem_allocated}, memory reserved (GB): {mem_reserved}, "
f"device memory used/total (GB): {mem_used}/{mem_total}"
)
if logger is None:
print(message)
else:
logger.log(msg=message, level=level)
class GPUMemoryLogger(DecoratorLoggerBase):
"""A decorator class to log GPU memory usage.
Example:
>>> from verl.utils.profiler.performance import GPUMemoryLogger
>>> @GPUMemoryLogger(role="actor")
>>> def update_actor(self, batch):
... # real actor update logics
... return
"""
def __init__(self, role: str, logger: logging.Logger = None, level=logging.DEBUG, log_only_rank_0: bool = True):
if dist.is_initialized() and dist.get_world_size() > 1:
rank = dist.get_rank()
else:
rank = 0
super().__init__(role, logger, level, rank, log_only_rank_0)
def __call__(self, decorated_function: callable):
def f(*args, **kwargs):
return self.log(decorated_function, *args, **kwargs)
return f
def log(self, func, *args, **kwargs):
name = func.__name__
mem_allocated, mem_reserved, mem_used, mem_total = _get_current_mem_info()
message = (
f"Before {name}, memory allocated (GB): {mem_allocated}, memory reserved (GB): {mem_reserved}, "
f"device memory used/total (GB): {mem_used}/{mem_total}"
)
self.logging_function(message)
output = func(*args, **kwargs)
mem_allocated, mem_reserved, mem_used, mem_total = _get_current_mem_info()
message = (
f"After {name}, memory allocated (GB): {mem_allocated}, memory reserved (GB): {mem_reserved}, "
f"device memory used/total (GB): {mem_used}/{mem_total}"
)
self.logging_function(message)
return output
def log_print(ctn: Any):
"""Print a message prefixed with a timestamp and the caller's file name, line number and function name."""
current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
frame = inspect.currentframe().f_back
function_name = frame.f_code.co_name
line_number = frame.f_lineno
file_name = frame.f_code.co_filename.split("/")[-1]
print(f"[{current_time}-{file_name}:{line_number}:{function_name}]: {ctn}")
def _timer(name: str, timing_raw: dict[str, float]):
"""Inner function that handles the core timing logic.
Args:
name (str): The name/identifier for this timing measurement.
timing_raw (Dict[str, float]): Dictionary to store timing information.
"""
with Timer(name=name, logger=None) as timer:
yield
if name not in timing_raw:
timing_raw[name] = 0
timing_raw[name] += timer.last
@contextmanager
def simple_timer(name: str, timing_raw: dict[str, float]):
"""Context manager for basic timing without NVTX markers.
This utility function measures the execution time of code within its context
and accumulates the timing information in the provided dictionary.
Args:
name (str): The name/identifier for this timing measurement.
timing_raw (Dict[str, float]): Dictionary to store timing information.
Yields:
None: This is a context manager that yields control back to the code block.
"""
yield from _timer(name, timing_raw)
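# A minimal usage sketch (not part of the original module): repeated measurements under the
# same key accumulate, so the stored value is the total time across both iterations.
def _example_simple_timer() -> dict[str, float]:
    timing_raw: dict[str, float] = {}
    for _ in range(2):
        with simple_timer("step", timing_raw):
            _ = sum(range(100_000))  # stand-in for real work
    return timing_raw  # e.g. {"step": 0.004}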
@contextmanager
def marked_timer(
name: str,
timing_raw: dict[str, float],
color: str = None,
domain: Optional[str] = None,
category: Optional[str] = None,
):
"""Context manager for timing with platform markers.
This utility function measures the execution time of code within its context,
accumulates the timing information, and adds platform markers for profiling.
This function is a default implementation when hardware profiler is not available.
Args:
name (str): The name/identifier for this timing measurement.
timing_raw (Dict[str, float]): Dictionary to store timing information.
color (Optional[str]): Color for the marker. Defaults to None.
domain (Optional[str]): Domain for the marker. Defaults to None.
category (Optional[str]): Category for the marker. Defaults to None.
Yields:
None: This is a context manager that yields control back to the code block.
"""
yield from _timer(name, timing_raw)
def reduce_timing(
timing_raw: dict[str, float], reduce_op: torch.distributed.ReduceOp = torch.distributed.ReduceOp.AVG
) -> dict[str, float]:
"""Reduce timing information across all processes.
This function performs a distributed all-reduce to combine the timing information
across all processes (averaged by default, controlled by ``reduce_op``).
Args:
timing_raw (Dict[str, float]): Dictionary containing timing information.
reduce_op (torch.distributed.ReduceOp): Reduction operation to apply. Defaults to ReduceOp.AVG.
Returns:
Dict[str, float]: Reduced timing information.
"""
if not dist.is_initialized():
return timing_raw
key_list, timing_list = [], []
for key in sorted(timing_raw.keys()):
key_list.append(key)
timing_list.append(timing_raw[key])
timing_list = torch.tensor(timing_list, dtype=torch.float32, device=get_device_id())
torch.distributed.all_reduce(timing_list, op=reduce_op)
timing_list = [tensor.item() for tensor in timing_list.to("cpu")]
timing_generate = {key_list[i]: timing_list[i] for i in range(len(key_list))}
return timing_generate
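# A minimal usage sketch (not part of the original module): every rank fills its own
# timing_raw with the same keys, then reduce_timing returns the per-key average across ranks
# (or the input unchanged when torch.distributed is not initialized).
def _example_reduce_timing(timing_raw: dict[str, float]) -> dict[str, float]:
    return reduce_timing(timing_raw, reduce_op=torch.distributed.ReduceOp.AVG)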
def topk_reduce_ratio_min_max(timing: float, k: int = 10) -> tuple[float, float, float]:
"""Calculate topk items take-up ratio, and min/max timing across all ranks."""
if not dist.is_initialized():
return -1.0, -1.0, -1.0
world_size = dist.get_world_size()
timing_tensor = torch.tensor(timing, dtype=torch.float32, device=get_device_id())
tensor_list = [torch.zeros(1, dtype=torch.float32, device=get_device_id()) for _ in range(world_size)]
torch.distributed.all_gather(tensor_list, timing_tensor)
tensor_stack = torch.stack(tensor_list)
timing_min = tensor_stack.min().cpu().item()
timing_max = tensor_stack.max().cpu().item()
top_k_percentile = torch.quantile(tensor_stack, 1 - k / 100)
tail_ratio = torch.mean((tensor_stack > top_k_percentile).float()).cpu().item()
return tail_ratio, timing_min, timing_max
def gather_timing(timing_raw: dict[str, float]) -> dict[str, list[float]]:
if not dist.is_initialized():
return {k: [v] for k, v in timing_raw.items()}
key_list, timing_list = [], []
for key in sorted(timing_raw.keys()):
key_list.append(key)
timing_list.append(timing_raw[key])
world_size = torch.distributed.get_world_size()
object_gather_list = [None] * world_size
torch.distributed.all_gather_object(object_gather_list, timing_list)
timing_generate = {
key_list[i]: [timing_list[i] for timing_list in object_gather_list] for i in range(len(key_list))
}
return timing_generate
|