{"file_name": "verl__checkpoint_engine__hccl_checkpoint_engine.py", "text": "# Copyright 2024 Bytedance Ltd. and/or its affiliates\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport asyncio\nimport logging\nimport os\nimport time\nfrom dataclasses import dataclass\nfrom typing import AsyncGenerator, Generator\n\nimport ray\nimport torch\nimport zmq\nfrom vllm.distributed.utils import StatelessProcessGroup\n\nfrom verl.checkpoint_engine.base import CheckpointEngine, CheckpointEngineRegistry, TensorMeta\nfrom verl.utils.distributed import stateless_init_process_group\nfrom verl.utils.net_utils import get_free_port, is_valid_ipv6_address\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(os.getenv(\"VERL_LOGGING_LEVEL\", \"WARN\"))\n\n\n@dataclass\nclass MasterMetadata:\n zmq_ip: str\n zmq_port: int\n dist_ip: str\n dist_port: int\n\n\nclass BroadcastOperation:\n \"\"\"Async broadcast operation with HCCL in separate thread.\n\n Args:\n rank (int): The rank of the current process.\n group_name (str): The name of the HCCL process group.\n bucket (torch.Tensor): The tensor to broadcast.\n metadata (dict[str, TensorMeta]): The metadata of the tensor.\n socket (zmq.Socket): The zeromq socket to communicate with master.\n topic (str): The topic to subscribe.\n \"\"\"\n\n def __init__(\n self,\n rank: int,\n process_group: StatelessProcessGroup | str,\n bucket: torch.Tensor,\n metadata: dict[str, TensorMeta],\n socket: zmq.Socket,\n topic: str,\n ) -> None:\n 
self.rank = rank\n self.pyhccl = process_group\n self.bucket = bucket\n self.metadata = metadata\n self.socket = socket\n self.topic = topic\n\n loop = asyncio.get_running_loop()\n self._task = loop.run_in_executor(None, self._run)\n\n def _run(self):\n # broadcast tensor meta via zeromq PUB/SUB\n if self.rank == 0:\n self.socket.send_string(self.topic, flags=zmq.SNDMORE)\n self.socket.send_pyobj(self.metadata)\n else:\n self.socket.recv_string()\n self.metadata = self.socket.recv_pyobj()\n\n # broadcast tensor via HCCL\n self.pyhccl.broadcast(self.bucket, src=0)\n\n async def wait_for_complete(self) -> dict[str, TensorMeta]:\n \"\"\"Wait for the broadcast operation to complete.\n\n Returns:\n dict[str, TensorMeta]: The bucket meta after broadcast.\n \"\"\"\n await self._task\n return self.metadata\n\n\n@CheckpointEngineRegistry.register(\"hccl\")\nclass HCCLCheckpointEngine(CheckpointEngine):\n \"\"\"HCCL checkpoint engine with collective communication.\n\n Args:\n bucket_size (int): Bucket size in bytes to transfer multiple weights at one time. Note that we use\n two buffer to send and recv weights at same time, so the device memory overhead is 2 * bucket_size.\n group_name (str): The name of the HCCL process group. Defaults to \"default\".\n rebuild_group (bool): Whether to rebuild the HCCL process group in each update. Defaults to False.\n is_master (bool): Whether the current process is the master process. Defaults to False.\n rollout_dtype (torch.dtype): The dtype of the weights received from rollout workers. 
Defaults to torch.bfloat16.\n \"\"\"\n\n def __init__(\n self,\n bucket_size: int,\n group_name: str = \"default\",\n rebuild_group: bool = False,\n is_master: bool = False,\n rollout_dtype: torch.dtype = torch.bfloat16,\n ) -> None:\n self.bucket_size = bucket_size\n self.group_name = group_name\n self.rebuild_group = rebuild_group\n self.rollout_dtype = rollout_dtype\n self.pyhccl = None\n self.device = torch.npu.current_device()\n\n # start zeromq server for broadcasting bucket tensor metadata\n self.is_master = is_master\n self.topic = \"bucket_metadata\"\n if self.is_master:\n self._start_zmq_server()\n self.dist_port, _ = get_free_port(self.ip)\n\n def prepare(self) -> MasterMetadata:\n self.send_buf = torch.zeros(self.bucket_size, dtype=torch.uint8, device=\"npu\")\n self.recv_buf = torch.zeros(self.bucket_size, dtype=torch.uint8, device=\"npu\")\n\n return (\n MasterMetadata(zmq_ip=self.ip, zmq_port=self.zmq_port, dist_ip=self.ip, dist_port=self.dist_port)\n if self.is_master\n else None\n )\n\n def finalize(self):\n \"\"\"Destroy the HCCL process group if rebuild_group is True.\"\"\"\n if self.rebuild_group:\n if self.rank >= 0:\n self.pyhccl.destroyComm(self.pyhccl.comm)\n self.pyhccl = None\n self.rank = None\n self.world_size = None\n\n self.send_buf = None\n self.recv_buf = None\n\n @classmethod\n def build_topology(cls, trainer_world_size: int, rollout_world_size: int, metadata: list[dict]):\n trainer_kwargs = {\n \"rank\": [0] + [-1] * (trainer_world_size - 1),\n \"world_size\": [rollout_world_size + 1] * trainer_world_size,\n \"master_metadata\": [metadata[0]] * trainer_world_size,\n }\n rollout_kwargs = {\n \"rank\": list(range(1, rollout_world_size + 1)),\n \"world_size\": [rollout_world_size + 1] * rollout_world_size,\n \"master_metadata\": [metadata[0]] * rollout_world_size,\n }\n return trainer_kwargs, rollout_kwargs\n\n def _start_zmq_server(self):\n self.ip = ray.util.get_node_ip_address().strip(\"[]\")\n self.zmq_port, self.listen_sock = 
get_free_port(self.ip)\n\n context = zmq.Context()\n self.socket = context.socket(zmq.PUB)\n if is_valid_ipv6_address(self.ip):\n address = f\"tcp://[{self.ip}]:{self.zmq_port}\"\n self.socket.setsockopt(zmq.IPV6, 1)\n else:\n address = f\"tcp://{self.ip}:{self.zmq_port}\"\n\n self.socket.bind(address)\n\n def _connect_zmq_client(self, metadata: MasterMetadata):\n assert not self.is_master, \"Master process should not connect to other processes.\"\n context = zmq.Context()\n self.socket = context.socket(zmq.SUB)\n if is_valid_ipv6_address(metadata.zmq_ip):\n address = f\"tcp://[{metadata.zmq_ip}]:{metadata.zmq_port}\"\n self.socket.setsockopt(zmq.IPV6, 1)\n else:\n address = f\"tcp://{metadata.zmq_ip}:{metadata.zmq_port}\"\n\n self.socket.connect(address)\n self.socket.setsockopt_string(zmq.SUBSCRIBE, self.topic)\n\n def init_process_group(self, rank: int, world_size: int, master_metadata: MasterMetadata):\n \"\"\"Initialize the HCCL process group.\n\n Args:\n rank (int): The rank of the current process.\n world_size (int): The total number of processes.\n \"\"\"\n # For trainer workers other than rank 0, their rank should be -1.\n if rank < 0:\n self.rank = rank\n self.world_size = world_size\n return\n\n if self.rebuild_group or self.pyhccl is None:\n self.pyhccl = stateless_init_process_group(\n master_metadata.dist_ip, master_metadata.dist_port, rank, world_size, self.device\n )\n self.rank = rank\n self.world_size = world_size\n else:\n assert self.rank == rank, f\"rank {rank} is not equal to self.rank {self.rank}\"\n assert self.world_size == world_size, (\n f\"world_size {world_size} is not equal to self.world_size {self.world_size}\"\n )\n\n if self.rank > 0:\n self._connect_zmq_client(master_metadata)\n\n # barrier\n signal = torch.tensor([1], dtype=torch.int8, device=torch.npu.current_device())\n self.pyhccl.all_reduce(signal)\n\n logger.info(f\"init_process_group rank: {self.rank}, world_size: {self.world_size}\")\n\n @torch.no_grad()\n async def 
send_weights(self, weights: Generator[tuple[str, torch.Tensor], None, None]):\n \"\"\"Send the weights of the model.\n\n Args:\n weights: A generator that yields the name of the weight tensor and the tensor itself.\n \"\"\"\n assert self.rank <= 0, \"Trainer workers other than rank 0 should not send weights.\"\n\n # For trainer rank other than 0, consume weights without sending.\n if self.rank < 0:\n for name, weight in weights:\n pass\n return\n\n send_buf, recv_buf = self.send_buf, self.recv_buf\n broadcast_op = None\n\n start_time = time.time()\n bucket_meta: dict[str, TensorMeta] = {}\n offset = 0\n for name, weight in weights:\n # fill the tensor bucket\n if offset + weight.nbytes > self.bucket_size:\n torch.npu.synchronize()\n\n # wait previous broadcast op finish\n if broadcast_op is not None:\n await broadcast_op.wait_for_complete()\n\n broadcast_op = BroadcastOperation(\n rank=self.rank,\n process_group=self.pyhccl,\n bucket=send_buf,\n metadata={\"bucket_meta\": bucket_meta, \"is_last\": False},\n socket=self.socket,\n topic=self.topic,\n )\n\n # swap send_buf and recv_buf\n send_buf, recv_buf = recv_buf, send_buf\n bucket_meta = {}\n offset = 0\n\n assert offset + weight.nbytes <= self.bucket_size, (\n f\"Weight {name}({weight.shape}, {weight.dtype}) is too large to fit in the bucket.\"\n )\n\n bucket_meta[name] = {\n \"name\": name,\n \"shape\": weight.shape,\n \"dtype\": weight.dtype,\n \"offset\": offset,\n }\n send_buf[offset : offset + weight.nbytes] = weight.view(-1).view(torch.uint8)\n offset += weight.nbytes\n\n # broadcast last bucket\n torch.npu.synchronize()\n if broadcast_op is not None:\n await broadcast_op.wait_for_complete()\n\n broadcast_op = BroadcastOperation(\n rank=self.rank,\n process_group=self.pyhccl,\n bucket=send_buf,\n metadata={\"bucket_meta\": bucket_meta, \"is_last\": True},\n socket=self.socket,\n topic=self.topic,\n )\n await broadcast_op.wait_for_complete()\n logger.info(f\"Rank {self.rank} send weights done, time cost: 
{time.time() - start_time:.2f}s\")\n\n @torch.no_grad()\n async def receive_weights(self) -> AsyncGenerator[tuple[str, torch.Tensor], None]:\n \"\"\"Receive the weights of the model.\n\n Yields:\n A tuple of the name of the weight tensor and the tensor itself.\n \"\"\"\n assert self.rank > 0, \"Rank 0 should not receive weights.\"\n send_buf, recv_buf = self.send_buf, self.recv_buf\n total_bytes, total_params = 0, 0\n\n # receive first bucket\n start_time = time.time()\n broadcast_op = BroadcastOperation(\n rank=self.rank,\n process_group=self.pyhccl,\n bucket=recv_buf,\n metadata=None,\n socket=self.socket,\n topic=self.topic,\n )\n metadata = await broadcast_op.wait_for_complete()\n total_bytes += self.bucket_size\n total_params += len(metadata[\"bucket_meta\"])\n\n # swap send_buf and recv_buf\n send_buf, recv_buf = recv_buf, send_buf\n while not metadata[\"is_last\"]:\n # 1. receive next bucket\n broadcast_op = BroadcastOperation(\n rank=self.rank,\n process_group=self.pyhccl,\n bucket=recv_buf,\n metadata=None,\n socket=self.socket,\n topic=self.topic,\n )\n\n # 2. yield tensor from send_buf\n for name, meta in metadata[\"bucket_meta\"].items():\n dtype, shape = meta[\"dtype\"], meta[\"shape\"]\n size = dtype.itemsize * shape.numel()\n tensor = send_buf[meta[\"offset\"] : meta[\"offset\"] + size].view(dtype=dtype).view(shape)\n yield name, tensor\n\n # 3. wait for next bucket broadcast finish\n metadata = await broadcast_op.wait_for_complete()\n total_bytes += self.bucket_size\n total_params += len(metadata[\"bucket_meta\"])\n\n # 4. 
swap send_buf and recv_buf\n torch.npu.synchronize() # sync non-blocking copy\n send_buf, recv_buf = recv_buf, send_buf\n\n # yield tensor from send_buf\n for name, meta in metadata[\"bucket_meta\"].items():\n dtype, shape = meta[\"dtype\"], meta[\"shape\"]\n size = dtype.itemsize * shape.numel()\n tensor = send_buf[meta[\"offset\"] : meta[\"offset\"] + size].view(dtype=dtype).view(shape)\n yield name, tensor\n\n time_cost = time.time() - start_time\n bandwidth = total_bytes / time_cost / (1024 * 1024 * 1024)\n logger.info(\n f\"Rank {self.rank} receive weights done, total_params: {total_params}, \"\n f\"time cost: {time_cost:.2f}s, bandwidth: {bandwidth:.2f} GB/s\"\n )\n"} {"file_name": "verl__models__llama__megatron__layers__parallel_attention.py", "text": "# Copyright 2024 Bytedance Ltd. and/or its affiliates\n# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.\n#\n# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX\n# and OPT implementations in this library. 
It has been modified from its\n# original forms to accommodate minor architectural differences compared\n# to GPT-NeoX and OPT used by the Meta AI team that trained the model.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport math\nfrom typing import Optional\n\nimport torch\nimport torch.nn.functional as F\nfrom einops import rearrange\nfrom flash_attn.layers.rotary import apply_rotary_emb\nfrom megatron.core import ModelParallelConfig, tensor_parallel\nfrom megatron.core import parallel_state as mpu\nfrom torch import nn\nfrom transformers import LlamaConfig\nfrom transformers.utils import is_flash_attn_2_available\n\nfrom verl.models.llama.megatron.layers.parallel_linear import QKVParallelLinear\nfrom verl.utils.megatron import tensor_parallel as tp_utils\n\n\nclass LlamaRotaryEmbedding(nn.Module):\n def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):\n super().__init__()\n\n self.dim = dim\n self.max_position_embeddings = max_position_embeddings\n self.base = base\n inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))\n self.register_buffer(\"inv_freq\", inv_freq, persistent=False)\n\n # Build here to make `torch.jit.trace` work.\n self._set_cos_sin_cache(\n seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()\n )\n\n def _set_cos_sin_cache(self, seq_len, device, dtype):\n self.max_seq_len_cached = seq_len\n t = 
torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)\n\n freqs = torch.einsum(\"i,j->ij\", t, self.inv_freq)\n # Different from paper, but it uses a different permutation in order to obtain the same calculation\n emb = torch.cat((freqs, freqs), dim=-1)\n self.register_buffer(\"cos_cached\", emb.cos().to(dtype), persistent=False)\n self.register_buffer(\"sin_cached\", emb.sin().to(dtype), persistent=False)\n\n def forward(self, x, seq_len=None):\n # x: [bs, num_attention_heads, seq_len, head_size]\n if seq_len > self.max_seq_len_cached:\n self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)\n\n return (\n self.cos_cached[:seq_len].to(dtype=x.dtype),\n self.sin_cached[:seq_len].to(dtype=x.dtype),\n )\n\n\nclass LlamaLinearScalingRotaryEmbedding(LlamaRotaryEmbedding):\n \"\"\"LlamaRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev\"\"\"\n\n def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):\n self.scaling_factor = scaling_factor\n super().__init__(dim, max_position_embeddings, base, device)\n\n def _set_cos_sin_cache(self, seq_len, device, dtype):\n self.max_seq_len_cached = seq_len\n t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)\n t = t / self.scaling_factor\n\n freqs = torch.einsum(\"i,j->ij\", t, self.inv_freq)\n # Different from paper, but it uses a different permutation in order to obtain the same calculation\n emb = torch.cat((freqs, freqs), dim=-1)\n self.register_buffer(\"cos_cached\", emb.cos().to(dtype), persistent=False)\n self.register_buffer(\"sin_cached\", emb.sin().to(dtype), persistent=False)\n\n\nclass LlamaDynamicNTKScalingRotaryEmbedding(LlamaRotaryEmbedding):\n \"\"\"LlamaRotaryEmbedding extended with Dynamic NTK scaling. 
Credits to the Reddit users /u/bloc97 and /u/emozilla\"\"\"\n\n def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):\n self.scaling_factor = scaling_factor\n super().__init__(dim, max_position_embeddings, base, device)\n\n def _set_cos_sin_cache(self, seq_len, device, dtype):\n self.max_seq_len_cached = seq_len\n\n if seq_len > self.max_position_embeddings:\n base = self.base * (\n (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)\n ) ** (self.dim / (self.dim - 2))\n inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))\n self.register_buffer(\"inv_freq\", inv_freq, persistent=False)\n\n t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)\n\n freqs = torch.einsum(\"i,j->ij\", t, self.inv_freq)\n # Different from paper, but it uses a different permutation in order to obtain the same calculation\n emb = torch.cat((freqs, freqs), dim=-1)\n self.register_buffer(\"cos_cached\", emb.cos().to(dtype), persistent=False)\n self.register_buffer(\"sin_cached\", emb.sin().to(dtype), persistent=False)\n\n\nclass LlamaLlama3ScalingRotaryEmbedding(LlamaRotaryEmbedding):\n def __init__(self, dim, config, max_position_embeddings=2048, base=10000, device=None):\n super().__init__(dim, max_position_embeddings, base, device)\n\n self.factor = config.rope_scaling[\"factor\"] # `8` in the original implementation\n self.high_freq_factor = config.rope_scaling[\"high_freq_factor\"] # `1` in the original implementation\n self.low_freq_factor = config.rope_scaling[\"low_freq_factor\"] # `4` in the original implementation\n self.old_context_len = config.rope_scaling[\n \"original_max_position_embeddings\"\n ] # `8192` in the original implementation\n\n low_freq_wavelen = self.old_context_len / self.low_freq_factor\n high_freq_wavelen = self.old_context_len / self.high_freq_factor\n\n wavelen = 2 * math.pi / self.inv_freq\n # wavelen < 
high_freq_wavelen: do nothing; wavelen > low_freq_wavelen: divide by factor\n inv_freq_llama = torch.where(wavelen > low_freq_wavelen, self.inv_freq / self.factor, self.inv_freq)\n # otherwise: interpolate between the two, using a smooth factor\n smooth_factor = (self.old_context_len / wavelen - self.low_freq_factor) / (\n self.high_freq_factor - self.low_freq_factor\n )\n smoothed_inv_freq = (1 - smooth_factor) * inv_freq_llama / self.factor + smooth_factor * inv_freq_llama\n is_medium_freq = ~(wavelen < high_freq_wavelen) * ~(wavelen > low_freq_wavelen)\n inv_freq = torch.where(is_medium_freq, smoothed_inv_freq, inv_freq_llama)\n\n self.register_buffer(\"inv_freq\", inv_freq, persistent=False)\n\n # Build here to make `torch.jit.trace` work.\n self._set_cos_sin_cache(\n seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()\n )\n\n\ndef rotate_half(x):\n \"\"\"Rotates half the hidden dims of the input.\"\"\"\n x1 = x[..., : x.shape[-1] // 2]\n x2 = x[..., x.shape[-1] // 2 :]\n return torch.cat((-x2, x1), dim=-1)\n\n\ndef apply_rotary_pos_emb(q, k, cos, sin, position_ids):\n cos = cos[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim]\n sin = sin[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim]\n q_embed = (q * cos) + (rotate_half(q) * sin)\n k_embed = (k * cos) + (rotate_half(k) * sin)\n return q_embed, k_embed\n\n\ndef repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:\n \"\"\"\n This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). 
The hidden states go from (batch,\n num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)\n \"\"\"\n batch, num_key_value_heads, slen, head_dim = hidden_states.shape\n if n_rep == 1:\n return hidden_states\n hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)\n return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)\n\n\nclass ParallelLlamaAttention(nn.Module):\n \"\"\"Multi-headed attention from 'Attention Is All You Need' paper\"\"\"\n\n def __init__(self, config: LlamaConfig, megatron_config: ModelParallelConfig):\n super().__init__()\n self.config = config\n self.megatron_config = megatron_config\n self.hidden_size = config.hidden_size\n self.num_heads = config.num_attention_heads\n self.head_dim = self.hidden_size // self.num_heads\n self.num_key_value_heads = config.num_key_value_heads\n self.num_key_value_groups = self.num_heads // self.num_key_value_heads\n self.max_position_embeddings = config.max_position_embeddings\n self.rope_theta = config.rope_theta\n\n # assign values after tp\n tp_size = mpu.get_tensor_model_parallel_world_size()\n assert self.num_heads % tp_size == 0, (\n f\"num_head must be divisible by tp_size. Got num_head={self.num_heads}, tp_size={tp_size}\"\n )\n assert self.num_key_value_heads % tp_size == 0, (\n f\"num_key_value_heads must be divisible by tp_size. 
Got num_key_value_heads=\"\n f\"{self.num_key_value_heads}, tp_size={tp_size}\"\n )\n\n self.num_heads_per_tp = self.num_heads // tp_size\n self.num_key_value_heads_per_tp = self.num_key_value_heads // tp_size\n self.hidden_size_per_tp = self.hidden_size // tp_size\n\n if (self.head_dim * self.num_heads) != self.hidden_size:\n raise ValueError(\n f\"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and \"\n f\"`num_heads`: {self.num_heads}).\"\n )\n\n column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear()\n row_kwargs = tp_utils.get_default_kwargs_for_row_parallel_linear()\n\n if megatron_config is not None:\n assert column_kwargs.get(\"config\", False), \"must have ModelParallelConfig\"\n assert row_kwargs.get(\"config\", False), \"must have ModelParallelConfig\"\n tp_utils.update_kwargs_with_config(column_kwargs, megatron_config)\n tp_utils.update_kwargs_with_config(row_kwargs, megatron_config)\n\n # [self.q_size, self.k_size, self.v_size]\n self.qkv_proj = QKVParallelLinear(\n input_size=self.hidden_size,\n num_heads=self.num_heads,\n num_key_value_heads=self.num_key_value_heads,\n head_dim=self.head_dim,\n bias=config.attention_bias,\n gather_output=False,\n skip_bias_add=False,\n **column_kwargs,\n )\n\n self.q_size = self.num_heads_per_tp * self.head_dim\n self.k_size = self.num_key_value_heads_per_tp * self.head_dim\n self.v_size = self.num_key_value_heads_per_tp * self.head_dim\n\n self.o_proj = tensor_parallel.RowParallelLinear(\n input_size=self.num_heads * self.head_dim,\n output_size=self.hidden_size,\n bias=config.attention_bias,\n input_is_parallel=True,\n skip_bias_add=False,\n **row_kwargs,\n )\n\n self._init_rope()\n\n def _init_rope(self):\n if self.config.rope_scaling is None:\n self.rotary_emb = LlamaRotaryEmbedding(\n self.head_dim,\n max_position_embeddings=self.max_position_embeddings,\n base=self.rope_theta,\n )\n else:\n rope_type_key = \"type\" if \"type\" in self.config.rope_scaling else 
\"rope_type\"\n scaling_type = self.config.rope_scaling[rope_type_key]\n scaling_factor = self.config.rope_scaling[\"factor\"]\n if scaling_type == \"linear\":\n self.rotary_emb = LlamaLinearScalingRotaryEmbedding(\n self.head_dim,\n max_position_embeddings=self.max_position_embeddings,\n scaling_factor=scaling_factor,\n base=self.rope_theta,\n )\n elif scaling_type == \"dynamic\":\n self.rotary_emb = LlamaDynamicNTKScalingRotaryEmbedding(\n self.head_dim,\n max_position_embeddings=self.max_position_embeddings,\n scaling_factor=scaling_factor,\n base=self.rope_theta,\n )\n elif scaling_type == \"llama3\":\n self.rotary_emb = LlamaLlama3ScalingRotaryEmbedding(\n self.head_dim,\n self.config,\n max_position_embeddings=self.max_position_embeddings,\n base=self.rope_theta,\n )\n else:\n raise ValueError(f\"Unknown RoPE scaling type {scaling_type}\")\n\n def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):\n return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:\n bsz, q_len, _ = hidden_states.size()\n qkv = self.qkv_proj(hidden_states)[0]\n query_states, key_states, value_states = qkv.split([self.q_size, self.k_size, self.v_size], dim=-1)\n\n query_states = query_states.view(bsz, q_len, self.num_heads_per_tp, self.head_dim).transpose(1, 2)\n key_states = key_states.view(bsz, q_len, self.num_key_value_heads_per_tp, self.head_dim).transpose(1, 2)\n value_states = value_states.view(bsz, q_len, self.num_key_value_heads_per_tp, self.head_dim).transpose(1, 2)\n\n kv_seq_len = key_states.shape[-2]\n cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)\n query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)\n\n key_states = 
repeat_kv(key_states, self.num_key_value_groups)\n value_states = repeat_kv(value_states, self.num_key_value_groups)\n\n attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)\n\n if attn_weights.size() != (bsz, self.num_heads_per_tp, q_len, kv_seq_len):\n raise ValueError(\n f\"Attention weights should be of size {(bsz, self.num_heads_per_tp, q_len, kv_seq_len)}, \"\n f\"but is {attn_weights.size()}\"\n )\n\n if attention_mask is not None:\n if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):\n raise ValueError(\n f\"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}\"\n )\n attn_weights = attn_weights + attention_mask\n\n # upcast attention to fp32\n attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)\n attn_output = torch.matmul(attn_weights, value_states)\n\n if attn_output.size() != (bsz, self.num_heads_per_tp, q_len, self.head_dim):\n raise ValueError(\n f\"`attn_output` should be of size {(bsz, self.num_heads_per_tp, q_len, self.head_dim)}, \"\n f\"but is {attn_output.size()}\"\n )\n\n attn_output = attn_output.transpose(1, 2).contiguous()\n attn_output = attn_output.reshape(bsz, q_len, self.hidden_size_per_tp)\n attn_output = self.o_proj(attn_output)[0]\n return attn_output\n\n\n\"\"\"\nRemove padding Attention\n- Using Flash-attn 2\n- Compatible with sequence parallel\n\"\"\"\n\n\nif is_flash_attn_2_available():\n from flash_attn import flash_attn_varlen_func\n from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa: F401\n\n\ndef apply_rotary_pos_emb_rmpad(q, k, cos, sin, position_ids, indices, sequence_length):\n batch_size = position_ids.shape[0]\n\n q = pad_input(q, indices, batch_size, sequence_length) # (batch_size, seqlen, num_head, head_dim)\n k = pad_input(k, indices, batch_size, sequence_length)\n cos = cos[position_ids].unsqueeze(2) # [bs, seq_len, 1, dim]\n sin = 
sin[position_ids].unsqueeze(2) # [bs, seq_len, 1, dim]\n q_embed = (q * cos) + (rotate_half(q) * sin)\n k_embed = (k * cos) + (rotate_half(k) * sin)\n\n q_embed = index_first_axis(rearrange(q_embed, \"b s ... -> (b s) ...\"), indices)\n k_embed = index_first_axis(rearrange(k_embed, \"b s ... -> (b s) ...\"), indices)\n\n return q_embed, k_embed\n\n\n# use flash-attn rotary embeddings with rmpad\n# cos/sin shoudl be: (seq_length, rotary_dim / 2)\ndef apply_rotary_pos_emb_rmpad_flash(q, k, cos, sin, cu_seqlens, max_seqlen):\n q_embed = apply_rotary_emb(\n q, cos, sin, interleaved=False, inplace=False, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen\n )\n k_embed = apply_rotary_emb(\n k, cos, sin, interleaved=False, inplace=False, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen\n )\n return q_embed, k_embed\n\n\nclass ParallelLlamaAttentionRmPad(ParallelLlamaAttention):\n def forward(\n self,\n hidden_states: torch.Tensor,\n position_ids: Optional[torch.LongTensor] = None,\n sequence_length: int = None,\n indices: torch.Tensor = None,\n cu_seqlens: torch.Tensor = None,\n max_seqlen_in_batch: int = None,\n ):\n total_nnz, _, _ = hidden_states.size() # This is the total_nnz padded after sequence parallel\n\n if self.megatron_config.sequence_parallel:\n total_nnz = total_nnz * mpu.get_tensor_model_parallel_world_size()\n\n qkv = self.qkv_proj(hidden_states)[0]\n query_states, key_states, value_states = qkv.split(\n [self.q_size, self.k_size, self.v_size], dim=-1\n ) # (total_nnz, 1, hidden_size)\n\n if self.megatron_config.sequence_parallel:\n sequence_parallel_pad = total_nnz - cu_seqlens[-1]\n total_nnz = cu_seqlens[-1] # total_nnz before sp padding\n query_states = query_states[:total_nnz]\n key_states = key_states[:total_nnz]\n value_states = value_states[:total_nnz]\n\n # Flash attention requires the input to have the shape\n # batch_size x seq_length x head_dime x hidden_dim\n # therefore we just need to keep the original shape\n query_states = 
query_states.view(total_nnz, self.num_heads_per_tp, self.head_dim)\n key_states = key_states.view(total_nnz, self.num_key_value_heads_per_tp, self.head_dim)\n value_states = value_states.view(total_nnz, self.num_key_value_heads_per_tp, self.head_dim)\n\n cos, sin = self.rotary_emb(value_states, seq_len=sequence_length)\n cos, sin = cos[:, : cos.shape[1] // 2], sin[:, : sin.shape[1] // 2] # flash attn only needs half\n query_states, key_states = apply_rotary_pos_emb_rmpad_flash(\n query_states, key_states, cos, sin, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen_in_batch\n )\n # query_states, key_states = apply_rotary_pos_emb_rmpad(query_states, key_states, cos, sin,\n # position_ids, indices,\n\n # TODO: llama does not have dropout in the config??\n # It is recommended to use dropout with FA according to the docs\n # when training.\n dropout_rate = 0.0 # if not self.training else self.attn_dropout\n\n # In PEFT, usually we cast the layer norms in float32 for training stability reasons\n # therefore the input hidden states gets silently casted in float32. Hence, we need\n # cast them back in float16 just to be sure everything works as expected.\n # This might slowdown training & inference so it is recommended to not cast the LayerNorms\n # in fp32. 
(LlamaRMSNorm handles it correctly)\n input_dtype = query_states.dtype\n if input_dtype == torch.float32:\n query_states = query_states.to(torch.float16)\n key_states = key_states.to(torch.float16)\n value_states = value_states.to(torch.float16)\n\n attn_output_unpad = flash_attn_varlen_func(\n query_states,\n key_states,\n value_states,\n cu_seqlens_q=cu_seqlens,\n cu_seqlens_k=cu_seqlens,\n max_seqlen_q=max_seqlen_in_batch,\n max_seqlen_k=max_seqlen_in_batch,\n dropout_p=dropout_rate,\n softmax_scale=None,\n causal=True,\n )\n\n attn_output_unpad = attn_output_unpad.to(input_dtype)\n attn_output_unpad = attn_output_unpad.reshape(total_nnz, 1, self.hidden_size_per_tp).contiguous()\n\n # sequence parallel reduce_scatter is performed inside RowColumnParallel if enabled\n # Here we need to repad\n if self.megatron_config.sequence_parallel:\n attn_output_unpad = F.pad(attn_output_unpad, pad=(0, 0, 0, 0, 0, sequence_parallel_pad))\n\n attn_output_unpad = self.o_proj(attn_output_unpad)[0]\n return attn_output_unpad\n"} {"file_name": "verl__models__mcore__registry.py", "text": "# Copyright 2025 Bytedance Ltd. and/or its affiliates\n# Copyright (c) 2025, NVIDIA CORPORATION. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nRegistry module for model architecture components.\n\"\"\"\n\nfrom enum import Enum\nfrom typing import Callable\n\nimport torch\nimport torch.nn as nn\n\nfrom .model_forward import gptmodel_forward_no_padding, model_forward_gen\nfrom .model_forward_fused import fused_forward_model_gen, fused_forward_no_padding_gen\n\n\nclass SupportedVLM(Enum):\n QWEN2_5_VL = \"Qwen2_5_VLForConditionalGeneration\"\n QWEN3_MOE_VL = \"Qwen3VLMoeForConditionalGeneration\"\n QWEN3_VL = \"Qwen3VLForConditionalGeneration\"\n\n\nsupported_vlm = [member.value for member in SupportedVLM]\n\n\ndef get_mcore_forward_fn(hf_config) -> Callable:\n \"\"\"\n Get the forward function for given model architecture.\n \"\"\"\n assert len(hf_config.architectures) == 1, \"Only one architecture is supported for now\"\n if hf_config.architectures[0] in supported_vlm:\n return model_forward_gen(True)\n else:\n # default to language model\n return model_forward_gen(False)\n\n\ndef get_mcore_forward_no_padding_fn(hf_config) -> Callable:\n \"\"\"\n Get the forward function for given model architecture.\n \"\"\"\n assert len(hf_config.architectures) == 1, \"Only one architecture is supported for now\"\n return gptmodel_forward_no_padding\n\n\ndef get_mcore_forward_fused_fn(hf_config) -> Callable:\n \"\"\"\n Get the forward function for given model architecture.\n \"\"\"\n assert len(hf_config.architectures) == 1, \"Only one 
architecture is supported for now\"\n if hf_config.architectures[0] in supported_vlm:\n return fused_forward_model_gen(True)\n else:\n # default to language model\n return fused_forward_model_gen(False)\n\n\ndef get_mcore_forward_fused_no_padding_fn(hf_config) -> Callable:\n \"\"\"\n Get the fused forward function for no-padding inputs.\n \"\"\"\n assert len(hf_config.architectures) == 1, \"Only one architecture is supported for now\"\n if hf_config.architectures[0] in supported_vlm:\n return fused_forward_no_padding_gen(True)\n else:\n # default to language model\n return fused_forward_no_padding_gen(False)\n\n\n# ruff: noqa\n\n########################################################\n# below is the deprecated code\n########################################################\n\nfrom .config_converter import (\n PretrainedConfig,\n TransformerConfig,\n hf_to_mcore_config_dense,\n hf_to_mcore_config_dpskv3,\n hf_to_mcore_config_llama4,\n hf_to_mcore_config_mixtral,\n hf_to_mcore_config_qwen2_5_vl,\n hf_to_mcore_config_qwen2moe,\n hf_to_mcore_config_qwen3moe,\n)\nfrom .model_initializer import (\n BaseModelInitializer,\n DeepseekV3Model,\n DenseModel,\n MixtralModel,\n Qwen2MoEModel,\n Qwen3MoEModel,\n Qwen25VLModel,\n)\nfrom .weight_converter import (\n McoreToHFWeightConverterDense,\n McoreToHFWeightConverterDpskv3,\n McoreToHFWeightConverterMixtral,\n McoreToHFWeightConverterQwen2_5_VL,\n McoreToHFWeightConverterQwen2Moe,\n McoreToHFWeightConverterQwen3Moe,\n)\n\n\nclass SupportedModel(Enum):\n LLAMA = \"LlamaForCausalLM\" # tested\n QWEN2 = \"Qwen2ForCausalLM\" # tested\n QWEN2_MOE = \"Qwen2MoeForCausalLM\" # pending\n DEEPSEEK_V3 = \"DeepseekV3ForCausalLM\" # not tested\n MIXTRAL = \"MixtralForCausalLM\" # tested\n QWEN2_5_VL = \"Qwen2_5_VLForConditionalGeneration\" # not supported\n LLAMA4 = \"Llama4ForConditionalGeneration\" # not tested\n QWEN3 = \"Qwen3ForCausalLM\" # tested\n QWEN3_MOE = \"Qwen3MoeForCausalLM\" # tested\n GLM4_MOE = \"Glm4MoeForCausalLM\"\n 
QWEN3_TOKEN_CLASSIFICATION = \"Qwen3ForTokenClassification\"\n LLAMA_TOKEN_CLASSIFICATION = \"LlamaForTokenClassification\"\n QWEN3_MOE_VL = \"Qwen3VLMoeForConditionalGeneration\"\n QWEN3_VL = \"Qwen3VLForConditionalGeneration\"\n GPT_OSS = \"GptOssForCausalLM\"\n MiMO = \"MiMoForCausalLM\"\n\n\n# Registry for model configuration converters\nMODEL_CONFIG_CONVERTER_REGISTRY: dict[SupportedModel, Callable[[PretrainedConfig, torch.dtype], TransformerConfig]] = {\n SupportedModel.LLAMA: hf_to_mcore_config_dense,\n SupportedModel.QWEN2: hf_to_mcore_config_dense,\n SupportedModel.QWEN2_MOE: hf_to_mcore_config_qwen2moe,\n SupportedModel.DEEPSEEK_V3: hf_to_mcore_config_dpskv3,\n SupportedModel.MIXTRAL: hf_to_mcore_config_mixtral,\n SupportedModel.QWEN2_5_VL: hf_to_mcore_config_qwen2_5_vl,\n SupportedModel.LLAMA4: hf_to_mcore_config_llama4,\n SupportedModel.QWEN3: hf_to_mcore_config_dense,\n SupportedModel.QWEN3_MOE: hf_to_mcore_config_qwen3moe,\n SupportedModel.QWEN3_TOKEN_CLASSIFICATION: hf_to_mcore_config_dense,\n SupportedModel.LLAMA_TOKEN_CLASSIFICATION: hf_to_mcore_config_dense,\n}\n\n# Registry for model initializers\nMODEL_INITIALIZER_REGISTRY: dict[SupportedModel, type[BaseModelInitializer]] = {\n SupportedModel.LLAMA: DenseModel,\n SupportedModel.QWEN2: DenseModel,\n SupportedModel.QWEN2_MOE: Qwen2MoEModel,\n SupportedModel.MIXTRAL: MixtralModel,\n SupportedModel.DEEPSEEK_V3: DeepseekV3Model,\n SupportedModel.QWEN2_5_VL: Qwen25VLModel,\n SupportedModel.LLAMA4: DenseModel,\n SupportedModel.QWEN3: DenseModel,\n SupportedModel.QWEN3_MOE: Qwen3MoEModel,\n SupportedModel.QWEN3_TOKEN_CLASSIFICATION: DenseModel,\n SupportedModel.LLAMA_TOKEN_CLASSIFICATION: DenseModel,\n}\n\n# Registry for model forward functions\nMODEL_FORWARD_REGISTRY: dict[SupportedModel, Callable] = {\n SupportedModel.LLAMA: model_forward_gen(),\n SupportedModel.QWEN2: model_forward_gen(),\n SupportedModel.QWEN2_MOE: model_forward_gen(),\n SupportedModel.MIXTRAL: model_forward_gen(),\n 
SupportedModel.DEEPSEEK_V3: model_forward_gen(),\n SupportedModel.LLAMA4: model_forward_gen(),\n SupportedModel.QWEN3: model_forward_gen(),\n SupportedModel.QWEN3_MOE: model_forward_gen(),\n SupportedModel.QWEN2_5_VL: model_forward_gen(True),\n SupportedModel.QWEN3_MOE_VL: model_forward_gen(True),\n SupportedModel.QWEN3_VL: model_forward_gen(True),\n SupportedModel.GLM4_MOE: model_forward_gen(),\n SupportedModel.QWEN3_TOKEN_CLASSIFICATION: model_forward_gen(),\n SupportedModel.LLAMA_TOKEN_CLASSIFICATION: model_forward_gen(),\n SupportedModel.GPT_OSS: model_forward_gen(),\n SupportedModel.MiMO: model_forward_gen(),\n}\n\n# Registry for model forward functions\nMODEL_FORWARD_NOPAD_REGISTRY: dict[SupportedModel, Callable] = {\n SupportedModel.LLAMA: gptmodel_forward_no_padding,\n SupportedModel.QWEN2: gptmodel_forward_no_padding,\n SupportedModel.QWEN2_MOE: gptmodel_forward_no_padding,\n SupportedModel.MIXTRAL: gptmodel_forward_no_padding,\n SupportedModel.DEEPSEEK_V3: gptmodel_forward_no_padding,\n SupportedModel.QWEN2_5_VL: gptmodel_forward_no_padding,\n SupportedModel.QWEN3_MOE_VL: gptmodel_forward_no_padding,\n SupportedModel.QWEN3_VL: gptmodel_forward_no_padding,\n SupportedModel.LLAMA4: gptmodel_forward_no_padding,\n SupportedModel.QWEN3: gptmodel_forward_no_padding,\n SupportedModel.QWEN3_MOE: gptmodel_forward_no_padding,\n SupportedModel.GLM4_MOE: gptmodel_forward_no_padding,\n SupportedModel.QWEN3_TOKEN_CLASSIFICATION: gptmodel_forward_no_padding,\n SupportedModel.LLAMA_TOKEN_CLASSIFICATION: gptmodel_forward_no_padding,\n SupportedModel.GPT_OSS: gptmodel_forward_no_padding,\n SupportedModel.MiMO: gptmodel_forward_no_padding,\n}\n\n# Registry for model forward functions\nMODEL_FORWARD_FUSED_REGISTRY: dict[SupportedModel, Callable] = {\n SupportedModel.LLAMA: fused_forward_model_gen(),\n SupportedModel.QWEN2: fused_forward_model_gen(),\n SupportedModel.QWEN2_MOE: fused_forward_model_gen(),\n SupportedModel.MIXTRAL: fused_forward_model_gen(),\n 
SupportedModel.QWEN2_5_VL: fused_forward_model_gen(True),\n SupportedModel.QWEN3_MOE_VL: fused_forward_model_gen(True),\n SupportedModel.QWEN3_VL: fused_forward_model_gen(True),\n SupportedModel.LLAMA4: fused_forward_model_gen(),\n SupportedModel.QWEN3: fused_forward_model_gen(),\n SupportedModel.QWEN3_MOE: fused_forward_model_gen(),\n SupportedModel.DEEPSEEK_V3: fused_forward_model_gen(),\n SupportedModel.GLM4_MOE: fused_forward_model_gen(),\n SupportedModel.GPT_OSS: fused_forward_model_gen(),\n SupportedModel.MiMO: fused_forward_model_gen(),\n}\n\n# Registry for model weight converters\nMODEL_WEIGHT_CONVERTER_REGISTRY: dict[SupportedModel, type] = {\n SupportedModel.LLAMA: McoreToHFWeightConverterDense,\n SupportedModel.QWEN2: McoreToHFWeightConverterDense,\n SupportedModel.QWEN2_MOE: McoreToHFWeightConverterQwen2Moe,\n SupportedModel.MIXTRAL: McoreToHFWeightConverterMixtral,\n SupportedModel.DEEPSEEK_V3: McoreToHFWeightConverterDpskv3,\n SupportedModel.QWEN3: McoreToHFWeightConverterDense,\n SupportedModel.QWEN3_MOE: McoreToHFWeightConverterQwen3Moe,\n SupportedModel.QWEN2_5_VL: McoreToHFWeightConverterQwen2_5_VL,\n SupportedModel.QWEN3_TOKEN_CLASSIFICATION: McoreToHFWeightConverterDense,\n SupportedModel.LLAMA_TOKEN_CLASSIFICATION: McoreToHFWeightConverterDense,\n}\n\n\ndef get_supported_model(model_type: str) -> SupportedModel:\n try:\n return SupportedModel(model_type)\n except ValueError as err:\n supported_models = [e.value for e in SupportedModel]\n raise NotImplementedError(\n f\"Model Type: {model_type} not supported. 
Supported models: {supported_models}\"\n ) from err\n\n\ndef hf_to_mcore_config(\n hf_config: PretrainedConfig, dtype: torch.dtype, **override_transformer_config_kwargs\n) -> TransformerConfig:\n \"\"\"Convert huggingface PretrainedConfig to mcore TransformerConfig.\n\n Args:\n hf_config: The huggingface PretrainedConfig.\n dtype: The dtype of the model.\n **override_transformer_config_kwargs: The kwargs to override the transformer config.\n\n Returns:\n The mcore TransformerConfig.\n \"\"\"\n assert len(hf_config.architectures) == 1, \"Only one architecture is supported for now\"\n model = get_supported_model(hf_config.architectures[0])\n return MODEL_CONFIG_CONVERTER_REGISTRY[model](hf_config, dtype, **override_transformer_config_kwargs)\n\n\ndef init_mcore_model(\n tfconfig: TransformerConfig,\n hf_config: PretrainedConfig,\n pre_process: bool = True,\n post_process: bool = None,\n *,\n share_embeddings_and_output_weights: bool = False,\n value: bool = False,\n **extra_kwargs, # may be used for vlm and moe\n) -> nn.Module:\n \"\"\"\n Initialize a Mcore model.\n\n Args:\n tfconfig: The transformer config.\n hf_config: The HuggingFace config.\n pre_process: Optional pre-processing function.\n post_process: Optional post-processing function.\n share_embeddings_and_output_weights: Whether to share embeddings and output weights.\n value: Whether to use value.\n **extra_kwargs: Additional keyword arguments.\n\n Returns:\n The initialized model.\n \"\"\"\n assert len(hf_config.architectures) == 1, \"Only one architecture is supported for now\"\n model = get_supported_model(hf_config.architectures[0])\n initializer_cls = MODEL_INITIALIZER_REGISTRY[model]\n initializer = initializer_cls(tfconfig, hf_config)\n return initializer.initialize(\n pre_process=pre_process,\n post_process=post_process,\n share_embeddings_and_output_weights=share_embeddings_and_output_weights,\n value=value,\n **extra_kwargs,\n )\n\n\ndef get_mcore_weight_converter(hf_config: PretrainedConfig, 
dtype: torch.dtype) -> Callable:\n \"\"\"\n Get the weight converter for given model architecture.\n \"\"\"\n assert len(hf_config.architectures) == 1, \"Only one architecture is supported for now\"\n model = get_supported_model(hf_config.architectures[0])\n tfconfig = hf_to_mcore_config(hf_config, dtype)\n return MODEL_WEIGHT_CONVERTER_REGISTRY[model](hf_config, tfconfig)\n"} {"file_name": "verl__models__weight_loader_registry.py", "text": "# Copyright 2024 Bytedance Ltd. and/or its affiliates\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ndef get_weight_loader(arch: str):\n from verl.models.mcore.loader import load_state_dict_to_megatron_gptmodel\n\n _MODEL_WEIGHT_MEGATRON_LOADER_REGISTRY = {\n \"LlamaForCausalLM\": load_state_dict_to_megatron_gptmodel,\n \"Qwen2ForCausalLM\": load_state_dict_to_megatron_gptmodel,\n }\n\n if arch in _MODEL_WEIGHT_MEGATRON_LOADER_REGISTRY:\n return _MODEL_WEIGHT_MEGATRON_LOADER_REGISTRY[arch]\n raise ValueError(\n f\"Model architectures {arch} loader are not supported for now. 
Supported architectures: \"\n f\"{_MODEL_WEIGHT_MEGATRON_LOADER_REGISTRY.keys()}\"\n )\n\n\ndef get_weight_saver(arch: str):\n from verl.models.mcore.saver import (\n merge_megatron_ckpt_gptmodel,\n merge_megatron_ckpt_gptmodel_dpskv3,\n merge_megatron_ckpt_gptmodel_mixtral,\n merge_megatron_ckpt_gptmodel_qwen2_5_vl,\n merge_megatron_ckpt_gptmodel_qwen_moe,\n )\n\n _MODEL_WEIGHT_MEGATRON_SAVER_REGISTRY = {\n \"LlamaForCausalLM\": merge_megatron_ckpt_gptmodel,\n \"Qwen2ForCausalLM\": merge_megatron_ckpt_gptmodel,\n \"MixtralForCausalLM\": merge_megatron_ckpt_gptmodel_mixtral,\n \"Qwen2MoeForCausalLM\": merge_megatron_ckpt_gptmodel_qwen_moe,\n \"Qwen2_5_VLForConditionalGeneration\": merge_megatron_ckpt_gptmodel_qwen2_5_vl,\n \"DeepseekV3ForCausalLM\": merge_megatron_ckpt_gptmodel_dpskv3,\n \"Qwen3ForCausalLM\": merge_megatron_ckpt_gptmodel,\n \"Qwen3ForTokenClassification\": merge_megatron_ckpt_gptmodel,\n \"Qwen3MoeForCausalLM\": merge_megatron_ckpt_gptmodel_qwen_moe,\n \"LlamaForTokenClassification\": merge_megatron_ckpt_gptmodel,\n }\n if arch in _MODEL_WEIGHT_MEGATRON_SAVER_REGISTRY:\n return _MODEL_WEIGHT_MEGATRON_SAVER_REGISTRY[arch]\n raise ValueError(\n f\"Model architectures {arch} saver are not supported for now. Supported architectures: \"\n f\"{_MODEL_WEIGHT_MEGATRON_SAVER_REGISTRY.keys()}\"\n )\n"} {"file_name": "verl__trainer__config__algorithm.py", "text": "# Copyright 2024 Bytedance Ltd. 
and/or its affiliates\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom dataclasses import dataclass, field\nfrom typing import Any, Optional\n\nfrom verl.base_config import BaseConfig\n\n__all__ = [\"AlgoConfig\", \"FilterGroupsConfig\", \"KLControlConfig\", \"RolloutCorrectionConfig\"]\n\n\n@dataclass\nclass KLControlConfig(BaseConfig):\n \"\"\"Configuration for KL control.\n\n The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config.\n\n Args:\n type (str): Type of KL control. 
Can be \"fixed\" or \"adaptive\".\n kl_coef (float): Initial coefficient for KL penalty.\n horizon (int): Horizon value for adaptive controller.\n target_kl (float): Target KL divergence for adaptive controller.\n \"\"\"\n\n type: str = \"fixed\"\n kl_coef: float = 0.001\n horizon: int = 10000\n target_kl: float = 0.1\n\n\n@dataclass\nclass FilterGroupsConfig(BaseConfig):\n \"\"\"Configuration for filter groups (used in DAPO and Entropy).\n\n The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config.\n\n Args:\n enable (bool): Whether to enable filter groups.\n metric (Optional[str]): Metric to use for filtering: \"acc\", \"score\", \"seq_reward\", \"seq_final_reward\", etc.\n max_num_gen_batches (int): Non-positive values mean no upper limit.\n \"\"\"\n\n enable: bool = False\n metric: Optional[str] = None\n max_num_gen_batches: int = 0\n\n\n@dataclass\nclass RolloutCorrectionConfig(BaseConfig):\n \"\"\"Configuration for Rollout Correction (addresses off-policy issues in RL training).\n\n The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config.\n\n Rollout Correction handles off-policiness from multiple sources:\n 1. Policy mismatch: Rollout policy (e.g., vLLM BF16) vs Training policy (e.g., FSDP FP32)\n 2. Model update staleness: Rollout data collected from older policy checkpoints\n 3. 
General off-policy scenarios: Any distribution shift between data collection and training\n\n For more details, see:\n \"When Speed Kills Stability: Demystifying RL Collapse from the Training-Inference Mismatch\"\n https://richardli.xyz/rl-collapse\n\n This typed config replaces the old dict-based approach and provides:\n - Type safety and validation\n - Clear documentation of all parameters\n - Named factory methods for common presets (TIS, MIS, etc.)\n - Sensible defaults\n\n Args:\n rollout_is (Optional[str]): IS weight aggregation level.\n - None: No IS weights (metrics only)\n - \"token\": Per-token IS weights (low variance, biased)\n - \"sequence\": Per-sequence IS weights (unbiased, high variance)\n Default: \"sequence\"\n\n rollout_is_threshold (float): Upper threshold for IS weight truncation/rejection.\n Typical range: 1.5-5.0 for token level, 2.0-10.0 for sequence level.\n Default: 2.0\n\n rollout_is_batch_normalize (bool): Apply batch normalization to IS weights.\n - True: Normalize IS weights to have mean=1.0 within each batch\n - False: Use raw (truncated) IS weights (standard)\n - Reduces variance by ensuring average weight is 1.0 per batch\n - Only affects IS weight values, not rejection sampling\n Default: False (no batch normalization)\n\n rollout_rs (Optional[str]): Rejection sampling aggregation modes.\n Accepts a comma-delimited list (duplicates removed) of canonical options implemented in\n ``rollout_corr_helper``:\n - \"token_k1\": Token-level rejection with ``-log r`` (ratio thresholds supplied via\n ``rollout_rs_threshold`` as ``lower_upper``)\n - \"token_k2\": Token-level rejection with ``0.5 * (log r)^2`` (upper bound only)\n - \"token_k3\": Token-level rejection with ``exp(log r) - 1 - log r`` (upper bound only)\n - \"seq_sum_k1\": Sequence sum of ``-log r`` (ratio bounds)\n - \"seq_sum_k2\": Sequence sum of rejection with ``0.5 * (log r)^2`` (upper bound only)\n - \"seq_sum_k3\": Sequence sum of rejection with ``exp(log r) - 1 - log r`` 
(upper bound only)\n - \"seq_mean_k1\": Sequence mean of ``-log r`` (ratio bounds)\n - \"seq_mean_k2\": Sequence mean of rejection with ``0.5 * (log r)^2`` (upper bound only)\n - \"seq_mean_k3\": Sequence mean of rejection with ``exp(log r) - 1 - log r`` (upper bound only)\n - \"seq_max_k2\": Sequence max of rejection with ``0.5 * (log r)^2`` (upper bound only)\n - \"seq_max_k3\": Sequence max of rejection with ``exp(log r) - 1 - log r`` (upper bound only)\n names automatically. Default: None\n\n rollout_rs_threshold (Optional[Union[str, float]]): Threshold specification for rejection sampling.\n Provide one value per option (single entry is broadcast when multiple options are supplied).\n Ratio-based modes (``*k1``) expect ``lower_upper`` strings; supplying a single float implies\n only the upper ratio bound, with the lower bound inferred as its reciprocal. Divergence modes\n (k2/k3) expect positive upper bounds (float or string). Default: None\n\n bypass_mode (bool): Operating mode - bypass or decoupled.\n - True: Bypass mode - reuse rollout_log_prob as old_log_prob (2 policies)\n Uses compute_policy_loss_bypass_mode() with loss_type selection\n - False: Decoupled mode - compute old_log_prob separately (3 policies)\n Uses standard PPO loss with IS weight correction\n Default: False (decoupled mode)\n\n loss_type (str): Loss function type in bypass mode (bypass_mode=True).\n - \"reinforce\": REINFORCE-style policy gradient with explicit IS weights\n L = -E[w * log π(a|s) * A] where w = π_current / π_rollout\n - \"ppo_clip\": PPO clipped objective (IS handled by ratio, no explicit weights)\n L = -E[min(r*A, clip(r)*A)] where r = π_current / π_rollout\n Default: \"ppo_clip\"\n\n Example:\n # Create with defaults\n config = RolloutCorrectionConfig()\n\n # Decoupled PPO mode presets (3 policies: π_rollout, π_old, π_θ)\n # IS weights correct for gap between π_old and π_rollout\n config = RolloutCorrectionConfig.decoupled_token_is() # Token-TIS\n config = 
RolloutCorrectionConfig.decoupled_seq_is() # Seq-TIS\n config = RolloutCorrectionConfig.decoupled_seq_is_rs() # Seq-MIS\n config = RolloutCorrectionConfig.decoupled_geo_rs() # Geo-RS (ratio mode)\n\n # Bypass mode presets (2 policies: π_rollout = π_old, π_θ)\n # loss_type controls the loss function\n # PPO-clip presets (ratio handles IS, so no separate IS weights needed):\n config = RolloutCorrectionConfig.bypass_ppo_clip() # PPO-clip only\n config = RolloutCorrectionConfig.bypass_ppo_clip_geo_rs() # PPO-clip + Geo-RS\n config = RolloutCorrectionConfig.bypass_ppo_clip_k3_rs() # PPO-clip + K3-RS\n # REINFORCE presets (explicit IS weights):\n config = RolloutCorrectionConfig.bypass_pg_is() # REINFORCE + Seq-TIS\n config = RolloutCorrectionConfig.bypass_pg_geo_rs() # REINFORCE + Geo-RS\n config = RolloutCorrectionConfig.bypass_pg_geo_rs_seq_tis() # REINFORCE + Geo-RS + Seq-TIS\n config = RolloutCorrectionConfig.bypass_pg_geo_rs_token_tis() # REINFORCE + Geo-RS + Token-TIS\n\n # Decoupled Geometric ratio presets (length-normalized IS ratio)\n config = RolloutCorrectionConfig.decoupled_geo_rs_seq_tis() # Decoupled Geo-RS + Seq-TIS\n config = RolloutCorrectionConfig.decoupled_geo_rs_token_tis() # Decoupled Geo-RS + Token-TIS\n\n # Decoupled K3 KL Estimator presets (more stable for small KL values)\n config = RolloutCorrectionConfig.decoupled_k3_rs() # Decoupled K3-RS\n config = RolloutCorrectionConfig.decoupled_k3_rs_seq_tis() # Decoupled K3-RS + Seq-TIS\n config = RolloutCorrectionConfig.decoupled_k3_rs_token_tis() # Decoupled K3-RS + Token-TIS\n\n Reference:\n Liu, Li, Fu, Wang, Liu, Shen (2025)\n \"When Speed Kills Stability: Demystifying RL Collapse from the Training-Inference Mismatch\"\n https://richardli.xyz/rl-collapse\n \"\"\"\n\n rollout_is: Optional[str] = \"sequence\"\n rollout_is_threshold: float = 2.0\n rollout_is_batch_normalize: bool = False\n rollout_rs: Optional[str] = None\n rollout_rs_threshold: Optional[str | float] = None\n bypass_mode: bool = 
False\n loss_type: str = \"ppo_clip\"\n\n @classmethod\n def decoupled_token_is(cls, threshold: float = 2.0) -> \"RolloutCorrectionConfig\":\n \"\"\"Decoupled Mode with Token-level Importance Sampling.\n\n IS weight correction at token level in decoupled mode (three policies).\n\n Args:\n threshold (float): Upper threshold for IS weights. Default: 2.0\n\n Returns:\n RolloutCorrectionConfig configured for decoupled mode with token-level IS\n \"\"\"\n return cls(rollout_is=\"token\", rollout_is_threshold=threshold, rollout_rs=None)\n\n @classmethod\n def decoupled_seq_is(cls, threshold: float = 2.0) -> \"RolloutCorrectionConfig\":\n \"\"\"Decoupled Mode with Sequence-level Importance Sampling.\n\n IS weight correction at sequence level in decoupled mode (three policies).\n\n Args:\n threshold (float): Upper threshold for IS weights. Default: 2.0\n\n Returns:\n RolloutCorrectionConfig configured for decoupled mode with sequence-level IS\n \"\"\"\n return cls(rollout_is=\"sequence\", rollout_is_threshold=threshold, rollout_rs=None)\n\n @classmethod\n def decoupled_seq_is_rs(\n cls,\n is_threshold: float = 2.0,\n rs_threshold: Optional[str | float] = \"0.5_2.0\",\n ) -> \"RolloutCorrectionConfig\":\n \"\"\"Decoupled Mode with Sequence-level IS + Rejection Sampling.\n\n Sequence-level IS with sequence-level rejection sampling in decoupled mode.\n Rejects entire sequences based on sequence-level IS weight.\n\n Args:\n is_threshold (float): Upper threshold for IS weights. Default: 2.0\n rs_threshold (Optional[Union[str, float]]): Upper threshold for rejection sampling. 
Default: 0.5_2.0\n\n Returns:\n RolloutCorrectionConfig configured for decoupled mode with sequence IS + RS\n \"\"\"\n return cls(\n rollout_is=\"sequence\",\n rollout_is_threshold=is_threshold,\n rollout_rs=\"seq_sum_k1\",\n rollout_rs_threshold=rs_threshold,\n )\n\n @classmethod\n def decoupled_geo_rs(\n cls,\n rs_threshold: Optional[str | float] = \"0.999_1.001\",\n ) -> \"RolloutCorrectionConfig\":\n \"\"\"Decoupled Mode with Geometric Mean Rejection Sampling (ratio-based).\n\n Uses geometric mean IS ratio E[log(r)] for rejection sampling at sequence level.\n This is a ratio-based mode (ideal = 0.0) with [lower, upper] threshold bounds.\n Length-normalized but still uses IS ratio semantics.\n\n Args:\n rs_threshold (Optional[Union[str, float]]): Geometric RS threshold (upper). Default: 0.999_1.001 (±0.1%)\n\n Returns:\n RolloutCorrectionConfig configured for decoupled mode with Geo-RS\n \"\"\"\n return cls(\n rollout_is=None,\n rollout_rs=\"seq_mean_k1\",\n rollout_rs_threshold=rs_threshold,\n )\n\n @classmethod\n def bypass_ppo_clip(cls) -> \"RolloutCorrectionConfig\":\n \"\"\"Bypass mode with PPO-clip loss.\n\n PPO clipped objective in bypass mode. 
The PPO ratio = π_θ/π_rollout\n already handles IS correction, so no explicit IS weights are applied.\n\n Skips old_log_prob computation for faster execution (2 policies instead of 3).\n\n Returns:\n RolloutCorrectionConfig configured for bypass mode with PPO-clip\n \"\"\"\n return cls(\n rollout_is=None,\n rollout_rs=None,\n bypass_mode=True,\n loss_type=\"ppo_clip\",\n )\n\n @classmethod\n def bypass_ppo_clip_geo_rs(\n cls,\n rs_threshold: Optional[str | float] = \"0.999_1.001\",\n ) -> \"RolloutCorrectionConfig\":\n \"\"\"Bypass mode with PPO-clip loss and Geometric Mean RS (ratio-based).\n\n PPO clipped objective in bypass mode with geometric mean IS ratio RS.\n Uses E[log(r)] (ideal = 0.0) with [lower, upper] threshold bounds.\n\n Args:\n rs_threshold (Optional[Union[str, float]]): Geometric RS threshold (upper). Default: 0.999_1.001 (±0.1%)\n\n Returns:\n RolloutCorrectionConfig configured for bypass mode with PPO-clip + Geo-RS\n \"\"\"\n return cls(\n rollout_is=None,\n rollout_rs=\"seq_mean_k1\",\n rollout_rs_threshold=rs_threshold,\n bypass_mode=True,\n loss_type=\"ppo_clip\",\n )\n\n @classmethod\n def bypass_ppo_clip_k3_rs(\n cls,\n rs_threshold: float = 0.01,\n ) -> \"RolloutCorrectionConfig\":\n \"\"\"Bypass mode with PPO-clip loss and K3 Rejection Sampling.\n\n PPO clipped objective in bypass mode with K3 KL estimator RS to mask outliers.\n K3 is more stable than K1 for small KL values.\n The PPO ratio = π_θ/π_rollout already handles IS correction.\n\n Args:\n rs_threshold (float): Max allowed K3 divergence. 
Default: 0.01\n\n Returns:\n RolloutCorrectionConfig configured for bypass mode with PPO-clip + K3-RS\n \"\"\"\n return cls(\n rollout_is=None,\n rollout_rs=\"seq_mean_k3\",\n rollout_rs_threshold=rs_threshold,\n bypass_mode=True,\n loss_type=\"ppo_clip\",\n )\n\n @classmethod\n def bypass_pg_is(cls, threshold: float = 2.0) -> \"RolloutCorrectionConfig\":\n \"\"\"Bypass mode with REINFORCE loss and IS Correction.\n\n Uses REINFORCE loss with explicit IS correction in bypass mode.\n No PPO clipping.\n\n Args:\n threshold (float): Upper threshold for IS weights. Default: 2.0\n\n Returns:\n RolloutCorrectionConfig configured for bypass mode with REINFORCE + IS\n \"\"\"\n return cls(\n rollout_is=\"sequence\",\n rollout_is_threshold=threshold,\n rollout_rs=None,\n bypass_mode=True,\n loss_type=\"reinforce\",\n )\n\n @classmethod\n def bypass_pg_geo_rs(\n cls,\n rs_threshold: Optional[str | float] = \"0.999_1.001\",\n ) -> \"RolloutCorrectionConfig\":\n \"\"\"Bypass mode with REINFORCE loss and Geometric Mean RS (ratio-based).\n\n REINFORCE with geometric mean IS ratio rejection sampling in bypass mode.\n Uses E[log(r)] (ideal = 0.0) with [lower, upper] threshold bounds.\n\n Args:\n rs_threshold (Optional[Union[str, float]]): Geometric RS threshold (upper). Default: 0.999_1.001 (±0.1%)\n\n Returns:\n RolloutCorrectionConfig configured for bypass mode with REINFORCE + Geo-RS\n \"\"\"\n return cls(\n rollout_is=None,\n rollout_rs=\"seq_mean_k1\",\n rollout_rs_threshold=rs_threshold,\n bypass_mode=True,\n loss_type=\"reinforce\",\n )\n\n @classmethod\n def decoupled_geo_rs_seq_tis(\n cls,\n is_threshold: float = 2.0,\n rs_threshold: Optional[str | float] = \"0.999_1.001\",\n ) -> \"RolloutCorrectionConfig\":\n \"\"\"Decoupled mode with Geometric Mean RS and Sequence-level Truncated IS (ratio-based).\n\n Combines the Geometric Mean Filter (ratio-based validity check) with\n Clipped Sequence Weight (debiasing). 
Uses E[log(r)] (ideal = 0.0).\n\n Args:\n is_threshold (float): Upper threshold for sequence IS weights. Default: 2.0\n rs_threshold (Optional[Union[str, float]]): Geometric RS threshold (upper). Default: 0.999_1.001 (±0.1%)\n\n Returns:\n RolloutCorrectionConfig configured for Geo-RS-Seq-TIS\n \"\"\"\n return cls(\n rollout_is=\"sequence\",\n rollout_is_threshold=is_threshold,\n rollout_rs=\"seq_mean_k1\",\n rollout_rs_threshold=rs_threshold,\n )\n\n @classmethod\n def decoupled_geo_rs_token_tis(\n cls,\n is_threshold: float = 2.0,\n rs_threshold: Optional[str | float] = \"0.999_1.001\",\n ) -> \"RolloutCorrectionConfig\":\n \"\"\"Decoupled mode with Geometric Mean RS and Token-level Truncated IS (ratio-based).\n\n Combines the Geometric Mean Filter (ratio-based validity check) with\n Token-level IS weights. Uses E[log(r)] (ideal = 0.0).\n\n Args:\n is_threshold (float): Upper threshold for token IS weights. Default: 2.0\n rs_threshold (Optional[Union[str, float]]): Geometric RS threshold (upper). Default: 0.999_1.001 (±0.1%)\n\n Returns:\n RolloutCorrectionConfig configured for Geo-RS-Token-TIS\n \"\"\"\n return cls(\n rollout_is=\"token\",\n rollout_is_threshold=is_threshold,\n rollout_rs=\"seq_mean_k1\",\n rollout_rs_threshold=rs_threshold,\n )\n\n @classmethod\n def bypass_pg_geo_rs_seq_tis(\n cls,\n is_threshold: float = 2.0,\n rs_threshold: Optional[str | float] = \"0.999_1.001\",\n ) -> \"RolloutCorrectionConfig\":\n \"\"\"Bypass mode with REINFORCE loss, Geo-RS, and Sequence-level IS.\n\n Combines geometric mean IS ratio rejection with sequence-level IS\n in bypass mode with REINFORCE loss (no PPO clipping).\n Uses E[log(r)] (ideal = 0.0) with [lower, upper] threshold bounds.\n\n Args:\n is_threshold (float): Upper threshold for sequence IS weights. Default: 2.0\n rs_threshold (Optional[Union[str, float]]): Geometric RS threshold (upper). 
Default: 0.999_1.001 (±0.1%)\n\n Returns:\n RolloutCorrectionConfig configured for bypass mode with REINFORCE + Geo-RS + Seq-TIS\n \"\"\"\n return cls(\n rollout_is=\"sequence\",\n rollout_is_threshold=is_threshold,\n rollout_rs=\"seq_mean_k1\",\n rollout_rs_threshold=rs_threshold,\n bypass_mode=True,\n loss_type=\"reinforce\",\n )\n\n @classmethod\n def bypass_pg_geo_rs_token_tis(\n cls,\n is_threshold: float = 2.0,\n rs_threshold: Optional[str | float] = \"0.999_1.001\",\n ) -> \"RolloutCorrectionConfig\":\n \"\"\"Bypass mode with REINFORCE loss, Geo-RS, and Token-level IS.\n\n Combines geometric mean IS ratio rejection with token-level IS weights\n in bypass mode with REINFORCE loss (no PPO clipping).\n Uses E[log(r)] (ideal = 0.0) with [lower, upper] threshold bounds.\n\n Token-level IS has lower variance but introduces bias.\n\n Args:\n is_threshold (float): Upper threshold for token IS weights. Default: 2.0\n rs_threshold (Optional[Union[str, float]]): Geometric RS threshold (upper). Default: 0.999_1.001 (±0.1%)\n\n Returns:\n RolloutCorrectionConfig configured for bypass mode with REINFORCE + Geo-RS + Token-TIS\n \"\"\"\n return cls(\n rollout_is=\"token\",\n rollout_is_threshold=is_threshold,\n rollout_rs=\"seq_mean_k1\",\n rollout_rs_threshold=rs_threshold,\n bypass_mode=True,\n loss_type=\"reinforce\",\n )\n\n @classmethod\n def decoupled_k3_rs(\n cls,\n rs_threshold: float = 0.01,\n ) -> \"RolloutCorrectionConfig\":\n \"\"\"Decoupled mode with K3 KL Estimator Rejection Sampling.\n\n Uses K3 KL estimator at sequence level for rejection sampling.\n K3 = E[r - log(r) - 1] where r = π_train/π_rollout.\n More stable than geometric mean for small KL values.\n\n K3 >= 0 always (equals 0 when policies match exactly).\n\n Args:\n rs_threshold (float): Max allowed K3 divergence. 
Default: 0.01\n Typical range: 0.001-0.1\n\n Returns:\n RolloutCorrectionConfig configured for K3 RS\n \"\"\"\n return cls(\n rollout_is=None,\n rollout_rs=\"seq_mean_k3\",\n rollout_rs_threshold=rs_threshold,\n )\n\n @classmethod\n def decoupled_k3_rs_seq_tis(\n cls,\n is_threshold: float = 2.0,\n rs_threshold: float = 0.01,\n ) -> \"RolloutCorrectionConfig\":\n \"\"\"Decoupled mode with K3 RS and Sequence-level Truncated IS.\n\n Combines K3 KL estimator rejection with sequence-level IS weights.\n K3 provides more stable outlier detection than geometric mean.\n\n Args:\n is_threshold (float): Upper threshold for sequence IS weights. Default: 2.0\n rs_threshold (float): Max allowed K3 divergence. Default: 0.01\n\n Returns:\n RolloutCorrectionConfig configured for K3-RS-Seq-TIS\n \"\"\"\n return cls(\n rollout_is=\"sequence\",\n rollout_is_threshold=is_threshold,\n rollout_rs=\"seq_mean_k3\",\n rollout_rs_threshold=rs_threshold,\n )\n\n @classmethod\n def decoupled_k3_rs_token_tis(\n cls,\n is_threshold: float = 2.0,\n rs_threshold: float = 0.01,\n ) -> \"RolloutCorrectionConfig\":\n \"\"\"Decoupled mode with K3 RS and Token-level Truncated IS.\n\n Combines K3 KL estimator rejection with token-level IS weights.\n K3 provides more stable outlier detection than geometric mean.\n Token-level IS has lower variance but introduces bias.\n\n Args:\n is_threshold (float): Upper threshold for token IS weights. Default: 2.0\n rs_threshold (float): Max allowed K3 divergence. 
Default: 0.01\n\n Returns:\n RolloutCorrectionConfig configured for K3-RS-Token-TIS\n \"\"\"\n return cls(\n rollout_is=\"token\",\n rollout_is_threshold=is_threshold,\n rollout_rs=\"seq_mean_k3\",\n rollout_rs_threshold=rs_threshold,\n )\n\n @classmethod\n def disabled(cls) -> \"RolloutCorrectionConfig\":\n \"\"\"Disabled - Metrics Only Mode.\n\n Computes and logs off-policy metrics without applying correction.\n\n Returns:\n RolloutCorrectionConfig with all correction disabled\n \"\"\"\n return cls(rollout_is=None, rollout_rs=None)\n\n\n@dataclass\nclass AlgoConfig(BaseConfig):\n \"\"\"Configuration for the algorithm.\n\n The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config.\n\n Args:\n gamma (float): Discount factor for future rewards.\n lam (float): Trade-off between bias and variance in the GAE estimator.\n adv_estimator (str): Advantage estimator type: \"gae\", \"grpo\", \"reinforce_plus_plus\", etc.\n norm_adv_by_std_in_grpo (bool): Whether to normalize advantages by std (specific to GRPO).\n use_kl_in_reward (bool): Whether to enable in-reward KL penalty.\n kl_penalty (str): How to estimate KL divergence: \"kl\", \"abs\", \"mse\", \"low_var_kl\", or \"full\".\n kl_ctrl (KLControlConfig): KL control configuration.\n use_pf_ppo (bool): Whether to enable preference feedback PPO.\n pf_ppo (dict[str, Any]): Preference feedback PPO settings.\n filter_groups (Optional[FilterGroupsConfig]): Filter groups configuration, used in DAPO and Entropy\n rollout_correction (Optional[RolloutCorrectionConfig]): Rollout Correction configuration.\n Addresses off-policy issues from policy mismatch, model staleness, and general distribution shifts.\n\n Set to None to disable entirely. 
Use factory methods for common presets:\n - RolloutCorrectionConfig.decoupled_token_is() - Decoupled mode with token-level IS\n - RolloutCorrectionConfig.decoupled_seq_is() - Decoupled mode with sequence-level IS\n - RolloutCorrectionConfig.decoupled_seq_is_rs() - Decoupled mode with sequence IS + RS\n - RolloutCorrectionConfig.decoupled_k1_rs() - Decoupled mode with K1-RS (divergence)\n - RolloutCorrectionConfig.decoupled_geo_rs() - Decoupled mode with Geo-RS (ratio)\n - RolloutCorrectionConfig.bypass_ppo_clip() - Bypass mode with PPO-clip\n - RolloutCorrectionConfig.bypass_ppo_clip_k1_rs() - Bypass mode with PPO-clip + K1-RS\n - RolloutCorrectionConfig.bypass_pg_is() - Bypass mode with REINFORCE + IS\n - RolloutCorrectionConfig.bypass_pg_k1_rs() - Bypass mode with REINFORCE + K1-RS\n\n For backward compatibility, you can still pass a dict, which will be converted to\n RolloutCorrectionConfig automatically.\n \"\"\"\n\n gamma: float = 1.0\n lam: float = 1.0\n adv_estimator: str = \"gae\"\n norm_adv_by_std_in_grpo: bool = True\n use_kl_in_reward: bool = False\n kl_penalty: str = \"kl\"\n kl_ctrl: KLControlConfig = field(default_factory=KLControlConfig)\n use_pf_ppo: bool = False\n pf_ppo: dict[str, Any] = field(default_factory=dict)\n filter_groups: Optional[FilterGroupsConfig] = None\n # Rollout Correction: corrects off-policy issues (policy mismatch, model staleness, distribution shifts)\n # Set to None to disable, use RolloutCorrectionConfig presets (e.g., .tis(), .mis()), or pass dict\n rollout_correction: Optional[RolloutCorrectionConfig] = None\n"} {"file_name": "verl__trainer__ppo__core_algos.py", "text": "# Copyright 2024 Bytedance Ltd. and/or its affiliates\n# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nCore functions to implement PPO algorithms.\nThe function implemented in this file should be used by trainer with different distributed strategies to\nimplement PPO-like algorithms.\n\"\"\"\n\n__all__ = [\"register_adv_est\", \"get_adv_estimator_fn\", \"AdvantageEstimator\"]\n\nfrom collections import defaultdict\nfrom enum import Enum\nfrom typing import Any, Callable, Optional\n\nimport numpy as np\nimport torch\nfrom omegaconf import DictConfig\n\nimport verl.utils.torch_functional as verl_F\nfrom verl.trainer.config import AlgoConfig\nfrom verl.utils import as_torch_index, group_mean_std\nfrom verl.utils.import_utils import deprecated\nfrom verl.workers.config import ActorConfig\n\nPolicyLossFn = Callable[\n [\n torch.Tensor, # old_log_prob\n torch.Tensor, # log_prob\n torch.Tensor, # advantages\n torch.Tensor, # response_mask\n str, # loss_agg_mode\n Optional[DictConfig | ActorConfig], # config\n torch.Tensor | None, # rollout_log_probs\n ],\n tuple[torch.Tensor, dict[str, Any]],\n]\n\nPOLICY_LOSS_REGISTRY: dict[str, PolicyLossFn] = {}\n\n\ndef register_policy_loss(name: str) -> Callable[[PolicyLossFn], PolicyLossFn]:\n \"\"\"Register a policy loss function with the given name.\n\n Args:\n name (str): The name to register the policy loss function under.\n\n Returns:\n function: Decorator function that registers the policy loss function.\n \"\"\"\n\n def decorator(func: PolicyLossFn) 
def get_policy_loss_fn(name):
    """Look up a registered policy loss function by name.

    Args:
        name: `(str)`
            The name of the policy loss.

    Returns:
        `(callable)`: The policy loss function registered under ``name``.

    Raises:
        ValueError: If no policy loss has been registered under ``name``.
    """
    if name not in POLICY_LOSS_REGISTRY:
        supported = list(POLICY_LOSS_REGISTRY.keys())
        raise ValueError(f"Unsupported loss mode: {name}. Supported modes are: {supported}")
    return POLICY_LOSS_REGISTRY[name]
def get_adv_estimator_fn(name_or_enum):
    """Get the advantage estimator function with a given name.

    Args:
        name_or_enum: `(str)` or `(AdvantageEstimator)`
            The name or enum of the advantage estimator.

    Returns:
        `(callable)`: The advantage estimator function.

    Raises:
        ValueError: If no estimator has been registered under the resolved name.
    """
    name = name_or_enum.value if isinstance(name_or_enum, Enum) else name_or_enum
    if name not in ADV_ESTIMATOR_REGISTRY:
        # Fix: the previous message read "Unknown advantage estimator simply: ..."
        # ("simply" leaked in from the registration comments). Also list the
        # registered estimators, mirroring get_policy_loss_fn's error style.
        raise ValueError(
            f"Unknown advantage estimator: {name}. Supported estimators are: {list(ADV_ESTIMATOR_REGISTRY.keys())}"
        )
    return ADV_ESTIMATOR_REGISTRY[name]
horizon is not positive.\n \"\"\"\n if kl_ctrl.type == \"fixed\":\n return FixedKLController(kl_coef=kl_ctrl.kl_coef)\n elif kl_ctrl.type == \"adaptive\":\n assert kl_ctrl.horizon > 0, f\"horizon must be larger than 0. Got {kl_ctrl.horizon}\"\n return AdaptiveKLController(init_kl_coef=kl_ctrl.kl_coef, target_kl=kl_ctrl.target_kl, horizon=kl_ctrl.horizon)\n else:\n raise NotImplementedError\n\n\n@register_adv_est(AdvantageEstimator.GAE) # or simply: @register_adv_est(\"gae\")\ndef compute_gae_advantage_return(\n token_level_rewards: torch.Tensor,\n values: torch.Tensor,\n response_mask: torch.Tensor,\n gamma: torch.Tensor,\n lam: torch.Tensor,\n):\n \"\"\"Adapted from https://github.com/huggingface/trl/blob/main/trl/trainer/ppo_trainer.py\n\n Args:\n token_level_rewards: `(torch.Tensor)`\n shape is (bs, response_length)\n values: `(torch.Tensor)`\n shape is (bs, response_length)\n response_mask: `(torch.Tensor)`\n shape is (bs, response_length). [EOS] mask. The token after [EOS] have mask zero.\n gamma is `(float)`\n discounted factor used in RL\n lam: `(float)`\n lambda value when computing Generalized Advantage Estimation (https://arxiv.org/abs/1506.02438)\n\n Returns:\n advantages: `(torch.Tensor)`\n shape: (bs, response_length)\n Returns: `(torch.Tensor)`\n shape: (bs, response_length)\n\n \"\"\"\n with torch.no_grad():\n nextvalues = 0\n lastgaelam = 0\n advantages_reversed = []\n gen_len = token_level_rewards.shape[-1]\n\n for t in reversed(range(gen_len)):\n delta = token_level_rewards[:, t] + gamma * nextvalues - values[:, t]\n lastgaelam_ = delta + gamma * lam * lastgaelam\n\n # skip values and TD-error on observation tokens\n nextvalues = values[:, t] * response_mask[:, t] + (1 - response_mask[:, t]) * nextvalues\n lastgaelam = lastgaelam_ * response_mask[:, t] + (1 - response_mask[:, t]) * lastgaelam\n\n advantages_reversed.append(lastgaelam)\n advantages = torch.stack(advantages_reversed[::-1], dim=1)\n\n returns = advantages + values\n advantages = 
verl_F.masked_whiten(advantages, response_mask)\n return advantages, returns\n\n\n# NOTE(sgm): this implementation only consider outcome supervision, where the reward is a scalar.\n@register_adv_est(AdvantageEstimator.GRPO) # or simply: @register_adv_est(\"grpo\")\ndef compute_grpo_outcome_advantage(\n token_level_rewards: torch.Tensor,\n response_mask: torch.Tensor,\n index: np.ndarray,\n epsilon: float = 1e-6,\n norm_adv_by_std_in_grpo: bool = True,\n config: Optional[AlgoConfig] = None,\n) -> tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Compute advantage for GRPO, operating only on Outcome reward\n (with only one scalar reward for each response).\n\n Args:\n token_level_rewards: `(torch.Tensor)`\n shape is (bs, response_length)\n response_mask: `(torch.Tensor)`\n shape is (bs, response_length)\n index: `(np.ndarray)`\n index array for grouping\n epsilon: `(float)`\n small value to avoid division by zero\n norm_adv_by_std_in_grpo: `(bool)`\n whether to scale the GRPO advantage\n config: `(Optional[AlgoConfig])`\n algorithm configuration object\n\n Note:\n If norm_adv_by_std_in_grpo is True, the advantage is scaled by the std, as in the original GRPO.\n If False, the advantage is not scaled, as in Dr.GRPO (https://arxiv.org/abs/2503.20783).\n\n Returns:\n advantages: `(torch.Tensor)`\n shape is (bs, response_length)\n Returns: `(torch.Tensor)`\n shape is (bs, response_length)\n \"\"\"\n scores = token_level_rewards.sum(dim=-1)\n\n id2score = defaultdict(list)\n id2mean = {}\n id2std = {}\n\n with torch.no_grad():\n bsz = scores.shape[0]\n for i in range(bsz):\n id2score[index[i]].append(scores[i])\n for idx in id2score:\n if len(id2score[idx]) == 1:\n id2mean[idx] = torch.tensor(0.0)\n id2std[idx] = torch.tensor(1.0)\n elif len(id2score[idx]) > 1:\n scores_tensor = torch.stack(id2score[idx])\n id2mean[idx] = torch.mean(scores_tensor)\n id2std[idx] = torch.std(scores_tensor)\n else:\n raise ValueError(f\"no score in prompt index: {idx}\")\n for i in range(bsz):\n 
@register_adv_est(AdvantageEstimator.GRPO_VECTORIZED)
def compute_grpo_vectorized_outcome_advantage(
    token_level_rewards: torch.Tensor,
    response_mask: torch.Tensor,
    index: np.ndarray,
    epsilon: float = 1e-6,
    norm_adv_by_std_in_grpo: bool = True,
    config: Optional[AlgoConfig] = None,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Vectorized GRPO advantage, outcome reward only.

    For each prompt group g the per-sequence scalar is
    a_i = (r_i - mu_g) / sigma_g, or just r_i - mu_g when
    ``norm_adv_by_std_in_grpo`` is False; the scalar is then broadcast over
    the token dimension and masked by ``response_mask``.

    Args:
        token_level_rewards: `(torch.Tensor)` shape (bs, response_length).
        response_mask: `(torch.Tensor)` shape (bs, response_length).
        index: `(np.ndarray)` per-sample prompt index used for grouping.
        epsilon: `(float)` numerical-stability constant.
        norm_adv_by_std_in_grpo: `(bool)` whether to divide by the group std.
        config: `(Optional[AlgoConfig])` unused; kept for estimator-registry signature parity.

    Returns:
        (advantages, returns), both of shape (bs, response_length); GRPO
        reuses the advantage tensor for both.
    """
    with torch.no_grad():
        seq_scores = token_level_rewards.sum(dim=-1)
        group_ids = as_torch_index(index, device=seq_scores.device)
        group_mean, group_std, _ = group_mean_std(seq_scores, group_ids, eps=epsilon, device=seq_scores.device)
        centered = seq_scores - group_mean[group_ids]
        if norm_adv_by_std_in_grpo:
            per_seq = centered / (group_std[group_ids] + epsilon)
        else:
            per_seq = centered
        advantages = per_seq.unsqueeze(-1) * response_mask
    return advantages, advantages
for numerical stability\n config: (AlgoConfig) algorithm settings, which contains \"norm_adv_by_std_in_grpo\"\n\n Returns:\n advantages: (bs, response_length)\n returns: (bs, response_length)\n \"\"\"\n assert config is not None\n # if True, normalize advantage by std within group\n norm_adv_by_std_in_grpo = config.get(\"norm_adv_by_std_in_grpo\", True)\n scores = token_level_rewards.sum(dim=-1) # (bs,)\n advantages = torch.zeros_like(scores)\n\n id2scores = defaultdict(list)\n id2indices = defaultdict(list)\n\n with torch.no_grad():\n bsz = scores.shape[0]\n for i in range(bsz):\n idx = index[i]\n id2scores[idx].append(scores[i])\n id2indices[idx].append(i)\n\n for idx in id2scores:\n rewards = torch.stack(id2scores[idx]) # (k,)\n if rewards.numel() < 2:\n raise ValueError(\n f\"Pass@k requires at least 2 samples per group. Got {rewards.numel()} for group {idx}.\"\n )\n topk, topk_idx = torch.topk(rewards, 2)\n r_max, r_second_max = topk[0], topk[1]\n i_max = id2indices[idx][topk_idx[0].item()]\n advantage = r_max - r_second_max\n if norm_adv_by_std_in_grpo:\n std = torch.std(rewards)\n advantage = advantage / (std + epsilon)\n advantages[i_max] = advantage\n\n advantages = advantages.unsqueeze(-1) * response_mask\n return advantages, advantages\n\n\n@register_adv_est(\n AdvantageEstimator.REINFORCE_PLUS_PLUS_BASELINE\n) # or simply: @register_adv_est(\"reinforce_plus_plus_baseline\")\ndef compute_reinforce_plus_plus_baseline_outcome_advantage(\n token_level_rewards: torch.Tensor,\n response_mask: torch.Tensor,\n index: torch.Tensor,\n epsilon: float = 1e-6,\n config: Optional[AlgoConfig] = None,\n **kwargs,\n) -> tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Compute advantage for RF++-baseline (https://arxiv.org/abs/2501.03262), operating only on Outcome reward\n (with only one scalar reward for each response).\n\n Args:\n token_level_rewards: `(torch.Tensor)`\n shape: (bs, response_length)\n response_mask: `(torch.Tensor)`\n shape: (bs, response_length)\n 
config: (AlgoConfig) algorithm config\n\n Returns:\n advantages: `(torch.Tensor)`\n shape: (bs, response_length)\n Returns: `(torch.Tensor)`\n shape: (bs, response_length)\n \"\"\"\n response_length = token_level_rewards.shape[-1]\n scores = token_level_rewards.sum(dim=-1)\n\n id2score = defaultdict(list)\n id2mean = {}\n\n with torch.no_grad():\n bsz = scores.shape[0]\n for i in range(bsz):\n id2score[index[i]].append(scores[i])\n for idx in id2score:\n if len(id2score[idx]) == 1:\n id2mean[idx] = torch.tensor(0.0)\n elif len(id2score[idx]) > 1:\n id2mean[idx] = torch.mean(torch.stack(id2score[idx]))\n else:\n raise ValueError(f\"no score in prompt index: {idx}\")\n for i in range(bsz):\n scores[i] = scores[i] - id2mean[index[i]]\n\n scores = scores.unsqueeze(-1).tile([1, response_length]) * response_mask\n scores = verl_F.masked_whiten(scores, response_mask) * response_mask\n\n return scores, scores\n\n\n@register_adv_est(AdvantageEstimator.RLOO) # or simply: @register_adv_est(\"rloo\")\ndef compute_rloo_outcome_advantage(\n token_level_rewards: torch.Tensor,\n response_mask: torch.Tensor,\n index: np.ndarray,\n epsilon: float = 1e-6,\n config: Optional[AlgoConfig] = None,\n **kwargs,\n) -> tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Compute advantage for RLOO based on https://arxiv.org/abs/2402.14740\n\n Args:\n token_level_rewards: `(torch.Tensor)`\n shape: (bs, response_length)\n response_mask: `(torch.Tensor)`\n shape: (bs, response_length)\n config: (AlgoConfig) algorithm config\n\n Returns:\n advantages: `(torch.Tensor)`\n shape: (bs, response_length)\n Returns: `(torch.Tensor)`\n shape: (bs, response_length)\n \"\"\"\n scores = token_level_rewards.sum(dim=-1)\n\n id2score = defaultdict(list)\n id2mean = {}\n\n with torch.no_grad():\n bsz = scores.shape[0]\n for i in range(bsz):\n id2score[index[i]].append(scores[i])\n for idx in id2score:\n if len(id2score[idx]) == 1:\n id2mean[idx] = torch.tensor(0.0)\n elif len(id2score[idx]) > 1:\n id2mean[idx] = 
@register_adv_est(AdvantageEstimator.OPO)  # or simply: @register_adv_est("opo")
def compute_opo_outcome_advantage(
    token_level_rewards: torch.Tensor,
    response_mask: torch.Tensor,
    index: np.ndarray,
    epsilon: float = 1e-6,
    config: Optional[AlgoConfig] = None,
    **kwargs,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Compute advantage for OPO (https://arxiv.org/pdf/2505.23585), outcome reward only.

    OPO subtracts a length-weighted group baseline: for every prompt group the
    baseline is sum(len_i * score_i) / sum(len_i), so longer responses
    contribute proportionally more.

    Args:
        token_level_rewards: `(torch.Tensor)`
            shape: (bs, response_length)
        response_mask: `(torch.Tensor)`
            shape: (bs, response_length)
        index: `(np.ndarray)` per-sample prompt index used for grouping
        epsilon: `(float)` unused here; kept for registry signature parity
        config: (AlgoConfig) algorithm config

    Returns:
        advantages: `(torch.Tensor)`
            shape: (bs, response_length)
        Returns: `(torch.Tensor)`
            shape: (bs, response_length)
    """
    valid_lengths = response_mask.sum(dim=-1)
    scores = token_level_rewards.sum(dim=-1)

    group_scores = defaultdict(list)
    group_lengths = defaultdict(list)
    group_baseline = {}

    with torch.no_grad():
        batch_size = scores.shape[0]
        # Phase 1: bucket per-sequence scores and valid lengths by prompt.
        for row in range(batch_size):
            key = index[row]
            group_scores[key].append(scores[row])
            group_lengths[key].append(valid_lengths[row])

        # Phase 2: length-weighted baseline per group (0 for singleton groups).
        for key, members in group_scores.items():
            if len(members) == 1:
                group_baseline[key] = torch.tensor(0.0)
            elif len(members) > 1:
                member_scores = torch.stack(members)
                member_lengths = torch.stack(group_lengths[key])
                group_baseline[key] = (member_lengths * member_scores).sum() / member_lengths.sum()
            else:
                raise ValueError(f"no score in prompt index: {key}")

        # Phase 3: center each score by its group baseline, then broadcast.
        for row in range(batch_size):
            scores[row] = scores[row] - group_baseline[index[row]]
        scores = scores.unsqueeze(-1) * response_mask

    return scores, scores
simply: @register_adv_est(\"reinforce_plus_plus\")\ndef compute_reinforce_plus_plus_outcome_advantage(\n token_level_rewards: torch.Tensor, response_mask: torch.Tensor, config: Optional[AlgoConfig] = None, **kwargs\n) -> tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Compute advantage for REINFORCE++.\n This implementation is based on the paper: https://arxiv.org/abs/2501.03262\n\n Args:\n token_level_rewards: `(torch.Tensor)`\n shape: (bs, response_length)\n response_mask: `(torch.Tensor)`\n shape: (bs, response_length)\n config: (AlgoConfig) algorithm config\n\n Returns:\n advantages: `(torch.Tensor)`\n shape: (bs, response_length)\n Returns: `(torch.Tensor)`\n shape: (bs, response_length)\n \"\"\"\n assert config is not None\n gamma = config.gamma\n with torch.no_grad():\n returns = torch.zeros_like(token_level_rewards)\n running_return = 0\n\n for t in reversed(range(token_level_rewards.shape[1])):\n running_return = token_level_rewards[:, t] + gamma * running_return\n returns[:, t] = running_return\n # Reset after EOS\n running_return = running_return * response_mask[:, t]\n\n advantages = verl_F.masked_whiten(returns, response_mask)\n advantages = advantages * response_mask\n\n return advantages, returns\n\n\n@register_adv_est(AdvantageEstimator.REMAX) # or simply: @register_adv_est(\"remax\")\ndef compute_remax_outcome_advantage(\n token_level_rewards: torch.Tensor,\n reward_baselines: torch.Tensor,\n response_mask: torch.Tensor,\n config: Optional[AlgoConfig] = None,\n **kwargs,\n) -> tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Compute advantage for ReMax, operating only on Outcome reward\n This implementation is based on the paper: https://arxiv.org/abs/2310.10505\n (with only one scalar reward for each response).\n\n Args:\n token_level_rewards: `(torch.Tensor)`\n shape: (bs, response_length)\n reward_baselines: `(torch.Tensor)`\n shape: (bs,)\n response_mask: `(torch.Tensor)`\n shape: (bs, response_length)\n config: (AlgoConfig) algorithm config\n\n 
Returns:\n advantages: `(torch.Tensor)`\n shape: (bs, response_length)\n Returns: `(torch.Tensor)`\n shape: (bs, response_length)\n \"\"\"\n\n with torch.no_grad():\n returns = (token_level_rewards * response_mask).flip(dims=[-1]).cumsum(dim=-1).flip(dims=[-1])\n advantages = returns - reward_baselines.unsqueeze(-1) * response_mask\n\n return advantages, returns\n\n\n@register_adv_est(AdvantageEstimator.GPG) # or simply: @register_adv_est(\"gpg\")\ndef compute_gpg_outcome_advantage(\n token_level_rewards: torch.Tensor,\n response_mask: torch.Tensor,\n index: np.ndarray,\n epsilon: float = 1e-6,\n f_norm: float = 1.0,\n alpha: float = 1.0,\n config=None,\n **kwargs,\n):\n \"\"\"\n Compute advantage for GPG, operating only on Outcome reward\n (with only one scalar reward for each response).\n Args:\n token_level_rewards: `(torch.Tensor)`\n shape: (bs, response_length)\n response_mask: `(torch.Tensor)`\n shape: (bs, response_length)\n index: `(np.ndarray)`\n shape: (bs,)\n epsilon: (float)\n f_norm: (float)\n alpha: (float)\n config: (dict) algorithm config\n\n Returns:\n advantages: `(torch.Tensor)`\n shape: (bs, response_length)\n Returns: `(torch.Tensor)`\n shape: (bs, response_length)\n \"\"\"\n scores = token_level_rewards.sum(dim=-1)\n\n id2score = defaultdict(list)\n id2mean = {}\n id2std = {}\n\n with torch.no_grad():\n bsz = scores.shape[0]\n m = torch.count_nonzero(scores)\n alpha = bsz / m.clamp(min=1)\n\n for i in range(bsz):\n id2score[index[i]].append(scores[i])\n\n for idx in id2score:\n if len(id2score[idx]) == 1:\n id2mean[idx] = torch.tensor(0.0)\n id2std[idx] = torch.tensor(1.0)\n elif len(id2score[idx]) > 1:\n scores_tensor = torch.stack(id2score[idx])\n id2mean[idx] = torch.mean(scores_tensor)\n id2std[idx] = torch.std(scores_tensor)\n else:\n raise ValueError(f\"no score in prompt index: {idx}\")\n for i in range(bsz):\n scores[i] = alpha * (scores[i] - id2mean[index[i]]) / (f_norm)\n scores = scores.unsqueeze(-1) * response_mask\n\n return 
@register_adv_est(AdvantageEstimator.RLOO_VECTORIZED)  # or simply: @register_adv_est("rloo_vectorized")
def compute_rloo_vectorized_outcome_advantage(
    token_level_rewards: torch.Tensor,
    response_mask: torch.Tensor,
    index: np.ndarray,
    epsilon: float = 1e-6,
    config: Optional[AlgoConfig] = None,
    **kwargs,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Vectorized RLOO advantage (https://arxiv.org/abs/2402.14740).

    Leave-one-out baseline without Python-level group loops: for sample i in
    a group of size c with group score sum S,
    adv_i = (c * r_i - S) / (c - 1), which equals r_i minus the mean of the
    other group members. Singleton groups get advantage 0.

    Args:
        token_level_rewards: `(torch.Tensor)`
            shape: (bs, response_length)
        response_mask: `(torch.Tensor)`
            shape: (bs, response_length)
        index: `(np.ndarray)` per-sample prompt index used for grouping
        epsilon: `(float)` unused here; kept for registry signature parity
        config: (AlgoConfig) algorithm config

    Returns:
        advantages: `(torch.Tensor)`
            shape: (bs, response_length)
        Returns: `(torch.Tensor)`
            shape: (bs, response_length)
    """
    scores = token_level_rewards.sum(dim=-1)

    with torch.no_grad():
        # Map arbitrary prompt ids onto dense group slots 0..G-1.
        dense_groups = torch.from_numpy(np.unique(index, return_inverse=True)[1]).to(scores.device)

        # Per-sample group size c and group score sum S, gathered back per row.
        group_sizes = torch.bincount(dense_groups)[dense_groups].to(scores.dtype)
        group_sums = torch.bincount(dense_groups, weights=scores)[dense_groups]

        # (c * r_i - S) / (c - 1); the (c > 1) factor zeroes singleton groups,
        # whose denominator is clamped to 1 to avoid division by zero.
        loo = (group_sizes * scores - group_sums) / (group_sizes - 1).clamp_min(1)
        adv = (loo * (group_sizes > 1)).unsqueeze(-1) * response_mask

    return adv, adv
cumulative sum W_t captures the \"realized energy\" of trajectory has been up to timestep t,\n giving higher weight to predicting rewards on high-variance paths.\n\n Args:\n token_level_rewards: Rewards at each token position [shape: (bs, response_length)]\n response_mask: Binary mask for valid tokens (1) vs padding (0) [shape: (bs, response_length)]\n index: Prompt indices for grouping trajectories from same prompt [shape: (bs,)]\n old_log_probs: Log probabilities from training policy during generation [shape: (bs, response_length)]\n sum_pi_squared: Sum of squared probabilities over vocabulary Σπ² [shape: (bs, response_length)]\n rollout_is_weights: Pre-computed IS weights for W correction [shape: (bs, response_length)],\n None if not using IS\n handle_zero_tail: If True, zero baselines will be set in the portion of the longest trajectory\n that extends beyond the second-longest trajectory in the prompt group.\n Default: False\n epsilon: Small constant for numerical stability (default: 1e-8)\n\n Returns:\n advantages: OTB advantage estimates [shape: (bs, response_length)]\n returns: Cumulative rewards (returns) from each position [shape: (bs, response_length)]\n\n Note on Rollout Importance Sampling:\n When rollout_is_weights is provided, W_t is scaled by ρ̄²(t) to minimize MSE under truncated IS:\n B_t* = Σ[G_t × ρ̄²(t) × W_t] / Σ[ρ̄²(t) × W_t]\n \"\"\"\n with torch.no_grad():\n batch_size, seq_len = token_level_rewards.shape\n device = token_level_rewards.device\n\n # Compute returns (reward-to-go) for each timestep\n returns = (token_level_rewards * response_mask).flip(dims=[-1]).cumsum(dim=-1).flip(dims=[-1])\n\n # Step 1: Compute w_per_timestep = 1 - 2π_t + Σπ²)\n pi_t = torch.exp(old_log_probs)\n w_per_timestep = 1 - 2 * pi_t + sum_pi_squared\n\n # Step 2: Apply rollout importance sampling correction (if enabled)\n if rollout_is_weights is not None:\n # Scale W by ρ̄² to minimize MSE under truncated IS\n w_per_timestep = w_per_timestep * 
(rollout_is_weights**2)

        # Step 3: Compute cumulative path-variance proxy: W_t = Σ_{j=1}^t w_j
        # This measures accumulated variance from the start of the trajectory up to timestep t
        w_cumulative = (w_per_timestep * response_mask).cumsum(dim=-1)

        # Group trajectories by prompt (index[i] is the per-prompt group id)
        prompt_groups = defaultdict(list)
        for i in range(batch_size):
            prompt_groups[index[i]].append(i)

        # Initialize baselines tensor [batch_size, seq_len]
        baselines = torch.zeros_like(returns)

        # Compute per-step baseline for each prompt group
        for _, trajectory_indices in prompt_groups.items():
            N = len(trajectory_indices)
            if N == 1:
                # Single trajectory - no baseline (advantage = return)
                continue

            traj_idx = torch.tensor(trajectory_indices, device=device)

            # Extract group data [N, seq_len]
            returns_group = returns[traj_idx]
            w_cumulative_group = w_cumulative[traj_idx]
            mask_group = response_mask[traj_idx]

            # Compute per-timestep baseline: B_t = Σ[G_t × W_t] / Σ[W_t]
            # where W_t = Σ_{j=1}^t ||s_j||² (cumulative path variance)
            # Shape: [seq_len]
            numerator = (returns_group * w_cumulative_group * mask_group).sum(dim=0)  # Sum over trajectories
            denominator = (w_cumulative_group * mask_group).sum(dim=0) + epsilon

            baseline_per_step = numerator / denominator  # [seq_len]

            # Assign to all trajectories in this group
            baselines[traj_idx] = baseline_per_step.unsqueeze(0).expand(N, -1)

            if handle_zero_tail:
                # Optionally zero out the portion of the longest trajectory that extends
                # beyond the second-longest trajectory in the prompt group.
                # (Beyond that point the "group baseline" is estimated from a single sample.)
                response_lengths = mask_group.sum(dim=-1)
                sorted_lengths, _ = torch.sort(response_lengths)
                max_length = int(sorted_lengths[-1].item())
                second_max_length = int(sorted_lengths[-2].item())
                max_length_idx = (response_lengths == max_length).nonzero(as_tuple=True)[0]
                if max_length_idx.numel() == 1 and max_length > second_max_length:
                    max_length_traj_idx = trajectory_indices[int(max_length_idx[0])]
                    baselines[max_length_traj_idx, second_max_length:] = 0.0

        # Compute advantages: A_t = G_t - B_t
        advantages = (returns - baselines) * response_mask

    return advantages, returns


@register_adv_est(AdvantageEstimator.TIR_OPTIMAL_TOKEN_BASELINE)
def compute_multi_turn_optimal_token_baseline_advantage(
    token_level_rewards: torch.Tensor,
    response_mask: torch.Tensor,
    index: np.ndarray,
    old_log_probs: torch.Tensor,
    sum_pi_squared: torch.Tensor,
    rollout_is_weights: torch.Tensor = None,
    handle_zero_tail: bool = True,
    epsilon: float = 1e-8,
) -> tuple[torch.Tensor, torch.Tensor]:
    """
    Compute advantages using Optimal Token Baseline (OTB).

    Unlike the group mean based baseline which uses a single baseline per trajectory,
    this computes a unique baseline for each timestep using cumulative path variance.

    Theory:
        For each timestep t in each prompt group:
            B_t* = E[G_t × W_t] / E[W_t]
        where W_t = Σ_{j=1}^t ||s_j||² (cumulative path-variance proxy)
        and ||s_j||² = 1 - 2π_j + Σπ²

    The cumulative sum W_t captures the "realized energy" of the trajectory up to timestep t,
    giving higher weight to predicting rewards on high-variance paths.

    Args:
        token_level_rewards: Rewards at each token position [shape: (bs, response_length)]
        response_mask: Binary mask for valid tokens (1) vs padding (0) [shape: (bs, response_length)]
        index: Prompt indices for grouping trajectories from same prompt [shape: (bs,)]
        old_log_probs: Log probabilities from training policy during generation [shape: (bs, response_length)]
        sum_pi_squared: Sum of squared probabilities over vocabulary Σπ² [shape: (bs, response_length)]
        rollout_is_weights: Pre-computed IS weights for W correction [shape: (bs, response_length)],
            None if not using IS
        handle_zero_tail: If True, zero baselines will be set in the portion of the longest trajectory
            that extends beyond the second-longest trajectory in the prompt group.
            Default: True
        epsilon: Small constant for numerical stability (default: 1e-8)

    Returns:
        advantages: OTB advantage estimates [shape: (bs, response_length)]
        returns: Cumulative rewards (returns) from each position [shape: (bs, response_length)]

    Note on Rollout Importance Sampling:
        When rollout_is_weights is provided, W_t is scaled by ρ̄²(t) to minimize MSE under truncated IS:
            B_t* = Σ[G_t × ρ̄²(t) × W_t] / Σ[ρ̄²(t) × W_t]
    """
    with torch.no_grad():
        # Compute returns (reward-to-go) for each timestep: double flip turns cumsum into a suffix sum
        token_returns = (token_level_rewards * response_mask).flip(dims=[-1]).cumsum(dim=-1).flip(dims=[-1])

        # Step 1: Compute w_per_timestep = 1 - 2π_t + Σπ²
        # NOTE(review): assumes sum_pi_squared was precomputed as Σ_v π(v)² over the vocab — confirm producer.
        pi_t = torch.exp(old_log_probs)
        w_per_timestep = 1 - 2 * pi_t + sum_pi_squared

        # Step 2: Apply rollout importance sampling correction (if enabled)
        if rollout_is_weights is not None:
            # Scale W by ρ̄² to minimize MSE under truncated IS
            w_per_timestep = w_per_timestep * (rollout_is_weights**2)

        # Step 3: Compute cumulative path-variance proxy: W_t = Σ_{j=1}^t w_j
        # This measures accumulated variance from the start of the trajectory up to timestep t
        w_cumulative = (w_per_timestep * response_mask).cumsum(dim=-1)

        # Step 4: Concatenate returns and w_cumulative for each trajectory
        # This allows us to compute baseline per timestep for each trajectory
        # (left-compacts each row so valid tokens occupy positions [0, length))
        response_lengths = response_mask.sum(dim=-1).to(dtype=torch.long)  # [shape: (bs * n, )]
        max_response_length = int(response_lengths.max().item()) if response_lengths.numel() > 0 else 0
        all_w_values = w_cumulative.new_zeros(
            (len(response_lengths), max_response_length)
        )  # [shape: (bs * n, max_response_length)]
        all_returns = torch.zeros_like(all_w_values)
        for i in range(len(response_lengths)):
            length = int(response_lengths[i].item())
            if length == 0:
                continue
            mask = response_mask[i].bool()
            all_w_values[i, :length] = w_cumulative[i, mask]
            all_returns[i, :length] = token_returns[i, mask]

        # Group trajectories by prompt (empty responses are excluded from every group)
        prompt_groups = defaultdict(list)
        for i in range(len(response_lengths)):
            if response_lengths[i] == 0:
                continue
            prompt_groups[index[i]].append(i)

        # Compute optimal baseline for each prompt group
        baselines = torch.zeros_like(all_returns)

        for _, trajectory_indices in prompt_groups.items():
            N = len(trajectory_indices)
            traj_idx = torch.tensor(trajectory_indices, device=all_returns.device)

            if N == 1:
                # Single trajectory - no baseline (keep original reward as advantage)
                baselines[traj_idx[0]] = 0.0
                continue

            # Extract group data
            w_group = all_w_values[traj_idx]  # [shape: (N, max_response_length)]
            R_group = all_returns[traj_idx]  # [shape: (N, max_response_length)]
            # Direct optimal baseline - single value for all in group
            b_star = (R_group * w_group).sum(dim=0) / (w_group.sum(dim=0) + epsilon)
            # Convert to match baselines dtype (epsilon can cause float64 promotion)
            baselines[traj_idx] = b_star.to(baselines.dtype)

            if handle_zero_tail:
                # Optionally zero out the portion of the longest trajectory that extends
                # beyond the second-longest trajectory in the prompt group.
                response_lengths_group = response_lengths[traj_idx]
                sorted_lengths, _ = torch.sort(response_lengths_group)
                max_length = int(sorted_lengths[-1].item())
                second_max_length = int(sorted_lengths[-2].item())
                max_length_idx = (response_lengths_group == max_length).nonzero(as_tuple=True)[0]
                if max_length_idx.numel() == 1 and max_length > second_max_length:
                    max_length_traj_idx = trajectory_indices[int(max_length_idx[0])]
                    baselines[max_length_traj_idx, second_max_length:] = 0.0

        # Compute advantages
        all_advantages = all_returns - baselines  # [shape: (bs * n, max_response_length)]

        # Scatter compacted advantages back to the original (padded) token positions
        advantages = torch.zeros_like(token_returns)  # [shape: (bs * n, turn * response_length)]
        for i in range(len(response_lengths)):
            if response_lengths[i] == 0:
                continue
            advantages[i, response_mask[i].bool()] = all_advantages[i, : response_lengths[i]]

        advantages = advantages * response_mask  # [shape: (bs * n * turn, response_length)]

    return advantages, token_returns


def compute_rewards(
    token_level_scores: torch.Tensor,
    old_log_prob: torch.Tensor,
    ref_log_prob: torch.Tensor,
    kl_ratio: float,
) -> torch.Tensor:
    """Compute token-level rewards with KL penalty.

    Args:
        token_level_scores (torch.Tensor): Token-level reward scores.
        old_log_prob (torch.Tensor): Log probabilities from current policy.
        ref_log_prob (torch.Tensor): Log probabilities from reference policy.
        kl_ratio (float): KL penalty coefficient.

    Returns:
        torch.Tensor: Token-level rewards with KL penalty applied.
    """
    # k1 estimator of per-token KL(π_old ‖ π_ref), subtracted as a penalty
    kl = old_log_prob - ref_log_prob
    return token_level_scores - kl * kl_ratio


def agg_loss(
    loss_mat: torch.Tensor,
    loss_mask: torch.Tensor,
    loss_agg_mode: str,
    dp_size: int = 1,
    batch_num_tokens: Optional[int] = None,
    global_batch_size: Optional[int] = None,
    loss_scale_factor: Optional[int] = None,
):
    """
    Aggregate the loss across global batch to ensure the loss is invariant to fsdp/megatron parallelism.

    NOTE: The returned loss has different behaviors for different backend:
    - FSDP: the loss is directly used for backward.
    - Megatron: the loss should be scaled by `num_microbatches` and `cp_size` for pp schedule.

    Args:
        loss_mat: micro batch loss matrix, (bs, response_length)
        loss_mask: micro batch loss mask, (bs, response_length)
        loss_agg_mode: method to aggregate the loss matrix into a scalar
        dp_size: data parallel size
        batch_num_tokens: number of valid tokens in global batch
        global_batch_size: global batch size
        loss_scale_factor: scale factor for "seq-mean-token-sum-norm" mode. 
If None, uses loss_mask.shape[-1].
            Set this to a constant value to ensure consistent normalization throughout training.

    Returns:
        loss: `a scalar torch.Tensor`
            aggregated loss
    """
    if loss_agg_mode == "token-mean":
        if batch_num_tokens is None:
            batch_num_tokens = loss_mask.sum()
        # NOTE(review): dp_size multiplier presumably compensates for gradient averaging
        # across data-parallel ranks — verify against the trainer's backward path.
        loss = verl_F.masked_sum(loss_mat, loss_mask) / batch_num_tokens * dp_size
    elif loss_agg_mode == "seq-mean-token-sum":
        seq_losses = torch.sum(loss_mat * loss_mask, dim=-1)  # token-sum
        seq_mask = (torch.sum(loss_mask, dim=-1) > 0).float()  # exclude fully masked sequences
        if global_batch_size is None:
            global_batch_size = seq_mask.sum()
        loss = verl_F.masked_sum(seq_losses, seq_mask) / global_batch_size * dp_size  # seq-mean
    elif loss_agg_mode == "seq-mean-token-mean":
        seq_mask = torch.sum(loss_mask, dim=-1)  # per-sequence token count
        seq_losses = torch.sum(loss_mat * loss_mask, dim=-1) / (seq_mask + 1e-8)  # token-mean
        seq_mask = (seq_mask > 0).float()  # exclude fully masked sequences
        if global_batch_size is None:
            global_batch_size = seq_mask.sum()
        loss = verl_F.masked_sum(seq_losses, seq_mask) / global_batch_size * dp_size  # seq-mean
    elif loss_agg_mode == "seq-mean-token-sum-norm":
        # This mode intentionally ignores dp_size/global_batch_size and divides by a fixed factor.
        seq_losses = torch.sum(loss_mat * loss_mask, dim=-1)
        if loss_scale_factor is None:
            loss_scale_factor = loss_mask.shape[-1]
        loss = torch.sum(seq_losses) / loss_scale_factor
    else:
        raise ValueError(f"Invalid loss_agg_mode: {loss_agg_mode}")

    return loss


@deprecated("verl.trainer.ppo.core_algos.compute_policy_loss_vanilla")
def compute_policy_loss(
    old_log_prob,
    log_prob,
    advantages,
    response_mask,
    cliprange=None,
    cliprange_low=None,
    cliprange_high=None,
    clip_ratio_c=3.0,
    loss_agg_mode: str = "token-mean",
):
    """
    Compute the clipped policy objective and related metrics for PPO.

    Adapted from
    https://github.com/huggingface/trl/blob/main/trl/trainer/ppo_trainer.py#L1122

    Args:
        old_log_prob (torch.Tensor):
            Log-probabilities of actions under the old policy, shape (batch_size, response_length).
        log_prob (torch.Tensor):
            Log-probabilities of actions under the current policy, shape (batch_size, response_length).
        advantages (torch.Tensor):
            Advantage estimates for each action, shape (batch_size, response_length).
        response_mask (torch.Tensor):
            Mask indicating which tokens to include in the loss, shape (batch_size, response_length).
        cliprange (float, optional):
            Clipping parameter ε for standard PPO. See https://arxiv.org/abs/1707.06347.
            Defaults to None (must be provided).
        cliprange_low (float, optional):
            Lower clip range for dual-clip PPO. Defaults to same as `cliprange`.
        cliprange_high (float, optional):
            Upper clip range for dual-clip PPO. Defaults to same as `cliprange`.
        clip_ratio_c (float, optional):
            Lower bound of the ratio for dual-clip PPO. See https://arxiv.org/pdf/1912.09729.
            Defaults to 3.0.
        loss_agg_mode (str, optional):
            Aggregation mode for `agg_loss`. Defaults to "token-mean".
    """
    assert clip_ratio_c > 1.0, (
        "The lower bound of the clip_ratio_c for dual-clip PPO should be greater than 1.0,"
        + f" but get the value: {clip_ratio_c}."
    )

    negative_approx_kl = log_prob - old_log_prob
    # Clamp negative_approx_kl for stability (bounds the IS ratio to exp(±20))
    negative_approx_kl = torch.clamp(negative_approx_kl, min=-20.0, max=20.0)
    ratio = torch.exp(negative_approx_kl)
    ppo_kl = verl_F.masked_mean(-negative_approx_kl, response_mask)

    pg_losses1 = -advantages * ratio
    if cliprange_low is None:
        cliprange_low = cliprange
    if cliprange_high is None:
        cliprange_high = cliprange
    pg_losses2 = -advantages * torch.clamp(
        ratio, 1 - cliprange_low, 1 + cliprange_high
    )  # - clip(ratio, 1-cliprange, 1+cliprange) * A
    clip_pg_losses1 = torch.maximum(
        pg_losses1, pg_losses2
    )  # max(-ratio * A, -clip(ratio, 1-cliprange, 1+cliprange) * A)
    pg_clipfrac = verl_F.masked_mean(torch.gt(pg_losses2, pg_losses1).float(), response_mask)

    # Dual-clip: for negative advantages additionally lower-bound the loss by -A * clip_ratio_c
    pg_losses3 = -advantages * clip_ratio_c
    clip_pg_losses2 = torch.min(pg_losses3, clip_pg_losses1)
    pg_clipfrac_lower = verl_F.masked_mean(
        torch.gt(clip_pg_losses1, pg_losses3) * (advantages < 0).float(), response_mask
    )

    pg_losses = torch.where(advantages < 0, clip_pg_losses2, clip_pg_losses1)
    pg_loss = agg_loss(loss_mat=pg_losses, loss_mask=response_mask, loss_agg_mode=loss_agg_mode)

    return pg_loss, pg_clipfrac, ppo_kl, pg_clipfrac_lower


@register_policy_loss("vanilla")  # type: ignore[arg-type]
def compute_policy_loss_vanilla(
    old_log_prob: torch.Tensor,
    log_prob: torch.Tensor,
    advantages: torch.Tensor,
    response_mask: torch.Tensor,
    loss_agg_mode: str = "token-mean",
    config: Optional[ActorConfig] = None,
    rollout_is_weights: torch.Tensor | None = None,
) -> tuple[torch.Tensor, dict[str, Any]]:
    """
    Compute the clipped policy objective and related metrics for PPO.

    Adapted from
    
https://github.com/huggingface/trl/blob/main/trl/trainer/ppo_trainer.py#L1122

    Args:
        old_log_prob (torch.Tensor):
            Log-probabilities of actions under the old policy, shape (batch_size, response_length).
        log_prob (torch.Tensor):
            Log-probabilities of actions under the current policy, shape (batch_size, response_length).
        advantages (torch.Tensor):
            Advantage estimates for each action, shape (batch_size, response_length).
        response_mask (torch.Tensor):
            Mask indicating which tokens to include in the loss, shape (batch_size, response_length).
        loss_agg_mode (str, optional):
            Aggregation mode for `agg_loss`. Defaults to "token-mean".
        config: `(verl.trainer.config.ActorConfig)`:
            config for the actor.
        rollout_is_weights: `(torch.Tensor)`:
            optional rollout importance-sampling correction weights multiplied into the
            per-token loss, shape (batch_size, response_length).
    """

    assert config is not None
    assert not isinstance(config, AlgoConfig)
    clip_ratio = config.clip_ratio  # Clipping parameter ε for standard PPO. See https://arxiv.org/abs/1707.06347.
    clip_ratio_low = config.clip_ratio_low if config.clip_ratio_low is not None else clip_ratio
    clip_ratio_high = config.clip_ratio_high if config.clip_ratio_high is not None else clip_ratio
    clip_ratio_c = config.get(  # Lower bound of the ratio for dual-clip PPO. See https://arxiv.org/pdf/1912.09729.
        "clip_ratio_c", 3.0
    )

    cliprange = clip_ratio
    cliprange_low = clip_ratio_low
    cliprange_high = clip_ratio_high

    assert clip_ratio_c > 1.0, (
        "The lower bound of the clip_ratio_c for dual-clip PPO should be greater than 1.0,"
        + f" but get the value: {clip_ratio_c}."
    )

    negative_approx_kl = log_prob - old_log_prob
    # Clamp negative_approx_kl for stability (bounds the IS ratio to exp(±20))
    negative_approx_kl = torch.clamp(negative_approx_kl, min=-20.0, max=20.0)
    ratio = torch.exp(negative_approx_kl)
    ppo_kl = verl_F.masked_mean(-negative_approx_kl, response_mask)

    pg_losses1 = -advantages * ratio
    if cliprange_low is None:
        cliprange_low = cliprange
    if cliprange_high is None:
        cliprange_high = cliprange
    pg_losses2 = -advantages * torch.clamp(
        ratio, 1 - cliprange_low, 1 + cliprange_high
    )  # - clip(ratio, 1-cliprange, 1+cliprange) * A
    clip_pg_losses1 = torch.maximum(
        pg_losses1, pg_losses2
    )  # max(-ratio * A, -clip(ratio, 1-cliprange, 1+cliprange) * A)
    pg_clipfrac = verl_F.masked_mean(torch.gt(pg_losses2, pg_losses1).float(), response_mask)

    # Dual-clip: for negative advantages additionally lower-bound the loss by -A * clip_ratio_c
    pg_losses3 = -advantages * clip_ratio_c
    clip_pg_losses2 = torch.min(pg_losses3, clip_pg_losses1)
    pg_clipfrac_lower = verl_F.masked_mean(
        torch.gt(clip_pg_losses1, pg_losses3) * (advantages < 0).float(), response_mask
    )

    pg_losses = torch.where(advantages < 0, clip_pg_losses2, clip_pg_losses1)

    # Apply rollout correction weights if provided
    if rollout_is_weights is not None:
        pg_losses = pg_losses * rollout_is_weights

    pg_loss = agg_loss(
        loss_mat=pg_losses, loss_mask=response_mask, loss_agg_mode=loss_agg_mode, **config.global_batch_info
    )

    pg_metrics = {
        "actor/pg_clipfrac": pg_clipfrac.detach().item(),
        "actor/ppo_kl": ppo_kl.detach().item(),
        "actor/pg_clipfrac_lower": pg_clipfrac_lower.detach().item(),
    }
    return pg_loss, pg_metrics


@register_policy_loss("gspo")
def compute_policy_loss_gspo(
    old_log_prob: 
torch.Tensor,\n log_prob: torch.Tensor,\n advantages: torch.Tensor,\n response_mask: torch.Tensor,\n loss_agg_mode: str = \"seq-mean-token-mean\",\n config: Optional[ActorConfig] = None,\n rollout_is_weights: torch.Tensor | None = None,\n) -> tuple[torch.Tensor, dict[str, Any]]:\n \"\"\"\n Compute the clipped policy objective and related metrics for GSPO.\n\n See https://arxiv.org/pdf/2507.18071 for more details.\n\n Args:\n old_log_prob (torch.Tensor):\n Log-probabilities of actions under the old policy, shape (batch_size, response_length).\n log_prob (torch.Tensor):\n Log-probabilities of actions under the current policy, shape (batch_size, response_length).\n advantages (torch.Tensor):\n Advantage estimates for each action, shape (batch_size, response_length).\n response_mask (torch.Tensor):\n Mask indicating which tokens to include in the loss, shape (batch_size, response_length).\n loss_agg_mode (str, optional):\n Aggregation mode for `agg_loss`. For GSPO, it is recommended to use \"seq-mean-token-mean\".\n \"\"\"\n\n assert config is not None\n assert isinstance(config, ActorConfig)\n clip_ratio_low = config.clip_ratio_low if config.clip_ratio_low is not None else config.clip_ratio\n clip_ratio_high = config.clip_ratio_high if config.clip_ratio_high is not None else config.clip_ratio\n\n negative_approx_kl = log_prob - old_log_prob\n\n # compute sequence-level importance ratio:\n # si(θ) = (π_θ(yi|x)/π_θold(yi|x))^(1/|yi|) =\n # exp [(1/|y_i|) * Σ_t log(π_θ(y_i,t|x,y_i, tuple[torch.Tensor, dict[str, Any]]:\n \"\"\"\n Compute the smoothed policy objective and related metrics for SAPO.\n\n See https://arxiv.org/pdf/2511.20347 for more details.\n\n Args:\n old_log_prob (torch.Tensor):\n Log-probabilities of actions under the old policy, shape (batch_size, response_length).\n log_prob (torch.Tensor):\n Log-probabilities of actions under the current policy, shape (batch_size, response_length).\n advantages (torch.Tensor):\n Advantage estimates for each action, 
shape (batch_size, response_length).\n response_mask (torch.Tensor):\n Mask indicating which tokens to include in the loss, shape (batch_size, response_length).\n loss_agg_mode (str, optional):\n Aggregation mode for `agg_loss`. For SAPO, it is recommended to use \"seq-mean-token-mean\".\n \"\"\"\n\n assert config is not None\n assert isinstance(config, ActorConfig)\n\n # temperature for positive and negative token updates\n tau_pos = torch.as_tensor(config.tau_pos, dtype=advantages.dtype, device=advantages.device)\n tau_neg = torch.as_tensor(config.tau_neg, dtype=advantages.dtype, device=advantages.device)\n\n def gate_function(x, tau):\n \"\"\"The gating function used in SAPO\"\"\"\n return torch.sigmoid(tau * (x - 1.0)) * (4.0 / tau)\n\n # compute IS at token level:\n # r_{i,t}(θ) = π_θ(y_{i,t}|x, y_{i, 0 else tau_neg\n taus = torch.where(\n condition=advantages > 0,\n input=tau_pos, # if A_{i,t} > 0 we set to tau_pos\n other=tau_neg, # if A_{i,t} <= 0 we set to tau_neg\n )\n\n # compute the gates f_{i,t}(r_{i,t}(θ)) at token level\n gates = gate_function(ratio, taus)\n\n # compute policy gradient loss\n pg_losses = -gates * advantages\n\n # Apply rollout correction weights if provided\n if rollout_is_weights is not None:\n pg_losses = pg_losses * rollout_is_weights\n\n # for SAPO, we need to aggregate the loss at the sequence level (seq-mean-token-mean)\n pg_loss = agg_loss(\n loss_mat=pg_losses, loss_mask=response_mask, loss_agg_mode=\"seq-mean-token-mean\", **config.global_batch_info\n )\n\n # For compatibility, return zero for both pg_clipfrac and pg_clipfrac_lower (not used in SAPO)\n pg_clipfrac = torch.tensor(0.0, device=pg_loss.device)\n pg_clipfrac_lower = torch.tensor(0.0, device=pg_loss.device)\n # compute KL for metrics tracking\n ppo_kl = verl_F.masked_mean(-negative_approx_kl, response_mask)\n # return metrics dict\n pg_metrics = {\n \"actor/pg_clipfrac\": pg_clipfrac.detach().item(),\n \"actor/ppo_kl\": ppo_kl.detach().item(),\n 
\"actor/pg_clipfrac_lower\": pg_clipfrac_lower.detach().item(),\n }\n\n return pg_loss, pg_metrics\n\n\n@register_policy_loss(\"gpg\")\ndef compute_policy_loss_gpg(\n old_log_prob: torch.Tensor,\n log_prob: torch.Tensor,\n advantages: torch.Tensor,\n response_mask: torch.Tensor,\n loss_agg_mode: str = \"token-mean\",\n config: Optional[ActorConfig] = None,\n rollout_is_weights: torch.Tensor | None = None,\n) -> tuple[torch.Tensor, dict[str, Any]]:\n \"\"\"Adapted from\n https://github.com/AMAP-ML/GPG/blob/main/VisualThinker-R1-Zero/src/open-r1-multimodal/src/open_r1/trainer/grpo_trainer.py#L495\n Args:\n log_prob: `(torch.Tensor)`\n shape: (bs, response_length)\n advantages: `(torch.Tensor)`\n shape: (bs, response_length)\n response_mask: `(torch.Tensor)`\n shape: (bs, response_length)\n return:\n pg_loss: `a scalar torch.Tensor`\n policy gradient loss computed via GPG\n \"\"\"\n assert config is not None\n pg_losses = -log_prob * advantages\n\n # Apply rollout correction weights if provided\n if rollout_is_weights is not None:\n pg_losses = pg_losses * rollout_is_weights\n\n pg_loss = agg_loss(\n loss_mat=pg_losses, loss_mask=response_mask, loss_agg_mode=loss_agg_mode, **config.global_batch_info\n )\n return pg_loss, {}\n\n\n@register_policy_loss(\"clip_cov\")\ndef compute_policy_loss_clip_cov(\n old_log_prob: torch.Tensor,\n log_prob: torch.Tensor,\n advantages: torch.Tensor,\n response_mask: torch.Tensor,\n loss_agg_mode: str = \"token-mean\",\n config: Optional[ActorConfig] = None,\n rollout_is_weights: torch.Tensor | None = None,\n) -> tuple[torch.Tensor, dict[str, Any]]:\n \"\"\"\n Compute the clipped policy objective and related metrics for Clip-Cov.\n\n Adapted from\n https://github.com/PRIME-RL/Entropy-Mechanism-of-RL/blob/main/verl/trainer/ppo/core_algos.py\n\n Args:\n old_log_prob (torch.Tensor):\n Log-probabilities of actions under the old policy, shape (batch_size, response_length).\n log_prob (torch.Tensor):\n Log-probabilities of actions under the 
current policy, shape (batch_size, response_length).
        advantages (torch.Tensor):
            Advantage estimates for each action, shape (batch_size, response_length).
        response_mask (torch.Tensor):
            Mask indicating which tokens to include in the loss, shape (batch_size, response_length).
        cliprange (float, optional):
            Clipping parameter ε for standard PPO. See https://arxiv.org/abs/1707.06347.
            Defaults to None (must be provided).
        cliprange_low (float, optional):
            Lower clip range for dual-clip PPO. Defaults to same as `cliprange`.
        cliprange_high (float, optional):
            Upper clip range for dual-clip PPO. Defaults to same as `cliprange`.
        loss_agg_mode (str, optional):
            Aggregation mode for `agg_loss`. Defaults to "token-mean".
        clip_cov_ratio (float, optional):
            Ratio for clipping the covariance. Defaults to 0.0002.
        clip_cov_lb (float, optional):
            Lower bound for clipping covariance. Defaults to 1.0.
        clip_cov_ub (float, optional):
            Upper bound for clipping covariance. Defaults to 5.0.
    """
    assert config is not None
    assert not isinstance(config, AlgoConfig), "passing AlgoConfig not supported yet"
    assert config.policy_loss is not None

    clip_cov_ratio = config.policy_loss.clip_cov_ratio if config.policy_loss.clip_cov_ratio is not None else 0.0002
    cliprange = config.clip_ratio
    cliprange_low = config.clip_ratio_low if config.clip_ratio_low is not None else cliprange
    cliprange_high = config.clip_ratio_high if config.clip_ratio_high is not None else cliprange
    clip_cov_ub = config.policy_loss.clip_cov_ub if config.policy_loss.clip_cov_ub is not None else 5.0
    clip_cov_lb = config.policy_loss.clip_cov_lb if config.policy_loss.clip_cov_lb is not None else 1.0

    # NOTE(review): message names "clip_ratio" but the asserted value is clip_cov_ratio.
    assert clip_cov_ratio > 0, "clip_ratio should be larger than 0."

    negative_approx_kl = log_prob - old_log_prob
    ratio = torch.exp(negative_approx_kl)
    ppo_kl = verl_F.masked_mean(-negative_approx_kl, response_mask)

    pg_losses1 = -advantages * ratio

    if cliprange_low is None:
        cliprange_low = cliprange
    if cliprange_high is None:
        cliprange_high = cliprange

    # corr == 1 keeps a token's loss; tokens selected below get corr == 0 (loss zeroed out)
    corr = torch.ones_like(advantages)
    pg_losses2 = -advantages * torch.clamp(ratio, 1 - cliprange_low, 1 + cliprange_high)
    clip_by_origin = (pg_losses2 > pg_losses1) & (response_mask > 0)

    # Token-level covariance between centered advantage and centered log-prob
    cov_all = (advantages - verl_F.masked_mean(advantages, response_mask)) * (
        log_prob - verl_F.masked_mean(log_prob.detach(), response_mask)
    )
    # Exclude padding and tokens already clipped by the standard PPO clip
    cov_all[response_mask == 0] = -torch.inf
    cov_all[clip_by_origin] = -torch.inf

    clip_num = max(int(clip_cov_ratio * response_mask.sum().item()), 1)
    top_k_idx = (cov_all < clip_cov_ub) & (cov_all > clip_cov_lb) & (response_mask > 0)
    top_k_idx = torch.nonzero(top_k_idx)

    if len(top_k_idx) > 0:
        # Randomly keep at most clip_num of the in-band candidates
        perm = torch.randperm(len(top_k_idx))
        top_k_idx = top_k_idx[perm[: min(clip_num, len(top_k_idx))]]
    else:
        top_k_idx = torch.empty((0, 2), device=cov_all.device, dtype=torch.long)

    corr[top_k_idx[:, 0], top_k_idx[:, 1]] = 0

    pg_clipfrac = verl_F.masked_mean((corr == 0).float(), response_mask)

    pg_losses = torch.maximum(pg_losses1, pg_losses2) * corr

    # Apply rollout correction weights if provided
    if rollout_is_weights is not None:
        pg_losses = pg_losses * rollout_is_weights

    pg_loss = agg_loss(
        loss_mat=pg_losses, loss_mask=response_mask, loss_agg_mode=loss_agg_mode, **config.global_batch_info
    )
    pg_metrics = {
        "actor/pg_clipfrac": pg_clipfrac.detach().item(),
        "actor/ppo_kl": ppo_kl.detach().item(),
    }
    return pg_loss, pg_metrics


@register_policy_loss("kl_cov")
def compute_policy_loss_kl_cov(
    old_log_prob: torch.Tensor,
    log_prob: torch.Tensor,
    advantages: torch.Tensor,
    response_mask: torch.Tensor,
    loss_agg_mode: str = "token-mean",
    config: Optional[ActorConfig] = None,
    rollout_is_weights: torch.Tensor | None = None,
) -> tuple[torch.Tensor, dict[str, Any]]:
    """
    Compute the policy objective and related metrics for KL-Cov.

    Adapted from
    https://github.com/PRIME-RL/Entropy-Mechanism-of-RL/blob/main/verl/trainer/ppo/core_algos.py

    Args:
        old_log_prob (torch.Tensor):
            Log-probabilities of actions under the old policy, shape (batch_size, response_length).
        log_prob (torch.Tensor):
            Log-probabilities of actions under the current policy, shape (batch_size, response_length).
        advantages (torch.Tensor):
            Advantage estimates for each action, shape (batch_size, response_length).
        response_mask (torch.Tensor):
            Mask indicating which tokens to include in the loss, shape (batch_size, response_length).
        loss_agg_mode (str, optional):
            Aggregation mode for `agg_loss`. Defaults to "token-mean".
        kl_cov_ratio (float, optional):
            Ratio for selecting the top-k covariance values. Defaults to 0.0002.
        ppo_kl_coef (float, optional):
            Coefficient for the KL penalty term in the loss. Defaults to 1.
    """
    assert config is not None
    assert not isinstance(config, AlgoConfig), "passing AlgoConfig not supported yet"
    assert config.policy_loss is not None

    kl_cov_ratio = config.policy_loss.kl_cov_ratio if config.policy_loss.kl_cov_ratio is not None else 0.0002
    ppo_kl_coef = config.policy_loss.ppo_kl_coef if config.policy_loss.ppo_kl_coef is not None else 1.0

    assert kl_cov_ratio > 0, "kl_cov_ratio should be larger than 0."

    negative_approx_kl = log_prob - old_log_prob
    abs_kl = negative_approx_kl.abs()
    ratio = torch.exp(negative_approx_kl)
    ppo_kl_abs = verl_F.masked_mean(negative_approx_kl.abs(), response_mask)
    pg_losses1 = -advantages * ratio
    # Variant with an added |KL| penalty, applied only to the top-covariance tokens below
    pg_losses_kl = -advantages * ratio + ppo_kl_coef * abs_kl
    pg_losses = pg_losses1

    all_valid = response_mask > 0
    all_valid_idx = torch.nonzero(all_valid.reshape(-1), as_tuple=True)[0]
    all_valid_adv = advantages[all_valid].detach().reshape(-1).cpu()
    all_valid_logp = log_prob[all_valid].detach().reshape(-1).cpu()

    # NOTE(review): kl_cov_ratio is a fraction (e.g. 0.0002) while len(...) is a count,
    # so this min() only acts as an emptiness guard (k == 0 iff no valid tokens) — confirm intent.
    k = min(kl_cov_ratio, len(all_valid_adv))

    if k != 0:
        cov_lst_all = (all_valid_adv - all_valid_adv.mean()) * 
(all_valid_logp - all_valid_logp.mean())
        # Apply the KL-penalized loss to the top kl_cov_ratio fraction of tokens by covariance
        k_percent_nums = max(1, int(len(cov_lst_all) * kl_cov_ratio))
        large_cov_idxs = torch.topk(cov_lst_all, k_percent_nums, largest=True).indices

        if len(large_cov_idxs) != 0:
            # Map flat valid-token indices back to (row, col) positions in the 2D loss matrix
            large_cov_idxs = all_valid_idx[large_cov_idxs]
            pg_losses[large_cov_idxs // advantages.shape[1], large_cov_idxs % advantages.shape[1]] = pg_losses_kl[
                large_cov_idxs // advantages.shape[1], large_cov_idxs % advantages.shape[1]
            ]

    # Apply rollout correction weights if provided
    if rollout_is_weights is not None:
        pg_losses = pg_losses * rollout_is_weights

    pg_loss = agg_loss(
        loss_mat=pg_losses, loss_mask=response_mask, loss_agg_mode=loss_agg_mode, **config.global_batch_info
    )
    pg_metrics = {
        "actor/ppo_kl": ppo_kl_abs.detach().item(),
    }
    return pg_loss, pg_metrics


@register_policy_loss("geo_mean")
def compute_policy_loss_geo_mean(
    old_log_prob: torch.Tensor,
    log_prob: torch.Tensor,
    advantages: torch.Tensor,
    response_mask: torch.Tensor,
    loss_agg_mode: str = "token-mean",
    config: Optional[ActorConfig] = None,
    rollout_is_weights: torch.Tensor | None = None,
) -> tuple[torch.Tensor, dict[str, Any]]:
    """
    Compute the clipped policy objective and related metrics for GMPO.

    Adapted from paper https://arxiv.org/abs/2507.20673
    https://github.com/callsys/GMPO/blob/main/train_zero_math_gmpo.py

    Args:
        old_log_prob (torch.Tensor):
            Log-probabilities of actions under the old policy, shape (batch_size, response_length).
        log_prob (torch.Tensor):
            Log-probabilities of actions under the current policy, shape (batch_size, response_length).
        advantages (torch.Tensor):
            Advantage estimates for each action, shape (batch_size, response_length).
        response_mask (torch.Tensor):
            Mask indicating which tokens to include in the loss, shape (batch_size, response_length).
        loss_agg_mode (str, optional):
            not used
    """

    assert config is not None
    assert not isinstance(config, AlgoConfig)
    clip_ratio = config.clip_ratio  # Clipping parameter. See https://arxiv.org/abs/1707.06347.
    clip_ratio_low = config.clip_ratio_low if config.clip_ratio_low is not None else clip_ratio
    clip_ratio_high = config.clip_ratio_high if config.clip_ratio_high is not None else clip_ratio

    cliprange = clip_ratio
    cliprange_low = clip_ratio_low
    cliprange_high = clip_ratio_high
    if cliprange_low is None:
        cliprange_low = cliprange
    if cliprange_high is None:
        cliprange_high = cliprange

    negative_approx_kl = log_prob - old_log_prob
    # Clamp negative_approx_kl for stability (uncomment it if you like)
    # negative_approx_kl = torch.clamp(negative_approx_kl, min=-20.0, max=20.0)
    ppo_kl = verl_F.masked_mean(-negative_approx_kl, response_mask)

    # Clipping at token-level & Clipping wider
    # NOTE(review): the sign trick below appears to select the pessimistic (smaller-objective)
    # of raw vs clamped log-ratio per token, mirroring PPO's two-sided clipping — confirm vs paper.
    sgn_advantage = torch.sign(advantages)
    negative_approx_kl_clamp = torch.clamp(negative_approx_kl, -cliprange_low, cliprange_high)
    negative_approx_kl_min = torch.min(sgn_advantage * negative_approx_kl, sgn_advantage * negative_approx_kl_clamp)
    negative_approx_kl_min = sgn_advantage * negative_approx_kl_min

    # Geometric-Mean Policy Optimization: sequence ratio = exp(mean of per-token log-ratios)
    response_mask_sum = response_mask.sum(dim=-1)
    ratio = torch.exp((negative_approx_kl_min * response_mask).sum(dim=-1) / (response_mask_sum + 1e-8))
    # we only support sequence level advantage for now,
    # otherwise, below would be not consistent with the paper
    advantage = (advantages * response_mask).sum(dim=-1) / (response_mask_sum + 1e-8)
    pg_losses = -advantage * ratio

    # Apply rollout correction weights if provided
    # For geo_mean, IS weights are 2D (batch_size, seq_length) and need to be aggregated to sequence level
    if rollout_is_weights is not None:
        # Aggregate token-level weights to sequence level using geometric mean for consistency
        # Note: rollout_is_weights is always 2D regardless of aggregation mode
        seq_is_weights = torch.exp(
            (torch.log(rollout_is_weights + 1e-10) * response_mask).sum(dim=-1) / (response_mask_sum + 1e-8)
        )
        pg_losses = pg_losses * seq_is_weights

    pg_loss = torch.mean(pg_losses)

    # higher: ratio is too large that need clamp to clip_high (when adv > 0)
    clipped = torch.ne(negative_approx_kl, negative_approx_kl_clamp)
    pg_clipfrac = verl_F.masked_mean((clipped * (advantages > 0)).float(), response_mask)
    pg_clipfrac_lower = verl_F.masked_mean((clipped * (advantages < 0)).float(), response_mask)
    pg_metrics = {
        "actor/pg_clipfrac": pg_clipfrac.detach().item(),
        "actor/ppo_kl": ppo_kl.detach().item(),
        "actor/pg_clipfrac_lower": pg_clipfrac_lower.detach().item(),
    }
    return pg_loss, pg_metrics


@register_policy_loss("cispo")
def compute_policy_loss_cispo(
    old_log_prob: torch.Tensor,
    log_prob: torch.Tensor,
    advantages: torch.Tensor,
    response_mask: torch.Tensor,
    loss_agg_mode: str = "token-mean",
    config: Optional[DictConfig | ActorConfig] = None,
    rollout_is_weights: torch.Tensor | None = None,
) -> tuple[torch.Tensor, dict[str, Any]]:
    """
    Compute the clipped policy objective and related metrics for CISPO.

    See https://arxiv.org/pdf/2506.13585 for more details.
    """

    assert config is not None
    assert isinstance(config, ActorConfig)
    clip_ratio_low = config.clip_ratio_low if config.clip_ratio_low is not None else config.clip_ratio
    clip_ratio_high = config.clip_ratio_high if config.clip_ratio_high is not None else config.clip_ratio

    # Compute importance sampling ratio: π_θ / π_θ_old
    negative_approx_kl = log_prob - old_log_prob
    # Clamp for numerical stability
    negative_approx_kl = torch.clamp(negative_approx_kl, min=-20.0, max=20.0)
    ratio = torch.exp(negative_approx_kl)
    ppo_kl = verl_F.masked_mean(-negative_approx_kl, response_mask)

    # CISPO: Clip the importance sampling weights
    # KEY: Apply stop gradient to the clipped ratio
    # This prevents gradients from flowing through the ratio computation and clipping
    # Gradients only flow through log_prob in the final 
loss term\n clipped_ratio = torch.clamp(ratio, 1 - clip_ratio_low, 1 + clip_ratio_high)\n clipped_ratio_sg = clipped_ratio.detach()\n\n # CISPO objective function (to maximize): J = sg(clip(ratio)) * A * log π_θ\n # Loss function (to minimize): L = -J = -sg(clip(ratio)) * A * log_prob\n pg_losses = -clipped_ratio_sg * advantages * log_prob\n\n # Track clipping statistics\n pg_clipfrac = verl_F.masked_mean((ratio != clipped_ratio).float(), response_mask)\n\n # Apply rollout importance sampling weights if provided\n if rollout_is_weights is not None:\n pg_losses = pg_losses * rollout_is_weights\n\n pg_loss = agg_loss(\n loss_mat=pg_losses, loss_mask=response_mask, loss_agg_mode=loss_agg_mode, **config.global_batch_info\n )\n\n # For compatibility, return zero for pg_clipfrac_lower (not used in CISPO)\n pg_clipfrac_lower = torch.tensor(0.0, device=pg_loss.device)\n\n pg_metrics = {\n \"actor/pg_clipfrac\": pg_clipfrac.detach().item(),\n \"actor/ppo_kl\": ppo_kl.detach().item(),\n \"actor/pg_clipfrac_lower\": pg_clipfrac_lower.detach().item(),\n }\n return pg_loss, pg_metrics\n\n\ndef compute_entropy_loss(logits, response_mask, loss_agg_mode: str = \"token-mean\"):\n \"\"\"Compute categorical entropy loss (For backward compatibility)\n\n Args:\n logits (torch.Tensor): shape is (bs, response_length, vocab_size)\n response_mask (torch.Tensor): shape is (bs, response_length)\n\n Returns:\n entropy: a scalar torch.Tensor\n\n \"\"\"\n # compute entropy\n token_entropy = verl_F.entropy_from_logits(logits) # (bs, response_len)\n entropy_loss = agg_loss(loss_mat=token_entropy, loss_mask=response_mask, loss_agg_mode=loss_agg_mode)\n return entropy_loss\n\n\ndef compute_value_loss(\n vpreds: torch.Tensor,\n returns: torch.Tensor,\n values: torch.Tensor,\n response_mask: torch.Tensor,\n cliprange_value: float,\n loss_agg_mode: str = \"token-mean\",\n):\n \"\"\"\n Compute the clipped value-function loss for PPO.\n\n Copied from 
def kl_penalty(logprob: torch.FloatTensor, ref_logprob: torch.FloatTensor, kl_penalty) -> torch.FloatTensor:
    """Compute KL divergence given logprob and ref_logprob. Optionally using straight through to bind k2 on other
    kl penalty compute method for unbiased KL gradient estimation.
    See more description in http://joschu.net/blog/kl-approx.html

    Args:
        logprob: Log-probs of the current policy.
        ref_logprob: Log-probs of the reference policy.
        kl_penalty: Estimator name ("kl"/"k1", "abs", "mse"/"k2", "low_var_kl"/"k3", "full"),
            optionally suffixed with "+" (e.g. "k3+") to enable the straight-through trick.

    Returns:
        kl_estimate
    """
    # BUG FIX: strip the optional "+" suffix before dispatching. kl_penalty_forward
    # only recognizes the base estimator names, so passing e.g. "k3+" through
    # unchanged raised NotImplementedError and made the straight-through path
    # unreachable.
    base_penalty = kl_penalty.removesuffix("+")
    forward_score = kl_penalty_forward(logprob, ref_logprob, base_penalty)
    if not kl_penalty.endswith("+") or base_penalty in ("mse", "k2"):
        return forward_score

    # The expectation of the k1 and k3 estimators is the expected value of KL, but
    # their expected gradient is not the expected gradient of KL. The k2 estimator
    # gives the right gradient estimator, so we use a straight-through trick when
    # the kl_penalty method ends with '+', e.g. k3+: forward value from the chosen
    # estimator, backward gradient from k2.
    backward_score = 0.5 * (logprob - ref_logprob).square()

    return backward_score - backward_score.detach() + forward_score.detach()


def kl_penalty_forward(logprob: torch.FloatTensor, ref_logprob: torch.FloatTensor, kl_penalty) -> torch.FloatTensor:
    """Compute KL divergence given logprob and ref_logprob.
    Copied from https://github.com/huggingface/trl/blob/main/trl/trainer/ppo_trainer.py#L1104
    See more description in http://joschu.net/blog/kl-approx.html

    Args:
        logprob: Log-probs of the current policy.
        ref_logprob: Log-probs of the reference policy.
        kl_penalty: Estimator name ("kl"/"k1", "abs", "mse"/"k2", "low_var_kl"/"k3", "full").

    Returns:
        kl_estimate

    Raises:
        NotImplementedError: For "full" or any unrecognized estimator name.
    """
    if kl_penalty in ("kl", "k1"):
        return logprob - ref_logprob

    if kl_penalty == "abs":
        return (logprob - ref_logprob).abs()

    if kl_penalty in ("mse", "k2"):
        return 0.5 * (logprob - ref_logprob).square()

    # J. Schulman. Approximating kl divergence, 2020.
    # URL http://joschu.net/blog/kl-approx.html.
    if kl_penalty in ("low_var_kl", "k3"):
        kl = ref_logprob - logprob
        # For numerical stability
        kl = torch.clamp(kl, min=-20, max=20)
        ratio = torch.exp(kl)
        kld = (ratio - kl - 1).contiguous()
        return torch.clamp(kld, min=-10, max=10)

    if kl_penalty == "full":
        # so, here logprob and ref_logprob should contain the logits for every token in vocabulary
        raise NotImplementedError

    raise NotImplementedError
data.batch[\"token_level_scores\"].sum(dim=-1)\n weights = compute_weights(scores, reweight_method, weight_pow)\n weights = torch.clamp(weights + 1e-8, min=1e-8)\n\n batch_size = scores.shape[0]\n sample_indices = torch.multinomial(weights, batch_size, replacement=True)\n\n resampled_batch = {key: tensor[sample_indices] for key, tensor in data.batch.items()}\n\n sample_indices_np = sample_indices.numpy()\n resampled_non_tensor_batch = {}\n for key, array in data.non_tensor_batch.items():\n if isinstance(array, np.ndarray):\n resampled_non_tensor_batch[key] = array[sample_indices_np]\n else:\n resampled_non_tensor_batch[key] = [array[i] for i in sample_indices_np]\n\n resampled_meta_info = {}\n for key, value in data.meta_info.items():\n if isinstance(value, list) and len(value) == batch_size:\n resampled_meta_info[key] = [value[i] for i in sample_indices_np]\n else:\n resampled_meta_info[key] = value\n\n from copy import deepcopy\n\n resampled_data = deepcopy(data)\n resampled_data.batch = type(data.batch)(resampled_batch)\n resampled_data.batch.batch_size = data.batch.batch_size\n resampled_data.non_tensor_batch = resampled_non_tensor_batch\n resampled_data.meta_info = resampled_meta_info\n\n return resampled_data\n\n\ndef compute_policy_loss_reinforce(\n rollout_log_prob: torch.Tensor,\n log_prob: torch.Tensor,\n advantages: torch.Tensor,\n response_mask: torch.Tensor,\n loss_agg_mode: str = \"seq-mean-token-sum\",\n config: Optional[ActorConfig] = None,\n rollout_is_weights: Optional[torch.Tensor] = None,\n) -> tuple[torch.Tensor, dict[str, Any]]:\n \"\"\"Compute REINFORCE-style policy gradient loss with optional IS correction.\n\n This function implements policy gradient (REINFORCE) with optional importance\n sampling correction for rollout-training policy mismatch.\n\n Mathematical formulation:\n Without IS (rollout_is_weights=None):\n L = -E[log π(a|s) * A(s,a)]\n Gradient: ∇_θ L = -E[∇log π(a|s) * A] (standard REINFORCE)\n\n With IS (rollout_is_weights 
provided):\n L = -E_π_rollout[w * log π(a|s) * A(s,a)]\n where w = π_current / π_rollout (truncated IS weight)\n Gradient: ∇_θ L = -E[w * ∇log π(a|s) * A] (IS-corrected policy gradient)\n\n Args:\n rollout_log_prob: Log probabilities from rollout policy (e.g., vLLM BF16).\n Shape: (batch_size, seq_length). Used for KL computation.\n log_prob: Log probabilities from current training policy.\n Shape: (batch_size, seq_length)\n advantages: Advantage estimates for each token.\n Shape: (batch_size, seq_length)\n response_mask: Mask indicating valid tokens (1 for valid, 0 for padding).\n Shape: (batch_size, seq_length). Should already include rejection sampling.\n loss_agg_mode: Loss aggregation strategy (see agg_loss for details).\n config: Actor config (required for global_batch_info).\n rollout_is_weights: Pre-computed IS weights (π_current / π_rollout).\n Shape: (batch_size, seq_length). None to disable IS correction.\n\n Returns:\n Tuple of (loss, metrics):\n loss: Scalar policy gradient loss\n metrics: Dictionary with \"actor/ppo_kl\"\n\n Note:\n Unlike PPO (compute_policy_loss_vanilla), this function:\n - Does NOT use PPO clipping\n - Uses log π(a|s) directly (not ratio)\n - IS weights are applied as multiplicative factor\n \"\"\"\n assert config is not None, \"ActorConfig must be provided for REINFORCE loss\"\n\n # Compute pure policy gradient loss with optional IS correction\n # Standard REINFORCE: L = -E[log π(a|s) * A]\n # With IS: L = -E[w * log π(a|s) * A] where w = π_current / π_rollout\n if rollout_is_weights is not None:\n # IS-corrected policy gradient: L = -E[stopgrad(w) · log π · A]\n pg_losses = -advantages * log_prob * rollout_is_weights\n else:\n # Standard REINFORCE: L = -E[log π · A]\n pg_losses = -advantages * log_prob\n\n # Aggregate loss\n pg_loss = agg_loss(\n loss_mat=pg_losses,\n loss_mask=response_mask,\n loss_agg_mode=loss_agg_mode,\n **config.global_batch_info,\n )\n\n # Compute KL divergence between current and rollout policy\n 
@register_policy_loss("bypass_mode")
def compute_policy_loss_bypass_mode(
    old_log_prob: torch.Tensor,
    log_prob: torch.Tensor,
    advantages: torch.Tensor,
    response_mask: torch.Tensor,
    loss_agg_mode: str = "token-mean",
    config: Optional[ActorConfig] = None,
    rollout_is_weights: torch.Tensor | None = None,
) -> tuple[torch.Tensor, dict[str, Any]]:
    """Bypass-mode policy loss supporting both REINFORCE and PPO-clip.

    In bypass mode the trainer sets old_log_prob = rollout_log_prob, so:

    - ``loss_type="reinforce"``: apply explicit IS weights
      w = pi_current / pi_rollout, i.e. L = -E[w * log pi(a|s) * A].
    - ``loss_type="ppo_clip"`` (default): the PPO ratio
      pi_current / pi_old = pi_current / pi_rollout already carries the
      correction through clipping, so IS weights are NOT applied (applying
      them as well would double-count).

    IS weights and the rejection mask are computed internally from the
    ``rollout_correction`` settings inside ``config.policy_loss``; the
    ``rollout_is_weights`` argument is ignored.

    Args:
        old_log_prob: Rollout-policy log-probs (bypass-mode semantics),
            shape (batch_size, seq_length).
        log_prob: Current-policy log-probs, same shape.
        advantages: Advantage estimates, same shape.
        response_mask: Valid-token mask (1=valid, 0=padding), same shape.
        loss_agg_mode: Aggregation mode forwarded to the underlying loss.
        config: Actor config; must contain policy_loss.rollout_correction.
        rollout_is_weights: Ignored (recomputed internally).

    Returns:
        Tuple of the scalar loss and a metrics dict (rollout-correction
        metrics merged with the underlying loss metrics).

    Raises:
        ValueError: If the rollout_correction config is missing or loss_type
            is not one of 'reinforce' / 'ppo_clip'.
    """
    from verl.trainer.ppo.rollout_corr_helper import compute_rollout_correction_and_rejection_mask

    assert config is not None, "config is required for bypass_mode loss"

    corr_cfg = config.policy_loss.get("rollout_correction", None) if hasattr(config, "policy_loss") else None
    if corr_cfg is None:
        raise ValueError(
            "rollout_correction config not found in policy_loss. "
            "When using loss_mode='bypass_mode', ensure rollout_correction config is passed."
        )

    loss_type = corr_cfg.get("loss_type", "ppo_clip")

    # Bypass mode: old_log_prob is literally the rollout log-prob.
    rollout_log_prob = old_log_prob

    # IS weights are always computed (even for ppo_clip) so their metrics are
    # logged; they are only *applied* on the reinforce path.
    with torch.no_grad():
        weights_proto, masked_response_mask, corr_metrics = compute_rollout_correction_and_rejection_mask(
            old_log_prob=log_prob,  # current policy, for the ratio pi_current / pi_rollout
            rollout_log_prob=rollout_log_prob,
            response_mask=response_mask,
            rollout_is=corr_cfg.get("rollout_is", None),
            rollout_is_threshold=corr_cfg.get("rollout_is_threshold", 2.0),
            rollout_is_batch_normalize=corr_cfg.get("rollout_is_batch_normalize", False),
            rollout_rs=corr_cfg.get("rollout_rs", None),
            rollout_rs_threshold=corr_cfg.get("rollout_rs_threshold", None),
        )

    # IS weight tensor, or None when the correction is disabled.
    is_weights = weights_proto.batch["rollout_is_weights"] if weights_proto else None

    if loss_type == "reinforce":
        # Explicit IS correction on top of a plain policy gradient.
        pg_loss, pg_metrics = compute_policy_loss_reinforce(
            rollout_log_prob=rollout_log_prob,
            log_prob=log_prob,
            advantages=advantages,
            response_mask=masked_response_mask,
            loss_agg_mode=loss_agg_mode,
            config=config,
            rollout_is_weights=is_weights,
        )
    elif loss_type == "ppo_clip":
        # The clipped PPO ratio already constrains the effective IS ratio;
        # passing extra IS weights here would double-count the correction.
        pg_loss, pg_metrics = compute_policy_loss_vanilla(  # type: ignore[call-arg]
            old_log_prob=rollout_log_prob,  # = old_log_prob in bypass mode
            log_prob=log_prob,
            advantages=advantages,
            response_mask=masked_response_mask,
            loss_agg_mode=loss_agg_mode,
            config=config,
            rollout_is_weights=None,  # explicitly None - no IS weights for PPO-clip
        )
    else:
        raise ValueError(f"Invalid loss_type: {loss_type}. Must be 'reinforce' or 'ppo_clip'.")

    # Merge rollout correction metrics into the loss metrics.
    pg_metrics.update(corr_metrics)
    return pg_loss, pg_metrics
General distribution shifts between data collection and training\n\nIts core capabilities include computing importance sampling (IS) weights,\nfiltering outlier samples via rejection sampling (RS), and\ntracking metrics to diagnose and correct off-policy issues.\n\n## Core Capabilities\n1. **Multi-Granularity Aggregation**:\n - Importance Sampling (IS):\n Token-level\n Sequence-level\n - Rejection Sampling (RS):\n Divergence-based filters (token_k*, seq_sum_k*, seq_mean_k*, seq_max_k*)\n2. **Memory-Efficient Design**:\n - Log-space computations to avoid numerical overflow/underflow.\n - Fixed safety bounds (exp(±20)) for stable exponentiation.\n - Metrics calculated without large intermediate tensors (prevents CUDA OOM).\n3. **Comprehensive Metrics Tracking**:\n - IS/RS statistics (mean/max/min, effective sample size ESS, rejection rate).\n - Off-policy diagnostics (KL divergence, perplexity PPL, log PPL difference, χ² divergence).\n - Sequence-level breakdowns (deviation from ideal weights, outlier fraction).\n\n## Key Interfaces & Usage\n- compute_rollout_correction_and_rejection_mask(): compute IS weights + rejection mask.\n- compute_rollout_correction_weights(): only compute truncated IS weights (for variance\n reduction, no outlier rejection).\n- compute_rollout_rejection_mask(): only filter outliers (for sample cleaning, no IS weight\n computation).\n- compute_offpolicy_metrics(): called by core functions to calculate off-policy diagnostics\n (KL/PPL/χ²) — no direct external calls needed.\n\n### Integration Notes\n- Used in `ray_trainer.py` via `compute_rollout_correction_and_add_to_batch()` (batch training pipeline).\n- Used in `dp_actor.py` for distributed worker computations (distributed training scenarios).\n- All functions support batch inputs and valid token masking (via `response_mask`).\n\n\n## References\n- \"When Speed Kills Stability: Demystifying RL Collapse from the Training-Inference Mismatch\": https://richardli.xyz/rl-collapse\n- Off-policy RL 
(theoretical basis for IS): https://fengyao.notion.site/off-policy-rl\n\"\"\"\n\nimport math\nfrom typing import Any, Optional\n\nimport torch\n\nimport verl.utils.torch_functional as verl_F\nfrom verl.protocol import DataProto\nfrom verl.trainer.config.algorithm import RolloutCorrectionConfig\nfrom verl.workers.config.actor import PolicyLossConfig\n\n# Safety bound to prevent numerical overflow/underflow when exponentiating\n# exp(20) ≈ 485 million (upper limit for stable weights), exp(-20) ≈ 2e-9 (lower limit)\nSAFETY_BOUND = 20.0\n\nSUPPORTED_ROLLOUT_RS_OPTIONS: set[str] = {\n \"token_k1\",\n \"token_k2\",\n \"token_k3\",\n \"seq_sum_k1\",\n \"seq_sum_k2\",\n \"seq_sum_k3\",\n \"seq_mean_k1\",\n \"seq_mean_k2\",\n \"seq_mean_k3\",\n \"seq_max_k2\",\n \"seq_max_k3\",\n}\nTOKEN_LEVEL_ROLLOUT_RS_OPTIONS: set[str] = {\"token_k1\", \"token_k2\", \"token_k3\"}\n\n\ndef _parse_rollout_rs_thresholds(\n options: list[str], threshold_spec: Optional[str | float]\n) -> dict[str, dict[str, Optional[float]]]:\n if threshold_spec is None:\n raise ValueError(\"rollout_rs_threshold must be provided for rejection sampling.\")\n\n if isinstance(threshold_spec, int | float):\n raw_specs: list[str] = [str(threshold_spec)]\n elif isinstance(threshold_spec, str):\n raw_specs = [part.strip() for part in threshold_spec.split(\",\") if part.strip()]\n else:\n raise TypeError(\"rollout_rs_threshold must be a string or numeric value specifying per-option thresholds.\")\n\n if not raw_specs:\n raise ValueError(\"rollout_rs_threshold must contain at least one threshold value.\")\n\n if len(raw_specs) not in (1, len(options)):\n raise ValueError(\n f\"rollout_rs_threshold expects either one threshold shared by all options or exactly \"\n f\"{len(options)} thresholds to match the provided rollout_rs options.\"\n )\n\n if len(raw_specs) == 1 and len(options) > 1:\n raw_specs = raw_specs * len(options)\n\n thresholds: dict[str, dict[str, Optional[float]]] = {}\n for option, spec in zip(options, 
def compute_rollout_rejection_mask(
    log_ratio: torch.Tensor,
    response_mask: torch.Tensor,
    rollout_rs: str = "token_k1",
    rollout_rs_threshold: Optional[str | float] = None,
) -> tuple[torch.Tensor, dict[str, float]]:
    """Compute hard trust region mask using divergence estimators.

    This function enforces a hard trust region constraint by masking tokens/sequences
    where the estimated divergence (between training and rollout policies) exceeds
    a threshold. Unlike PPO's soft clipping, this provides a hard boundary.

    Multiple rejection criteria can be supplied via a comma separated `rollout_rs` string.
    All requested options must pass for a token/sequence to remain valid.

    Supported KL divergence-based modes (ideal = 0.0 unless noted):
        - "token_k{1,2,3}": Token-level divergences.
        - "seq_sum_k{1,2,3}": Sum of token divergences per sequence.
        - "seq_mean_k{1,2,3}": Mean of token divergences per sequence.
        - "seq_max_k{2,3}": Maximum token divergence per sequence.

    Args:
        log_ratio: Log ratio of training policy probability to rollout policy probability,
            shape (batch_size, seq_length).
        response_mask: Binary mask for valid tokens (1=valid, 0=padding),
            shape (batch_size, seq_length).
        rollout_rs: Comma separated rejection sampling options (e.g. "token_k1,seq_sum_k3").
        rollout_rs_threshold: Threshold specification string (required). Provide one entry per
            rollout_rs option separated by commas. Each entry must be a positive number.
            For K1-style options (``*k1``), specify ``lower_upper`` (e.g. ``"0.1_1.2"``)
            to denote lower/upper ratio bounds; other options accept a single upper bound.

    Returns:
        Tuple containing:
            modified_response_mask: Response mask with trust region violations masked (0=rejected),
                shape (batch_size, seq_length).
            metrics: Dictionary of trust region metrics (all scalars).

    Raises:
        ValueError: On invalid rollout_rs / threshold specifications.
    """
    if rollout_rs is None or not isinstance(rollout_rs, str):
        raise ValueError("rollout_rs must be a non-empty string (comma separated for multiple options).")
    if rollout_rs_threshold is None:
        raise ValueError("rollout_rs_threshold must be provided for rejection sampling.")

    # Empty batch: nothing to mask, no metrics to report.
    if log_ratio.shape[0] == 0:
        return response_mask, {}

    # rollout_rs supports chained criteria via comma separation (e.g. "token_k1,seq_mean_k3").
    # Every listed option must pass; combined_mask aggregates them via logical AND.
    option_modes = [opt.strip() for opt in rollout_rs.split(",") if opt.strip()]
    if not option_modes:
        raise ValueError("rollout_rs must contain at least one valid option.")

    # Validate and de-duplicate while preserving the user's ordering.
    normalized_options: list[str] = []
    seen: set[str] = set()
    for opt in option_modes:
        if opt not in SUPPORTED_ROLLOUT_RS_OPTIONS:
            raise ValueError(
                f"Invalid rollout_rs option: {opt}. Must be one of {sorted(SUPPORTED_ROLLOUT_RS_OPTIONS)}."
            )
        if opt not in seen:
            normalized_options.append(opt)
            seen.add(opt)

    threshold_specs = _parse_rollout_rs_thresholds(normalized_options, rollout_rs_threshold)

    # Clamp the log ratio to +/-SAFETY_BOUND before exponentiating, then build the
    # three per-token divergence estimators shared by all options.
    log_ratio_safe: torch.Tensor = torch.clamp(log_ratio, min=-SAFETY_BOUND, max=SAFETY_BOUND)
    token_k1: torch.Tensor = -log_ratio_safe
    token_k2: torch.Tensor = 0.5 * log_ratio_safe**2
    token_k3: torch.Tensor = torch.exp(log_ratio_safe) - 1.0 - log_ratio_safe

    response_mask_bool: torch.Tensor = response_mask.bool()
    seq_valid_mask: torch.Tensor = response_mask.sum(dim=-1) > 0
    # combined_mask accumulates per-option passes; any failure flips tokens to 0.
    combined_mask: torch.Tensor = torch.ones_like(response_mask, dtype=log_ratio.dtype)
    metrics: dict[str, float] = {}

    # Per-sequence reductions over valid tokens only.
    def _sequence_sum(values: torch.Tensor) -> torch.Tensor:
        return verl_F.masked_sum(values, response_mask, axis=-1)

    def _sequence_mean(values: torch.Tensor) -> torch.Tensor:
        return verl_F.masked_mean(values, response_mask, axis=-1)

    def _sequence_max(values: torch.Tensor) -> torch.Tensor:
        # Masked max; sequences with no valid tokens report 0 instead of -inf.
        mask_bool = response_mask.bool()
        neg_inf = torch.tensor(float("-inf"), device=values.device, dtype=values.dtype)
        masked_values = values.masked_fill(~mask_bool, neg_inf)
        max_values = masked_values.max(dim=-1).values
        return torch.where(max_values == neg_inf, torch.zeros_like(max_values), max_values)

    for option_name in normalized_options:
        thresholds_info = threshold_specs[option_name]
        is_k1_option = option_name.endswith("k1")
        upper_value = thresholds_info["upper"]
        lower_value = thresholds_info["lower"]
        apply_lower_threshold = is_k1_option
        lower_log: Optional[float] = None
        upper_log: Optional[float] = None

        # K1 thresholds were specified as probability ratios; convert them to
        # log space to compare against the log-scale k1 statistic.
        if is_k1_option:
            if lower_value is None or upper_value is None:
                raise ValueError(
                    f"rollout_rs_threshold for option '{option_name}' must specify both lower and upper bounds."
                )
            lower_log = math.log(lower_value)
            upper_log = math.log(upper_value)
        else:
            if upper_value is None:
                raise ValueError(f"rollout_rs_threshold for option '{option_name}' must specify an upper bound.")

        level = "sequence" if option_name not in TOKEN_LEVEL_ROLLOUT_RS_OPTIONS else "token"

        per_token_stat: torch.Tensor
        per_sequence_stat: Optional[torch.Tensor] = None
        token_keep_bool: torch.Tensor

        # Compute the keep mask for this option. Sequence-level options expand
        # their per-sequence verdict back to token shape so all options can be
        # AND-ed together in combined_mask.
        if option_name == "token_k1":
            if lower_log is None:
                raise ValueError("Threshold specification for token_k1 must include lower and upper bounds.")
            per_token_stat = token_k1
            token_keep_bool = (per_token_stat >= lower_log) & (per_token_stat <= upper_log)
        elif option_name == "token_k2":
            per_token_stat = token_k2
            token_keep_bool = per_token_stat <= upper_value
        elif option_name == "token_k3":
            per_token_stat = token_k3
            token_keep_bool = per_token_stat <= upper_value
        elif option_name.startswith("seq_sum"):
            if option_name.endswith("k1"):
                if lower_log is None:
                    raise ValueError(
                        f"Threshold specification for option '{option_name}' must include lower and upper bounds."
                    )
                seq_stat = _sequence_sum(token_k1)
                seq_keep_bool_direct = (seq_stat >= lower_log) & (seq_stat <= upper_log)
            elif option_name.endswith("k2"):
                seq_stat = _sequence_sum(token_k2)
                seq_keep_bool_direct = seq_stat <= upper_value
            elif option_name.endswith("k3"):
                seq_stat = _sequence_sum(token_k3)
                seq_keep_bool_direct = seq_stat <= upper_value
            else:
                raise ValueError(f"Unsupported rollout_rs option: {option_name}.")
            per_sequence_stat = seq_stat
            token_keep_bool = seq_keep_bool_direct.unsqueeze(-1).expand_as(response_mask_bool)
            per_token_stat = seq_stat.unsqueeze(-1).expand_as(response_mask)
        elif option_name.startswith("seq_mean"):
            if option_name.endswith("k1"):
                if lower_log is None:
                    raise ValueError(
                        f"Threshold specification for option '{option_name}' must include lower and upper bounds."
                    )
                seq_stat = _sequence_mean(token_k1)
                seq_keep_bool_direct = (seq_stat >= lower_log) & (seq_stat <= upper_log)
            elif option_name.endswith("k2"):
                seq_stat = _sequence_mean(token_k2)
                seq_keep_bool_direct = seq_stat <= upper_value
            elif option_name.endswith("k3"):
                seq_stat = _sequence_mean(token_k3)
                seq_keep_bool_direct = seq_stat <= upper_value
            else:
                raise ValueError(f"Unsupported rollout_rs option: {option_name}.")
            per_sequence_stat = seq_stat
            token_keep_bool = seq_keep_bool_direct.unsqueeze(-1).expand_as(response_mask_bool)
            per_token_stat = seq_stat.unsqueeze(-1).expand_as(response_mask)
        elif option_name.startswith("seq_max"):
            if option_name.endswith("k2"):
                seq_stat = _sequence_max(token_k2)
                seq_keep_bool_direct = seq_stat <= upper_value
            elif option_name.endswith("k3"):
                seq_stat = _sequence_max(token_k3)
                seq_keep_bool_direct = seq_stat <= upper_value
            else:
                raise ValueError(f"Unsupported rollout_rs option: {option_name}.")
            per_sequence_stat = seq_stat
            token_keep_bool = seq_keep_bool_direct.unsqueeze(-1).expand_as(response_mask_bool)
            per_token_stat = seq_stat.unsqueeze(-1).expand_as(response_mask)
        else:
            raise ValueError(f"Unsupported rollout_rs option: {option_name}.")

        # For metrics, K1 thresholds are reported in log space to match the statistic.
        metrics_upper_threshold = upper_log if is_k1_option else upper_value
        metrics_lower_threshold = lower_log if (is_k1_option and lower_log is not None) else 0.0

        token_keep_mask = token_keep_bool.to(dtype=log_ratio.dtype)
        combined_mask = combined_mask * token_keep_mask
        # A sequence "keeps" only if none of its *valid* tokens were rejected.
        seq_keep_bool_tensor = (~((~token_keep_bool) & response_mask_bool)).all(dim=-1)

        option_metrics = compute_rs_metrics(
            option_name=option_name,
            rs_statistic=per_token_stat,
            response_mask=response_mask,
            seq_valid_mask=seq_valid_mask,
            level=level,
            per_sequence_values=per_sequence_stat,
            rollout_rs_threshold=metrics_upper_threshold,
            rollout_rs_threshold_lower=metrics_lower_threshold,
            apply_lower_threshold=apply_lower_threshold,
        )
        metrics.update(option_metrics)

        # Per-option rejection fractions (token-level and sequence-level).
        token_masked_fraction = verl_F.masked_mean(1 - token_keep_mask, response_mask).item()
        seq_valid_float = seq_valid_mask.float()
        if seq_valid_float.sum() > 0:
            seq_keep_float = seq_keep_bool_tensor.to(dtype=log_ratio.dtype)
            seq_masked_fraction = (((1.0 - seq_keep_float) * seq_valid_float).sum() / seq_valid_float.sum()).item()
        else:
            seq_masked_fraction = 0.0
        metrics[f"rollout_rs_{option_name}_masked_fraction"] = token_masked_fraction
        metrics[f"rollout_rs_{option_name}_seq_masked_fraction"] = seq_masked_fraction

    # Overall rejection fractions after AND-ing all options together.
    final_mask = combined_mask
    metrics["rollout_rs_masked_fraction"] = verl_F.masked_mean(1 - final_mask, response_mask).item()
    final_keep_bool = (final_mask > 0.5) & response_mask_bool
    seq_has_masked: torch.Tensor = (~final_keep_bool & response_mask_bool).any(dim=-1)
    metrics["rollout_rs_seq_masked_fraction"] = seq_has_masked.float().mean().item()

    modified_response_mask: torch.Tensor = (response_mask * final_mask).to(dtype=response_mask.dtype)
    return modified_response_mask, metrics
def compute_rs_metrics(
    option_name: str,
    rs_statistic: torch.Tensor,
    response_mask: torch.Tensor,
    seq_valid_mask: torch.Tensor,
    *,
    level: str,
    per_sequence_values: Optional[torch.Tensor],
    rollout_rs_threshold: float,
    rollout_rs_threshold_lower: float,
    apply_lower_threshold: bool,
) -> dict[str, float]:
    """Compute metrics for hard trust region enforcement (per-option).

    All metric keys are prefixed with ``rollout_rs_{option_name}``.

    Args:
        option_name: Original option string supplied by the user.
        rs_statistic: Trust region statistic (per token) used for thresholding.
        response_mask: Binary mask for valid tokens (1=valid, 0=padding).
        seq_valid_mask: Boolean mask indicating sequences with at least one valid token.
        level: "token" or "sequence" describing aggregation level.
        per_sequence_values: Optional per-sequence statistic (same semantics as rs_statistic).
        rollout_rs_threshold: Upper threshold.
        rollout_rs_threshold_lower: Lower threshold (ignored if ``apply_lower_threshold`` is False).
        apply_lower_threshold: Whether to mask/log metrics for values below the lower threshold.

    Returns:
        Dictionary of scalar metrics.

    Raises:
        ValueError: If ``response_mask`` has no valid token.
    """
    if not response_mask.any():
        raise ValueError("response_mask must contain at least one valid token (1).")

    metrics: dict[str, float] = {}
    prefix = f"rollout_rs_{option_name}"
    mask_bool: torch.Tensor = response_mask.bool()

    # Compute sequence statistics (used by several metrics).
    # Falls back to the masked per-token mean when no per-sequence stat is given.
    if per_sequence_values is not None:
        seq_values = per_sequence_values
    else:
        seq_values = verl_F.masked_mean(rs_statistic, response_mask, axis=-1)
    if seq_values.dim() > 1:
        seq_values = seq_values.squeeze(-1)
    seq_values_valid = seq_values[seq_valid_mask]

    # Mean of the statistic (always reported).
    metrics[f"{prefix}_mean"] = verl_F.masked_mean(rs_statistic, response_mask).item()

    # Max/min values. Sequence-level options summarize the per-sequence stat;
    # token-level options take the masked extrema of the raw statistic.
    if level == "sequence" and seq_values_valid.numel() > 0:
        metrics[f"{prefix}_max"] = seq_values_valid.max().item()
        metrics[f"{prefix}_min"] = seq_values_valid.min().item()
    else:
        metrics[f"{prefix}_max"] = rs_statistic.masked_fill(~mask_bool, float("-inf")).max().item()
        metrics[f"{prefix}_min"] = rs_statistic.masked_fill(~mask_bool, float("inf")).min().item()

    # Fractions above/below the thresholds.
    if level == "sequence" and seq_values_valid.numel() > 0:
        fraction_high = (seq_values_valid > rollout_rs_threshold).float().mean().item()
        fraction_low = (
            (seq_values_valid < rollout_rs_threshold_lower).float().mean().item() if apply_lower_threshold else 0.0
        )
    else:
        fraction_high = verl_F.masked_mean((rs_statistic > rollout_rs_threshold).float(), response_mask).item()
        fraction_low = (
            verl_F.masked_mean((rs_statistic < rollout_rs_threshold_lower).float(), response_mask).item()
            if apply_lower_threshold
            else 0.0
        )
    metrics[f"{prefix}_fraction_high"] = fraction_high
    metrics[f"{prefix}_fraction_low"] = fraction_low

    # Standard deviation (clamped for stability).
    # The statistic is clamped into the threshold window first so a few extreme
    # outliers do not dominate the reported spread.
    mask_count: torch.Tensor = response_mask.sum()
    if mask_count > 1:
        if apply_lower_threshold:
            clamp_min = rollout_rs_threshold_lower
        else:
            clamp_min = 0.0
        stat_for_std: torch.Tensor = rs_statistic.clamp(min=clamp_min, max=rollout_rs_threshold)
        mean_clamped: torch.Tensor = verl_F.masked_mean(stat_for_std, response_mask)
        # Var = E[x^2] - E[x]^2; clamped at 0 to absorb negative rounding error.
        stat_var: torch.Tensor = verl_F.masked_mean(stat_for_std.square(), response_mask) - mean_clamped.square()
        metrics[f"{prefix}_std"] = torch.sqrt(torch.clamp(stat_var, min=0.0)).item()
    else:
        metrics[f"{prefix}_std"] = 0.0

    # Sequence-level summary metrics.
    # NOTE(review): in the non-empty branch "_seq_fraction_low" is only emitted
    # when apply_lower_threshold is True, while the empty branch always emits
    # it — downstream consumers should tolerate the missing key.
    if seq_values_valid.numel() > 0:
        metrics[f"{prefix}_seq_mean"] = seq_values_valid.mean().item()
        metrics[f"{prefix}_seq_std"] = seq_values_valid.std().item() if seq_values_valid.numel() > 1 else 0.0
        metrics[f"{prefix}_seq_max"] = seq_values_valid.max().item()
        metrics[f"{prefix}_seq_min"] = seq_values_valid.min().item()
        metrics[f"{prefix}_seq_max_deviation"] = (seq_values_valid - 0.0).abs().max().item()
        metrics[f"{prefix}_seq_fraction_high"] = (seq_values_valid > rollout_rs_threshold).float().mean().item()
        if apply_lower_threshold:
            metrics[f"{prefix}_seq_fraction_low"] = (
                (seq_values_valid < rollout_rs_threshold_lower).float().mean().item()
            )
    else:
        metrics[f"{prefix}_seq_mean"] = 0.0
        metrics[f"{prefix}_seq_std"] = 0.0
        metrics[f"{prefix}_seq_max"] = 0.0
        metrics[f"{prefix}_seq_min"] = 0.0
        metrics[f"{prefix}_seq_max_deviation"] = 0.0
        metrics[f"{prefix}_seq_fraction_high"] = 0.0
        metrics[f"{prefix}_seq_fraction_low"] = 0.0

    return metrics
If batch_normalize=True, normalized to mean=1.0.\n metrics: Dictionary of IS weight metrics (all scalars), including:\n - rollout_is_mean/max/min: Statistic of weights (before batch normalization)\n - rollout_is_eff_sample_size: Effective sample size (ESS)\n - rollout_is_seq_*: Sequence-level weight statistics\n - rollout_is_batch_norm_factor: Normalization factor (only if batch_normalize=True)\n \"\"\"\n # Validate input parameters\n valid_is_levels = {\"token\", \"sequence\"}\n if rollout_is not in valid_is_levels:\n raise ValueError(f\"Invalid rollout_is: {rollout_is}. Must be one of {valid_is_levels}.\")\n if rollout_is_threshold <= 0:\n raise ValueError(f\"rollout_is_threshold must be positive, got {rollout_is_threshold}.\")\n\n # Compute IS weights from log ratio (handles different aggregation levels)\n if rollout_is == \"token\":\n # Per-token IS weight: exp(log(π_train/π_rollout)) with safety clamp\n log_ratio_for_metrics: torch.Tensor = log_ratio\n log_ratio_safe: torch.Tensor = torch.clamp(log_ratio, min=-SAFETY_BOUND, max=SAFETY_BOUND)\n rollout_is_weights: torch.Tensor = torch.exp(log_ratio_safe)\n\n elif rollout_is == \"sequence\":\n # Sequence-level IS weight: product of token ratios (exp(sum(log ratios)))\n log_ratio_sum: torch.Tensor = verl_F.masked_sum(log_ratio, response_mask, axis=-1).unsqueeze(\n -1\n ) # Shape: (batch_size, 1)\n log_ratio_for_metrics = log_ratio_sum\n\n log_ratio_sum_safe: torch.Tensor = torch.clamp(log_ratio_sum, min=-SAFETY_BOUND, max=SAFETY_BOUND)\n rollout_is_weights = torch.exp(log_ratio_sum_safe).expand_as(log_ratio) # Broadcast to sequence length\n\n else:\n raise ValueError(f\"Unsupported rollout_is: {rollout_is}\")\n\n # Zero out weights for padding tokens using response mask\n rollout_is_weights = rollout_is_weights * response_mask\n\n # Compute IS weight metrics (BEFORE truncation to get accurate fraction_high/low)\n metrics: dict[str, float] = compute_is_metrics(\n rollout_is_weights=rollout_is_weights,\n 
log_ratio_for_metrics=log_ratio_for_metrics,\n response_mask=response_mask,\n rollout_is=rollout_is,\n rollout_is_threshold=rollout_is_threshold,\n )\n\n # Truncate extreme weights (TIS: Truncated Importance Sampling)\n rollout_is_weights = rollout_is_weights.clamp(max=rollout_is_threshold)\n\n # Detach weights to prevent gradient flow (mathematically required by IS theory)\n # IS weights change the measure, not the objective. See §3.2.2 in docs/algo/rollout_corr_math.md\n rollout_is_weights = rollout_is_weights.detach()\n\n # Apply batch normalization if requested\n if rollout_is_batch_normalize:\n # Compute mean based on aggregation level\n mask_float = response_mask.to(dtype=rollout_is_weights.dtype)\n if rollout_is == \"token\":\n # Token-level: normalize over all token weights\n if torch.distributed.is_available() and torch.distributed.is_initialized():\n weights_mean = verl_F.distributed_masked_mean(rollout_is_weights, mask_float)\n else:\n weights_mean = verl_F.masked_mean(rollout_is_weights, response_mask)\n elif rollout_is == \"sequence\":\n # Sequence-level: normalize over sequence weights (one weight per sequence)\n # For each sequence, compute mean over valid tokens (they all have the same weight)\n # then average across sequences\n seq_weights = verl_F.masked_mean(rollout_is_weights, response_mask, axis=-1) # (batch_size,)\n seq_mask = (response_mask.sum(dim=-1) > 0).to(dtype=rollout_is_weights.dtype)\n if torch.distributed.is_available() and torch.distributed.is_initialized():\n weights_mean = verl_F.distributed_masked_mean(seq_weights, seq_mask)\n else:\n weights_mean = (seq_weights * seq_mask).sum() / seq_mask.sum().clamp_min(1e-8)\n else:\n raise ValueError(f\"Unsupported rollout_is: {rollout_is}\")\n\n # Normalize to mean=1.0 (avoid division by zero)\n if weights_mean > 1e-8:\n rollout_is_weights = rollout_is_weights / weights_mean\n metrics[\"rollout_is_batch_norm_factor\"] = weights_mean.item()\n else:\n metrics[\"rollout_is_batch_norm_factor\"] 
= 1.0\n\n return rollout_is_weights, metrics\n\n\ndef compute_is_metrics(\n rollout_is_weights: torch.Tensor,\n log_ratio_for_metrics: torch.Tensor,\n response_mask: torch.Tensor,\n rollout_is: str,\n rollout_is_threshold: float,\n) -> dict[str, float]:\n \"\"\"Compute comprehensive metrics for truncated importance sampling weights.\n\n This function calculates statistics for truncated IS weights (TIS), using log-space\n for accurate threshold checks and clamped weights for stable mean/std calculations.\n\n Args:\n rollout_is_weights: Truncated IS weights (π_train / π_rollout),\n shape (batch_size, seq_length).\n log_ratio_for_metrics: Log ratio of training to rollout probabilities (unclamped),\n shape varies by aggregation level.\n response_mask: Binary mask for valid tokens (1=valid, 0=padding),\n shape (batch_size, seq_length).\n rollout_is: IS weight aggregation level (matches compute_rollout_correction_weights).\n rollout_is_threshold: Upper threshold for truncated IS weights.\n\n Returns:\n Dictionary of IS weight metrics (all scalars).\n \"\"\"\n if not response_mask.any():\n raise ValueError(\"response_mask must contain at least one valid token (1).\")\n\n metrics: dict[str, float] = {}\n device: torch.device = rollout_is_weights.device\n # Default lower threshold (reciprocal of upper threshold)\n rollout_is_threshold_lower: float = 1.0 / rollout_is_threshold\n\n # Precompute log thresholds for accurate checks\n log_threshold_upper: torch.Tensor = torch.log(torch.tensor(rollout_is_threshold, device=device))\n log_threshold_lower: torch.Tensor = torch.log(torch.tensor(rollout_is_threshold_lower, device=device))\n\n # Compute metrics based on aggregation level\n if rollout_is == \"sequence\":\n # Sequence-level aggregation: use log-space for unclamped stats\n log_max: torch.Tensor = log_ratio_for_metrics.max()\n log_min: torch.Tensor = log_ratio_for_metrics.min()\n metrics[\"rollout_is_max\"] = torch.exp(torch.clamp(log_max, max=SAFETY_BOUND)).item()\n 
metrics[\"rollout_is_min\"] = torch.exp(log_min).item()\n\n # Mean uses truncated weights to avoid overflow\n metrics[\"rollout_is_mean\"] = verl_F.masked_mean(rollout_is_weights, response_mask).item()\n\n # Fraction of weights exceeding thresholds (log-space for accuracy)\n exceeds_upper: torch.Tensor = log_ratio_for_metrics > log_threshold_upper\n below_lower: torch.Tensor = log_ratio_for_metrics < log_threshold_lower\n metrics[\"rollout_is_ratio_fraction_high\"] = exceeds_upper.float().mean().item()\n metrics[\"rollout_is_ratio_fraction_low\"] = below_lower.float().mean().item()\n\n else: # token-level\n # Token-level aggregation: compute directly from truncated weights\n metrics[\"rollout_is_mean\"] = verl_F.masked_mean(rollout_is_weights, response_mask).item()\n\n # Fraction of tokens exceeding thresholds\n rollout_is_above_threshold: torch.Tensor = rollout_is_weights > rollout_is_threshold\n rollout_is_below_threshold: torch.Tensor = rollout_is_weights < rollout_is_threshold_lower\n metrics[\"rollout_is_ratio_fraction_high\"] = verl_F.masked_mean(\n rollout_is_above_threshold.float(), response_mask\n ).item()\n metrics[\"rollout_is_ratio_fraction_low\"] = verl_F.masked_mean(\n rollout_is_below_threshold.float(), response_mask\n ).item()\n\n # Max/min (mask out padding tokens)\n mask_bool: torch.Tensor = response_mask.bool()\n metrics[\"rollout_is_max\"] = rollout_is_weights.masked_fill(~mask_bool, float(\"-inf\")).max().item()\n metrics[\"rollout_is_min\"] = rollout_is_weights.masked_fill(~mask_bool, float(\"inf\")).min().item()\n\n # Compute standard deviation (using clamped weights for stability)\n mask_count: torch.Tensor = response_mask.sum()\n if mask_count > 1:\n weights_for_std: torch.Tensor = rollout_is_weights.clamp(\n min=rollout_is_threshold_lower, max=rollout_is_threshold\n )\n mean_clamped: torch.Tensor = verl_F.masked_mean(weights_for_std, response_mask)\n rollout_is_var: torch.Tensor = (\n verl_F.masked_mean(weights_for_std.square(), 
response_mask) - mean_clamped.square()\n )\n metrics[\"rollout_is_std\"] = torch.sqrt(torch.clamp(rollout_is_var, min=0.0)).item()\n else:\n metrics[\"rollout_is_std\"] = 0.0\n\n # Compute Effective Sample Size (ESS) for truncated weights\n weights_for_ess: torch.Tensor = rollout_is_weights.clamp(min=rollout_is_threshold_lower, max=rollout_is_threshold)\n mean_for_ess: torch.Tensor = verl_F.masked_mean(weights_for_ess, response_mask)\n is_weights_normalized: torch.Tensor = weights_for_ess / (mean_for_ess + 1e-8) # Avoid division by zero\n metrics[\"rollout_is_eff_sample_size\"] = (\n 1.0 / verl_F.masked_mean(is_weights_normalized.square(), response_mask).item()\n )\n\n # Add sequence-level metrics if weights have batch dimension\n if rollout_is_weights.dim() > 1:\n seq_mean_weights: torch.Tensor = verl_F.masked_mean(rollout_is_weights, response_mask, axis=-1)\n\n metrics[\"rollout_is_seq_mean\"] = seq_mean_weights.mean().item()\n metrics[\"rollout_is_seq_std\"] = seq_mean_weights.std().item() if seq_mean_weights.numel() > 1 else 0.0\n metrics[\"rollout_is_seq_max\"] = seq_mean_weights.max().item()\n metrics[\"rollout_is_seq_min\"] = seq_mean_weights.min().item()\n\n # Sequence deviation from ideal weight (1.0)\n seq_deviation: torch.Tensor = (seq_mean_weights - 1.0).abs()\n metrics[\"rollout_is_seq_max_deviation\"] = seq_deviation.max().item()\n\n # Fraction of sequences with extreme weights\n metrics[\"rollout_is_seq_fraction_high\"] = (seq_mean_weights > rollout_is_threshold).float().mean().item()\n metrics[\"rollout_is_seq_fraction_low\"] = (seq_mean_weights < rollout_is_threshold_lower).float().mean().item()\n\n return metrics\n\n\ndef compute_rollout_correction_and_rejection_mask(\n old_log_prob: torch.Tensor,\n rollout_log_prob: torch.Tensor,\n response_mask: torch.Tensor,\n rollout_is: Optional[str] = None,\n rollout_is_threshold: Optional[float] = 2.0,\n rollout_is_batch_normalize: bool = False,\n rollout_rs: Optional[str] = None,\n rollout_rs_threshold: 
Optional[str | float] = None,\n) -> tuple[Optional[DataProto], torch.Tensor, dict[str, float]]:\n \"\"\"Unified interface for computing IS weights and rejection masks.\n\n This function combines IS weight calculation (truncated) and rejection sampling (masked)\n into a single pipeline.\n\n Key design:\n - Separation of IS weights (for variance reduction) and rejection masks (for sample filtering)\n - Comprehensive metrics tracking for mismatch diagnosis\n\n Args:\n old_log_prob: Log probabilities from the training policy (e.g., FSDP FP32),\n shape (batch_size, seq_length).\n rollout_log_prob: Log probabilities from the rollout policy (e.g., vLLM BF16),\n shape (batch_size, seq_length).\n response_mask: Binary mask for valid tokens (1=valid, 0=padding),\n shape (batch_size, seq_length).\n rollout_is: IS weight aggregation level (see compute_rollout_correction_weights for options).\n Set to None to disable IS weight computation.\n rollout_is_threshold: Upper threshold for truncated IS weights (used if rollout_is is set),\n default 2.0.\n rollout_rs: Rejection sampling aggregation modes as a comma separated string\n (see compute_rollout_rejection_mask for the full list). Set to None to disable\n rejection sampling.\n rollout_rs_threshold: Threshold specification string (see compute_rollout_rejection_mask for details).\n Provide one threshold per option (comma separated). 
For K1-style options, specify\n ``lower_upper`` to denote the lower/upper ratio bounds.\n rollout_is_batch_normalize: Whether to normalize IS weights to have mean=1.0 per batch.\n Default: False.\n\n Returns:\n Tuple containing:\n rollout_is_weights_proto: DataProto with IS weights (None if rollout_is is None),\n key \"rollout_is_weights\", shape (batch_size, seq_length).\n modified_response_mask: Response mask with rejection sampling applied,\n shape (batch_size, seq_length).\n metrics: Dictionary of all metrics (prefixed with \"rollout_corr/\"), including:\n - IS weight statistics\n - Rejection sampling rates\n - Policy mismatch metrics (KL, PPL, etc.)\n \"\"\"\n # Validate input masks\n if not response_mask.any():\n raise ValueError(\"response_mask must contain at least one valid token (1).\")\n if old_log_prob.shape != rollout_log_prob.shape:\n raise ValueError(\n f\"old_log_prob shape {old_log_prob.shape} does not match rollout_log_prob shape {rollout_log_prob.shape}.\"\n )\n if old_log_prob.shape != response_mask.shape:\n raise ValueError(\n f\"log_prob shape {old_log_prob.shape} does not match response_mask shape {response_mask.shape}.\"\n )\n\n # Step 1: Compute log ratio (log(π_train / π_rollout))\n log_ratio: torch.Tensor = old_log_prob - rollout_log_prob\n metrics: dict[str, float] = {}\n\n # Step 2: Compute IS weights (if enabled)\n rollout_is_weights: Optional[torch.Tensor] = None\n if rollout_is is not None and rollout_is_threshold is not None:\n rollout_is_weights, is_metrics = compute_rollout_correction_weights(\n log_ratio=log_ratio,\n response_mask=response_mask,\n rollout_is=rollout_is,\n rollout_is_threshold=rollout_is_threshold,\n rollout_is_batch_normalize=rollout_is_batch_normalize,\n )\n metrics.update(is_metrics)\n\n # Step 3: Compute rejection mask (if enabled)\n modified_response_mask: torch.Tensor = response_mask.clone()\n if rollout_rs is not None:\n if rollout_rs_threshold is None:\n raise ValueError(\n \"rollout_rs_threshold must be 
explicitly provided when rollout_rs is enabled. \"\n \"Set rollout_rs_threshold to the desired threshold value.\"\n )\n modified_response_mask, rs_metrics = compute_rollout_rejection_mask(\n log_ratio=log_ratio,\n response_mask=response_mask,\n rollout_rs=rollout_rs,\n rollout_rs_threshold=rollout_rs_threshold,\n )\n metrics.update(rs_metrics)\n\n # Step 4: Compute off-policy metrics (KL, PPL, χ², etc.)\n offpolicy_metrics: dict[str, float] = compute_offpolicy_metrics(\n old_log_prob=old_log_prob,\n rollout_log_prob=rollout_log_prob,\n response_mask=response_mask,\n )\n metrics.update(offpolicy_metrics)\n\n # Step 6: Add \"rollout_corr/\" prefix to all metrics for logging consistency\n metrics_scalar: dict[str, float] = {}\n for key, value in metrics.items():\n if isinstance(value, torch.Tensor):\n metrics_scalar[f\"rollout_corr/{key}\"] = value.item()\n else:\n metrics_scalar[f\"rollout_corr/{key}\"] = value\n\n # Step 7: Wrap IS weights in DataProto for consistency with API\n rollout_is_weights_proto: Optional[DataProto] = None\n if rollout_is_weights is not None:\n rollout_is_weights_proto = DataProto.from_dict(tensors={\"rollout_is_weights\": rollout_is_weights})\n\n return rollout_is_weights_proto, modified_response_mask, metrics_scalar\n\n\ndef compute_offpolicy_metrics(\n old_log_prob: torch.Tensor,\n rollout_log_prob: Optional[torch.Tensor],\n response_mask: torch.Tensor,\n) -> dict[str, Any]:\n \"\"\"Compute off-policy diagnostic metrics (helper function).\n\n This helper function operates on raw tensors and is used internally by:\n - compute_rollout_correction_and_rejection_mask() in this module (automatically included)\n - Tests (test_rollout_corr.py, test_rollout_corr_integration.py)\n\n These metrics help diagnose the off-policy gap between rollout and training policies,\n which can arise from:\n - Policy mismatch (e.g., vLLM BF16 vs FSDP FP32)\n - Model staleness (training on trajectories from older checkpoints)\n - General distribution shifts\n\n Key 
metrics:\n - kl: Direct KL divergence estimator KL(π_rollout || π_training)\n - k3_kl: K3 KL estimator for stability (more stable for small KL)\n - training_ppl: Perplexity of training policy\n - rollout_ppl: Perplexity of rollout policy\n - log_ppl_diff: Difference in log perplexities\n - ppl_ratio: Ratio of training PPL to rollout PPL\n - chi2_token: Token-level χ² divergence E[ρ²] - 1\n - chi2_seq: Sequence-level χ² divergence E[(∏ρ_t)²] - 1\n\n Args:\n old_log_prob: Log probabilities from training policy, shape (batch_size, seq_length)\n rollout_log_prob: Log probabilities from rollout policy, shape (batch_size, seq_length)\n response_mask: Mask for valid tokens, shape (batch_size, seq_length)\n\n Returns:\n Dictionary of off-policy metrics (without prefix)\n \"\"\"\n # Validate that we have at least one valid token\n assert response_mask.any(), \"Expected at least one valid token in response_mask\"\n\n metrics = {}\n\n # 1. Training policy perplexity (always available)\n # Formula: exp(-1/|T| * Σ log π_training(y_t|y_ tuple[DataProto, dict]:\n \"\"\"Compute rollout correction weights and apply rejection sampling.\n\n Computes importance sampling weights to correct for off-policy issues between\n rollout and training policies. 
Applies rejection sampling by modifying response_mask.\n Always updates response_mask; conditionally adds IS weights.\n\n Key behavior:\n - response_mask: ALWAYS updated with rejection (RS exclusions removed from training)\n - rollout_is_weights: Added to batch ONLY if rollout_is parameter is set\n\n This separation ensures:\n - Rejection works independently of IS weight application\n - Metrics can be monitored before enabling IS weight correction\n\n Args:\n batch: DataProto with old_log_probs, rollout_log_probs, response_mask\n\n Returns:\n Tuple of (updated_batch, metrics):\n updated_batch: Batch with modified response_mask (always) and rollout_is_weights (if enabled)\n metrics: Dict of IS and off-policy metrics, all with \"rollout_corr/\" prefix\n\n Note:\n The implementation is copied from szrlee .\n \"\"\"\n # Get new API parameters directly from config\n rollout_is = rollout_corr_config.get(\"rollout_is\", None)\n rollout_is_threshold = rollout_corr_config.get(\"rollout_is_threshold\", 2.0)\n rollout_is_batch_normalize = rollout_corr_config.get(\"rollout_is_batch_normalize\", False)\n rollout_rs = rollout_corr_config.get(\"rollout_rs\", None)\n rollout_rs_threshold = rollout_corr_config.get(\"rollout_rs_threshold\", None)\n\n # Compute IS weights and get modified response_mask\n rollout_is_weights, modified_response_mask, rollout_corr_metrics = compute_rollout_correction_and_rejection_mask(\n old_log_prob=batch.batch[\"old_log_probs\"],\n rollout_log_prob=batch.batch[\"rollout_log_probs\"],\n response_mask=batch.batch[\"response_mask\"],\n rollout_is=rollout_is,\n rollout_is_threshold=rollout_is_threshold,\n rollout_is_batch_normalize=rollout_is_batch_normalize,\n rollout_rs=rollout_rs,\n rollout_rs_threshold=rollout_rs_threshold,\n )\n\n # ALWAYS update response_mask with rejection applied\n batch.batch[\"response_mask\"] = modified_response_mask\n\n # Add IS weights to batch if computed\n if rollout_is_weights is not None:\n batch = 
batch.union(rollout_is_weights)\n\n return batch, rollout_corr_metrics\n\n\ndef compute_rollout_corr_metrics_from_logprobs(\n log_prob: torch.Tensor,\n rollout_log_prob: torch.Tensor,\n response_mask: torch.Tensor,\n) -> dict[str, float]:\n \"\"\"Compute rollout correction metrics from log probabilities during training.\n\n This function is used in the actor to compute metrics using the CURRENT policy\n log probabilities versus rollout log probabilities, allowing tracking of the\n off-policy gap as training progresses.\n\n It computes off-policy diagnostic metrics (KL, PPL, χ²) from log probabilities.\n\n Args:\n log_prob: Current policy log probabilities, shape (batch_size, seq_length)\n rollout_log_prob: Rollout policy log probabilities, shape (batch_size, seq_length)\n response_mask: Valid token mask, shape (batch_size, seq_length)\n\n Returns:\n Dictionary of metrics with \"rollout_corr/\" prefix\n \"\"\"\n # Compute off-policy diagnostic metrics\n offpolicy_metrics = compute_offpolicy_metrics(\n old_log_prob=log_prob,\n rollout_log_prob=rollout_log_prob,\n response_mask=response_mask,\n )\n\n # Add rollout_corr/ prefix to all metrics\n metrics_with_prefix = {}\n for key, value in offpolicy_metrics.items():\n if isinstance(value, torch.Tensor):\n metrics_with_prefix[f\"rollout_corr/{key}\"] = value.item()\n else:\n metrics_with_prefix[f\"rollout_corr/{key}\"] = value\n\n return metrics_with_prefix\n\n\ndef apply_bypass_mode(\n batch: DataProto,\n rollout_corr_config: Optional[RolloutCorrectionConfig] = None,\n policy_loss_config: PolicyLossConfig = None,\n) -> None:\n \"\"\"\n Setup bypass mode: Use rollout_log_probs as old_log_probs.\n\n Bypass mode skips expensive actor forward pass for old_log_prob computation\n by setting old_log_probs = rollout_log_probs (2 policies instead of 3).\n\n Uses compute_policy_loss_bypass_mode() which supports:\n - loss_type=\"ppo_clip\" (default): PPO clipped objective (IS handled by ratio)\n - loss_type=\"reinforce\": 
REINFORCE with explicit IS weights\n\n Both loss types benefit from rejection sampling (RS) which masks out-of-distribution samples.\n\n Note:\n The implementation is copied from szrlee .\n \"\"\"\n from omegaconf import open_dict\n\n if \"rollout_log_probs\" not in batch.batch:\n raise ValueError(\n \"bypass_mode=True requires rollout_log_probs in batch. \"\n \"Ensure rollout worker is configured to calculate_log_probs=true.\"\n )\n\n # Use rollout log probs as old log probs (zero-cost substitution)\n batch.batch[\"old_log_probs\"] = batch.batch[\"rollout_log_probs\"]\n\n with open_dict(policy_loss_config):\n # Pass rollout_correction config to actor for loss computation and metrics\n policy_loss_config[\"rollout_correction\"] = rollout_corr_config\n # Always use bypass_mode loss function which handles both loss_types\n policy_loss_config[\"loss_mode\"] = \"bypass_mode\"\n"} {"file_name": "verl__trainer__ppo__utils.py", "text": "# Copyright 2024 Bytedance Ltd. and/or its affiliates\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport warnings\nfrom enum import Enum\n\nfrom omegaconf import DictConfig\n\nfrom verl.single_controller.base import Worker\nfrom verl.trainer.ppo.core_algos import AdvantageEstimator\n\nWorkerType = type[Worker]\n\n\nclass Role(Enum):\n \"\"\"\n To create more roles dynamically, you can subclass Role and add new members\n \"\"\"\n\n Actor = 0\n Rollout = 1\n ActorRollout = 2\n Critic = 3\n RefPolicy = 4\n RewardModel = 5\n 
ActorRolloutRef = 6\n Env = 7\n\n def __str__(self):\n return self._get_role_string()\n\n def _get_role_string(self):\n role_mapping = {\n Role.Actor: \"actor\",\n Role.Rollout: \"rollout\",\n Role.ActorRollout: \"actor_rollout\",\n Role.Critic: \"critic\",\n Role.RefPolicy: \"ref\",\n Role.RewardModel: \"rm\",\n Role.ActorRolloutRef: \"actor_rollout_ref\",\n }\n return role_mapping.get(self, self.name.lower())\n\n @classmethod\n def from_string(cls, name: str):\n string_mapping = {\n \"actor\": cls.Actor,\n \"rollout\": cls.Rollout,\n \"actor_rollout\": cls.ActorRollout,\n \"critic\": cls.Critic,\n \"ref\": cls.RefPolicy,\n \"rm\": cls.RewardModel,\n \"actor_rollout_ref\": cls.ActorRolloutRef,\n }\n role = string_mapping.get(name.lower())\n if role is None:\n raise ValueError(f\"No Role found for string: {name}\")\n return role\n\n\ndef need_reference_policy(\n config: DictConfig,\n) -> bool:\n \"\"\"Given the config, do we need ref policy.\"\"\"\n return config.algorithm.use_kl_in_reward or config.actor_rollout_ref.actor.use_kl_loss\n\n\ndef need_reward_model(\n config: DictConfig,\n) -> bool:\n \"\"\"Given the config, do we need reward model.\"\"\"\n return config.reward.reward_model.enable\n\n\ndef need_critic(config: DictConfig) -> bool:\n \"\"\"Given a config, do we need critic.\"\"\"\n if config.critic.enable is not None:\n return bool(config.critic.enable)\n elif config.algorithm.adv_estimator == AdvantageEstimator.GAE:\n return True\n else:\n warnings.warn(\n \"Disabled critic as algorithm.adv_estimator != gae. If it is not intended, please set critic.enable=True\",\n stacklevel=2,\n )\n return False\n"} {"file_name": "verl__trainer__sft_trainer_ray.py", "text": "# Copyright 2024 Bytedance Ltd. 
and/or its affiliates\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport os\nfrom functools import partial\n\nfrom tensordict.tensorclass import NonTensorData\n\nos.environ[\"NCCL_DEBUG\"] = \"WARN\"\nos.environ[\"TOKENIZERS_PARALLELISM\"] = \"true\"\n\nimport logging\n\nimport hydra\nimport ray\nimport torch\nimport torch.distributed\nfrom omegaconf import OmegaConf\nfrom torch.utils.data import DistributedSampler\nfrom torchdata.stateful_dataloader import StatefulDataLoader\nfrom tqdm import tqdm\n\nfrom verl.utils import tensordict_utils as tu\nfrom verl.utils.checkpoint import CheckpointHandler, OrchestrationMode\nfrom verl.utils.dataset.dataset_utils import SFTTensorCollator\nfrom verl.utils.dataset.multiturn_sft_dataset import MultiTurnSFTDataset\nfrom verl.utils.device import auto_set_device, get_device_name\nfrom verl.utils.logger import log_with_rank\nfrom verl.utils.tracking import Tracking\nfrom verl.workers.engine_workers import TrainingWorker\n\nlogger = logging.getLogger(__file__)\nlogger.setLevel(os.getenv(\"VERL_SFT_LOGGING_LEVEL\", \"WARN\"))\n\n\nclass SFTTrainer:\n def __init__(\n self,\n config,\n ):\n self.config = config\n\n self._build_config()\n self._build_dataset()\n self._build_dataloader()\n\n self._build_engine()\n self._build_ckpt_handler()\n\n # Initialize resume-related variables\n self.resume_global_step = self.ckpt_handler.load_checkpoint()\n\n self.device_name = self.config.trainer.device\n\n print(self.config)\n\n def 
_build_ckpt_handler(self):\n resume_mode = getattr(self.config.trainer, \"resume_mode\", \"auto\")\n resume_from_path = getattr(self.config.trainer, \"resume_from_path\", None)\n max_ckpt_to_keep = getattr(self.config.trainer, \"max_ckpt_to_keep\", None)\n default_hdfs_dir = getattr(self.config.trainer, \"default_hdfs_dir\", None)\n\n self.ckpt_handler = CheckpointHandler(\n engine=self.training_client,\n train_dataloader=self.train_dataloader,\n default_local_dir=self.config.trainer.default_local_dir,\n max_ckpt_to_keep=max_ckpt_to_keep,\n default_hdfs_dir=default_hdfs_dir,\n resume_mode=resume_mode,\n resume_from_path=resume_from_path,\n mode=OrchestrationMode.RAY,\n )\n\n def _build_config(self):\n from verl.utils.config import omega_conf_to_dataclass\n\n self.model_config = omega_conf_to_dataclass(self.config.model)\n self.engine_config = omega_conf_to_dataclass(self.config.engine)\n self.optimizer_config = omega_conf_to_dataclass(self.config.optim)\n self.checkpoint_config = omega_conf_to_dataclass(self.config.checkpoint)\n self.profiler_config = omega_conf_to_dataclass(self.config.profiler)\n\n # check profile interval\n self.profiler_interval = self.config.trainer.profile_interval\n self._validate_profiler_interval()\n\n def _validate_profiler_interval(self):\n assert len(self.profiler_interval) == 2\n self.start_profile_step = self.profiler_interval[0]\n self.end_profile_step = self.profiler_interval[1]\n assert self.end_profile_step >= self.start_profile_step\n if self.start_profile_step < 0:\n assert self.end_profile_step < 0\n\n def _build_engine(self):\n from verl.workers.engine_workers import TrainingWorkerConfig\n from verl.workers.utils.losses import sft_loss\n\n self.loss_fn = partial(sft_loss, config=None)\n\n config = TrainingWorkerConfig(\n model_type=\"language_model\",\n model_config=self.model_config,\n engine_config=self.engine_config,\n optimizer_config=self.optimizer_config,\n checkpoint_config=self.checkpoint_config,\n 
profiler_config=self.profiler_config,\n )\n\n # create resource pool and worker group\n from verl.single_controller.ray import RayClassWithInitArgs, RayResourcePool, RayWorkerGroup\n\n n_gpus_per_node = self.config.trainer.n_gpus_per_node\n nnodes = self.config.trainer.nnodes\n self.resource_pool = RayResourcePool(process_on_nodes=[n_gpus_per_node] * nnodes)\n ray_cls_with_init = RayClassWithInitArgs(ray.remote(TrainingWorker), config=config)\n self.training_client = RayWorkerGroup(\n resource_pool=self.resource_pool,\n ray_cls_with_init=ray_cls_with_init,\n device_name=self.config.trainer.device,\n )\n self.training_client.set_loss_fn(loss_fn=self.loss_fn)\n self.training_client.reset()\n\n def _build_dataset(self):\n config = self.config\n tokenizer = self.model_config.tokenizer\n processor = self.model_config.processor\n train_dataset = create_sft_dataset(\n config.data.train_files,\n config.data,\n tokenizer,\n processor=processor,\n max_samples=config.data.get(\"train_max_samples\", -1),\n )\n if config.data.val_files:\n val_dataset = create_sft_dataset(\n config.data.val_files,\n config.data,\n tokenizer,\n processor=processor,\n max_samples=config.data.get(\"val_max_samples\", -1),\n )\n else:\n val_dataset = None\n\n self.train_dataset, self.val_dataset = train_dataset, val_dataset\n\n def _build_dataloader(self):\n # build dataset\n config = self.config\n # build dataloader\n # Use data parallel rank and size instead of global rank and world size\n\n # Set pin_memory_device when pin_memory is enabled.\n device_name = get_device_name()\n\n dp_rank = 0\n dp_size = 1\n\n self.train_sampler = DistributedSampler(\n self.train_dataset, shuffle=True, num_replicas=dp_size, rank=dp_rank, drop_last=True\n )\n\n self.global_batch_size = config.data.train_batch_size\n self.train_batch_size_per_dp = self.global_batch_size // dp_size\n self.collate_fn = SFTTensorCollator(config.data.pad_mode)\n\n self.train_dataloader = StatefulDataLoader(\n 
def _get_batch_seqlens(self, data):
    """Return per-sample sequence lengths for one batch.

    For nested ``input_ids`` the lengths are derived from the nested
    tensor's offsets; for dense batches they are the row-wise sums of
    ``attention_mask`` (i.e. the count of non-padding positions).
    """
    input_ids = data["input_ids"]
    if input_ids.is_nested:
        return input_ids.offsets().diff()
    return data["attention_mask"].sum(dim=-1)
None\n\n log_with_rank(\n f\"Total training steps: {self.total_training_steps},\",\n logger=logger,\n rank=0,\n log_only_rank_0=True,\n )\n\n # With StatefulDataLoader, we don't need to manually calculate epochs and steps\n # The dataloader will automatically resume from where it left off\n if global_step > 0:\n log_with_rank(\n f\"StatefulDataLoader will automatically resume from global step: {global_step}\",\n logger=logger,\n rank=0,\n log_only_rank_0=True,\n )\n\n # Calculate which epoch we're starting from for sampler.set_epoch()\n start_epoch = global_step // self.steps_per_epoch\n\n meta_info = {\n \"use_remove_padding\": self.config.model.use_remove_padding,\n \"use_dynamic_bsz\": self.config.data.use_dynamic_bsz,\n \"max_token_len_per_gpu\": self.config.data.max_token_len_per_gpu,\n \"micro_batch_size_per_gpu\": self.config.data.micro_batch_size_per_gpu,\n \"temperature\": 1.0,\n \"global_batch_size\": self.global_batch_size,\n \"pad_mode\": self.config.data.pad_mode,\n \"pad_token_id\": self.model_config.tokenizer.pad_token_id,\n }\n\n train_time = 0\n total_tokens = 0\n for epoch in range(start_epoch, self.config.trainer.total_epochs):\n self.train_sampler.set_epoch(epoch=epoch)\n\n for step_in_epoch, data in enumerate(\n tqdm(\n self.train_dataloader,\n initial=global_step % self.steps_per_epoch if epoch == start_epoch else 0,\n total=self.steps_per_epoch,\n desc=f\"Epoch {epoch + 1}/{self.config.trainer.total_epochs}\",\n )\n ):\n global_step += 1\n # construct tensordict\n data = tu.get_tensordict(tensor_dict=data, non_tensor_dict=meta_info)\n batch_seqlens = self._get_batch_seqlens(data=data).tolist()\n # this is necessary. 
Otherwise, it is interpreted as NonTensorStack\n batch_seqlens_ntd = NonTensorData(batch_seqlens)\n\n tu.assign_non_tensor(data, update_lr_scheduler=True, global_token_num=batch_seqlens_ntd)\n\n # start profile in SPMD mode\n if global_step == self.start_profile_step:\n self.training_client.start_profile()\n # train for on batch\n output = self.training_client.train_batch(data)\n output = output.get()\n\n if global_step == self.end_profile_step:\n self.training_client.stop_profile()\n\n metrics = tu.get(output, \"metrics\")\n\n # TODO: we can actual accumulate metrics for N steps and perform aggregate metrics\n metrics[\"train/loss\"] = metrics.pop(\"loss\")\n metrics[\"train/grad_norm\"] = metrics.pop(\"grad_norm\")\n metrics[\"train/lr\"] = metrics.pop(\"lr\")\n metrics[\"train/mfu\"] = metrics.pop(\"mfu\")\n metrics[\"train/global_tokens\"] = torch.sum(torch.tensor(batch_seqlens, device=self.device_name)).item()\n total_tokens += metrics[\"train/global_tokens\"]\n metrics[\"train/total_tokens(B)\"] = total_tokens / 1e9\n tracking.log(data=metrics, step=global_step)\n\n is_last_step = global_step >= self.total_training_steps\n is_valid_step = global_step % self.test_freq == 0\n is_save_step = global_step % self.save_freq == 0\n\n # early exit or validation step\n if is_last_step and self.val_dataloader is not None or (self.test_freq > 0 and is_valid_step):\n # Perform validation\n val_losses = []\n for val_data in self.val_dataloader:\n val_data = tu.get_tensordict(tensor_dict=val_data, non_tensor_dict=meta_info)\n output = self.training_client.infer_batch(val_data)\n output = output.get()\n metrics = tu.get(output, \"metrics\")\n val_losses.append(metrics[\"loss\"])\n\n val_loss = torch.mean(torch.tensor(val_losses, device=self.device_name))\n\n metric = {\"val/loss\": val_loss.detach().item()}\n tracking.log(data=metric, step=global_step)\n last_valid_metric = metric\n\n if is_last_step or (self.save_freq > 0 and is_save_step):\n 
def create_sft_dataset(data_paths, data_config, tokenizer, processor, max_samples=-1):
    """Build an SFT dataset.

    A user-supplied dataset class is loaded dynamically when
    ``data_config.custom_cls.path`` is set; otherwise the default
    ``MultiTurnSFTDataset`` is used.

    Args:
        data_paths: Parquet file path(s) backing the dataset.
        data_config: Data section of the trainer config.
        tokenizer: Tokenizer passed through to the dataset class.
        processor: Optional processor passed through to the dataset class.
        max_samples: Cap on the number of samples; -1 means no cap.

    Returns:
        The instantiated dataset.
    """
    custom = data_config.custom_cls
    if custom.get("path", None):
        # load the user-specified dataset class from an external module
        from verl.utils.import_utils import load_extern_type

        dataset_cls = load_extern_type(custom.path, custom.name)
    else:
        dataset_cls = MultiTurnSFTDataset

    return dataset_cls(
        parquet_files=data_paths,
        tokenizer=tokenizer,
        config=data_config,
        processor=processor,
        max_samples=max_samples,
    )
def omega_conf_to_dataclass(config: DictConfig | dict, dataclass_type: Optional[type[Any]] = None) -> Any:
    """Convert an OmegaConf ``DictConfig`` (or plain dict) into a dataclass instance.

    Args:
        config: The configuration container to convert.
        dataclass_type: Target dataclass type. When ``None``, ``config`` must
            carry a ``_target_`` key and is instantiated via hydra.

    Returns:
        The dataclass instance (or ``config`` unchanged when it is already a
        concrete object rather than a config container).
    """
    # Empty config: None when no target type was given, else a default instance.
    if not config:
        return dataclass_type if dataclass_type is None else dataclass_type()

    # Already a plain object (not a config container) -- pass through as-is.
    if not isinstance(config, DictConfig | ListConfig | dict | list):
        return config

    if dataclass_type is None:
        assert "_target_" in config, (
            "When dataclass_type is not provided, config must contain _target_. "
            "See trainer/config/ppo_trainer.yaml algorithm section for an example. "
            f"Got config: {config}"
        )
        from hydra.utils import instantiate

        return instantiate(config, _convert_="partial")

    if not is_dataclass(dataclass_type):
        raise ValueError(f"{dataclass_type} must be a dataclass")

    # Normalize to an OmegaConf container (config may be a bare dict), then
    # overlay the user values on top of the dataclass defaults. A _target_
    # key is tolerated because BaseConfig declares that field, which keeps
    # recursive instantiation working.
    as_conf = OmegaConf.create(config)
    defaults = OmegaConf.structured(dataclass_type)
    merged = OmegaConf.merge(defaults, as_conf)
    return OmegaConf.to_object(merged)
({config.actor_rollout_ref.actor.megatron.context_parallel_size})\"\n )\n megatron_dp = n_gpus // (\n model_parallel_size * config.actor_rollout_ref.actor.megatron.context_parallel_size\n )\n minimal_bsz = megatron_dp * config.actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu\n else:\n minimal_bsz = n_gpus\n\n # 1. Check total batch size for data correctness\n real_train_batch_size = config.data.train_batch_size * config.actor_rollout_ref.rollout.n\n assert real_train_batch_size % minimal_bsz == 0, (\n f\"real_train_batch_size ({real_train_batch_size}) must be divisible by minimal possible batch size \"\n f\"({minimal_bsz})\"\n )\n\n # A helper function to check \"micro_batch_size\" vs \"micro_batch_size_per_gpu\"\n # We throw an error if the user sets both. The new convention is \"..._micro_batch_size_per_gpu\".\n def check_mutually_exclusive(mbs, mbs_per_gpu, name: str):\n \"\"\"Validate mutually exclusive micro batch size configuration options.\n\n Ensures that users don't set both deprecated micro_batch_size and\n the new micro_batch_size_per_gpu parameters simultaneously.\n\n Args:\n mbs: Deprecated micro batch size parameter value.\n mbs_per_gpu: New micro batch size per GPU parameter value.\n name (str): Configuration section name for error messages.\n\n Raises:\n ValueError: If both parameters are set or neither is set.\n \"\"\"\n settings = {\n \"actor_rollout_ref.ref\": \"log_prob_micro_batch_size\",\n \"actor_rollout_ref.rollout\": \"log_prob_micro_batch_size\",\n }\n\n if name in settings:\n param = settings[name]\n param_per_gpu = f\"{param}_per_gpu\"\n\n if mbs is None and mbs_per_gpu is None:\n raise ValueError(f\"[{name}] Please set at least one of '{name}.{param}' or '{name}.{param_per_gpu}'.\")\n\n if mbs is not None and mbs_per_gpu is not None:\n raise ValueError(\n f\"[{name}] You have set both '{name}.{param}' AND '{name}.{param_per_gpu}'. 
Please remove \"\n f\"'{name}.{param}' because only '*_{param_per_gpu}' is supported (the former is deprecated).\"\n )\n\n # Actor validation done in ActorConfig.__post_init__ and validate()\n actor_config = omega_conf_to_dataclass(config.actor_rollout_ref.actor)\n actor_config.validate(n_gpus, config.data.train_batch_size, config.actor_rollout_ref.model)\n\n if not config.actor_rollout_ref.actor.use_dynamic_bsz:\n if use_reference_policy:\n # reference: log_prob_micro_batch_size vs. log_prob_micro_batch_size_per_gpu\n check_mutually_exclusive(\n config.actor_rollout_ref.ref.log_prob_micro_batch_size,\n config.actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu,\n \"actor_rollout_ref.ref\",\n )\n\n # The rollout section also has log_prob_micro_batch_size vs. log_prob_micro_batch_size_per_gpu\n check_mutually_exclusive(\n config.actor_rollout_ref.rollout.log_prob_micro_batch_size,\n config.actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu,\n \"actor_rollout_ref.rollout\",\n )\n\n if config.algorithm.use_kl_in_reward and config.actor_rollout_ref.actor.use_kl_loss:\n print(\"NOTICE: You have both enabled in-reward kl and kl loss.\")\n\n # critic\n if use_critic:\n critic_config = omega_conf_to_dataclass(config.critic)\n critic_config.validate(n_gpus, config.data.train_batch_size)\n\n if config.data.get(\"val_batch_size\", None) is not None:\n print(\n \"WARNING: val_batch_size is deprecated.\"\n + \" Validation datasets are sent to inference engines as a whole batch,\"\n + \" which will schedule the memory themselves.\"\n )\n\n # check eval config\n if config.actor_rollout_ref.rollout.val_kwargs.do_sample:\n assert config.actor_rollout_ref.rollout.temperature > 0, (\n \"validation gen temperature should be greater than 0 when enabling do_sample\"\n )\n\n # check LoRA rank in vLLM\n lora_config = config.actor_rollout_ref.model.get(\"lora\", {})\n lora_rank = lora_config.get(\"rank\", 0)\n if lora_rank <= 0:\n lora_rank = 
_HDFS_PREFIX = "hdfs://"


def is_non_local(path):
    """Return True when *path* refers to HDFS rather than the local filesystem.

    Args:
        path (str): The path to inspect.

    Returns:
        bool: True for ``hdfs://``-prefixed paths, False otherwise.
    """
    return path.startswith(_HDFS_PREFIX)
def get_local_temp_path(hdfs_path: str, cache_dir: str) -> str:
    """Return (and create the directory for) a unique local cache path.

    The layout is ``{cache_dir}/{md5(hdfs_path)}/{basename(hdfs_path)}``;
    hashing the full source path keeps resources that share a basename
    from colliding in the cache.

    Args:
        hdfs_path: Source HDFS path being cached.
        cache_dir: Local directory that holds cached files.

    Returns:
        Local filesystem path where the resource should be cached.
    """
    # same digest as md5_encode(): md5 hex of the full source path
    hashed = hashlib.md5(hdfs_path.encode()).hexdigest()
    target_dir = os.path.join(cache_dir, hashed)
    os.makedirs(target_dir, exist_ok=True)
    return os.path.join(target_dir, os.path.basename(hdfs_path))
def _record_directory_structure(folder_path):
    """Write a manifest of *folder_path*'s tree into ``.directory_record.txt``.

    Each manifest line is ``dir:<relpath>`` or ``file:<relpath>``; the record
    file itself is excluded from the listing.

    Returns:
        str: Path of the record file that was written.
    """
    record_file = os.path.join(folder_path, ".directory_record.txt")
    entries = []
    for root, dirs, files in os.walk(folder_path):
        entries.extend(f"dir:{os.path.relpath(os.path.join(root, d), folder_path)}\n" for d in dirs)
        entries.extend(
            f"file:{os.path.relpath(os.path.join(root, name), folder_path)}\n"
            for name in files
            if name != ".directory_record.txt"  # never record the manifest itself
        )
    with open(record_file, "w") as f:
        f.writelines(entries)
    return record_file
def copy_to_local(
    src: str, cache_dir=None, filelock=".file.lock", verbose=False, always_recopy=False, use_shm: bool = False
) -> str:
    """Copy an HDFS / local / Hugging Face resource into the local cache.

    Args:
        src: ``hdfs://`` path, local filesystem path, or HF model ID.
        cache_dir: Local cache directory (system tempdir when None).
        filelock: Base name of the file lock guarding the copy.
        verbose: Log copy operations when True.
        always_recopy: Ignore any cached copy and re-fetch.
        use_shm: Additionally stage the result in /dev/shm.

    Returns:
        str: Local filesystem path of the copied resource.
    """
    local_path = copy_local_path_from_hdfs(src, cache_dir, filelock, verbose, always_recopy)

    # NOTE(review): the Hugging Face fallback only runs in shm mode and only
    # when the resolved path does not exist locally (i.e. src was a model ID).
    if use_shm and isinstance(local_path, str) and not os.path.exists(local_path):
        resolved = None
        try:
            from huggingface_hub import snapshot_download

            resolved = snapshot_download(local_path)
        except ImportError:
            # huggingface_hub not installed: silently keep the original path
            pass
        except Exception as e:
            print(f"WARNING: Failed to download model from Hugging Face: {e}")
        if isinstance(resolved, str) and os.path.exists(resolved):
            local_path = resolved

    # Stage into shared memory so repeated loads are cheap.
    return copy_to_shm(local_path) if use_shm else local_path
def local_mkdir_safe(path):
    """Create *path* (with parents) under a cross-process file lock.

    Relative paths are resolved against the current working directory.
    If the lock cannot be acquired, the directory is still created on a
    best-effort basis.

    Args:
        path (str): Directory path to create.

    Returns:
        str: The absolute path that was created.
    """
    from filelock import FileLock

    if not os.path.isabs(path):
        path = os.path.join(os.getcwd(), path)

    # Hash-derived lock name keeps the lock file path short even for deep paths.
    lock_path = os.path.join(tempfile.gettempdir(), f"ckpt_{hash(path) & 0xFFFFFFFF:08x}.lock")

    try:
        # Bounded wait so a stale lock cannot hang the caller forever.
        with FileLock(lock_path, timeout=60):
            os.makedirs(path, exist_ok=True)
    except Exception as e:
        print(f"Warning: Failed to acquire lock for {path}: {e}")
        # Fall back to an unlocked mkdir rather than failing outright.
        os.makedirs(path, exist_ok=True)

    return path
def calc_padded_numel(shape: torch.Size, dtype: torch.dtype) -> int:
    """Round ``shape.numel()`` up so a buffer of *dtype* is 128-bit aligned.

    Args:
        shape: Tensor shape whose element count is padded.
        dtype: Element dtype; its bit width determines how many elements
            span 128 bits.

    Returns:
        int: Smallest multiple of the 128-bit element count >= ``shape.numel()``.
    """
    # torch.finfo only covers floating-point dtypes; fall back to iinfo so
    # integer buffers can be padded too (the original raised TypeError here).
    try:
        bits = torch.finfo(dtype).bits
    except TypeError:
        bits = torch.iinfo(dtype).bits
    align_numel = 128 // bits
    numel = shape.numel()
    # ceil(numel / align_numel) * align_numel
    return (numel + align_numel - 1) // align_numel * align_numel
def build_memory_reference_from_module(
    module: torch.nn.Module, memory_buffers: dict[torch.dtype, MemoryBuffer], maintain_weight=True
):
    """Rebind every parameter of *module* to a view into the shared buffers.

    Parameters are visited in sorted-name order, matching the order used
    when the buffers were sized, so each view lands on its reserved slot.
    When ``maintain_weight`` is True, the current weight values are copied
    into the buffer before the parameter storage is repointed at it.
    """
    offsets = {dtype: 0 for dtype in memory_buffers}
    for _name, param in sorted(module.named_parameters()):
        dtype = param.dtype
        view = memory_buffers[dtype].get(shape=param.shape, start_index=offsets[dtype])
        # advance by the padded size so subsequent views stay 128-bit aligned
        offsets[dtype] += calc_padded_numel(param.shape, dtype)
        if maintain_weight:
            view.copy_(param.data)
        param.data = view
class MemoryBufferModuleWrapper:
    """Wrap an ``nn.Module`` so its parameters live in contiguous per-dtype buffers.

    Deliberately not an ``nn.Module`` subclass: nesting the module inside
    another module would prefix parameter names and change checkpoint keys.
    """

    def __init__(self, module: nn.Module):
        super().__init__()
        self.module = module
        # Size the per-dtype buffers from the module's parameter metadata,
        # then rebind every parameter into them (weights are preserved).
        self.weight_buffer_meta = get_weight_buffer_meta_from_module(module)
        self.memory_buffers = build_memory_buffer(self.weight_buffer_meta)
        build_memory_reference_from_module(module, self.memory_buffers)

    def get_memory_buffers(self):
        """Return the dtype -> MemoryBuffer mapping backing the parameters."""
        return self.memory_buffers

    def get_weight_buffer_meta(self):
        """Return the name -> {shape, dtype} metadata of the wrapped parameters."""
        return self.weight_buffer_meta
def initialize_weight_buffer(self, weight_buffer_meta_pp: list[dict[str, dict]]):
    """Allocate one set of per-dtype memory buffers per pipeline stage.

    Args:
        weight_buffer_meta_pp: One weight-buffer metadata dict
            (name -> {shape, dtype}) per pipeline-parallel model chunk.

    The per-stage weight-buffer views are created later (see
    ``build_memory_reference``), so a ``None`` placeholder is appended
    for each stage here.
    """
    self.weight_buffer_meta_pp = weight_buffer_meta_pp
    for stage_meta in weight_buffer_meta_pp:
        self._memory_buffers.append(build_memory_buffer(stage_meta))
        self._weight_buffers.append(None)
BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# Copyright 2024 Bytedance Ltd. and/or its affiliates\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport ipaddress\nimport socket\n\n\ndef is_ipv4(ip_str: str) -> bool:\n \"\"\"\n Check if the given string is an IPv4 address\n\n Args:\n ip_str: The IP address string to check\n\n Returns:\n bool: Returns True if it's an IPv4 address, False otherwise\n \"\"\"\n try:\n ipaddress.IPv4Address(ip_str)\n return True\n except ipaddress.AddressValueError:\n return False\n\n\ndef is_ipv6(ip_str: str) -> bool:\n \"\"\"\n Check if the given string is an IPv6 address\n\n Args:\n ip_str: The IP address string to check\n\n Returns:\n bool: Returns True if it's an IPv6 address, False otherwise\n \"\"\"\n try:\n ipaddress.IPv6Address(ip_str)\n return True\n except ipaddress.AddressValueError:\n return False\n\n\ndef is_valid_ipv6_address(address: str) -> bool:\n try:\n ipaddress.IPv6Address(address)\n return True\n except ValueError:\n return False\n\n\ndef get_free_port(address: str) -> tuple[int, socket.socket]:\n family = socket.AF_INET\n if is_valid_ipv6_address(address):\n family = socket.AF_INET6\n\n sock = socket.socket(family=family, type=socket.SOCK_STREAM)\n sock.setsockopt(socket.SOL_SOCKET, 
socket.SO_REUSEADDR, 1)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)\n sock.bind((address, 0))\n\n port = sock.getsockname()[1]\n return port, sock\n"} {"file_name": "verl__utils__profiler__config.py", "text": "# Copyright 2024 Bytedance Ltd. and/or its affiliates\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport os\nimport warnings\nfrom dataclasses import dataclass, field\nfrom typing import Any, Optional\n\nfrom omegaconf import MISSING\n\nfrom verl.base_config import BaseConfig\n\n\n@dataclass\nclass NsightToolConfig(BaseConfig):\n \"\"\"Nsight tool config.\"\"\"\n\n \"True for each task has its own database, False for all tasks in one training step share one database.\"\n discrete: bool = False\n name: str = \"nsight\"\n\n def __post_init__(self) -> None:\n pass\n\n\n@dataclass\nclass TorchProfilerToolConfig(BaseConfig):\n \"\"\"Torch profiler tool config.\"\"\"\n\n # options: cuda, cpu, memory, shapes, stack\n contents: list[str] = field(default_factory=list)\n discrete: bool = False\n name: str = \"torch\"\n\n def __post_init__(self) -> None:\n \"\"\"config validation logics go here\"\"\"\n __support_contents = [\"cuda\", \"cpu\", \"memory\", \"shapes\", \"stack\"]\n for content in self.contents:\n assert content in __support_contents, (\n f\"Profiler contents only supports {__support_contents}, but gets {content}\"\n )\n assert isinstance(self.contents, list), f\"Profiler contents must be of type list, got 
{type(self.contents)}\"\n\n\n@dataclass\nclass TorchMemoryToolConfig(BaseConfig):\n \"\"\"Torch memory profiler tool config.\n\n Args:\n trace_alloc_max_entries (int): Maximum number of memory allocation entries to track.\n stack_depth (int): Stack trace depth for memory allocations.\n \"\"\"\n\n trace_alloc_max_entries: int = 100_000\n stack_depth: int = 32\n name: str = \"torch_memory\"\n\n def __post_init__(self) -> None:\n \"\"\"config validation logics go here\"\"\"\n assert isinstance(self.trace_alloc_max_entries, int), (\n f\"trace_alloc_max_entries must be int, got {type(self.trace_alloc_max_entries)}\"\n )\n assert isinstance(self.stack_depth, int), f\"stack_depth must be int, got {type(self.stack_depth)}\"\n assert self.trace_alloc_max_entries > 0, (\n f\"trace_alloc_max_entries must be positive, got {self.trace_alloc_max_entries}\"\n )\n assert self.stack_depth > 0, f\"stack_depth must be positive, got {self.stack_depth}\"\n\n\n@dataclass\nclass NPUToolConfig(NsightToolConfig):\n \"\"\"NPU profiler too; config.\"\"\"\n\n # options: npu, cpu, memory, shapes, module, stack\n contents: list[str] = field(default_factory=list)\n\n # Collection level, optional values: level_none, level0, level1, level2.\n level: str = \"level0\"\n\n # Whether to automatically parse the data.\n analysis: bool = False\n\n name: str = \"npu\"\n\n def __post_init__(self) -> None:\n \"\"\"config validation logics go here\"\"\"\n assert isinstance(self.contents, list), f\"Profiler contents must be of type list, got {type(self.contents)}\"\n assert isinstance(self.level, str), f\"Profiler level must be of type str, got {type(self.level)}\"\n assert isinstance(self.analysis, bool), f\"Profiler analysis must be of type bool, got {type(self.analysis)}\"\n for content in self.contents:\n assert content in [\"npu\", \"cpu\", \"memory\", \"shapes\", \"module\", \"stack\"], (\n f\"Profiler contents only supports npu, cpu, memory, shapes, module, stack, but gets {content}\"\n )\n assert 
self.level in [\"level_none\", \"level0\", \"level1\", \"level2\"], (\n f\"Profiler level only supports level0, 1, 2, and level_none, but gets {self.level}\"\n )\n\n\n@dataclass\nclass ProfilerConfig(BaseConfig):\n \"\"\"Worker profiler config.\n\n The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config.\n\n Args:\n discrete (bool): True for each task has its own database, False for all tasks in one training step\n share one database.\n all_ranks (bool): Whether to profile all ranks.\n ranks (list[int]): The ranks that will be profiled. Defaults to [].\n global_tool_config (Any): Global tool configuration for all profiling tools.\n \"\"\"\n\n tool: Optional[str] = MISSING\n enable: bool = False\n all_ranks: bool = False\n ranks: list[int] = field(default_factory=list)\n save_path: Optional[str] = MISSING\n tool_config: Any = MISSING # Just a placeholder, will use configs above directly\n global_tool_config: Optional[Any] = None # Global tool configuration for all profiling tools\n\n def union(self, other: \"ProfilerConfig\") -> \"ProfilerConfig\":\n assert self.tool == other.tool, f\"Cannot union ProfilerConfig with different tools: {self.tool} vs {other.tool}\"\n return ProfilerConfig(\n tool=self.tool,\n enable=self.enable or other.enable,\n all_ranks=self.all_ranks or other.all_ranks,\n ranks=list(set(self.ranks or []) | set(other.ranks or [])),\n save_path=self.save_path,\n tool_config=self.tool_config,\n global_tool_config=self.global_tool_config or other.global_tool_config,\n )\n\n def intersect(self, other: \"ProfilerConfig\") -> \"ProfilerConfig\":\n assert self.tool == other.tool, (\n f\"Cannot intersect ProfilerConfig with different tools: {self.tool} vs {other.tool}\"\n )\n return ProfilerConfig(\n tool=self.tool,\n enable=self.enable and other.enable,\n all_ranks=self.all_ranks and other.all_ranks,\n ranks=list(set(self.ranks or []) & set(other.ranks or [])),\n save_path=self.save_path,\n 
tool_config=self.tool_config,\n global_tool_config=self.global_tool_config if self.global_tool_config else other.global_tool_config,\n )\n\n def __post_init__(self) -> None:\n \"\"\"config validation logics go here\"\"\"\n assert isinstance(self.ranks, set | list | tuple), (\n f\"Profiler ranks must be of type list, got {type(self.ranks)}\"\n )\n\n\ndef build_vllm_profiler_args(profiler_config: ProfilerConfig, tool_config: BaseConfig, rank: int) -> dict:\n \"\"\"\n Build arguments and environment variables for vLLM profiler.\n\n Acts as an adapter to bridge verl's unified profiler config and vLLM's specific requirements.\n It sets environment variables for compatibility and constructs arguments for vLLM >= 0.13.0.\n\n Args:\n profiler_config (ProfilerConfig): The unified profiler configuration.\n tool_config (BaseConfig): The tool configuration.\n rank (int): The rank of the replica.\n\n Returns:\n dict: A dictionary of arguments to be passed to vLLM's start_profile method.\n \"\"\"\n if not profiler_config or not tool_config or not hasattr(tool_config, \"contents\"):\n return {}\n\n contents = tool_config.contents\n with_stack = True if \"stack\" in contents or \"module\" in contents else False\n record_shapes = True if \"shapes\" in contents else False\n with_memory = True if \"memory\" in contents else False\n save_path = os.path.join(profiler_config.save_path, f\"agent_loop_rollout_replica_{rank}\")\n\n # vLLM < 0.13.0 supports controlling profiler via environment variables\n os.environ[\"VLLM_TORCH_PROFILER_DIR\"] = save_path\n os.environ[\"VLLM_TORCH_PROFILER_WITH_STACK\"] = \"1\" if with_stack else \"0\"\n os.environ[\"VLLM_TORCH_PROFILER_RECORD_SHAPES\"] = \"1\" if record_shapes else \"0\"\n os.environ[\"VLLM_TORCH_PROFILER_WITH_PROFILE_MEMORY\"] = \"1\" if with_memory else \"0\"\n\n # vLLM >= 0.13.0 supports controlling profiler via arguments.\n # While it maintains backward compatibility with environment variables,\n # we provide arguments explicitly to 
align with the new API style.\n return {\n \"profiler_config\": json.dumps(\n {\n \"profiler\": \"torch\",\n \"torch_profiler_dir\": save_path,\n \"torch_profiler_with_memory\": with_memory,\n \"torch_profiler_with_stack\": with_stack,\n \"torch_profiler_record_shapes\": record_shapes,\n }\n )\n }\n\n\ndef build_sglang_profiler_args(profiler_config: ProfilerConfig, tool_config: BaseConfig, rank: int) -> dict:\n \"\"\"\n Build arguments for SGLang profiler.\n\n Args:\n profiler_config (ProfilerConfig): The unified profiler configuration.\n tool_config (BaseConfig): The tool configuration.\n rank (int): The rank of the replica.\n\n Returns:\n dict: A dictionary of arguments suitable for starting the SGLang profiler.\n \"\"\"\n if not profiler_config or not tool_config or not hasattr(tool_config, \"contents\"):\n return {}\n\n contents = tool_config.contents\n if \"memory\" in contents:\n warnings.warn(\"SGLang profiler does not support memory profiling. Ignoring memory content.\", stacklevel=2)\n\n return {\n \"output_dir\": os.path.join(profiler_config.save_path, f\"agent_loop_rollout_replica_{rank}\"),\n \"with_stack\": \"stack\" in contents or \"module\" in contents,\n \"record_shapes\": \"shapes\" in contents,\n }\n"} {"file_name": "verl__utils__py_functional.py", "text": "# Copyright 2024 Bytedance Ltd. 
and/or its affiliates\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nContain small python utility functions\n\"\"\"\n\nimport importlib\nimport multiprocessing\nimport os\nimport queue # Import the queue module for exception type hint\nimport signal\nfrom contextlib import contextmanager\nfrom functools import wraps\nfrom types import SimpleNamespace\nfrom typing import Any, Callable, Iterator, Optional\n\nfrom verl.utils.metric import Metric\n\n\n# --- Top-level helper for multiprocessing timeout ---\n# This function MUST be defined at the top level to be pickleable\ndef _mp_target_wrapper(target_func: Callable, mp_queue: multiprocessing.Queue, args: tuple, kwargs: dict[str, Any]):\n \"\"\"\n Internal wrapper function executed in the child process.\n Calls the original target function and puts the result or exception into the queue.\n \"\"\"\n try:\n result = target_func(*args, **kwargs)\n mp_queue.put((True, result)) # Indicate success and put result\n except Exception as e:\n # Ensure the exception is pickleable for the queue\n try:\n import pickle\n\n pickle.dumps(e) # Test if the exception is pickleable\n mp_queue.put((False, e)) # Indicate failure and put exception\n except (pickle.PicklingError, TypeError):\n # Fallback if the original exception cannot be pickled\n mp_queue.put((False, RuntimeError(f\"Original exception type {type(e).__name__} not pickleable: {e}\")))\n\n\n# Renamed the function from timeout to timeout_limit\ndef 
timeout_limit(seconds: float, use_signals: bool = False):\n \"\"\"\n Decorator to add a timeout to a function.\n\n Args:\n seconds: The timeout duration in seconds.\n use_signals: (Deprecated) This is deprecated because signals only work reliably in the main thread\n and can cause issues in multiprocessing or multithreading contexts.\n Defaults to False, which uses the more robust multiprocessing approach.\n\n Returns:\n A decorated function with timeout.\n\n Raises:\n TimeoutError: If the function execution exceeds the specified time.\n RuntimeError: If the child process exits with an error (multiprocessing mode).\n NotImplementedError: If the OS is not POSIX (signals are only supported on POSIX).\n \"\"\"\n\n def decorator(func):\n if use_signals:\n if os.name != \"posix\":\n raise NotImplementedError(f\"Unsupported OS: {os.name}\")\n # Issue deprecation warning if use_signals is explicitly True\n print(\n \"WARN: The 'use_signals=True' option in the timeout decorator is deprecated. \\\n Signals are unreliable outside the main thread. 
\\\n Please use the default multiprocessing-based timeout (use_signals=False).\"\n )\n\n @wraps(func)\n def wrapper_signal(*args, **kwargs):\n def handler(signum, frame):\n # Update function name in error message if needed (optional but good practice)\n raise TimeoutError(f\"Function {func.__name__} timed out after {seconds} seconds (signal)!\")\n\n old_handler = signal.getsignal(signal.SIGALRM)\n signal.signal(signal.SIGALRM, handler)\n # Use setitimer for float seconds support, alarm only supports integers\n signal.setitimer(signal.ITIMER_REAL, seconds)\n\n try:\n result = func(*args, **kwargs)\n finally:\n # Reset timer and handler\n signal.setitimer(signal.ITIMER_REAL, 0)\n signal.signal(signal.SIGALRM, old_handler)\n return result\n\n return wrapper_signal\n else:\n # --- Multiprocessing based timeout (existing logic) ---\n @wraps(func)\n def wrapper_mp(*args, **kwargs):\n q = multiprocessing.Queue(maxsize=1)\n process = multiprocessing.Process(target=_mp_target_wrapper, args=(func, q, args, kwargs))\n process.start()\n process.join(timeout=seconds)\n\n if process.is_alive():\n process.terminate()\n process.join(timeout=0.5) # Give it a moment to terminate\n if process.is_alive():\n print(f\"Warning: Process {process.pid} did not terminate gracefully after timeout.\")\n # Update function name in error message if needed (optional but good practice)\n raise TimeoutError(f\"Function {func.__name__} timed out after {seconds} seconds (multiprocessing)!\")\n\n try:\n success, result_or_exc = q.get(timeout=0.1) # Small timeout for queue read\n if success:\n return result_or_exc\n else:\n raise result_or_exc # Reraise exception from child\n except queue.Empty as err:\n exitcode = process.exitcode\n if exitcode is not None and exitcode != 0:\n raise RuntimeError(\n f\"Child process exited with error (exitcode: {exitcode}) before returning result.\"\n ) from err\n else:\n # Should have timed out if queue is empty after join unless process died unexpectedly\n # Update 
function name in error message if needed (optional but good practice)\n raise TimeoutError(\n f\"Operation timed out or process finished unexpectedly without result \"\n f\"(exitcode: {exitcode}).\"\n ) from err\n finally:\n q.close()\n q.join_thread()\n\n return wrapper_mp\n\n return decorator\n\n\ndef union_two_dict(dict1: dict, dict2: dict):\n \"\"\"Union two dict. Will throw an error if there is an item not the same object with the same key.\n\n Args:\n dict1:\n dict2:\n\n Returns:\n\n \"\"\"\n for key, val in dict2.items():\n if key in dict1:\n assert dict2[key] == dict1[key], f\"{key} in meta_dict1 and meta_dict2 are not the same object\"\n dict1[key] = val\n\n return dict1\n\n\ndef rename_dict(data: dict, prefix: str = \"\") -> dict:\n \"\"\"Add a prefix to all the keys in the data dict if it's name is not started with prefix\n\n Args:\n data: a dictionary\n prefix: prefix\n\n Returns:\n dictionary with modified name\n\n \"\"\"\n new_data = {}\n for key, val in data.items():\n new_key = f\"{prefix}{key}\" if not key.startswith(prefix) else key\n new_data[new_key] = val\n return new_data\n\n\ndef append_to_dict(data: dict, new_data: dict, prefix: str = \"\"):\n \"\"\"Append values from new_data to lists in data.\n\n For each key in new_data, this function appends the corresponding value to a list\n stored under the same key in data. 
If the key doesn't exist in data, a new list is created.\n\n Args:\n data (Dict): The target dictionary containing lists as values.\n new_data (Dict): The source dictionary with values to append.\n\n Returns:\n None: The function modifies data in-place.\n \"\"\"\n for key, val in new_data.items():\n new_key = f\"{prefix}{key}\" if not key.startswith(prefix) else key\n if new_key not in data:\n data[new_key] = val.init_list() if isinstance(val, Metric) else []\n if isinstance(val, list):\n data[new_key].extend(val)\n else:\n data[new_key].append(val)\n\n\nclass NestedNamespace(SimpleNamespace):\n \"\"\"A nested version of SimpleNamespace that recursively converts dictionaries to namespaces.\n\n This class allows for dot notation access to nested dictionary structures by recursively\n converting dictionaries to NestedNamespace objects.\n\n Example:\n config_dict = {\"a\": 1, \"b\": {\"c\": 2, \"d\": 3}}\n config = NestedNamespace(config_dict)\n # Access with: config.a, config.b.c, config.b.d\n\n Args:\n dictionary: The dictionary to convert to a nested namespace.\n **kwargs: Additional attributes to set on the namespace.\n \"\"\"\n\n def __init__(self, dictionary, **kwargs):\n super().__init__(**kwargs)\n for key, value in dictionary.items():\n if isinstance(value, dict):\n self.__setattr__(key, NestedNamespace(value))\n else:\n self.__setattr__(key, value)\n\n\nclass DynamicEnumMeta(type):\n def __iter__(cls) -> Iterator[Any]:\n return iter(cls._registry.values())\n\n def __contains__(cls, item: Any) -> bool:\n # allow `name in EnumClass` or `member in EnumClass`\n if isinstance(item, str):\n return item in cls._registry\n return item in cls._registry.values()\n\n def __getitem__(cls, name: str) -> Any:\n return cls._registry[name]\n\n def __reduce_ex__(cls, protocol):\n # Always load the existing module and grab the class\n return getattr, (importlib.import_module(cls.__module__), cls.__name__)\n\n def names(cls):\n return list(cls._registry.keys())\n\n def 
values(cls):\n return list(cls._registry.values())\n\n\nclass DynamicEnum(metaclass=DynamicEnumMeta):\n _registry: dict[str, \"DynamicEnum\"] = {}\n _next_value: int = 0\n\n def __init__(self, name: str, value: int):\n self.name = name\n self.value = value\n\n def __repr__(self):\n return f\"<{self.__class__.__name__}.{self.name}: {self.value}>\"\n\n def __reduce_ex__(self, protocol):\n \"\"\"\n Unpickle via: getattr(import_module(module).Dispatch, 'ONE_TO_ALL')\n so the existing class is reused instead of re-executed.\n \"\"\"\n module = importlib.import_module(self.__class__.__module__)\n enum_cls = getattr(module, self.__class__.__name__)\n return getattr, (enum_cls, self.name)\n\n @classmethod\n def register(cls, name: str) -> \"DynamicEnum\":\n key = name.upper()\n if key in cls._registry:\n raise ValueError(f\"{key} already registered\")\n member = cls(key, cls._next_value)\n cls._registry[key] = member\n setattr(cls, key, member)\n cls._next_value += 1\n return member\n\n @classmethod\n def remove(cls, name: str):\n key = name.upper()\n member = cls._registry.pop(key)\n delattr(cls, key)\n return member\n\n @classmethod\n def from_name(cls, name: str) -> Optional[\"DynamicEnum\"]:\n return cls._registry.get(name.upper())\n\n\n@contextmanager\ndef temp_env_var(key: str, value: str):\n \"\"\"Context manager for temporarily setting an environment variable.\n\n This context manager ensures that environment variables are properly set and restored,\n even if an exception occurs during the execution of the code block.\n\n Args:\n key: Environment variable name to set\n value: Value to set the environment variable to\n\n Yields:\n None\n\n Example:\n >>> with temp_env_var(\"MY_VAR\", \"test_value\"):\n ... # MY_VAR is set to \"test_value\"\n ... do_something()\n ... 
# MY_VAR is restored to its original value or removed if it didn't exist\n \"\"\"\n original = os.environ.get(key)\n os.environ[key] = value\n try:\n yield\n finally:\n if original is None:\n os.environ.pop(key, None)\n else:\n os.environ[key] = original\n\n\ndef convert_to_regular_types(obj):\n \"\"\"Convert Hydra configs and other special types to regular Python types.\"\"\"\n from omegaconf import DictConfig, ListConfig\n\n if isinstance(obj, ListConfig | DictConfig):\n return {k: convert_to_regular_types(v) for k, v in obj.items()} if isinstance(obj, DictConfig) else list(obj)\n elif isinstance(obj, list | tuple):\n return [convert_to_regular_types(x) for x in obj]\n elif isinstance(obj, dict):\n return {k: convert_to_regular_types(v) for k, v in obj.items()}\n return obj\n"} {"file_name": "verl__utils__reward_score__prime_math____init__.py", "text": "# Copyright 2024 PRIME team and/or its affiliates\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nAnswer checker API that uses sympy to simplify expressions and check for equality.\n\nCall grade_answer(given_answer: str, ground_truth: str).\n\nFROM: https://github.com/openai/prm800k/blob/main/prm800k/grading/grader.py\n\"\"\"\n\nimport contextlib\nimport math\nimport re\n\nimport sympy\nfrom pylatexenc import latex2text\nfrom sympy.parsing import sympy_parser\n\nfrom verl.utils.py_functional import timeout_limit\n\nfrom . 
import math_normalize\nfrom .grader import math_equal\n\n# import math_normalize\n# from grader import math_equal\n\n# sympy might hang -- we don't care about trying to be lenient in these cases\nBAD_SUBSTRINGS = [\"^{\", \"^(\"]\nBAD_REGEXES = [r\"\\^[0-9]+\\^\", r\"\\^[0-9][0-9]+\"]\nTUPLE_CHARS = \"()[]\"\n\n\ndef _sympy_parse(expr: str):\n \"\"\"Parses an expression with sympy.\"\"\"\n py_expr = expr.replace(\"^\", \"**\")\n return sympy_parser.parse_expr(\n py_expr,\n transformations=(sympy_parser.standard_transformations + (sympy_parser.implicit_multiplication_application,)),\n )\n\n\ndef _parse_latex(expr: str) -> str:\n \"\"\"Attempts to parse latex to an expression sympy can read.\"\"\"\n expr = expr.replace(\"\\\\tfrac\", \"\\\\frac\")\n expr = expr.replace(\"\\\\dfrac\", \"\\\\frac\")\n expr = expr.replace(\"\\\\frac\", \" \\\\frac\") # Play nice with mixed numbers.\n expr = latex2text.LatexNodes2Text().latex_to_text(expr)\n\n # Replace the specific characters that this parser uses.\n expr = expr.replace(\"√\", \"sqrt\")\n expr = expr.replace(\"π\", \"pi\")\n expr = expr.replace(\"∞\", \"inf\")\n expr = expr.replace(\"∪\", \"U\")\n expr = expr.replace(\"·\", \"*\")\n expr = expr.replace(\"×\", \"*\")\n\n return expr.strip()\n\n\ndef _is_float(num: str) -> bool:\n try:\n float(num)\n return True\n except ValueError:\n return False\n\n\ndef _is_int(x: float) -> bool:\n try:\n return abs(x - int(round(x))) <= 1e-7\n except Exception:\n return False\n\n\ndef _is_frac(expr: str) -> bool:\n return bool(re.search(r\"^-?[0-9]+.?/0*[1-9][0-9]*.?$\", expr))\n\n\ndef _str_is_int(x: str) -> bool:\n try:\n x = _strip_properly_formatted_commas(x)\n x = float(x)\n return abs(x - int(round(x))) <= 1e-7\n except Exception:\n return False\n\n\ndef _str_to_int(x: str) -> bool:\n x = x.replace(\",\", \"\")\n x = float(x)\n return int(x)\n\n\ndef _inject_implicit_mixed_number(step: str):\n \"\"\"\n Automatically make a mixed number evalable\n e.g. 
7 3/4 => 7+3/4\n \"\"\"\n p1 = re.compile(r\"([0-9]) +([0-9])\")\n step = p1.sub(r\"\\1+\\2\", step) ## implicit mults\n return step\n\n\ndef _strip_properly_formatted_commas(expr: str):\n # We want to be careful because we don't want to strip tuple commas\n p1 = re.compile(r\"(\\d)(,)(\\d\\d\\d)($|\\D)\")\n while True:\n next_expr = p1.sub(r\"\\1\\3\\4\", expr)\n if next_expr == expr:\n break\n expr = next_expr\n return next_expr\n\n\ndef _normalize(expr: str) -> str:\n \"\"\"Normalize answer expressions.\"\"\"\n if expr is None:\n return None\n\n # Remove enclosing `\\text{}`.\n m = re.search(r\"^\\\\text\\{(?P.+?)\\}$\", expr)\n if m is not None:\n expr = m.group(\"text\")\n\n expr = expr.replace(\"\\\\%\", \"%\")\n expr = expr.replace(\"\\\\$\", \"$\")\n expr = expr.replace(\"$\", \"\")\n expr = expr.replace(\"%\", \"\")\n expr = expr.replace(\" or \", \" , \")\n expr = expr.replace(\" and \", \" , \")\n\n expr = expr.replace(\"million\", \"*10^6\")\n expr = expr.replace(\"billion\", \"*10^9\")\n expr = expr.replace(\"trillion\", \"*10^12\")\n\n for unit in [\n \"degree\",\n \"cm\",\n \"centimeter\",\n \"meter\",\n \"mile\",\n \"second\",\n \"minute\",\n \"hour\",\n \"day\",\n \"week\",\n \"month\",\n \"year\",\n \"foot\",\n \"feet\",\n \"inch\",\n \"yard\",\n \"liter\",\n ]:\n expr = re.sub(f\"{unit}(es)?(s)? *(\\\\^[0-9]+)?\", \"\", expr)\n expr = re.sub(r\"\\^ *\\\\circ\", \"\", expr)\n\n if len(expr) > 0 and expr[0] == \"{\" and expr[-1] == \"}\":\n expr = expr[1:-1]\n\n expr = re.sub(\",\\\\\\\\! 
*\", \"\", expr)\n if _is_float(expr) and _is_int(float(expr)):\n expr = str(int(round(float(expr))))\n if \"\\\\\" in expr:\n with contextlib.suppress(Exception):\n expr = _parse_latex(expr)\n\n # edge case with mixed numbers and negative signs\n expr = re.sub(\"- *\", \"-\", expr)\n\n expr = _inject_implicit_mixed_number(expr)\n\n # don't be case sensitive for text answers\n expr = expr.lower()\n\n if _str_is_int(expr):\n expr = str(_str_to_int(expr))\n\n return expr\n\n\ndef count_unknown_letters_in_expr(expr: str):\n expr = expr.replace(\"sqrt\", \"\")\n expr = expr.replace(\"frac\", \"\")\n letters_in_expr = set([x for x in expr if x.isalpha()])\n return len(letters_in_expr)\n\n\ndef should_allow_eval(expr: str):\n # we don't want to try parsing unknown text or functions of more than two variables\n if count_unknown_letters_in_expr(expr) > 2:\n return False\n\n for bad_string in BAD_SUBSTRINGS:\n if bad_string in expr:\n return False\n\n return all(re.search(bad_regex, expr) is None for bad_regex in BAD_REGEXES)\n\n\n@timeout_limit(seconds=10)\ndef are_equal_under_sympy(ground_truth_normalized: str, given_normalized: str):\n are_equal = False\n try:\n expr = f\"({ground_truth_normalized})-({given_normalized})\"\n if should_allow_eval(expr):\n sympy_diff = _sympy_parse(expr)\n simplified = sympy.simplify(sympy_diff)\n if simplified == 0:\n are_equal = True\n except Exception:\n pass\n return are_equal\n\n\ndef split_tuple(expr: str):\n \"\"\"\n Split the elements in a tuple/interval, while handling well-formatted commas in large numbers\n \"\"\"\n expr = _strip_properly_formatted_commas(expr)\n if len(expr) == 0:\n return []\n if (\n len(expr) > 2\n and expr[0] in TUPLE_CHARS\n and expr[-1] in TUPLE_CHARS\n and all([ch not in expr[1:-1] for ch in TUPLE_CHARS])\n ):\n elems = [elem.strip() for elem in expr[1:-1].split(\",\")]\n else:\n elems = [expr]\n return elems\n\n\ndef grade_answer(given_answer: str, ground_truth: str) -> bool:\n \"\"\"\n The answer will be 
considered correct if:\n (a) it normalizes to the same string as the ground truth answer\n OR\n (b) sympy can simplify the difference between the expressions to 0\n \"\"\"\n if given_answer is None:\n return False\n\n ground_truth_normalized_mathd = math_normalize.normalize_answer(ground_truth)\n given_answer_normalized_mathd = math_normalize.normalize_answer(given_answer)\n\n # be at least as lenient as mathd\n if ground_truth_normalized_mathd == given_answer_normalized_mathd:\n return True\n\n ground_truth_normalized = _normalize(ground_truth)\n given_normalized = _normalize(given_answer)\n\n if ground_truth_normalized is None:\n return False\n\n if ground_truth_normalized == given_normalized:\n return True\n\n if len(given_normalized) == 0:\n return False\n\n ground_truth_elems = split_tuple(ground_truth_normalized)\n given_elems = split_tuple(given_normalized)\n\n if (\n len(ground_truth_elems) > 1\n and (ground_truth_normalized[0] != given_normalized[0] or ground_truth_normalized[-1] != given_normalized[-1])\n or len(ground_truth_elems) != len(given_elems)\n ):\n is_correct = False\n else:\n for ground_truth_elem, given_elem in zip(ground_truth_elems, given_elems, strict=True):\n if _is_frac(ground_truth_elem) and _is_frac(given_elem):\n # if fractions aren't reduced, then shouldn't be marked as correct\n # so, we don't want to allow sympy.simplify in this case\n is_correct = ground_truth_elem == given_elem\n elif _str_is_int(ground_truth_elem) != _str_is_int(given_elem):\n # if the ground truth answer is an integer, we require the given answer to be a strict match\n # (no sympy.simplify)\n is_correct = False\n else:\n try:\n is_correct = are_equal_under_sympy(ground_truth_elem, given_elem)\n except Exception as e:\n # if there's an error, we'll just say it's not correct\n is_correct = False\n print(f\"Error: {e} from are_equal_under_sympy, {ground_truth_elem}, {given_elem}\")\n if not is_correct:\n break\n\n return is_correct\n\n\ndef remove_boxed(s):\n left 
= \"\\\\boxed{\"\n try:\n assert s[: len(left)] == left\n assert s[-1] == \"}\"\n return s[len(left) : -1]\n except Exception:\n return None\n\n\ndef _last_boxed_only_string(string):\n idx = string.rfind(\"\\\\boxed\")\n if idx < 0:\n idx = string.rfind(\"\\\\fbox\")\n if idx < 0:\n return None\n\n i = idx\n left_brace_idx = None\n right_brace_idx = None\n num_left_braces_open = 0\n while i < len(string):\n if string[i] == \"{\":\n num_left_braces_open += 1\n if left_brace_idx is None:\n left_brace_idx = i\n elif string[i] == \"}\":\n num_left_braces_open -= 1\n if num_left_braces_open == 0:\n right_brace_idx = i\n break\n\n i += 1\n\n if left_brace_idx is None or right_brace_idx is None:\n return None\n\n return string[left_brace_idx + 1 : right_brace_idx].strip()\n\n\ndef match_answer(response):\n is_matched = False\n for ans_marker in [\"answer:\", \"answer is\", \"answers are\"]:\n ans_idx = response.lower().rfind(ans_marker)\n if ans_idx != -1:\n is_matched = True\n response = response[ans_idx + len(ans_marker) :].strip()\n if response.endswith(\"\\n\"):\n response = response[:-2]\n\n for ans_marker in [\"is answer\", \"is the answer\", \"are answers\", \"are the answers\"]:\n ans_idx = response.lower().rfind(ans_marker)\n if ans_idx != -1:\n is_matched = True\n response = response[:ans_idx].strip()\n if response.endswith(\"\\n\"):\n response = response[:-2]\n\n # Find boxed\n ans_boxed = _last_boxed_only_string(response)\n if ans_boxed:\n is_matched = True\n response = ans_boxed\n\n if \". \" in response:\n dot_idx = response.lower().rfind(\". 
\")\n if dot_idx != -1:\n response = response[:dot_idx].strip()\n\n for ans_marker in [\"be \", \"is \", \"are \", \"=\", \": \", \"get \", \"be\\n\", \"is\\n\", \"are\\n\", \":\\n\", \"get\\n\"]:\n ans_idx = response.lower().rfind(ans_marker)\n if ans_idx != -1:\n is_matched = True\n response = response[ans_idx + len(ans_marker) :].strip()\n if response.endswith(\"\\n\"):\n response = response[:-2]\n\n is_matched = is_matched if any([c.isdigit() for c in response]) else False # answer must have a digit\n # Grade\n return is_matched, response\n\n\ndef compute_score(model_output: str, ground_truth: str) -> bool:\n model_output = str(model_output)\n ground_truth = str(ground_truth)\n\n is_matched, extracted_model_output = match_answer(model_output)\n format_correctness = \"Step 2:\" in model_output and \"\\\\box\" in model_output\n\n # grade simple algebra questions. if succeeded, return; otherwise, proceed to more complex grading\n if grade_answer(extracted_model_output, ground_truth):\n return True, True, extracted_model_output\n\n try:\n if \"\\\\pi\" in extracted_model_output or \"\\\\pi\" in ground_truth:\n equivs = []\n for pi in [math.pi, 3.14]:\n equivs.append(math_equal(extracted_model_output, ground_truth, timeout=True, pi=pi))\n is_correct = any(equivs)\n else:\n is_correct = math_equal(extracted_model_output, ground_truth, timeout=True)\n except Exception:\n is_correct = False\n\n return is_correct, format_correctness, extracted_model_output\n"} {"file_name": "verl__utils__tracking.py", "text": "# Copyright 2024 Bytedance Ltd. 
and/or its affiliates\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nA unified tracking interface that supports logging data to different backend\n\"\"\"\n\nimport dataclasses\nimport json\nimport os\nfrom enum import Enum\nfrom functools import partial\nfrom pathlib import Path\nfrom typing import Any\n\nimport orjson\n\n\nclass Tracking:\n \"\"\"A unified tracking interface for logging experiment data to multiple backends.\n\n This class provides a centralized way to log experiment metrics, parameters, and artifacts\n to various tracking backends including WandB, MLflow, SwanLab, TensorBoard, and console.\n\n Attributes:\n supported_backend: List of supported tracking backends.\n logger: Dictionary of initialized logger instances for each backend.\n \"\"\"\n\n supported_backend = [\n \"wandb\",\n \"mlflow\",\n \"swanlab\",\n \"vemlp_wandb\",\n \"tensorboard\",\n \"console\",\n \"clearml\",\n \"trackio\",\n \"file\",\n ]\n\n def __init__(self, project_name, experiment_name, default_backend: str | list[str] = \"console\", config=None):\n if isinstance(default_backend, str):\n default_backend = [default_backend]\n for backend in default_backend:\n if backend == \"tracking\":\n import warnings\n\n warnings.warn(\"`tracking` logger is deprecated. 
use `wandb` instead.\", DeprecationWarning, stacklevel=2)\n else:\n assert backend in self.supported_backend, f\"{backend} is not supported\"\n\n self.logger = {}\n\n if \"tracking\" in default_backend or \"wandb\" in default_backend:\n import os\n\n import wandb\n\n settings = None\n if config and config[\"trainer\"].get(\"wandb_proxy\", None):\n settings = wandb.Settings(https_proxy=config[\"trainer\"][\"wandb_proxy\"])\n entity = os.environ.get(\"WANDB_ENTITY\", None)\n wandb.init(project=project_name, name=experiment_name, entity=entity, config=config, settings=settings)\n self.logger[\"wandb\"] = wandb\n\n if \"trackio\" in default_backend:\n import trackio\n\n trackio.init(project=project_name, name=experiment_name, config=config)\n self.logger[\"trackio\"] = trackio\n\n if \"mlflow\" in default_backend:\n import os\n\n import mlflow\n\n MLFLOW_TRACKING_URI = os.environ.get(\"MLFLOW_TRACKING_URI\", \"sqlite:////tmp/mlruns.db\")\n mlflow.set_tracking_uri(MLFLOW_TRACKING_URI)\n\n # Some cloud providers like Azure ML or Databricks automatically set MLFLOW_RUN_ID\n # If set, attach to the existing run instead of creating a new one\n run_id = os.environ.get(\"MLFLOW_RUN_ID\")\n if run_id:\n mlflow.start_run(run_id=run_id)\n else:\n # Project_name is actually experiment_name in MLFlow\n # If experiment does not exist, will create a new experiment\n experiment = mlflow.set_experiment(project_name)\n mlflow.start_run(experiment_id=experiment.experiment_id, run_name=experiment_name)\n\n mlflow.log_params(_compute_mlflow_params_from_objects(config))\n self.logger[\"mlflow\"] = _MlflowLoggingAdapter()\n\n if \"swanlab\" in default_backend:\n import os\n\n import swanlab\n\n SWANLAB_API_KEY = os.environ.get(\"SWANLAB_API_KEY\", None)\n SWANLAB_LOG_DIR = os.environ.get(\"SWANLAB_LOG_DIR\", \"swanlog\")\n SWANLAB_MODE = os.environ.get(\"SWANLAB_MODE\", \"cloud\")\n if SWANLAB_API_KEY:\n swanlab.login(SWANLAB_API_KEY) # NOTE: previous login information will be 
overwritten\n\n if config is None:\n config = {} # make sure config is not None, otherwise **config will raise error\n swanlab.init(\n project=project_name,\n experiment_name=experiment_name,\n config={\"FRAMEWORK\": \"verl\", **config},\n logdir=SWANLAB_LOG_DIR,\n mode=SWANLAB_MODE,\n )\n self.logger[\"swanlab\"] = swanlab\n\n if \"vemlp_wandb\" in default_backend:\n import os\n\n import volcengine_ml_platform\n from volcengine_ml_platform import wandb as vemlp_wandb\n\n volcengine_ml_platform.init(\n ak=os.environ[\"VOLC_ACCESS_KEY_ID\"],\n sk=os.environ[\"VOLC_SECRET_ACCESS_KEY\"],\n region=os.environ[\"MLP_TRACKING_REGION\"],\n )\n\n vemlp_wandb.init(\n project=project_name,\n name=experiment_name,\n config=config,\n sync_tensorboard=True,\n )\n self.logger[\"vemlp_wandb\"] = vemlp_wandb\n\n if \"tensorboard\" in default_backend:\n self.logger[\"tensorboard\"] = _TensorboardAdapter(project_name, experiment_name)\n\n if \"console\" in default_backend:\n from verl.utils.logger import LocalLogger\n\n self.console_logger = LocalLogger(print_to_console=True)\n self.logger[\"console\"] = self.console_logger\n\n if \"clearml\" in default_backend:\n self.logger[\"clearml\"] = ClearMLLogger(project_name, experiment_name, config)\n\n if \"file\" in default_backend:\n self.logger[\"file\"] = FileLogger(project_name, experiment_name)\n\n def log(self, data, step, backend=None):\n for default_backend, logger_instance in self.logger.items():\n if backend is None or default_backend in backend:\n logger_instance.log(data=data, step=step)\n\n def __del__(self):\n if \"wandb\" in self.logger:\n self.logger[\"wandb\"].finish(exit_code=0)\n if \"swanlab\" in self.logger:\n self.logger[\"swanlab\"].finish()\n if \"vemlp_wandb\" in self.logger:\n self.logger[\"vemlp_wandb\"].finish(exit_code=0)\n if \"tensorboard\" in self.logger:\n self.logger[\"tensorboard\"].finish()\n if \"clearml\" in self.logger:\n self.logger[\"clearml\"].finish()\n if \"trackio\" in self.logger:\n 
self.logger[\"trackio\"].finish()\n if \"file\" in self.logger:\n self.logger[\"file\"].finish()\n\n\nclass ClearMLLogger:\n def __init__(self, project_name: str, experiment_name: str, config):\n self.project_name = project_name\n self.experiment_name = experiment_name\n\n import clearml\n\n self._task: clearml.Task = clearml.Task.init(\n task_name=experiment_name,\n project_name=project_name,\n continue_last_task=True,\n output_uri=False,\n )\n\n self._task.connect_configuration(config, name=\"Hyperparameters\")\n\n def _get_logger(self):\n return self._task.get_logger()\n\n def log(self, data, step):\n import numpy as np\n import pandas as pd\n\n # logs = self._rewrite_logs(data)\n logger = self._get_logger()\n for k, v in data.items():\n title, series = k.split(\"/\", 1)\n\n if isinstance(v, int | float | np.floating | np.integer):\n logger.report_scalar(\n title=title,\n series=series,\n value=v,\n iteration=step,\n )\n elif isinstance(v, pd.DataFrame):\n logger.report_table(\n title=title,\n series=series,\n table_plot=v,\n iteration=step,\n )\n else:\n logger.warning(\n f'Trainer is attempting to log a value of \"{v}\" of type {type(v)} for key \"{k}\". This '\n f\"invocation of ClearML logger's function is incorrect so this attribute was dropped. 
\"\n )\n\n def finish(self):\n self._task.close()\n\n\nclass FileLogger:\n def __init__(self, project_name: str, experiment_name: str):\n self.project_name = project_name\n self.experiment_name = experiment_name\n\n self.filepath = os.getenv(\"VERL_FILE_LOGGER_PATH\", None)\n if self.filepath is None:\n root_path = os.path.expanduser(os.getenv(\"VERL_FILE_LOGGER_ROOT\", \".\"))\n directory = os.path.join(root_path, self.project_name)\n os.makedirs(directory, exist_ok=True)\n self.filepath = os.path.join(directory, f\"{self.experiment_name}.jsonl\")\n print(f\"Creating file logger at {self.filepath}\")\n self.fp = open(self.filepath, \"wb\", buffering=0)\n\n def log(self, data, step):\n data = {\"step\": step, \"data\": data}\n self.fp.write(orjson.dumps(data, option=orjson.OPT_SERIALIZE_NUMPY) + b\"\\n\")\n\n def finish(self):\n self.fp.close()\n\n\nclass _TensorboardAdapter:\n def __init__(self, project_name, experiment_name):\n import os\n\n from torch.utils.tensorboard import SummaryWriter\n\n tensorboard_dir = os.environ.get(\"TENSORBOARD_DIR\", f\"tensorboard_log/{project_name}/{experiment_name}\")\n os.makedirs(tensorboard_dir, exist_ok=True)\n print(f\"Saving tensorboard log to {tensorboard_dir}.\")\n self.writer = SummaryWriter(tensorboard_dir)\n\n def log(self, data, step):\n for key in data:\n self.writer.add_scalar(key, data[key], step)\n\n def finish(self):\n self.writer.close()\n\n\nclass _MlflowLoggingAdapter:\n def __init__(self):\n import logging\n import re\n\n self.logger = logging.getLogger(__name__)\n # MLflow metric key validation logic:\n # https://github.com/mlflow/mlflow/blob/master/mlflow/utils/validation.py#L157C12-L157C44\n # Only characters allowed: slashes, alphanumerics, underscores, periods, dashes, colons,\n # and spaces.\n self._invalid_chars_pattern = re.compile(\n r\"[^/\\w.\\- :]\"\n ) # Allowed: slashes, alphanumerics, underscores, periods, dashes, colons, and spaces.\n self._consecutive_slashes_pattern = re.compile(r\"/+\")\n\n 
def log(self, data, step):\n import mlflow\n\n def sanitize_key(key):\n # First replace @ with _at_ for backward compatibility\n sanitized = key.replace(\"@\", \"_at_\")\n # Replace consecutive slashes with a single slash (MLflow treats them as file paths)\n sanitized = self._consecutive_slashes_pattern.sub(\"/\", sanitized)\n # Then replace any other invalid characters with _\n sanitized = self._invalid_chars_pattern.sub(\"_\", sanitized)\n if sanitized != key:\n self.logger.warning(\n \"[MLflow] Metric key '%s' sanitized to '%s' due to invalid characters.\", key, sanitized\n )\n return sanitized\n\n results = {sanitize_key(k): v for k, v in data.items()}\n mlflow.log_metrics(metrics=results, step=step)\n\n\ndef _compute_mlflow_params_from_objects(params) -> dict[str, Any]:\n if params is None:\n return {}\n\n return _flatten_dict(_transform_params_to_json_serializable(params, convert_list_to_dict=True), sep=\"/\")\n\n\ndef _transform_params_to_json_serializable(x, convert_list_to_dict: bool):\n _transform = partial(_transform_params_to_json_serializable, convert_list_to_dict=convert_list_to_dict)\n\n if dataclasses.is_dataclass(x):\n return _transform(dataclasses.asdict(x))\n if isinstance(x, dict):\n return {k: _transform(v) for k, v in x.items()}\n if isinstance(x, list):\n if convert_list_to_dict:\n return {\"list_len\": len(x)} | {f\"{i}\": _transform(v) for i, v in enumerate(x)}\n else:\n return [_transform(v) for v in x]\n if isinstance(x, Path):\n return str(x)\n if isinstance(x, Enum):\n return x.value\n\n return x\n\n\ndef _flatten_dict(raw: dict[str, Any], *, sep: str) -> dict[str, Any]:\n import pandas as pd\n\n ans = pd.json_normalize(raw, sep=sep).to_dict(orient=\"records\")[0]\n assert isinstance(ans, dict)\n return ans\n\n\n@dataclasses.dataclass\nclass ValidationGenerationsLogger:\n project_name: str = None\n experiment_name: str = None\n\n def log(self, loggers, samples, step):\n if \"wandb\" in loggers:\n self.log_generations_to_wandb(samples, 
step)\n if \"swanlab\" in loggers:\n self.log_generations_to_swanlab(samples, step)\n if \"mlflow\" in loggers:\n self.log_generations_to_mlflow(samples, step)\n\n if \"clearml\" in loggers:\n self.log_generations_to_clearml(samples, step)\n if \"tensorboard\" in loggers:\n self.log_generations_to_tensorboard(samples, step)\n\n if \"vemlp_wandb\" in loggers:\n self.log_generations_to_vemlp_wandb(samples, step)\n\n def log_generations_to_vemlp_wandb(self, samples, step):\n from volcengine_ml_platform import wandb as vemlp_wandb\n\n self._log_generations_to_wandb(samples, step, vemlp_wandb)\n\n def log_generations_to_wandb(self, samples, step):\n import wandb\n\n self._log_generations_to_wandb(samples, step, wandb)\n\n def _log_generations_to_wandb(self, samples, step, wandb):\n \"\"\"Log samples to wandb as a table\"\"\"\n\n # Create column names for all samples\n columns = [\"step\"] + sum(\n [[f\"input_{i + 1}\", f\"output_{i + 1}\", f\"score_{i + 1}\"] for i in range(len(samples))], []\n )\n\n if not hasattr(self, \"validation_table\"):\n # Initialize the table on first call\n self.validation_table = wandb.Table(columns=columns)\n\n # Create a new table with same columns and existing data\n # Workaround for https://github.com/wandb/wandb/issues/2981#issuecomment-1997445737\n new_table = wandb.Table(columns=columns, data=self.validation_table.data)\n\n # Add new row with all data\n row_data = []\n row_data.append(step)\n for sample in samples:\n row_data.extend(sample)\n\n new_table.add_data(*row_data)\n\n # Update reference and log\n if wandb.run is not None:\n wandb.log({\"val/generations\": new_table}, step=step)\n self.validation_table = new_table\n\n def log_generations_to_swanlab(self, samples, step):\n \"\"\"Log samples to swanlab as text\"\"\"\n import swanlab\n\n swanlab_table = swanlab.echarts.Table()\n\n # Create column names\n headers = [\"step\", \"input\", \"output\", \"score\"]\n\n swanlab_row_list = [[step, *sample] for sample in samples]\n 
swanlab_table.add(headers=headers, rows=swanlab_row_list)\n\n # Log to swanlab\n swanlab.log({\"val/generations\": swanlab_table}, step=step)\n\n def log_generations_to_mlflow(self, samples, step):\n \"\"\"Log validation generation to mlflow as artifacts\"\"\"\n # https://mlflow.org/docs/latest/api_reference/python_api/mlflow.html?highlight=log_artifact#mlflow.log_artifact\n\n import tempfile\n\n import mlflow\n\n try:\n with tempfile.TemporaryDirectory() as tmp_dir:\n validation_gen_step_file = Path(tmp_dir, f\"val_step{step}.json\")\n row_data = []\n for sample in samples:\n data = {\"input\": sample[0], \"output\": sample[1], \"score\": sample[2]}\n row_data.append(data)\n with open(validation_gen_step_file, \"w\") as file:\n json.dump(row_data, file)\n mlflow.log_artifact(validation_gen_step_file)\n except Exception as e:\n print(f\"WARNING: save validation generation file to mlflow failed with error {e}\")\n\n def log_generations_to_clearml(self, samples, step):\n \"\"\"Log validation generation to clearml as table\"\"\"\n\n import clearml\n import pandas as pd\n\n task: clearml.Task | None = clearml.Task.current_task()\n if task is None:\n return\n\n table = [\n {\n \"step\": step,\n \"input\": sample[0],\n \"output\": sample[1],\n \"score\": sample[2],\n }\n for sample in samples\n ]\n\n logger = task.get_logger()\n logger.report_table(\n series=\"Validation generations\",\n title=\"Validation\",\n table_plot=pd.DataFrame.from_records(table),\n iteration=step,\n )\n\n def log_generations_to_tensorboard(self, samples, step):\n \"\"\"Log samples to tensorboard as text\"\"\"\n # Initialize tensorboard writer if not exists\n if not hasattr(self, \"writer\"):\n from torch.utils.tensorboard import SummaryWriter\n\n # Use the same directory structure as _TensorboardAdapter\n if self.project_name and self.experiment_name:\n default_dir = os.path.join(\"tensorboard_log\", self.project_name, self.experiment_name)\n else:\n default_dir = \"tensorboard_log\"\n\n 
tensorboard_dir = os.environ.get(\"TENSORBOARD_DIR\", default_dir)\n os.makedirs(tensorboard_dir, exist_ok=True)\n self.writer = SummaryWriter(log_dir=tensorboard_dir)\n\n # Format the samples data into readable text\n text_content = f\"**Generation Results - Step {step}**\\n\\n\"\n\n for i, sample in enumerate(samples):\n text_content += f\"### Sample {i + 1}\\n\"\n\n # Assuming sample contains [input, output, score]\n if len(sample) >= 3:\n input_text, output_text, score = sample[0], sample[1], sample[2]\n\n text_content += f\"**Input:** {input_text}\\n\\n\"\n text_content += f\"**Output:** {output_text}\\n\\n\"\n text_content += f\"**Score:** {score}\\n\\n\"\n else:\n # Handle cases where sample format might be different\n text_content += f\"**Data:** {sample}\\n\\n\"\n\n text_content += \"---\\n\\n\"\n\n # Log to tensorboard as text\n self.writer.add_text(\"val/generations\", text_content, step)\n # Flush to ensure data is written\n self.writer.flush()\n"} {"file_name": "verl__workers__actor__dp_actor.py", "text": "# Copyright 2024 Bytedance Ltd. and/or its affiliates\n# Copyright 2023-2024 SGLang Team\n# Copyright 2025 ModelBest Inc. 
and/or its affiliates\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nSingle Process Actor\n\"\"\"\n\nimport logging\nimport os\n\nimport torch\nfrom torch import nn\nfrom torch.distributed.fsdp import FullyShardedDataParallel as FSDP\nfrom torch.distributed.tensor import DTensor\n\nimport verl.utils.torch_functional as verl_F\nfrom verl import DataProto\nfrom verl.trainer.ppo.core_algos import agg_loss, get_policy_loss_fn, kl_penalty\nfrom verl.utils.attention_utils import index_first_axis, pad_input, rearrange, unpad_input\nfrom verl.utils.device import get_device_id, get_device_name\nfrom verl.utils.fsdp_utils import FSDPModule, fsdp2_clip_grad_norm_\nfrom verl.utils.profiler import GPUMemoryLogger\nfrom verl.utils.py_functional import append_to_dict\nfrom verl.utils.seqlen_balancing import prepare_dynamic_batch, restore_dynamic_batch\nfrom verl.utils.torch_dtypes import PrecisionType\nfrom verl.utils.torch_functional import logprobs_from_logits\nfrom verl.utils.ulysses import gather_outputs_and_unpad, ulysses_pad, ulysses_pad_and_slice_inputs\nfrom verl.workers.actor import BasePPOActor\nfrom verl.workers.config import ActorConfig\n\n__all__ = [\"DataParallelPPOActor\"]\n\nlogger = logging.getLogger(__file__)\nlogger.setLevel(os.getenv(\"VERL_LOGGING_LEVEL\", \"WARN\"))\n\n\nclass DataParallelPPOActor(BasePPOActor):\n \"\"\"FSDP DataParallel PPO Actor or Ref worker\n\n Args:\n config (ActorConfig): Actor config\n actor_module (nn.Module): Actor or ref 
module\n actor_optimizer (torch.optim.Optimizer, optional): Actor optimizer. Defaults to None.\n \"\"\"\n\n def __init__(self, config: ActorConfig, actor_module: nn.Module, actor_optimizer: torch.optim.Optimizer = None):\n \"\"\"When optimizer is None, it is Reference Policy\"\"\"\n super().__init__(config)\n self.actor_module = actor_module\n self.actor_optimizer = actor_optimizer\n role = \"Ref\" if actor_optimizer is None else \"Actor\"\n\n self.use_remove_padding = self.config.get(\"use_remove_padding\", False)\n if torch.distributed.get_rank() == 0:\n print(f\"{role} use_remove_padding={self.use_remove_padding}\")\n self.use_fused_kernels = self.config.get(\"use_fused_kernels\", False)\n if torch.distributed.get_rank() == 0:\n print(f\"{role} use_fused_kernels={self.use_fused_kernels}\")\n\n self.ulysses_sequence_parallel_size = self.config.ulysses_sequence_parallel_size\n self.use_ulysses_sp = self.ulysses_sequence_parallel_size > 1\n\n self.use_dynamic_bsz = self.config.get(\"use_dynamic_bsz\", False)\n\n self.use_prefix_grouper = self.config.get(\"use_prefix_grouper\", False)\n if torch.distributed.get_rank() == 0:\n print(f\"{role} use_prefix_grouper={self.use_prefix_grouper}\")\n\n if self.config.entropy_from_logits_with_chunking:\n entropy_from_logits = verl_F.entropy_from_logits_with_chunking\n else:\n entropy_from_logits = verl_F.entropy_from_logits\n\n self.compute_entropy_from_logits = (\n torch.compile(entropy_from_logits, dynamic=True)\n if self.config.get(\"use_torch_compile\", True) # use torch compile by default\n else entropy_from_logits\n )\n self.device_name = get_device_name()\n self.param_dtype = PrecisionType.to_dtype(self.config.fsdp_config.get(\"dtype\", \"bfloat16\"))\n if self.param_dtype == torch.float16:\n from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler\n\n self.scaler = ShardedGradScaler(growth_interval=400)\n else:\n self.scaler = None\n\n # Sum of squared probabilities computation (for 
optimal_token_baseline)\n # Only initialize if calculate_sum_pi_squared config is enabled\n if self.config.get(\"calculate_sum_pi_squared\", False):\n self.calculate_sum_pi_squared_from_logits = (\n torch.compile(verl_F.calculate_sum_pi_squared_from_logits, dynamic=True)\n if self.config.get(\"use_torch_compile\", True)\n else verl_F.calculate_sum_pi_squared_from_logits\n )\n assert not (self.use_fused_kernels or self.use_prefix_grouper), (\n \"calculate_sum_pi_squared is not supported with \"\n f\"{self.use_fused_kernels=} or {self.use_prefix_grouper=} for now.\"\n )\n\n def _forward_micro_batch(\n self, micro_batch: dict[str, torch.Tensor], temperature: float, calculate_entropy: bool = False\n ) -> dict[str, torch.Tensor]:\n \"\"\"\n Returns:\n dict[str, torch.Tensor]:\n log_probs: (bs, response_len)\n if calculate_entropy is True:\n entropys: (bs, response_len)\n if calculate_sum_pi_squared is False:\n sum_pi_squared: (bs, response_len)\n \"\"\"\n calculate_sum_pi_squared = self.config.get(\"calculate_sum_pi_squared\", False)\n sum_pi_squared_checkpointing = self.config.get(\"sum_pi_squared_checkpointing\", False)\n # PrefixGrouper path for shared-prefix optimization\n if self.use_prefix_grouper:\n can_use_pg = (\n not self.use_remove_padding\n and not self.use_ulysses_sp\n and not self.use_fused_kernels\n and not self.use_dynamic_bsz\n )\n if can_use_pg and \"response_mask\" in micro_batch and \"uid\" in micro_batch:\n from verl.trainer.ppo.prefix_grouper_utils import forward_micro_batch_with_prefix_grouper\n\n return forward_micro_batch_with_prefix_grouper(\n micro_batch=micro_batch,\n model=self.actor_module,\n temperature=temperature,\n calculate_entropy=calculate_entropy,\n device_name=self.device_name,\n param_dtype=self.param_dtype,\n use_chunking_entropy=self.config.get(\"entropy_from_logits_with_chunking\", False),\n )\n\n response_length = micro_batch[\"responses\"].size(-1)\n multi_modal_inputs = {}\n if \"multi_modal_inputs\" in micro_batch.keys():\n 
from verl.utils.model import extract_multi_modal_inputs\n\n multi_modal_inputs = extract_multi_modal_inputs(micro_batch[\"multi_modal_inputs\"])\n\n with torch.autocast(device_type=self.device_name, dtype=self.param_dtype):\n input_ids = micro_batch[\"input_ids\"]\n batch_size, seqlen = input_ids.shape\n attention_mask = micro_batch[\"attention_mask\"]\n position_ids = micro_batch[\"position_ids\"]\n entropy = None\n if position_ids.dim() == 3: # qwen2vl mrope\n position_ids = position_ids.transpose(0, 1) # (bsz, 4, seqlen) -> (4, bsz, seqlen)\n\n if self.use_remove_padding:\n input_ids_rmpad, indices, cu_seqlens, *_ = unpad_input(\n input_ids.unsqueeze(-1), attention_mask\n ) # input_ids_rmpad (total_nnz, ...)\n input_ids_rmpad = input_ids_rmpad.transpose(0, 1) # (1, total_nnz)\n\n # unpad the position_ids to align the rotary\n if position_ids.dim() == 3:\n position_ids_rmpad = (\n index_first_axis(rearrange(position_ids, \"c b s ... -> (b s) c ...\"), indices)\n .transpose(0, 1)\n .unsqueeze(1)\n ) # (4, bsz, seqlen) -> (4, 1, bsz * seqlen)\n else:\n position_ids_rmpad = index_first_axis(\n rearrange(position_ids.unsqueeze(-1), \"b s ... 
-> (b s) ...\"), indices\n ).transpose(0, 1)\n\n is_mask_all_zero = attention_mask.sum() == 0\n if is_mask_all_zero:\n input_ids_rmpad = torch.zeros(\n (1, self.ulysses_sequence_parallel_size),\n device=input_ids.device,\n dtype=input_ids.dtype,\n )\n if position_ids.dim() == 3:\n position_ids_rmpad = torch.zeros(\n (position_ids.shape[0], 1, self.ulysses_sequence_parallel_size),\n device=position_ids.device,\n dtype=position_ids.dtype,\n )\n else:\n position_ids_rmpad = torch.zeros(\n (1, self.ulysses_sequence_parallel_size),\n device=position_ids.device,\n dtype=position_ids.dtype,\n )\n\n if \"image_bound\" in multi_modal_inputs:\n from verl.utils.dataset.vision_utils import process_multi_modal_inputs_for_minicpmo\n\n multi_modal_inputs = process_multi_modal_inputs_for_minicpmo(\n input_ids, attention_mask, position_ids, cu_seqlens, multi_modal_inputs\n )\n\n # for compute the log_prob\n input_ids_rmpad_rolled = torch.roll(input_ids_rmpad, shifts=-1, dims=1) # (1, total_nnz)\n\n # pad and slice the inputs if sp > 1\n if self.use_ulysses_sp:\n is_vlm_model = hasattr(\n getattr(self.actor_module, \"module\", self.actor_module).config, \"vision_config\"\n )\n if is_vlm_model:\n # vlm model's inputs will be sliced after embedding\n input_ids_rmpad, position_ids_rmpad, pad_size = ulysses_pad(\n input_ids_rmpad,\n position_ids_rmpad=position_ids_rmpad,\n sp_size=self.ulysses_sequence_parallel_size,\n )\n else:\n input_ids_rmpad, position_ids_rmpad, pad_size = ulysses_pad_and_slice_inputs(\n input_ids_rmpad,\n position_ids_rmpad=position_ids_rmpad,\n sp_size=self.ulysses_sequence_parallel_size,\n )\n input_ids_rmpad_rolled, _, _ = ulysses_pad_and_slice_inputs(\n input_ids_rmpad_rolled,\n position_ids_rmpad=None,\n sp_size=self.ulysses_sequence_parallel_size,\n )\n\n input_ids_rmpad_rolled = input_ids_rmpad_rolled.squeeze(0) # ((total_nnz / sp) + pad)\n\n # only pass input_ids and position_ids to enable flash_attn_varlen\n extra_args = {}\n if self.use_fused_kernels:\n 
extra_args[\"temperature\"] = temperature\n extra_args[\"return_dict\"] = True\n\n output = self.actor_module(\n input_ids=input_ids_rmpad,\n attention_mask=None,\n position_ids=position_ids_rmpad,\n **multi_modal_inputs,\n use_cache=False,\n **extra_args,\n ) # prevent model thinks we are generating\n\n if self.use_fused_kernels:\n log_probs = output.log_probs.squeeze(0) # (total_nnz,)\n entropy_rmpad = output.entropy.squeeze(0) # (total_nnz,)\n\n else:\n logits_rmpad = output.logits.squeeze(0) # (total_nnz, vocab_size)\n logits_rmpad.div_(temperature)\n\n # if use_sp: ((total_nnz / sp) + pad) ; if not use_sp: (batch, seqlen)\n inplace_backward = True\n if calculate_entropy:\n inplace_backward = False\n log_probs = logprobs_from_logits(\n logits=logits_rmpad,\n labels=input_ids_rmpad_rolled,\n inplace_backward=inplace_backward,\n )\n\n # compute entropy\n if calculate_entropy:\n # ((total_nnz / sp) + pad)\n entropy_rmpad = (\n self.compute_entropy_from_logits(logits_rmpad)\n if not self.config.entropy_checkpointing\n else torch.utils.checkpoint.checkpoint(self.compute_entropy_from_logits, logits_rmpad)\n )\n\n # Compute sum_pi_squared if requested (for optimal_token_baseline)\n if calculate_sum_pi_squared:\n sum_pi_squared_rmpad = (\n self.calculate_sum_pi_squared_from_logits(logits_rmpad)\n if not sum_pi_squared_checkpointing\n else torch.utils.checkpoint.checkpoint(\n self.calculate_sum_pi_squared_from_logits, logits_rmpad\n )\n )\n\n # gather log_prob if sp > 1\n if self.use_ulysses_sp:\n # gather and unpad for the ulysses sp\n log_probs = gather_outputs_and_unpad(\n log_probs,\n gather_dim=0,\n unpad_dim=0,\n padding_size=pad_size,\n )\n if calculate_entropy:\n entropy_rmpad = gather_outputs_and_unpad(\n entropy_rmpad,\n gather_dim=0,\n unpad_dim=0,\n padding_size=pad_size,\n )\n if calculate_sum_pi_squared:\n sum_pi_squared_rmpad = gather_outputs_and_unpad(\n sum_pi_squared_rmpad, gather_dim=0, unpad_dim=0, padding_size=pad_size\n )\n\n if is_mask_all_zero:\n 
log_probs = log_probs[:0]\n if calculate_entropy:\n entropy_rmpad = entropy_rmpad[:0]\n\n # pad back to (bsz, seqlen)\n if calculate_entropy:\n full_entropy = pad_input(\n hidden_states=entropy_rmpad.unsqueeze(-1),\n indices=indices,\n batch=batch_size,\n seqlen=seqlen,\n )\n if calculate_sum_pi_squared:\n full_sum_pi_squared = pad_input(\n hidden_states=sum_pi_squared_rmpad.unsqueeze(-1),\n indices=indices,\n batch=batch_size,\n seqlen=seqlen,\n )\n full_log_probs = pad_input(\n hidden_states=log_probs.unsqueeze(-1),\n indices=indices,\n batch=batch_size,\n seqlen=seqlen,\n )\n\n # only return response part:\n if calculate_entropy:\n entropy = full_entropy.squeeze(-1)[:, -response_length - 1 : -1] # (bsz, response_length)\n if calculate_sum_pi_squared:\n # (bsz, response_length)\n sum_pi_squared = full_sum_pi_squared.squeeze(-1)[:, -response_length - 1 : -1]\n log_probs = full_log_probs.squeeze(-1)[:, -response_length - 1 : -1] # (bsz, response_length)\n\n else: # not using rmpad and no ulysses sp\n extra_args = {}\n if self.use_fused_kernels:\n extra_args[\"temperature\"] = temperature\n extra_args[\"return_dict\"] = True\n\n output = self.actor_module(\n input_ids=input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n **multi_modal_inputs,\n use_cache=False,\n **extra_args,\n ) # prevent model thinks we are generating\n\n if self.use_fused_kernels:\n log_probs = output.log_probs[:, -response_length - 1 : -1]\n entropy = output.entropy[:, -response_length - 1 : -1] # (bsz, response_length)\n\n else:\n logits = output.logits\n\n logits.div_(temperature)\n logits = logits[:, -response_length - 1 : -1, :] # (bsz, response_length, vocab_size)\n log_probs = logprobs_from_logits(logits, micro_batch[\"responses\"])\n if calculate_entropy:\n if not self.config.entropy_checkpointing:\n entropy = verl_F.entropy_from_logits(logits) # (bsz, response_length)\n else:\n entropy = torch.utils.checkpoint.checkpoint(verl_F.entropy_from_logits, logits)\n # 
Compute sum_pi_squared if requested (for optimal_token_baseline)\n if calculate_sum_pi_squared:\n sum_pi_squared = (\n self.calculate_sum_pi_squared_from_logits(logits)\n if not sum_pi_squared_checkpointing\n else torch.utils.checkpoint.checkpoint(self.calculate_sum_pi_squared_from_logits, logits)\n )\n\n outputs = {\"log_probs\": log_probs}\n if calculate_entropy:\n outputs[\"entropys\"] = entropy\n if calculate_sum_pi_squared:\n outputs[\"sum_pi_squared\"] = sum_pi_squared\n return outputs\n\n def _optimizer_step(self):\n assert self.config.grad_clip is not None\n if self.scaler is not None:\n self.scaler.unscale_(self.actor_optimizer)\n if isinstance(self.actor_module, FSDP):\n grad_norm = self.actor_module.clip_grad_norm_(max_norm=self.config.grad_clip)\n elif isinstance(self.actor_module, FSDPModule):\n grad_norm = fsdp2_clip_grad_norm_(self.actor_module.parameters(), max_norm=self.config.grad_clip)\n else:\n grad_norm = torch.nn.utils.clip_grad_norm_(self.actor_module.parameters(), max_norm=self.config.grad_clip)\n\n if isinstance(grad_norm, DTensor):\n grad_norm = grad_norm.full_tensor()\n\n # if grad_norm is not finite, skip the update\n if self.scaler is not None:\n self.scaler.step(self.actor_optimizer)\n self.scaler.update()\n else:\n if not torch.isfinite(grad_norm):\n print(f\"WARN: rank {torch.distributed.get_rank()} grad_norm is not finite: {grad_norm}\")\n self.actor_optimizer.zero_grad()\n else:\n self.actor_optimizer.step()\n\n # Clear cached weight scales for QAT (weights changed)\n if getattr(self.actor_module, \"_qat_fuse_enabled\", False):\n from verl.utils.qat import invalidate_all_scales\n\n invalidate_all_scales(self.actor_module)\n\n return grad_norm\n\n @GPUMemoryLogger(role=\"dp actor\", logger=logger)\n def compute_log_prob(self, data: DataProto, calculate_entropy: bool = False) -> dict[str, torch.Tensor]:\n \"\"\"Compute the log probability of the responses given input_ids, attention_mask and position_ids\n\n Args:\n data (DataProto): 
a DataProto containing keys\n\n ``input_ids``: tensor of shape [batch_size, sequence_length]. torch.int64. Note that input_ids is the\n concatenation of prompt and response. Note that ``sequence_length = prompt_length + response_length``.\n\n ``attention_mask``: tensor of shape [batch_size, sequence_length]. torch.int64.\n\n ``position_ids``: tensor of shape [batch_size, sequence_length]. torch.int64.\n\n ``responses``: tensor of shape [batch_size, response_length]. torch.int64.\n\n Returns:\n dict[str, torch.Tensor]: a dict containing keys\n - ``log_probs``: tensor of shape [batch_size, response_length]. torch.float32.\n - ``entropys``: tensor of shape [batch_size, response_length]. torch.float32.\n - ``sum_pi_squared``: tensor of shape [batch_size, response_length]. torch.float32.\n \"\"\"\n calculate_sum_pi_squared = self.config.get(\"calculate_sum_pi_squared\", False)\n\n # set to eval\n self.actor_module.eval()\n\n micro_batch_size = data.meta_info[\"micro_batch_size\"]\n temperature = data.meta_info[\"temperature\"] # temperature must be in the data.meta_info to avoid silent error\n use_dynamic_bsz = data.meta_info[\"use_dynamic_bsz\"]\n pad_token_id = data.meta_info.get(\"pad_token_id\", 0)\n has_multi_modal_inputs = \"multi_modal_inputs\" in data.non_tensor_batch.keys()\n\n select_keys = [\"responses\", \"input_ids\", \"attention_mask\", \"position_ids\"]\n non_tensor_select_keys = [\"multi_modal_inputs\"] if has_multi_modal_inputs else []\n if self.use_prefix_grouper:\n select_keys += [k for k in [\"prompts\", \"response_mask\"] if k in data.batch]\n if \"uid\" in data.non_tensor_batch:\n non_tensor_select_keys.append(\"uid\")\n\n data = data.select(batch_keys=select_keys, non_tensor_batch_keys=non_tensor_select_keys)\n\n if use_dynamic_bsz:\n max_token_len = data.meta_info[\"max_token_len\"] * self.ulysses_sequence_parallel_size\n micro_batches, batch_idx_list = prepare_dynamic_batch(data, max_token_len=max_token_len)\n else:\n micro_batches = 
data.split(micro_batch_size)\n\n log_probs_lst = []\n entropy_lst = []\n sum_pi_squared_lst = []\n for micro_batch in micro_batches:\n micro_batch = micro_batch.to(get_device_id())\n model_inputs = {**micro_batch.batch, **micro_batch.non_tensor_batch, \"pad_token_id\": pad_token_id}\n with torch.no_grad():\n outputs = self._forward_micro_batch(\n model_inputs, temperature=temperature, calculate_entropy=calculate_entropy\n )\n log_probs_lst.append(outputs[\"log_probs\"])\n if calculate_entropy:\n entropy_lst.append(outputs[\"entropys\"])\n if calculate_sum_pi_squared:\n sum_pi_squared_lst.append(outputs[\"sum_pi_squared\"])\n\n log_probs = torch.concat(log_probs_lst, dim=0)\n if calculate_entropy:\n entropys = torch.concat(entropy_lst, dim=0)\n if calculate_sum_pi_squared:\n sum_pi_squared = torch.concat(sum_pi_squared_lst, dim=0)\n\n if use_dynamic_bsz:\n log_probs = restore_dynamic_batch(log_probs, batch_idx_list)\n if calculate_entropy:\n entropys = restore_dynamic_batch(entropys, batch_idx_list)\n if calculate_sum_pi_squared:\n sum_pi_squared = restore_dynamic_batch(sum_pi_squared, batch_idx_list)\n\n outputs = {\"log_probs\": log_probs}\n if calculate_entropy:\n outputs[\"entropys\"] = entropys\n if calculate_sum_pi_squared:\n outputs[\"sum_pi_squared\"] = sum_pi_squared\n return outputs\n\n @GPUMemoryLogger(role=\"dp actor\", logger=logger)\n def update_policy(self, data: DataProto):\n # make sure we are in training mode\n self.actor_module.train()\n\n temperature = data.meta_info[\"temperature\"] # temperature must be in the data.meta_info to avoid silent error\n pad_token_id = data.meta_info.get(\"pad_token_id\", 0)\n\n select_keys = [\n \"responses\",\n \"response_mask\",\n \"input_ids\",\n \"attention_mask\",\n \"position_ids\",\n \"old_log_probs\",\n \"advantages\",\n ]\n if self.use_prefix_grouper and \"prompts\" in data.batch.keys():\n select_keys.append(\"prompts\")\n if self.config.use_kl_loss:\n select_keys.append(\"ref_log_prob\")\n # Include 
pre-computed IS weights if present in batch\n # Weights are computed centrally in trainer and added to batch when algorithm.rollout_is=True\n if \"rollout_is_weights\" in data.batch.keys():\n select_keys.append(\"rollout_is_weights\")\n # Include rollout_log_probs for computing rollout_corr metrics in bypass mode\n if \"rollout_log_probs\" in data.batch.keys():\n select_keys.append(\"rollout_log_probs\")\n\n has_multi_modal_inputs = \"multi_modal_inputs\" in data.non_tensor_batch.keys()\n non_tensor_select_keys = []\n if has_multi_modal_inputs:\n non_tensor_select_keys.append(\"multi_modal_inputs\")\n if self.use_prefix_grouper and \"uid\" in data.non_tensor_batch.keys():\n non_tensor_select_keys.append(\"uid\")\n\n data = data.select(batch_keys=select_keys, non_tensor_batch_keys=non_tensor_select_keys)\n\n # Split to make minibatch iterator for updating the actor\n # See PPO paper for details. https://arxiv.org/abs/1707.06347\n mini_batches = data.split(self.config.ppo_mini_batch_size)\n\n on_policy = len(mini_batches) == 1 and self.config.ppo_epochs == 1\n\n metrics = {\n \"actor/pg_loss\": 0.0,\n \"actor/kl_loss\": 0.0,\n }\n for _ in range(self.config.ppo_epochs):\n for batch_idx, mini_batch in enumerate(mini_batches):\n if self.config.use_dynamic_bsz:\n max_token_len = self.config.ppo_max_token_len_per_gpu * self.ulysses_sequence_parallel_size\n micro_batches, _ = prepare_dynamic_batch(mini_batch, max_token_len=max_token_len)\n else:\n self.gradient_accumulation = (\n self.config.ppo_mini_batch_size // self.config.ppo_micro_batch_size_per_gpu\n )\n micro_batches = mini_batch.split(self.config.ppo_micro_batch_size_per_gpu)\n\n self.actor_optimizer.zero_grad()\n\n for micro_batch in micro_batches:\n micro_batch = micro_batch.to(get_device_id())\n micro_batch_metrics = {}\n model_inputs = {**micro_batch.batch, **micro_batch.non_tensor_batch, \"pad_token_id\": pad_token_id}\n response_mask = model_inputs[\"response_mask\"]\n old_log_prob = 
model_inputs[\"old_log_probs\"]\n advantages = model_inputs[\"advantages\"]\n\n entropy_coeff = self.config.entropy_coeff\n loss_agg_mode = self.config.loss_agg_mode\n\n calculate_entropy = self.config.calculate_entropy or (entropy_coeff != 0)\n\n if self.config.use_dynamic_bsz:\n loss_scale_factor = response_mask.shape[0] / self.config.ppo_mini_batch_size\n else:\n loss_scale_factor = 1 / self.gradient_accumulation\n\n # all return: (bsz, response_length)\n outputs = self._forward_micro_batch(\n model_inputs, temperature=temperature, calculate_entropy=calculate_entropy\n )\n log_prob = outputs[\"log_probs\"]\n entropy = outputs[\"entropys\"] if calculate_entropy else None\n\n # for fully_async_policy\n if hasattr(self.config, \"use_rollout_log_probs\") and self.config.use_rollout_log_probs:\n old_log_prob = model_inputs[\"old_log_probs\"]\n else:\n if on_policy:\n old_log_prob = log_prob.detach()\n else:\n old_log_prob = model_inputs[\"old_log_probs\"]\n\n loss_mode = self.config.policy_loss.get(\"loss_mode\", \"vanilla\")\n # vanilla -> verl.trainer.ppo.core_algos.compute_policy_loss_vanilla\n\n # Extract pre-computed rollout correction weights if present\n # Weights are computed centrally in trainer and added when algorithm.rollout_is=True\n rollout_is_weights = model_inputs.get(\"rollout_is_weights\", None)\n\n # gpg -> verl.trainer.ppo.core_algos.compute_policy_loss_gpg\n # clip_cov -> verl.trainer.ppo.core_algos.compute_policy_loss_clip_cov\n policy_loss_fn = get_policy_loss_fn(loss_mode)\n\n # Compute policy loss (any function is expected to return 2 values)\n pg_loss, pg_metrics = policy_loss_fn(\n old_log_prob=old_log_prob,\n log_prob=log_prob,\n advantages=advantages,\n response_mask=response_mask,\n loss_agg_mode=loss_agg_mode,\n config=self.config,\n rollout_is_weights=rollout_is_weights,\n )\n micro_batch_metrics.update(pg_metrics)\n\n # Skip if using bypass_mode loss (metrics already computed in pg_metrics)\n rollout_log_prob = 
model_inputs.get(\"rollout_log_probs\", None)\n if loss_mode != \"bypass_mode\" and rollout_log_prob is not None:\n # Compute metrics using CURRENT policy π_θ vs π_rollout\n # Tracks evolving off-policy gap as π_θ updates during mini-batch training\n from verl.trainer.ppo.rollout_corr_helper import compute_rollout_corr_metrics_from_logprobs\n\n rollout_corr_metrics = compute_rollout_corr_metrics_from_logprobs(\n log_prob=log_prob,\n rollout_log_prob=rollout_log_prob,\n response_mask=response_mask,\n )\n micro_batch_metrics.update(rollout_corr_metrics)\n\n policy_loss = pg_loss\n if calculate_entropy and entropy is not None:\n entropy_agg = agg_loss(loss_mat=entropy, loss_mask=response_mask, loss_agg_mode=loss_agg_mode)\n micro_batch_metrics[\"actor/entropy\"] = entropy_agg.detach().item()\n if entropy_coeff != 0:\n policy_loss -= entropy_agg * entropy_coeff\n\n if self.config.use_kl_loss:\n ref_log_prob = model_inputs[\"ref_log_prob\"]\n # compute kl loss\n kld = kl_penalty(\n logprob=log_prob, ref_logprob=ref_log_prob, kl_penalty=self.config.kl_loss_type\n )\n kl_loss = agg_loss(loss_mat=kld, loss_mask=response_mask, loss_agg_mode=loss_agg_mode)\n\n policy_loss = policy_loss + kl_loss * self.config.kl_loss_coef\n metrics[\"actor/kl_loss\"] += kl_loss.detach().item() * loss_scale_factor\n micro_batch_metrics[\"actor/kl_coef\"] = self.config.kl_loss_coef\n\n if self.config.use_dynamic_bsz:\n # relative to the dynamic bsz\n loss = policy_loss * loss_scale_factor\n else:\n loss = policy_loss * loss_scale_factor\n if self.scaler is not None:\n self.scaler.scale(loss).backward()\n else:\n loss.backward()\n\n metrics[\"actor/pg_loss\"] += pg_loss.detach().item() * loss_scale_factor\n append_to_dict(metrics, micro_batch_metrics)\n\n grad_norm = self._optimizer_step()\n mini_batch_metrics = {\"actor/grad_norm\": grad_norm.detach().item()}\n append_to_dict(metrics, mini_batch_metrics)\n self.actor_optimizer.zero_grad()\n return metrics\n"} {"file_name": 
"verl__workers__config__model.py", "text": "# Copyright 2025 Bytedance Ltd. and/or its affiliates\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom dataclasses import dataclass, field\nfrom typing import Any, Optional\n\nfrom omegaconf import MISSING\nfrom transformers import AutoConfig\n\nfrom verl.base_config import BaseConfig\nfrom verl.utils import hf_processor, hf_tokenizer\nfrom verl.utils.fs import copy_to_local\nfrom verl.utils.import_utils import import_external_libs\nfrom verl.utils.model import get_generation_config, update_model_config\n\n__all__ = [\"HFModelConfig\", \"MtpConfig\"]\n\n\n@dataclass\nclass MtpConfig(BaseConfig):\n \"\"\"\n Configuration for MTP model.\n\n enable: Enable loading and saving of MTP parameters, but do not use them\n\n enable_train: Whether to enable using MTP parameters during training\n enable_rollout: Whether to enable using MTP parameters during rollout\n\n Training parameters:\n detach_encoder: Whether to detach encoder parameters during MTP training\n mtp_loss_scaling_factor: Loss scaling factor during MTP training\n\n vLLM rollout parameters:\n method: \"mtp\"\n num-speculative-tokens: 1\n\n SGLang rollout parameters:\n speculative-algorithm: EAGLE\n speculative-num-steps: 3\n speculative-eagle-topk: 1\n speculative-num-draft-tokens: 4\n \"\"\"\n\n enable: bool = False\n enable_train: bool = False\n enable_rollout: bool = False\n\n detach_encoder: bool = False\n mtp_loss_scaling_factor: float = 0.1\n\n 
speculative_algorithm: str = \"EAGLE\"\n speculative_num_steps: int = 3\n speculative_eagle_topk: int = 1\n speculative_num_draft_tokens: int = 4\n\n method: str = \"mtp\"\n num_speculative_tokens: int = 1\n\n\n@dataclass\nclass HFModelConfig(BaseConfig):\n # note that we separate model_path, model_config_path and tokenizer_path in case they are different\n _mutable_fields = {\n \"hf_config_path\",\n \"tokenizer_path\",\n \"hf_config\",\n \"generation_config\",\n \"tokenizer\",\n \"processor\",\n \"local_path\",\n \"architectures\",\n \"local_hf_config_path\",\n \"local_tokenizer_path\",\n }\n\n path: str = MISSING\n local_path: Optional[str] = None\n hf_config_path: Optional[str] = None\n local_hf_config_path: Optional[str] = None\n tokenizer_path: Optional[str] = None\n local_tokenizer_path: Optional[str] = None\n\n # whether to load tokenizer. This is useful when we only want to load model config\n load_tokenizer: bool = True\n\n hf_config: Any = None\n generation_config: Any = None\n tokenizer: Any = None\n processor: Any = None\n\n # whether to use shared memory\n use_shm: bool = False\n trust_remote_code: bool = False\n\n # custom chat template for the model\n custom_chat_template: Optional[str] = None\n\n external_lib: Optional[str] = None\n\n override_config: dict = field(default_factory=dict)\n\n enable_gradient_checkpointing: bool = True\n enable_activation_offload: bool = False\n\n use_remove_padding: bool = True\n\n # TODO: unify fsdp and megatron lora config\n # fsdp lora related. 
We may setup a separate config later\n lora_rank: int = 0\n lora_alpha: int = 16\n target_modules: Optional[Any] = \"all-linear\" # allow both \"all-linear\" and [\"q_proj\",\"k_proj\"]\n target_parameters: Optional[list[str]] = None # for lora adapter on nn.Parameter\n\n exclude_modules: Optional[str] = None\n\n # megatron lora config\n lora: dict[str, Any] = field(default_factory=dict)\n\n # path to pre-trained LoRA adapter to load for continued training\n lora_adapter_path: Optional[str] = None\n use_liger: bool = False\n\n use_fused_kernels: bool = False\n fused_kernel_options: dict = field(default_factory=dict)\n\n # TiledMLP configuration for memory-efficient MLP computation\n tiled_mlp: dict = field(default_factory=lambda: {\"enabled\": False, \"num_shards\": 4})\n\n architectures: Optional[list[str]] = None\n\n mtp: MtpConfig = field(default_factory=MtpConfig)\n\n def __post_init__(self):\n import_external_libs(self.external_lib)\n\n if self.hf_config_path is None:\n self.hf_config_path = self.path\n if self.tokenizer_path is None:\n self.tokenizer_path = self.path\n\n self.local_path = copy_to_local(self.path, use_shm=self.use_shm)\n\n # construct tokenizer\n if self.load_tokenizer:\n self.local_tokenizer_path = copy_to_local(self.tokenizer_path, use_shm=self.use_shm)\n self.tokenizer = hf_tokenizer(self.local_tokenizer_path, trust_remote_code=self.trust_remote_code)\n self.processor = hf_processor(self.local_tokenizer_path, trust_remote_code=self.trust_remote_code)\n\n if self.custom_chat_template is not None:\n if self.processor is not None:\n self.processor.chat_template = self.custom_chat_template\n else:\n self.tokenizer.chat_template = self.custom_chat_template\n\n self.local_hf_config_path = copy_to_local(self.hf_config_path, use_shm=self.use_shm)\n self.generation_config = get_generation_config(\n self.local_hf_config_path, trust_remote_code=self.trust_remote_code\n )\n\n # construct hf_config\n attn_implementation = 
self.override_config.get(\"attn_implementation\", \"flash_attention_2\")\n self.hf_config = AutoConfig.from_pretrained(\n self.local_hf_config_path, trust_remote_code=self.trust_remote_code, attn_implementation=attn_implementation\n )\n\n override_config_kwargs = {}\n\n if self.tokenizer is not None:\n override_config_kwargs.update(\n {\n \"bos_token_id\": self.tokenizer.bos_token_id,\n \"eos_token_id\": self.tokenizer.eos_token_id,\n \"pad_token_id\": self.tokenizer.pad_token_id,\n }\n )\n\n # TODO: (vermouth1992). self.config.model in megatron differs from that of fsdp in the override_config.\n override_config = (\n self.override_config[\"model_config\"] if \"model_config\" in self.override_config else self.override_config\n )\n override_config_kwargs.update(override_config)\n update_model_config(self.hf_config, override_config_kwargs=override_config_kwargs)\n\n self.share_embeddings_and_output_weights = getattr(self.hf_config, \"tie_word_embeddings\", False)\n\n # get model architectures\n self.architectures = getattr(self.hf_config, \"architectures\", None)\n assert self.architectures is not None and len(self.architectures) == 1, (\n \"Expect only one architecture, got {}\".format(self.architectures)\n )\n\n # per model patch\n if getattr(self.hf_config, \"model_type\", None) == \"kimi_vl\":\n self.hf_config.text_config.topk_method = \"greedy\"\n\n # Ensure target_modules is a str or list[str] (only if not None)\n if self.target_modules is not None:\n if not isinstance(self.target_modules, (str | list)):\n raise TypeError(\n \"target_modules must be a string or a list of strings, \"\n f\"but got {type(self.target_modules).__name__}\"\n )\n if isinstance(self.target_modules, list):\n for x in self.target_modules:\n if not isinstance(x, str):\n raise TypeError(\n f\"All elements in target_modules list must be strings, but found {type(x).__name__}\"\n )\n\n def get_processor(self):\n return self.processor if self.processor is not None else self.tokenizer\n"} 
{"file_name": "verl__workers__rollout__utils.py", "text": "# Copyright 2024 Bytedance Ltd. and/or its affiliates\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport asyncio\nimport logging\nimport os\n\nimport uvicorn\nfrom fastapi import FastAPI\n\nfrom verl.utils.net_utils import get_free_port\n\nlogger = logging.getLogger(__file__)\n\n\ndef get_max_position_embeddings(hf_config) -> int:\n max_len = getattr(hf_config, \"max_position_embeddings\", None)\n if max_len is None:\n text_config = getattr(hf_config, \"text_config\", None)\n if text_config is not None:\n max_len = getattr(text_config, \"max_position_embeddings\", None)\n\n if max_len is None:\n raise ValueError(\"max_position_embeddings not found in HFModelConfig!\")\n return int(max_len)\n\n\nasync def run_unvicorn(app: FastAPI, server_args, server_address, max_retries=5) -> tuple[int, asyncio.Task]:\n server_port, server_task = None, None\n\n for i in range(max_retries):\n try:\n server_port, sock = get_free_port(server_address)\n app.server_args = server_args\n config = uvicorn.Config(app, host=server_address, port=server_port, log_level=\"warning\")\n server = uvicorn.Server(config)\n server.should_exit = True\n await server.serve()\n server_task = asyncio.create_task(server.main_loop())\n break\n except (OSError, SystemExit) as e:\n logger.error(f\"Failed to start HTTP server on port {server_port} at try {i}, error: {e}\")\n else:\n logger.error(f\"Failed to start HTTP server after {max_retries} 
retries, exiting...\")\n os._exit(-1)\n\n logger.info(f\"HTTP server started on port {server_port}\")\n return server_port, server_task\n\n\nasync def ensure_async_iterator(iterable):\n \"\"\"Convert an iterable to an async iterator.\"\"\"\n if hasattr(iterable, \"__aiter__\"):\n async for item in iterable:\n yield item\n else:\n for item in iterable:\n yield item\n"} {"file_name": "verl__workers__rollout__vllm_rollout__utils.py", "text": "# Copyright 2024 Bytedance Ltd. and/or its affiliates\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport ctypes\nimport gc\nimport json\nimport logging\nimport os\nimport platform\nimport signal\nimport threading\nfrom multiprocessing import shared_memory\nfrom types import MethodType\nfrom typing import Any, Callable, Literal, TypedDict, get_args\n\nimport torch\nimport zmq\n\nfrom verl.utils.device import get_torch_device, is_npu_available\nfrom verl.utils.vllm import TensorLoRARequest, VLLMHijack\nfrom verl.utils.vllm.patch import patch_vllm_moe_model_weight_loader\nfrom verl.utils.vllm.vllm_fp8_utils import apply_vllm_fp8_patches, is_fp8_model, load_quanted_weights\n\nlogger = logging.getLogger(__file__)\nlogger.setLevel(os.getenv(\"VERL_LOGGING_LEVEL\", \"WARN\"))\n\n# magic numbers that ensure we are using the same LoRA adapter during the rollout and training process\nVLLM_LORA_INT_ID = 123\nVLLM_LORA_NAME = \"123\"\nVLLM_LORA_PATH = \"simon_lora_path\"\n\nVLLM_ASCEND_REQUIRED_ENV_VARS = {\"VLLM_ALL2ALL_BACKEND\": 
\"flashinfer_all2allv\", \"VLLM_ASCEND_ENABLE_NZ\": \"0\"}\n\n\ndef set_death_signal():\n \"\"\"Kill the current process when the parent process exits.\"\"\"\n if platform.system() != \"Linux\":\n return\n libc = ctypes.CDLL(\"libc.so.6\")\n libc.prctl(1, signal.SIGKILL)\n if os.getppid() == 1:\n os.kill(os.getpid(), signal.SIGKILL)\n\n\ndef get_device_uuid(device_id: int) -> str:\n from vllm.platforms import current_platform\n\n # Convert torch.npu.current_device to its corresponding ASCEND_RT_VISIBLE_DEVICES.\n if is_npu_available:\n if os.getenv(\"ASCEND_RT_VISIBLE_DEVICES\") is not None:\n npu_visible_devices = os.environ[\"ASCEND_RT_VISIBLE_DEVICES\"].split(\",\")\n assert device_id < len(npu_visible_devices), f\"device_id {device_id} must less than {npu_visible_devices}\"\n return \"NPU-\" + npu_visible_devices[device_id]\n else:\n return f\"NPU-{device_id}\"\n else:\n return current_platform.get_device_uuid(device_id)\n\n\ndef get_vllm_max_lora_rank(lora_rank: int):\n \"\"\"\n For vLLM, automatically adjusts the `max_lora_rank` to the nearest allowed value.\n The allowed values are retrieved from vLLM's MaxLoRARanks type definition.\n \"\"\"\n assert lora_rank > 0, f\"lora_rank must be greater than 0, get {lora_rank}\"\n\n try:\n from vllm.config.lora import MaxLoRARanks\n except Exception:\n # FIXME: migrate vllm version https://github.com/vllm-project/vllm/blob/main/vllm/config/lora.py#L25\n MaxLoRARanks = Literal[1, 8, 16, 32, 64, 128, 256, 320, 512]\n\n vllm_max_lora_ranks = sorted(get_args(MaxLoRARanks))\n if lora_rank > vllm_max_lora_ranks[-1]:\n raise ValueError(f\"lora_rank must be less than or equal to {vllm_max_lora_ranks[-1]}, but got {lora_rank}\")\n\n for rank in vllm_max_lora_ranks:\n if lora_rank <= rank:\n return rank\n\n\n# https://github.com/vllm-project/vllm/issues/13175\ndef monkey_patch_compute_logits(model, vocab_size: int):\n original_compute_logits = model.compute_logits\n\n def compute_logits(\n self,\n *args,\n **kwargs,\n ) -> 
torch.Tensor:\n logits = original_compute_logits(*args, **kwargs)\n logits[..., vocab_size:] = float(\"-inf\")\n return logits\n\n model.compute_logits = MethodType(compute_logits, model)\n\n\n# copy from https://github.com/vllm-project/vllm/blob/main/examples/offline_inference/rlhf_utils.py\ndef rebuild_ipc(handle: tuple[Callable, tuple], device_id: int | None = None) -> torch.Tensor:\n func, args = handle\n list_args = list(args)\n if device_id is not None:\n # the key is to change device id to the current device id\n # in case two processes have different CUDA_VISIBLE_DEVICES\n list_args[6] = device_id\n buffer = func(*list_args)\n return buffer\n\n\ndef create_shared_memory(size: int, name: str):\n \"\"\"Create shared memory for weight transfer. If already exists, attach to it.\"\"\"\n try:\n shm = shared_memory.SharedMemory(name=name, create=True, size=size)\n except FileExistsError:\n shm = shared_memory.SharedMemory(name=name)\n return shm\n\n\ndef rebuild_shared_memory(name: str, size: int, dtype=torch.uint8):\n \"\"\"Rebuild tensor from shared memory.\"\"\"\n shm = shared_memory.SharedMemory(name=name)\n tensor = torch.frombuffer(shm.buf[:size], dtype=dtype)\n\n return tensor, shm\n\n\nclass TensorMetadata(TypedDict):\n name: str\n shape: torch.Size\n dtype: torch.dtype\n offset: int\n\n\nclass vLLMColocateWorkerExtension:\n \"\"\"\n The class for vLLM's worker to inherit from, in the colocate setting.\n By defining an extension class, the code can work no matter what is\n the underlying worker class. This way, the code can be compatible\n with both vLLM V0 and V1.\n NOTE: we define this class in a separate module, and the main module\n should pass the full qualified name as `worker_extension_cls` argument.\n\n Feature support:\n 1. LoRA\n 2. Online FP8 quantization\n \"\"\"\n\n def __new__(cls, **kwargs):\n set_death_signal()\n\n # 1. patch for Lora\n VLLMHijack.hijack()\n # 2. 
patch online fp8 quant\n if os.environ.get(\"VERL_VLLM_FP8_QUANT_ENABLED\", \"0\") == \"1\":\n apply_vllm_fp8_patches()\n # 3. patch QAT (compressed-tensors NVFP4) for dynamic weight loading\n vllm_config = kwargs.get(\"vllm_config\")\n quant_config = getattr(vllm_config, \"quant_config\", None) if vllm_config else None\n _is_qat_model = getattr(quant_config, \"quant_format\", None) == \"nvfp4-pack-quantized\"\n if _is_qat_model:\n from verl.utils.qat import apply_qat_patches\n\n apply_qat_patches()\n logger.info(\"Applied QAT patches in vLLM worker subprocess\")\n\n # TODO: For ascend NPU, when the corresponding vllm-ascend version is upgraded to v0.13.0,\n # please remove the VLLM_ASCEND_REQUIRED_ENV_VARS variable replacement action.\n # This is only a fix for vllm version < v0.13.0.\n if is_npu_available:\n for k in VLLM_ASCEND_REQUIRED_ENV_VARS:\n if k not in os.environ:\n os.environ[k] = VLLM_ASCEND_REQUIRED_ENV_VARS[k]\n\n instance = super().__new__(cls)\n instance._is_qat_model = _is_qat_model\n return instance\n\n def monkey_patch_model(self, vocab_size: int):\n # patch compute_logits to avoid sampling OOV token\n monkey_patch_compute_logits(self.model_runner.model, vocab_size)\n # patch weight loader to support MoE model\n patch_vllm_moe_model_weight_loader(self.model_runner.model)\n\n def update_weights_from_ipc(self, peft_config: dict = None, base_sync_done=False, use_shm: bool = False):\n \"\"\"Update the weights of the rollout model.\"\"\"\n from vllm.platforms import current_platform\n\n if current_platform.device_type == \"npu\" and self.device is None:\n self.device = torch.device(f\"npu:{self.local_rank}\")\n\n # In async mode, make sure the old lora is removed before adding the new one\n if peft_config and base_sync_done:\n self.remove_lora(VLLM_LORA_INT_ID)\n\n # build communication buffer\n assert self.device is not None\n if not hasattr(self, \"_zmq_ctx\") or self._zmq_ctx is None:\n self._zmq_ctx = zmq.Context()\n socket = 
self._zmq_ctx.socket(zmq.REP)\n socket.connect(self._get_zmq_handle())\n\n comm_metadata = socket.recv_pyobj()\n buffer, shm = None, None\n if not use_shm:\n handle = comm_metadata\n buffer = rebuild_ipc(handle, self.device.index)\n assert buffer.dtype == torch.uint8\n else:\n shm_name = comm_metadata[\"name\"]\n shm_size = comm_metadata[\"size\"]\n buffer, shm = rebuild_shared_memory(shm_name, shm_size, dtype=torch.uint8)\n socket.send(b\"\")\n\n use_standard_weight_load = not (peft_config and base_sync_done) and not is_fp8_model(\n self.model_runner.vllm_config\n )\n\n if self._is_qat_model:\n # QAT: Prepare for weight loading BEFORE receiving any buckets\n from verl.utils.qat import prepare_qat_for_load_weights\n\n prepare_qat_for_load_weights(self.model_runner.model, device=self.device)\n logger.info(\"QAT: prepare_qat_for_load_weights completed\")\n elif use_standard_weight_load:\n # Re-apply here because async IPC weight sync can happen long after init and lose MoE weight_loader attrs.\n patch_vllm_moe_model_weight_loader(self.model_runner.model)\n\n # receive bucket and update weights\n while True:\n metadata = socket.recv_pyobj()\n weights, tensor = [], None\n for name, meta in metadata[\"bucket_meta\"].items():\n shape, dtype, offset = meta[\"shape\"], meta[\"dtype\"], meta[\"offset\"]\n size = dtype.itemsize * shape.numel()\n # NOTE: we need to clone the tensor to release CUDA IPC memory\n # but for shared memory, it's not necessary and if we do clone,\n # it will cause extra memory copy overhead and slow down the process.\n tensor = buffer[offset : offset + size].view(dtype=dtype).view(shape)\n if not use_shm:\n tensor = tensor.clone()\n else:\n tensor = tensor.to(self.device)\n weights.append((name, tensor))\n get_torch_device().synchronize()\n socket.send(b\"\")\n self._update_weights(weights, peft_config=peft_config, base_sync_done=base_sync_done)\n del weights, tensor\n if metadata[\"is_last\"]:\n break\n\n if self._is_qat_model:\n # QAT: call 
process_weights_after_loading AFTER all buckets are received\n from verl.utils.qat import manual_process_weights_after_loading\n\n manual_process_weights_after_loading(self.model_runner.model)\n logger.info(\"QAT: process_weights_after_loading completed\")\n elif use_standard_weight_load:\n # Some post-load transforms are non-idempotent; run once after all buckets.\n from vllm.model_executor.model_loader.utils import process_weights_after_loading\n\n model = self.model_runner.model\n model_config = self.model_runner.vllm_config.model_config\n process_weights_after_loading(model, model_config, self.device)\n\n # clean up\n socket.close()\n del buffer\n if shm is not None:\n shm.close()\n del shm\n get_torch_device().synchronize()\n gc.collect()\n get_torch_device().ipc_collect()\n get_torch_device().empty_cache()\n\n def _update_weights(self, weights: list[tuple[str, torch.Tensor]], peft_config: dict, base_sync_done: bool):\n if peft_config and base_sync_done:\n weights = dict(weights)\n lora_request = TensorLoRARequest(\n lora_name=VLLM_LORA_NAME,\n lora_int_id=VLLM_LORA_INT_ID,\n lora_path=VLLM_LORA_PATH,\n peft_config=peft_config,\n lora_tensors=weights,\n )\n self.add_lora(lora_request)\n logger.info(f\"vLLM load weights, loaded_params: {len(weights)}\")\n else:\n # Add the FP8 related logic here as sharding manager has been deprecated.\n # Check if FP8 quantization is enabled and apply appropriate weight loading\n if is_fp8_model(self.model_runner.vllm_config):\n logger.info(f\"FP8 model detected (async): {self.model_runner.vllm_config.quant_config}\")\n # Convert bf16 weights to fp8 format before loading\n loaded_params = load_quanted_weights(weights, self.model_runner)\n logger.info(f\"FP8 weights loaded (async), loaded_params: {len(loaded_params)}\")\n else:\n logger.info(\"Loading standard weights (non-FP8, async)\")\n self.model_runner.model.load_weights(weights)\n\n def _get_zmq_handle(self) -> str:\n \"\"\"Get ZMQ handle for communication.\"\"\"\n if not 
hasattr(self, \"device_uuid\") or not self.device_uuid:\n self.device_uuid = get_device_uuid(self.device.index)\n return f\"ipc:///tmp/rl-colocate-zmq-{self.device_uuid}.sock\"\n\n\nclass SuppressSignalInThread:\n def __enter__(self):\n self.original_signal = signal.signal\n\n def no_op_signal(sig, action):\n if threading.current_thread() is not threading.main_thread():\n print(f\"Ignored signal {sig} in thread {threading.current_thread().name}\")\n return\n return self.original_signal(sig, action)\n\n signal.signal = no_op_signal\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n signal.signal = self.original_signal\n\n\ndef build_cli_args_from_config(config: dict[str, Any]) -> list[str]:\n \"\"\"\n Convert a config dictionary to CLI arguments for vLLM server.\n\n Handles different value types appropriately:\n - None: skipped\n - bool True: adds '--key'\n - bool False: skipped\n - list: expands to '--key item1 item2 ...'\n - empty list: skipped (vLLM uses nargs=\"+\" which requires at least one value)\n - dict: JSON serialized\n - other: string converted\n\n Args:\n config: Dictionary of configuration key-value pairs\n\n Returns:\n List of CLI argument strings\n \"\"\"\n cli_args = []\n for k, v in config.items():\n if v is None:\n continue\n if isinstance(v, bool):\n if v:\n cli_args.append(f\"--{k}\")\n elif isinstance(v, list):\n if not v:\n # Skip empty lists - vLLM uses nargs=\"+\" which requires at least one value\n continue\n # Lists need to be expanded as multiple separate arguments\n # e.g., --cuda-graph-sizes 1 2 4 8 becomes ['--cuda-graph-sizes', '1', '2', '4', '8']\n cli_args.append(f\"--{k}\")\n cli_args.extend([str(item) for item in v])\n else:\n cli_args.append(f\"--{k}\")\n # Use json.dumps for dict to ensure valid JSON format\n cli_args.append(json.dumps(v) if isinstance(v, dict) else str(v))\n return cli_args\n"}