ACE-Bench: Agent Coding Evaluation Benchmark

Dataset Description

ACE-Bench is a comprehensive benchmark designed to evaluate AI agents' capabilities in end-to-end feature-level code generation. Unlike traditional benchmarks that focus on function-level or algorithm-specific tasks, ACE-Bench challenges agents to implement complete features within real-world software projects.

Key Characteristics

  • Feature-Level Tasks: Each task requires implementing a complete feature, including multiple functions, classes, and their interactions
  • Real-World Codebases: Tasks are derived from actual open-source projects, preserving the complexity and context of production code
  • End-to-End Evaluation: Agents must understand requirements, generate code, and pass comprehensive test suites
  • Two Difficulty Levels:
    • Level 1: Agents receive masked code with interface signatures and must implement the complete functionality
    • Level 2: Agents receive only test files and must implement both the interface and functionality from scratch

Dataset Statistics

  • Total Instances: 2
  • Level 1 Instances: 1
  • Level 2 Instances: 1
  • Total Size: 125.76 KB
  • Download Size: 59.51 KB

Dataset Structure

Each instance in ACE-Bench contains the following fields (a usage sketch follows the list):

  • instance_id: Unique identifier for the task
  • patch: Git diff showing the implementation (Level 1) or empty string (Level 2)
  • test_patch: Git diff showing test file modifications
  • FAIL_TO_PASS: List of test files that must pass after implementation
  • PASS_TO_PASS: List of test files that must continue passing (Level 1 only)
  • image_name: Docker image containing the development environment
  • repo: Source repository (e.g., "owner/repo-name")
  • base_commit: Git commit hash of the base version
  • problem_statement: Detailed task description and requirements
  • repo_settings: Repository configuration settings as JSON string (from python.py)
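A minimal sketch of how an evaluation harness might consume these fields, assuming a local checkout of the task repository with git and pytest available. The helper name prepare_and_test, the workdir argument, and the test_cmd key read from repo_settings are illustrative assumptions, not part of the official ACE-Bench tooling:

import json
import subprocess

def prepare_and_test(task, workdir):
    # Illustrative sketch: check out the base commit, apply the test patch,
    # then run the FAIL_TO_PASS test files with the repository's test command.
    settings = json.loads(task['repo_settings'])
    test_cmd = settings.get('test_cmd', 'pytest -rA')  # assumed key; contents vary by instance

    subprocess.run(['git', 'checkout', task['base_commit']], cwd=workdir, check=True)
    # test_patch is a git diff; `git apply -` reads it from stdin
    subprocess.run(['git', 'apply', '-'], input=task['test_patch'],
                   text=True, cwd=workdir, check=True)

    # FAIL_TO_PASS lists the test files that must pass once the feature is implemented
    result = subprocess.run(test_cmd.split() + task['FAIL_TO_PASS'],
                            cwd=workdir, text=True, capture_output=True)
    return result.returncode == 0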

Usage

import json
from datasets import load_dataset

# Load Level 1 tasks
dataset_lv1 = load_dataset("BamChil/ACE-Bench", split="level1")

# Load Level 2 tasks
dataset_lv2 = load_dataset("BamChil/ACE-Bench", split="level2")

# Example: Access a task
task = dataset_lv1[0]
print(task['instance_id'])
print(task['problem_statement'])

# Parse repo_settings from JSON string
repo_settings = json.loads(task['repo_settings'])
print(repo_settings['repository'])
print(repo_settings['base_image'])
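
Per the Dataset Structure section above, Level 2 instances ship an empty patch, so a task's level can also be read directly off a row. The summarize helper below is a hypothetical convenience that continues from the snippet above and uses only the documented fields:

def summarize(task):
    # Level 2 tasks carry an empty `patch` (tests only); Level 1 tasks include the reference diff
    level = 1 if task['patch'] else 2
    return {
        'instance_id': task['instance_id'],
        'level': level,
        'repo': task['repo'],
        'fail_to_pass': task['FAIL_TO_PASS'],  # tests that must pass after implementation
        'pass_to_pass': task['PASS_TO_PASS'],  # tests that must keep passing (Level 1 only)
    }

for task in list(dataset_lv1) + list(dataset_lv2):
    print(summarize(task))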