Upload HymbaForCausalLM
- README.md +1 -1
- modeling_hymba.py +1 -1
README.md CHANGED
In the YAML front matter, pipeline_tag: text-generation moves below the license fields:

@@ -1,9 +1,9 @@
 ---
 library_name: transformers
-pipeline_tag: text-generation
 license: other
 license_name: nvidia-open-model-license
 license_link: https://developer.download.nvidia.com/licenses/nvidia-open-model-license-agreement-june-2024.pdf
+pipeline_tag: text-generation
 ---


modeling_hymba.py CHANGED
Fixes a syntax error: the create_block_mask(...) call was missing its closing parenthesis.

@@ -1233,7 +1233,7 @@ class HymbaFlexAttention(HymbaFlashAttention2):
 
         self.attn_mask = or_masks(attn_mask, register_mask)
 
-        self.block_mask = create_block_mask(self.attn_mask, B=None, H=None, Q_LEN=qk_length, KV_LEN=qk_length
+        self.block_mask = create_block_mask(self.attn_mask, B=None, H=None, Q_LEN=qk_length, KV_LEN=qk_length)
 
         self.flex_attention = torch.compile(flex_attention)
 
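For context, the patched line uses PyTorch's FlexAttention API (torch.nn.attention.flex_attention): or_masks unions two mask predicates, create_block_mask precomputes a block-sparse mask from the combined predicate, and the compiled flex_attention kernel consumes that mask. Below is a minimal, self-contained sketch of the same pattern, assuming torch >= 2.5 on a CUDA device. The mask bodies, window size, register count, and sequence length are illustrative stand-ins, not Hymba's actual values; only the or_masks / create_block_mask / torch.compile(flex_attention) structure mirrors the diff.

import torch
from torch.nn.attention.flex_attention import (
    create_block_mask,
    flex_attention,
    or_masks,
)

SEQ_LEN = 256      # stand-in for qk_length; a multiple of the 128 block size
WINDOW = 64        # hypothetical sliding-window width
NUM_REGISTERS = 4  # hypothetical count of globally visible register tokens

def sliding_window_mask(b, h, q_idx, kv_idx):
    # Causal sliding window: a query attends to itself and the
    # WINDOW - 1 positions before it.
    return (q_idx >= kv_idx) & (q_idx - kv_idx < WINDOW)

def register_mask(b, h, q_idx, kv_idx):
    # Register tokens at the front of the sequence remain visible
    # to every query position.
    return kv_idx < NUM_REGISTERS

# Union of the two predicates, mirroring
# self.attn_mask = or_masks(attn_mask, register_mask) in the diff.
attn_mask = or_masks(sliding_window_mask, register_mask)

# Precompute the block-sparse structure; B=None and H=None broadcast the
# mask over batch and heads, as in the patched call.
block_mask = create_block_mask(
    attn_mask, B=None, H=None, Q_LEN=SEQ_LEN, KV_LEN=SEQ_LEN, device="cuda"
)

# The model compiles flex_attention before use; same here.
compiled_flex_attention = torch.compile(flex_attention)

q, k, v = (torch.randn(1, 8, SEQ_LEN, 64, device="cuda") for _ in range(3))
out = compiled_flex_attention(q, k, v, block_mask=block_mask)
print(out.shape)  # torch.Size([1, 8, 256, 64])

Without the fixed parenthesis, this create_block_mask call is a SyntaxError at import time, so the patch is required for the module to load at all.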