```yaml
merge_method: passthrough
dtype: bfloat16
base_model: merge
slices:
  - sources:
      - model: EpistemeAI/ReasoningCore-Llama-3B-R1-aligned
        layer_range: [0, 12]
        parameters:
          weight: [0.6, 0.65, 0.7]  # gradient: list is interpolated across this layer range
  - sources:
      - model: NousResearch/Hermes-3-Llama-3.2-3B
        layer_range: [8, 22]
        parameters:
          weight: 0.85  # higher fixed weight for the Hermes span
  - sources:
      - model: merge  # reference the Step 1 output directory/model
        layer_range: [16, 28]
        parameters:
          weight:
            - filter: self_attn
              value: 0.75  # attention weight, tuned via simulated annealing
            - filter: mlp
              value: 0.95  # MLP weight
```
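To execute this step, the config can be passed to mergekit. Below is a minimal sketch using mergekit's documented Python API (`run_merge`); the file name `config.yml` and the output directory are placeholders rather than names from this card, and the available options may vary by mergekit version:

```python
# Minimal sketch: run the merge config above with mergekit's Python API.
# Assumes mergekit is installed (pip install mergekit); "config.yml" and
# "./merged-model" are placeholder names, not from the original card.
import yaml

from mergekit.config import MergeConfiguration
from mergekit.merge import MergeOptions, run_merge

OUTPUT_PATH = "./merged-model"  # hypothetical output directory

# Parse the YAML above into a validated mergekit configuration object
with open("config.yml", "r", encoding="utf-8") as fp:
    merge_config = MergeConfiguration.model_validate(yaml.safe_load(fp))

run_merge(
    merge_config,
    OUTPUT_PATH,
    options=MergeOptions(
        cuda=False,           # set True to run the merge on a GPU
        copy_tokenizer=True,  # carry the tokenizer over to the output
        lazy_unpickle=False,
        low_cpu_memory=False,
    ),
)
```

Equivalently, the `mergekit-yaml` CLI accepts the same file: `mergekit-yaml config.yml ./merged-model --copy-tokenizer`.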