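# FP8 quantization recipe (llm-compressor-style QuantizationModifier) for a
# Llama-family model: static per-tensor FP8 for Linear weights and input
# activations, plus per-attention-head FP8 for attention inputs and the
# KV cache. lm_head is left unquantized.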
quant_stage:
  quant_modifiers:
    QuantizationModifier:
      config_groups:
        group_0:
          targets: [Linear]
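          # Weights: static, symmetric FP8 with a single per-tensor scale,
          # calibrated by a min-max observer.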
          weights:
            num_bits: 8
            type: float
            symmetric: true
            group_size: null
            strategy: tensor
            block_structure: null
            dynamic: false
            actorder: null
            scale_dtype: null
            zp_dtype: null
            observer: minmax
            observer_kwargs: {}
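          # Input activations: same static per-tensor FP8 scheme
          # (dynamic: false, so scales come from calibration, not runtime).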
          input_activations:
            num_bits: 8
            type: float
            symmetric: true
            group_size: null
            strategy: tensor
            block_structure: null
            dynamic: false
            actorder: null
            scale_dtype: null
            zp_dtype: null
            observer: minmax
            observer_kwargs: {}
          output_activations: null
          format: null
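        # group_1: static FP8 on the input activations of LlamaAttention
        # modules, with one scale per attention head (strategy: attn_head).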
        group_1:
          targets: [LlamaAttention]
          weights: null
          input_activations:
            num_bits: 8
            type: float
            symmetric: true
            group_size: null
            strategy: attn_head
            block_structure: null
            dynamic: false
            actorder: null
            scale_dtype: null
            zp_dtype: null
            observer: minmax
            observer_kwargs: {}
          output_activations: null
          format: null
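      # Modifier-level settings: quantize Linear layers, but skip lm_head.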
      targets: [Linear]
      ignore: [lm_head]
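      # KV cache: keys and values are stored in FP8 with static,
      # per-attention-head scales.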
      kv_cache_scheme:
        num_bits: 8
        type: float
        symmetric: true
        group_size: null
        strategy: attn_head
        block_structure: null
        dynamic: false
        actorder: null
        scale_dtype: null
        zp_dtype: null
        observer: minmax
        observer_kwargs: {}