fix bias init issue
Signed-off-by: Zhang, Weiwei1 <weiwei1.zhang@intel.com>
WeiweiZhang1 committed Dec 12, 2024
1 parent 8a42e07 commit cbd62ca
Showing 1 changed file with 1 addition and 1 deletion.
auto_round/quantizer.py (1 addition, 1 deletion)
```diff
@@ -95,7 +95,7 @@ def _init_tuning_params_and_quant_func(self):
         self.weight_max = torch.clamp(weight_reshape.max(1)[0], min=0)
         self._init_params("value", p_dtype, weight_reshape.shape, 0, True)
         self.weight = weight_reshape
-        self.bias = orig_layer.bias
+        self.bias = self.orig_layer.get_bias()
         # Min-max scale initialization
         shape = get_scale_shape(orig_weight, orig_layer.group_size)
         self._init_params("min_scale", p_dtype, shape, 1.0, self.enable_minmax_tuning)
```
