LLM: fix qwen2 (#10356)
rnwang04 authored Mar 11, 2024
1 parent 7b358a2 commit 088d191
Showing 1 changed file with 2 additions and 1 deletion.
python/llm/src/bigdl/llm/transformers/models/qwen2.py (2 additions & 1 deletion)
@@ -262,7 +262,8 @@ def qwen2_attention_forward_origin(
         import linear_q4_0
         args = [hidden_states, self.q_proj.weight, self.k_proj.weight, self.v_proj.weight,
                 self.q_proj.bias, self.k_proj.bias, self.v_proj.bias, position_ids, cache_k,
-                cache_v, self.q_proj.weight.qtype, kv_seq_len, self.head_dim, self.rotary_emb.base]
+                cache_v, self.q_proj.weight.qtype, self.v_proj.weight.qtype, kv_seq_len,
+                self.head_dim, self.rotary_emb.base]
         query_states, key_states, value_states = linear_q4_0.forward_qkv_bias(*args)
         kv_seq_len += 1
         if self.layer_idx == 0:
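The substance of the change: the value projection's quantization type (self.v_proj.weight.qtype) is now passed to linear_q4_0.forward_qkv_bias alongside the query projection's, presumably so the fused QKV kernel no longer assumes the q/k and v projections share a single qtype. Below is a minimal, hedged sketch of the corrected call site, not the repository's exact code: build_qkv_args and the attn parameter are hypothetical names introduced here for illustration, and the forward_qkv_bias argument order is inferred only from the diff above.

# Hedged sketch: assemble the argument list for BigDL's native fused QKV kernel
# in the same order as the corrected call site in this diff.
def build_qkv_args(attn, hidden_states, position_ids, cache_k, cache_v, kv_seq_len):
    """Collect arguments for linear_q4_0.forward_qkv_bias (order taken from the diff)."""
    return [hidden_states,
            attn.q_proj.weight, attn.k_proj.weight, attn.v_proj.weight,
            attn.q_proj.bias, attn.k_proj.bias, attn.v_proj.bias,
            position_ids, cache_k, cache_v,
            attn.q_proj.weight.qtype,   # quantization type of the q/k projections
            attn.v_proj.weight.qtype,   # quantization type of the v projection (added by this commit)
            kv_seq_len, attn.head_dim, attn.rotary_emb.base]

# Usage inside the decoding fast path (sketch; linear_q4_0 is the native
# extension shipped with bigdl-llm and is only importable in that environment):
#     import linear_q4_0
#     query_states, key_states, value_states = linear_q4_0.forward_qkv_bias(
#         *build_qkv_args(self, hidden_states, position_ids, cache_k, cache_v, kv_seq_len))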
