fix jamba slow forward for multi-gpu (#30418)
* fix jamba slow foward for multi-gpu

* remove comm

* oups

* style
SunMarc authored Apr 24, 2024
1 parent 5d64ae9 commit 37fa1f6
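
For context, the bug only shows up when Jamba is sharded across several GPUs and falls back to the pure-PyTorch (slow) Mamba forward. A minimal sketch of such a setup, assuming the public ai21labs/Jamba-v0.1 checkpoint and at least two GPUs; the model id, dtype, and prompt are illustrative and not part of this commit:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Illustrative setup: device_map="auto" shards the layers across available GPUs,
# and use_mamba_kernels=False forces the slow_forward path patched in this commit.
model = AutoModelForCausalLM.from_pretrained(
    "ai21labs/Jamba-v0.1",
    device_map="auto",
    torch_dtype=torch.bfloat16,
    use_mamba_kernels=False,
)
tokenizer = AutoTokenizer.from_pretrained("ai21labs/Jamba-v0.1")

inputs = tokenizer("In the simplest terms, a state space model", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=16)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))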
Showing 1 changed file with 2 additions and 1 deletion: src/transformers/models/jamba/modeling_jamba.py
@@ -919,6 +919,8 @@ def slow_forward(self, input_states, cache_params: HybridMambaAttentionDynamicCa
else:
ssm_state = cache_params.ssm_states[self.layer_idx]

ssm_state = ssm_state.to(hidden_states.device)

if cache_params.has_previous_state and seq_len == 1 and \
cache_params.conv_states[self.layer_idx].shape[0] == batch_size:
conv_state = cache_params.conv_states[self.layer_idx] # [batch, intermediate_size, conv_kernel_size]
@@ -962,7 +964,6 @@ def slow_forward(self, input_states, cache_params: HybridMambaAttentionDynamicCa
discrete_A = torch.exp(A[None, :, None, :] * discrete_time_step[:, :, :, None]) # [batch, intermediate_size, seq_len, ssm_state_size]
discrete_B = discrete_time_step[:, :, :, None] * B[:, None, :, :].float() # [batch, intermediate_size, seq_len, ssm_state_size]
deltaB_u = discrete_B * hidden_states[:, :, :, None].float()

# 3.c perform the recurrence y ← SSM(A, B, C)(x)
scan_outputs = []
for i in range(seq_len):
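The substance of the fix is the single device-alignment line added in the first hunk: with device_map sharding, the cached ssm_states tensor for a layer can sit on a different GPU than the incoming hidden_states, and the elementwise recurrence in the scan loop would then fail with a device-mismatch error. A standalone sketch of the pattern, with made-up tensor names and shapes rather than the actual transformers internals:

import torch

# Stand-ins for the real tensors: activations on this layer's device,
# cached state possibly left on another device by the sharded cache.
device = "cuda:1" if torch.cuda.device_count() > 1 else "cpu"
hidden_states = torch.randn(1, 16, 4, device=device)
ssm_state = torch.zeros(1, 16, 4)  # e.g. still on cuda:0 or CPU

# Without this move, multiplying the two tensors raises
# "Expected all tensors to be on the same device" in the multi-GPU case.
ssm_state = ssm_state.to(hidden_states.device)
out = hidden_states * ssm_state
print(out.device)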
