From a2e9d41b2062be5b45c84d24fe2bf4527ec27cee Mon Sep 17 00:00:00 2001
From: Zack Angelo
Date: Wed, 23 Oct 2024 11:07:09 -0700
Subject: [PATCH] use softmax_last_dim (metal and cuda kernel) in llama
 attention layer (#2572)

---
 candle-transformers/src/models/llama.rs | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/candle-transformers/src/models/llama.rs b/candle-transformers/src/models/llama.rs
index a7bef099d6..e77697340e 100644
--- a/candle-transformers/src/models/llama.rs
+++ b/candle-transformers/src/models/llama.rs
@@ -341,7 +341,8 @@ impl CausalSelfAttention {
                 let mask = cache.mask(seq_len)?.broadcast_as(att.shape())?;
                 masked_fill(&att, &mask, f32::NEG_INFINITY)?
             };
-            let att = candle_nn::ops::softmax(&att, D::Minus1)?;
+
+            let att = candle_nn::ops::softmax_last_dim(&att)?;
             // Convert to contiguous as matmul doesn't support strided vs for now.
             att.matmul(&v.contiguous()?)?.to_dtype(in_dtype)?
         };
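
Note (not part of the patch): a minimal standalone sketch of the substitution the diff makes, assuming the public candle_core/candle_nn crates. softmax_last_dim computes the same values as softmax over D::Minus1, but can dispatch to a dedicated Metal/CUDA kernel rather than the composed exp/sum/div ops.

    // Hypothetical check, not taken from the PR: compare the two softmax calls.
    use candle_core::{D, Device, Result, Tensor};

    fn main() -> Result<()> {
        // Shape chosen to resemble an attention score tensor (heads, q_len, k_len).
        let att = Tensor::randn(0f32, 1f32, (2, 8, 8), &Device::Cpu)?;
        let a = candle_nn::ops::softmax(&att, D::Minus1)?;
        let b = candle_nn::ops::softmax_last_dim(&att)?;
        // The two results should agree; only the execution path differs.
        let diff = (a - b)?.abs()?.sum_all()?.to_scalar::<f32>()?;
        println!("total abs diff: {diff}");
        Ok(())
    }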