Add support for greedy decoding
The current sampling implementation always draws tokens with torch.multinomial, so there is no greedy path when temperature is 0.0, top-k is 0, and top-p is 1.0. This PR adds support for greedy decoding in that case: the token is taken as the argmax of the logits, and its probability is still returned for downstream use. A runnable sketch of the full decode step follows the diff below.
- modeling_llada2_moe.py +8 -0
modeling_llada2_moe.py CHANGED

@@ -1218,6 +1218,14 @@ class LLaDA2MoeModelLM(LLaDA2MoePreTrainedModel, GenerationMixin):
         orig_shape = logits.shape[:-1]
         vocab_size = logits.shape[-1]
         logits = logits.reshape(-1, vocab_size)
+
+        # Greedy mode: temperature = 0, no top-k/p
+        if temperature == 0.0 and (top_k in (None, 0)) and (top_p is None or top_p >= 1.0):
+            probs = F.softmax(logits, dim=-1)
+            token = logits.argmax(dim=-1, keepdim=True)
+            token_prob = probs.gather(-1, token)
+            return token.view(*orig_shape), token_prob.view(*orig_shape)
+
         if temperature > 0 and temperature != 1.0:
             logits = logits / temperature
         logits = self._top_k_logits(logits, top_k)
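For context, here is a self-contained sketch of the decode step this hunk modifies, assuming the (logits, temperature, top_k, top_p) contract shown in the diff. sample_tokens is an illustrative stand-in, not the model's actual helper, and the sampling branch elides the real _top_k_logits / top-p filtering:

import torch
import torch.nn.functional as F

def sample_tokens(logits, temperature=1.0, top_k=0, top_p=1.0):
    # logits: (..., vocab_size) -> (token_ids, token_probs), each shaped (...)
    orig_shape = logits.shape[:-1]
    vocab_size = logits.shape[-1]
    logits = logits.reshape(-1, vocab_size)

    # Greedy path added by this PR: take the argmax when temperature is 0
    # and no top-k / top-p filtering is requested.
    if temperature == 0.0 and (top_k in (None, 0)) and (top_p is None or top_p >= 1.0):
        probs = F.softmax(logits, dim=-1)
        token = logits.argmax(dim=-1, keepdim=True)
        token_prob = probs.gather(-1, token)
        return token.view(*orig_shape), token_prob.view(*orig_shape)

    # Sampling path (simplified: top-k / top-p filtering elided in this sketch).
    if temperature > 0 and temperature != 1.0:
        logits = logits / temperature
    probs = F.softmax(logits, dim=-1)
    token = torch.multinomial(probs, num_samples=1)
    token_prob = probs.gather(-1, token)
    return token.view(*orig_shape), token_prob.view(*orig_shape)

# Quick check: with temperature=0.0 the call is deterministic.
logits = torch.randn(2, 5, 128)  # (batch, seq_len, vocab_size)
tokens, probs = sample_tokens(logits, temperature=0.0, top_k=0, top_p=1.0)
print(tokens.shape, probs.shape)  # torch.Size([2, 5]) torch.Size([2, 5])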