fix response_idx shape in reward model call, tested

YeAnbang
2025-06-09 09:37:28 +08:00
parent 177144794b
commit de40c736d0
3 changed files with 6 additions and 4 deletions


@@ -132,9 +132,7 @@ class BaseConsumer:
         format_acc = raw_batch["format_acc"][:, :, 0]
         ans_acc = raw_batch["ans_acc"][:, :, 0]
         response_len = (
-            raw_batch["response_idx"][:, :, 1]
-            - raw_batch["response_idx"][:, :, 0]
-            + 1
+            raw_batch["response_idx"][:, :, 1] - raw_batch["response_idx"][:, :, 0] + 1
         ).type(torch.float32)
         effective_group_mask = None
         if self.filter_range is not None and self.grpo_config.get("dynamic_batching", True):
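
The hunk above is purely cosmetic: it reflows the response-length computation onto one line without changing behavior. response_idx[:, :, 0] and response_idx[:, :, 1] hold the start and end token positions of each sampled response, so the inclusive length is end - start + 1. A minimal standalone sketch of that computation, with dummy values assumed for illustration:

import torch

# Dummy [batch, num_generations, 2] index tensor; values are illustrative.
# response_idx[b, g] = (start, end), inclusive token positions of response g
# to prompt b.
response_idx = torch.tensor(
    [[[5, 9], [5, 12], [5, 7]],
     [[4, 4], [4, 10], [4, 6]]]
)

# Inclusive span length: end - start + 1, cast to float as in the hunk above.
response_len = (response_idx[:, :, 1] - response_idx[:, :, 0] + 1).type(torch.float32)

print(response_len)
# tensor([[5., 8., 3.],
#         [1., 7., 3.]])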


@@ -291,7 +291,7 @@ class BaseProducer:
             reward_model_output = self.reward_model(
                 outputs["input_ids"].view((-1, outputs["input_ids"].size(-1))),
                 gt_answer=gt_answer,
-                response_idx=outputs["response_idx"],
+                response_idx=outputs["response_idx"].view((-1, 2)),
             )
             outputs["reward"] = (
                 torch.tensor([value[0] for value in reward_model_output])
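
This second hunk is the actual fix: input_ids is flattened from a 3-D tensor to [batch * num_generations, seq_len] before the reward-model call, so response_idx must be flattened the same way, from [batch, num_generations, 2] to [batch * num_generations, 2], to keep one (start, end) pair aligned with each flattened row. A sketch of the shape logic, with hypothetical sizes (the reward model itself is not reproduced here):

import torch

batch, num_gen, seq_len = 2, 4, 16  # hypothetical sizes

input_ids = torch.randint(0, 100, (batch, num_gen, seq_len))
response_idx = torch.randint(0, seq_len, (batch, num_gen, 2))

# The producer flattens the (batch, generation) axes into one sample axis:
flat_ids = input_ids.view((-1, input_ids.size(-1)))  # shape [8, 16]

# The fix applies the same flattening to the index tensor, so row i of
# flat_idx holds the (start, end) pair for row i of flat_ids:
flat_idx = response_idx.view((-1, 2))  # shape [8, 2]

assert flat_ids.size(0) == flat_idx.size(0)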