Mirror of https://github.com/hpcaitech/ColossalAI.git, synced 2025-04-28 03:43:01 +00:00
62 lines
2.0 KiB
Python
import torch

from .reward_utils import extract_solution, validate_response_structure


def math_reward_fn(input_ids, gt_answer, response_idx, **kwargs):
    # Reward weights: 1.0 for a well-formed response, 9.0 for a correct answer.
    format_score = 1.0
    acc_score = 9.0
    tokenizer = kwargs["tokenizer"]
    reward = torch.tensor(0.0)
    format_reward = torch.tensor(0.0)
    acc_reward = torch.tensor(0.0)
    s, e = response_idx[0], response_idx[1]
    if gt_answer is None:
        return reward

    # Decode the generated response span and the tokenized ground-truth answer.
    decoded_final_answer = tokenizer.decode(input_ids[s : e + 1], skip_special_tokens=True)
    gt_answer = tokenizer.decode(gt_answer.squeeze(0), skip_special_tokens=True)
    final_answer, processed_str = extract_solution(decoded_final_answer)

    format_valid = validate_response_structure(processed_str, kwargs["tags"])

    # Check format accuracy
    if format_valid:
        format_reward += format_score
        reward += format_score

    # Check answer accuracy
    if (
        final_answer is not None
        and gt_answer.strip().replace(" ", "").lower() == final_answer.strip().replace(" ", "").lower()
    ):
        acc_reward += acc_score
        reward += acc_score

    # Return [total_reward, format_reward, accuracy_reward] on the same device as input_ids.
    return torch.tensor([reward, format_reward, acc_reward]).to(input_ids.device)
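# --- Usage sketch (not part of the original file) ---
# A minimal illustration of the call shape and how to unpack the result. The positional
# and keyword names mirror the signature above, but this wrapper is an assumption for
# illustration, not the actual ColossalAI GRPO consumer code.
def _example_math_reward_usage(tokenizer, tags, input_ids, gt_answer_ids, response_idx):
    rewards = math_reward_fn(
        input_ids,       # prompt + response token ids for one sample
        gt_answer_ids,   # tokenized ground-truth answer; decoded inside the function
        response_idx,    # (start, end) indices of the response span, inclusive
        tokenizer=tokenizer,
        tags=tags,       # tag spec consumed by validate_response_structure
    )
    total_reward, format_reward, acc_reward = rewards.tolist()
    return total_reward, format_reward, acc_reward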


def gsm8k_reward_fn(input_ids, **kwargs):
    # Scalar reward: 0.0 for a malformed response or non-integer answer,
    # 1.0 for a valid format, 10.0 when the answer also matches the ground truth.
    gt_answer = kwargs["gt_answer"]
    tokenizer = kwargs["tokenizer"]
    s, e = kwargs["response_start"], kwargs["response_end"]
    reward = torch.tensor(0.0).to(input_ids.device)
    if gt_answer is None:
        return reward
    decoded_final_answer = tokenizer.decode(input_ids[s : e + 1], skip_special_tokens=True)
    final_answer, processed_str = extract_solution(decoded_final_answer)

    # GSM8K answers are integers; reject anything that does not parse as one.
    is_valid = True
    try:
        int(final_answer.strip())
    except Exception:
        is_valid = False

    format_valid = validate_response_structure(processed_str, kwargs["tags"])
    if not is_valid or not format_valid:
        return reward
    else:
        reward += 1.0
        if gt_answer.strip().replace(" ", "").lower() == final_answer.strip().replace(" ", "").lower():
            reward = reward + 9.0
        return reward
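

# --- Usage sketch (not part of the original file) ---
# gsm8k_reward_fn pulls everything except input_ids from kwargs; the keyword names below
# come from the function body, while this wrapper itself is only an illustration of the
# expected call shape, not the real call site. Note that gt_answer is compared as a
# decoded string in this function rather than as token ids.
def _example_gsm8k_reward_usage(tokenizer, tags, input_ids, gt_answer, response_start, response_end):
    reward = gsm8k_reward_fn(
        input_ids,
        gt_answer=gt_answer,            # ground-truth answer as a string
        tokenizer=tokenizer,
        response_start=response_start,  # first token index of the response
        response_end=response_end,      # last token index of the response (inclusive)
        tags=tags,                      # tag spec consumed by validate_response_structure
    )
    # 0.0 -> invalid format or non-integer answer, 1.0 -> valid format only,
    # 10.0 -> valid format and exact answer match.
    return reward.item()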