Mirror of https://github.com/hpcaitech/ColossalAI.git (synced 2025-04-28 11:45:23 +00:00)
* style: rename replay buffer (experience replay is typically for off-policy algorithms; using this name in PPO may be misleading)
* fix: fix wrong zero2 default arg
* test: update experience tests
* style: rename zero_pad fn
* fix: defer init in CycledDataLoader
* test: add benchmark test
* style: rename internal fn of generation
* style: rename internal fn of lora
* fix: remove unused loss fn
* fix: remove unused utils fn
* refactor: remove generate_with_actor fn
* fix: fix type annotation
* test: add models tests
* fix: skip llama due to long execution time
* style: modify dataset
* style: apply formatter
* perf: update reward dataset
* fix: fix wrong IGNORE_INDEX in sft dataset
* fix: remove DataCollatorForSupervisedDataset
* test: add dataset tests
* style: apply formatter
* style: rename test_ci to test_train
* feat: add llama in inference
* test: add inference tests
* test: change test scripts directory
* fix: update ci
* fix: fix typo
* fix: skip llama due to oom
* fix: fix file mod
* style: apply formatter
* refactor: remove duplicated llama_gptq
* style: apply formatter
* to: update rm test
* feat: add tokenizer arg
* feat: add download model script
* test: update train tests
* fix: modify gemini load and save pretrained
* test: update checkpoint io test
* to: modify nproc_per_node
* fix: do not remove existing dir
* fix: modify save path
* test: add random choice
* fix: fix sft path
* fix: enlarge nproc_per_node to avoid oom
* fix: add num_retry
* fix: make lora config of rm and critic consistent
* fix: add warning about lora weights
* fix: skip some gpt2 tests
* fix: remove grad ckpt in rm and critic due to errors
* refactor: directly use Actor in train_sft
* test: add more arguments
* fix: disable grad ckpt when using lora
* fix: fix save_pretrained and related tests
* test: enable zero2 tests
* revert: remove useless fn
* style: polish code
* test: modify test args
69 lines · 2.3 KiB · Python
from typing import Optional, Union

import torch
import torch.nn.functional as F

def _compute_approx_kl(log_probs: torch.Tensor,
                       log_probs_base: torch.Tensor,
                       action_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
    """
    Compute the approximate KL divergence between two distributions.
    Schulman blog: http://joschu.net/blog/kl-approx.html

    Args:
        log_probs: Log probabilities of the new distribution.
        log_probs_base: Log probabilities of the base distribution.
        action_mask: Mask for actions.
    """
    # Per-token k3 estimator from the blog post above: (r - 1) - log r with
    # r = exp(log_probs_base - log_probs); each term is non-negative.
    log_ratio = log_probs_base - log_probs
    approx_kl = (log_ratio.exp() - 1) - log_ratio
    if action_mask is not None:
        return masked_mean(approx_kl, action_mask, dim=1)
    return approx_kl.mean(dim=1)
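
# Quick sanity-check sketch (illustrative shapes only, not part of the upstream file):
# the per-token estimate (r - 1) - log r is zero when the two distributions agree and
# non-negative otherwise, so the per-sequence mean behaves like a distance.
#
#     >>> lp = torch.randn(2, 6)            # (batch, num_actions) log probs of chosen tokens
#     >>> _compute_approx_kl(lp, lp)
#     tensor([0., 0.])
#     >>> bool((_compute_approx_kl(lp, torch.randn(2, 6)) >= 0).all())
#     True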

def compute_reward(r: Union[torch.Tensor, float],
                   kl_coef: float,
                   log_probs: torch.Tensor,
                   log_probs_base: torch.Tensor,
                   action_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
    """Apply a KL penalty to the reward: reward = r - kl_coef * KL(actor || base)."""
    if kl_coef <= 0.0:
        return r
    kl = _compute_approx_kl(log_probs, log_probs_base, action_mask=action_mask)
    reward = r - kl_coef * kl
    return reward
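
# A minimal usage sketch (toy shapes, assumed here rather than taken from the source):
# the scalar reward for each sequence is shifted down by kl_coef times the approximate
# KL between the actor and the frozen base policy, which discourages the actor from
# drifting too far during PPO.
#
#     >>> r = torch.tensor([1.0, 0.5])                          # reward-model scores, (batch,)
#     >>> log_probs = torch.randn(2, 4)                         # actor log probs, (batch, num_actions)
#     >>> log_probs_base = torch.randn(2, 4)                    # base-model log probs
#     >>> action_mask = torch.ones(2, 4)
#     >>> compute_reward(r, 0.1, log_probs, log_probs_base, action_mask).shape
#     torch.Size([2])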

def _log_probs_from_logits(logits: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
    """Gather the log probability of each label token from the logits."""
    log_probs = F.log_softmax(logits, dim=-1)
    log_probs_labels = log_probs.gather(dim=-1, index=labels.unsqueeze(-1))
    return log_probs_labels.squeeze(-1)
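
# Shape sketch (the vocab size 100 is an arbitrary assumption): for logits of shape
# (batch, seq_len, vocab) and integer labels of shape (batch, seq_len), the result is
# the log probability the model assigns to each label token, shape (batch, seq_len).
#
#     >>> logits = torch.randn(2, 7, 100)
#     >>> labels = torch.randint(0, 100, (2, 7))
#     >>> _log_probs_from_logits(logits, labels).shape
#     torch.Size([2, 7])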

def calc_action_log_probs(output: dict, sequences: torch.LongTensor, num_actions: int) -> torch.Tensor:
    """Calculate action log probs.

    Args:
        output (dict): Output of Actor.forward, which must contain a 'logits' entry.
        sequences (torch.LongTensor): Input sequences.
        num_actions (int): Number of actions.

    Returns:
        torch.Tensor: Action log probs.
    """
    logits = output['logits']
    # Shift by one position so logits[:, t] predicts sequences[:, t + 1], then keep
    # only the log probs of the last `num_actions` (generated) tokens.
    log_probs = _log_probs_from_logits(logits[:, :-1, :], sequences[:, 1:])
    return log_probs[:, -num_actions:]
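
# A hedged usage sketch: `actor_output` below is a stand-in for whatever Actor.forward
# returns; the function only requires that it can be indexed with 'logits'. With 5
# prompt tokens followed by 3 generated action tokens, the last 3 shifted log probs
# are kept.
#
#     >>> sequences = torch.randint(0, 100, (2, 8))             # 5 prompt + 3 action tokens
#     >>> actor_output = {'logits': torch.randn(2, 8, 100)}
#     >>> calc_action_log_probs(actor_output, sequences, num_actions=3).shape
#     torch.Size([2, 3])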

def masked_mean(tensor: torch.Tensor, mask: torch.Tensor, dim: int = 1) -> torch.Tensor:
    """Mean of `tensor` over `dim`, counting only positions where `mask` is non-zero."""
    tensor = tensor * mask
    tensor = tensor.sum(dim=dim)
    mask_sum = mask.sum(dim=dim)
    mean = tensor / (mask_sum + 1e-8)
    return mean
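
# Toy illustration (values made up): masked positions are excluded from both the sum
# and the denominator, so padding does not dilute the per-sequence mean.
#
#     >>> x = torch.tensor([[1.0, 2.0, 100.0]])
#     >>> m = torch.tensor([[1.0, 1.0, 0.0]])
#     >>> masked_mean(x, m)                                     # (1 + 2) / 2
#     tensor([1.5000])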