Mirror of https://github.com/hpcaitech/ColossalAI.git (synced 2025-09-21 09:29:47 +00:00)

Commit: [chat] polish code note typo (#3612)
@@ -18,7 +18,7 @@ class Experience:
     action_log_probs: (B, A)
     values: (B)
     reward: (B)
-    advatanges: (B)
+    advantages: (B)
     attention_mask: (B, S)
     action_mask: (B, A)

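Note: the shapes above describe one batched experience record (B = batch size, S = sequence length, A = number of action tokens). For orientation, a minimal Python sketch of such a container follows; the `sequences` field, the exact types, and the dataclass layout are assumptions for illustration, not the coati definition.

from dataclasses import dataclass

import torch


@dataclass
class ExperienceSketch:
    """Illustrative batched PPO experience (B = batch, S = sequence, A = action tokens)."""
    sequences: torch.Tensor         # (B, S) generated token ids (assumed field, not shown in the hunk)
    action_log_probs: torch.Tensor  # (B, A) log-probs of the generated action tokens
    values: torch.Tensor            # (B)    critic value estimates
    reward: torch.Tensor            # (B)    scalar reward per sequence
    advantages: torch.Tensor        # (B)    advantage estimates
    attention_mask: torch.Tensor    # (B, S) mask over the full sequence
    action_mask: torch.Tensor       # (B, A) mask over the generated (action) part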
@@ -108,7 +108,7 @@ def convert_to_lora_recursively(module: nn.Module, lora_rank: int) -> None:

 class LoRAModule(nn.Module):
     """A LoRA module base class. All derived classes should call `convert_to_lora()` at the bottom of `__init__()`.
-    This calss will convert all torch.nn.Linear layer to LoraLinear layer.
+    This class will convert all torch.nn.Linear layer to LoraLinear layer.

     Args:
         lora_rank (int, optional): LoRA rank. 0 means LoRA is not applied. Defaults to 0.
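Note: per the docstring above, a LoRAModule swaps every torch.nn.Linear for a LoraLinear via convert_to_lora_recursively. A rough sketch of how such a recursive replacement can look is below; LoraLinearSketch, its initialization, and the merge-free forward pass are illustrative assumptions, not the coati implementation.

import torch
import torch.nn as nn


class LoraLinearSketch(nn.Module):
    """Illustrative low-rank adapter wrapped around an existing Linear layer."""

    def __init__(self, linear: nn.Linear, lora_rank: int):
        super().__init__()
        self.linear = linear  # original projection, reused as-is
        self.lora_a = nn.Parameter(torch.zeros(lora_rank, linear.in_features))
        self.lora_b = nn.Parameter(torch.zeros(linear.out_features, lora_rank))
        nn.init.normal_(self.lora_a, std=0.02)  # B starts at zero, so the adapter is a no-op initially

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # base output plus the low-rank update x @ A^T @ B^T
        return self.linear(x) + x @ self.lora_a.t() @ self.lora_b.t()


def convert_to_lora_sketch(module: nn.Module, lora_rank: int) -> None:
    """Recursively replace every nn.Linear child with the sketch adapter."""
    if lora_rank <= 0:
        return
    for name, child in module.named_children():
        if isinstance(child, nn.Linear):
            setattr(module, name, LoraLinearSketch(child, lora_rank))
        else:
            convert_to_lora_sketch(child, lora_rank)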
@@ -29,7 +29,7 @@ class DetachedPPOTrainer(DetachedTrainer):
         lora_rank (int) : for actor / critic init
         train_batch_size (int, defaults to 8): the batch size to use for training
         train_batch_size (int, defaults to 8): the batch size to use for training
-        buffer_limit (int, defaults to 0): the max_size limitaiton of replay buffer
+        buffer_limit (int, defaults to 0): the max_size limitation of replay buffer
         buffer_cpu_offload (bool, defaults to True): whether to offload replay buffer to cpu
         eps_clip (float, defaults to 0.2): the clip coefficient of policy loss
         value_clip (float, defaults to 0.4): the clip coefficient of value loss
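Note: eps_clip and value_clip above are the usual PPO clipping coefficients. As a worked illustration of how such coefficients are commonly applied (standard PPO formulation, not the coati loss code):

import torch


def clipped_policy_loss(log_probs, old_log_probs, advantages, eps_clip=0.2):
    # ratio between the new and old policy probabilities
    ratio = torch.exp(log_probs - old_log_probs)
    surr1 = ratio * advantages
    surr2 = torch.clamp(ratio, 1.0 - eps_clip, 1.0 + eps_clip) * advantages
    return -torch.min(surr1, surr2).mean()


def clipped_value_loss(values, old_values, returns, value_clip=0.4):
    # keep the new value estimate within value_clip of the old one
    clipped = old_values + torch.clamp(values - old_values, -value_clip, value_clip)
    return torch.max((values - returns) ** 2, (clipped - returns) ** 2).mean()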
@@ -83,7 +83,7 @@ class ExperienceMakerHolder:
         chosen_trainer = None
         min_length = None
         if 'debug' in self.generate_kwargs and self.generate_kwargs['debug'] == True:
-            print("[maker] choosing tartget trainer")
+            print("[maker] choosing target trainer")
         while chosen_trainer is None:
             for target_trainer in self.target_trainer_list:
                 try:
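Note: the loop above scans self.target_trainer_list until a trainer is chosen. One plausible reading, inferred only from the names chosen_trainer and min_length (not verified against the rest of the file), is picking the trainer with the smallest pending buffer; get_buffer_length below is a hypothetical stand-in for whatever query the holder actually performs.

def choose_target_trainer_sketch(target_trainer_list, get_buffer_length):
    """Illustrative selection: keep retrying until some trainer answers, prefer the least loaded one."""
    chosen_trainer = None
    min_length = None
    while chosen_trainer is None:
        for target_trainer in target_trainer_list:
            try:
                length = get_buffer_length(target_trainer)
            except Exception:
                continue  # trainer unreachable for now; try the next one and loop again
            if min_length is None or length < min_length:
                min_length = length
                chosen_trainer = target_trainer
    return chosen_trainer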
@@ -15,7 +15,7 @@ class BufferItem:
     action_log_probs: (A)
     values: (1)
     reward: (1)
-    advatanges: (1)
+    advantages: (1)
     attention_mask: (S)
     action_mask: (A)

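Note: a BufferItem carries the same fields as Experience but for a single sample, with the batch dimension stripped. A small unbatching sketch, reusing the hypothetical ExperienceSketch from the earlier note:

def split_experience_sketch(exp: "ExperienceSketch"):
    """Yield one un-batched item per row of a batched experience."""
    batch_size = exp.sequences.size(0)
    for i in range(batch_size):
        yield {
            "sequences": exp.sequences[i],                # (S)
            "action_log_probs": exp.action_log_probs[i],  # (A)
            "values": exp.values[i],                      # scalar, the docstring's (1)
            "reward": exp.reward[i],                      # scalar
            "advantages": exp.advantages[i],              # scalar
            "attention_mask": exp.attention_mask[i],      # (S)
            "action_mask": exp.action_mask[i],            # (A)
        }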
@@ -114,7 +114,7 @@ class PerformanceEvaluator(Callback):

         # actor forward-backward, 3 means forward(1) + backward(2)
         self.learn_flop += self.actor_num_params * batch_size * seq_len * 2 * (3 + int(self.enable_grad_checkpoint))
-        # critic foward-backward
+        # critic forward-backward
         self.learn_flop += self.critic_num_params * batch_size * seq_len * 2 * (3 + int(self.enable_grad_checkpoint))

     def on_fit_end(self) -> None:
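Note: the bookkeeping above uses the common 2 * params * tokens cost per forward pass, times 3 for forward(1) + backward(2), plus one extra forward when gradient checkpointing recomputes activations. A quick worked example with made-up numbers:

def learn_flop_sketch(num_params, batch_size, seq_len, enable_grad_checkpoint=False):
    # 2 * params * tokens per forward; backward costs roughly 2x a forward;
    # gradient checkpointing adds one more forward for activation recomputation.
    return num_params * batch_size * seq_len * 2 * (3 + int(enable_grad_checkpoint))


# e.g. a 7e9-parameter actor, batch 8, sequence length 512, no checkpointing:
# 7e9 * 8 * 512 * 2 * 3 ≈ 1.72e14 FLOPs per training step
print(f"{learn_flop_sketch(7e9, 8, 512):.3e}")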
@@ -28,12 +28,12 @@ class PPOTrainer(Trainer):
         actor (Actor): the actor model in ppo algorithm
         critic (Critic): the critic model in ppo algorithm
         reward_model (nn.Module): the reward model in rlhf algorithm to make reward of sentences
-        initial_model (Actor): the initial model in rlhf algorithm to generate reference logits to limit the update of actor
+        initial_model (Actor): the initial model in rlhf algorithm to generate reference logics to limit the update of actor
         actor_optim (Optimizer): the optimizer to use for actor model
         critic_optim (Optimizer): the optimizer to use for critic model
         kl_coef (float, defaults to 0.1): the coefficient of kl divergence loss
         train_batch_size (int, defaults to 8): the batch size to use for training
-        buffer_limit (int, defaults to 0): the max_size limitaiton of replay buffer
+        buffer_limit (int, defaults to 0): the max_size limitation of replay buffer
         buffer_cpu_offload (bool, defaults to True): whether to offload replay buffer to cpu
         eps_clip (float, defaults to 0.2): the clip coefficient of policy loss
         vf_coef (float, defaults to 1.0): the coefficient of value loss
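Note: initial_model supplies reference log-probs so the actor is not pushed too far from its starting policy, and kl_coef scales that penalty. A schematic of how such a KL-shaped reward is typically computed (illustrative, not the coati code):

import torch


def kl_shaped_reward_sketch(reward, action_log_probs, initial_log_probs, action_mask, kl_coef=0.1):
    """Subtract an approximate KL(actor || initial) penalty from the per-sequence reward."""
    # per-token log-ratio between the current actor and the frozen initial model
    log_ratio = (action_log_probs - initial_log_probs) * action_mask
    kl = log_ratio.sum(dim=-1)      # (B,) approximate KL per sequence
    return reward - kl_coef * kl    # (B,) shaped reward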
@@ -41,7 +41,7 @@ class PPOTrainer(Trainer):
         value_clip (float, defaults to 0.4): the clip coefficient of value loss
         experience_batch_size (int, defaults to 8): the batch size to use for experience generation
         max_epochs (int, defaults to 1): the number of epochs of training process
-        tokenier (Callable, optional): the tokenizer to use for tokenizing the input
+        tokenizer (Callable, optional): the tokenizer to use for tokenizing the input
         sample_replay_buffer (bool, defaults to False): whether to sample from replay buffer
         dataloader_pin_memory (bool, defaults to True): whether to pin memory for data loader
         callbacks (List[Callback], defaults to []): the callbacks to call during training process
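Note: the tokenizer argument is documented as a callable for tokenizing the input prompts. A hedged example of wrapping a Hugging Face tokenizer into such a callable; the exact return format the trainer expects is an assumption.

from transformers import AutoTokenizer

hf_tokenizer = AutoTokenizer.from_pretrained("gpt2")
hf_tokenizer.pad_token = hf_tokenizer.eos_token  # gpt2 has no pad token by default


def tokenize_fn(texts):
    # pad/truncate a list of prompt strings into tensors for generation
    batch = hf_tokenizer(texts, return_tensors="pt", padding=True, truncation=True, max_length=512)
    return {"input_ids": batch["input_ids"], "attention_mask": batch["attention_mask"]}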