From 0672b5afacf06735fe669fe568aadab3cbd9d8aa Mon Sep 17 00:00:00 2001
From: BlueRum <70618399+ht-zhou@users.noreply.github.com>
Date: Mon, 13 Mar 2023 10:37:41 +0800
Subject: [PATCH] [chatgpt] fix lora support for gpt (#3113)

* fix gpt-actor

* fix gpt-critic

* fix opt-critic
---
 applications/ChatGPT/chatgpt/models/gpt/gpt_actor.py  | 8 ++++++--
 applications/ChatGPT/chatgpt/models/gpt/gpt_critic.py | 7 +++++--
 applications/ChatGPT/chatgpt/models/opt/opt_critic.py | 2 +-
 3 files changed, 12 insertions(+), 5 deletions(-)

diff --git a/applications/ChatGPT/chatgpt/models/gpt/gpt_actor.py b/applications/ChatGPT/chatgpt/models/gpt/gpt_actor.py
index da24685e1..6a53ad40b 100644
--- a/applications/ChatGPT/chatgpt/models/gpt/gpt_actor.py
+++ b/applications/ChatGPT/chatgpt/models/gpt/gpt_actor.py
@@ -14,12 +14,16 @@ class GPTActor(Actor):
         pretrained (str): Pretrained model name or path.
         config (GPT2Config): Model config.
         checkpoint (bool): Enable gradient checkpointing.
+        lora_rank (int): Rank of the LoRA layers.
+        lora_train_bias (str): Bias training strategy for the LoRA layers.
     """
 
     def __init__(self,
                  pretrained: Optional[str] = None,
                  config: Optional[GPT2Config] = None,
-                 checkpoint: bool = False) -> None:
+                 checkpoint: bool = False,
+                 lora_rank: int = 0,
+                 lora_train_bias: str = 'none') -> None:
         if pretrained is not None:
             model = GPT2LMHeadModel.from_pretrained(pretrained)
         elif config is not None:
@@ -28,4 +32,4 @@ class GPTActor(Actor):
             model = GPT2LMHeadModel(GPT2Config())
         if checkpoint:
             model.gradient_checkpointing_enable()
-        super().__init__(model)
+        super().__init__(model, lora_rank, lora_train_bias)
diff --git a/applications/ChatGPT/chatgpt/models/gpt/gpt_critic.py b/applications/ChatGPT/chatgpt/models/gpt/gpt_critic.py
index 01e824386..25bb1ed94 100644
--- a/applications/ChatGPT/chatgpt/models/gpt/gpt_critic.py
+++ b/applications/ChatGPT/chatgpt/models/gpt/gpt_critic.py
@@ -15,13 +15,16 @@ class GPTCritic(Critic):
         pretrained (str): Pretrained model name or path.
         config (GPT2Config): Model config.
         checkpoint (bool): Enable gradient checkpointing.
+        lora_rank (int): Rank of the LoRA layers.
+        lora_train_bias (str): Bias training strategy for the LoRA layers.
     """
 
     def __init__(self,
                  pretrained: Optional[str] = None,
                  config: Optional[GPT2Config] = None,
                  checkpoint: bool = False,
-                 **kwargs) -> None:
+                 lora_rank: int = 0,
+                 lora_train_bias: str = 'none') -> None:
         if pretrained is not None:
             model = GPT2Model.from_pretrained(pretrained)
         elif config is not None:
@@ -31,4 +34,4 @@ class GPTCritic(Critic):
         if checkpoint:
             model.gradient_checkpointing_enable()
         value_head = nn.Linear(model.config.n_embd, 1)
-        super().__init__(model, value_head, **kwargs)
+        super().__init__(model, value_head, lora_rank, lora_train_bias)
diff --git a/applications/ChatGPT/chatgpt/models/opt/opt_critic.py b/applications/ChatGPT/chatgpt/models/opt/opt_critic.py
index 847813332..fcfebd8a8 100644
--- a/applications/ChatGPT/chatgpt/models/opt/opt_critic.py
+++ b/applications/ChatGPT/chatgpt/models/opt/opt_critic.py
@@ -34,5 +34,5 @@ class OPTCritic(Critic):
             model = OPTModel(OPTConfig())
         if checkpoint:
             model.gradient_checkpointing_enable()
-        value_head = nn.Linear(model.config.hidden_size, 1)
+        value_head = nn.Linear(model.config.word_embed_proj_dim, 1)
         super().__init__(model, value_head, lora_rank, lora_train_bias, **kwargs)
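
Illustrative usage (not part of the patch): a minimal sketch of how the changed constructors would be called once LoRA support works, assuming GPTActor and GPTCritic are importable from chatgpt.models.gpt as in this repository layout, and that lora_rank=0 keeps LoRA disabled while a positive rank enables the low-rank adapters. The 'none'/'all'/'lora_only' values for lora_train_bias follow the usual loralib convention and are an assumption here.

# Sketch only: exercises the lora_rank / lora_train_bias arguments added above.
from chatgpt.models.gpt import GPTActor, GPTCritic

# lora_rank=0 would leave the models unchanged; a positive rank wraps the
# linear layers with low-rank adapters. lora_train_bias selects which bias
# parameters remain trainable (assumed: 'none', 'all', or 'lora_only').
actor = GPTActor(pretrained='gpt2', checkpoint=True, lora_rank=8, lora_train_bias='none')
critic = GPTCritic(pretrained='gpt2', checkpoint=True, lora_rank=8, lora_train_bias='none')

Passing the two LoRA arguments through to super().__init__ is what actually activates the adapter conversion in the shared Actor/Critic base classes; before this patch the GPT actor dropped them and the GPT critic forwarded an unrelated **kwargs, so LoRA was never applied.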