[example] fix llama example's loss error when using gemini plugin (#5060)

fix llama example
This commit is contained in:
flybird11111
2023-11-18 18:41:58 +08:00
committed by GitHub
parent 3c08f17348
commit bc09b95f50

View File

@@ -58,6 +58,7 @@ def tokenize_batch_for_finetune(batch, tokenizer: Optional[LlamaTokenizer] = Non
def all_reduce_mean(tensor: torch.Tensor) -> torch.Tensor:
    """Average ``tensor`` across all ranks of the default process group.

    Performs an in-place SUM all-reduce, then divides by the world size so
    every rank ends up holding the mean. The detach via ``.data`` keeps the
    in-place division off the autograd graph (needed e.g. under the Gemini
    plugin, where dividing a wrapped parameter/loss tensor in place would
    otherwise error).

    Args:
        tensor: value to average; modified in place on every rank.

    Returns:
        The same tensor, now holding the cross-rank mean.
    """
    dist.all_reduce(tensor, op=dist.ReduceOp.SUM)
    world_size = dist.get_world_size()
    detached = tensor.data
    detached.div_(world_size)
    return detached