From aea44eb24677fc6f27b3ca2acfefafd49a044293 Mon Sep 17 00:00:00 2001
From: Ankush Gola
Date: Wed, 7 Jun 2023 22:56:57 -0700
Subject: [PATCH] format

---
 langchain/llms/base.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/langchain/llms/base.py b/langchain/llms/base.py
index f5e76e310aa..c30799816c2 100644
--- a/langchain/llms/base.py
+++ b/langchain/llms/base.py
@@ -108,7 +108,9 @@ class BaseLLM(BaseLanguageModel, ABC):
         else:
             return verbose
 
-    def _flatten_llm_result(self, prompts: List[str], result: LLMResult) -> List[LLMResult]:
+    def _flatten_llm_result(
+        self, prompts: List[str], result: LLMResult
+    ) -> List[LLMResult]:
         """Flatten the LLMResult into a list of LLMResults for batched runs."""
         if len(result.generations) != len(prompts):
             raise ValueError(
@@ -119,7 +121,9 @@ class BaseLLM(BaseLanguageModel, ABC):
         for prompt, gens in zip(prompts, result.generations):
             try:
                 llm_output = {
-                    "completion_tokens": self.get_num_tokens("".join([gen.text for gen in gens])),
+                    "completion_tokens": self.get_num_tokens(
+                        "".join([gen.text for gen in gens])
+                    ),
                     "prompt_tokens": self.get_num_tokens(prompt),
                 }
                 llm_output["total_tokens"] = (
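
Note: for readers following along, the method being reformatted flattens a batched
LLMResult (one generations list per prompt) into a separate LLMResult per prompt,
with per-prompt token accounting. A minimal standalone sketch of that logic follows,
assuming the truncated tail of the hunk sums completion_tokens and prompt_tokens into
total_tokens; the NotImplementedError fallback and the per-prompt LLMResult
construction are assumptions not shown in the hunk, and get_num_tokens is passed in
as a callable rather than taken from self.

    from typing import Callable, List, Optional

    # LLMResult lived in langchain.schema in this era of the codebase.
    from langchain.schema import LLMResult


    def flatten_llm_result(
        prompts: List[str],
        result: LLMResult,
        get_num_tokens: Callable[[str], int],
    ) -> List[LLMResult]:
        """Flatten a batched LLMResult into one LLMResult per prompt."""
        if len(result.generations) != len(prompts):
            raise ValueError(
                f"Expected {len(prompts)} generation lists, "
                f"got {len(result.generations)}"
            )
        llm_results = []
        for prompt, gens in zip(prompts, result.generations):
            llm_output: Optional[dict]
            try:
                llm_output = {
                    "completion_tokens": get_num_tokens(
                        "".join(gen.text for gen in gens)
                    ),
                    "prompt_tokens": get_num_tokens(prompt),
                }
                # Assumed continuation of the truncated hunk: total is the sum.
                llm_output["total_tokens"] = (
                    llm_output["completion_tokens"] + llm_output["prompt_tokens"]
                )
            except NotImplementedError:
                # Assumption: models without a tokenizer skip token accounting.
                llm_output = None
            # Assumption: each prompt's generations become their own result.
            llm_results.append(
                LLMResult(generations=[gens], llm_output=llm_output)
            )
        return llm_results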