Mirror of https://github.com/hwchase17/langchain.git (synced 2025-06-27 08:58:48 +00:00)
Harrison/fix caching bug (#788)
Co-authored-by: thepok <richterthepok@yahoo.de>
parent 248c297f1b
commit 5f73d06502
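This change guards the cached-generation path in `BaseLLM`: `on_llm_start`, the provider call, and the cache updates now run only when at least one prompt is missing from `langchain.llm_cache`, and `llm_output` falls back to `{}` when every prompt is served from the cache instead of always being read from `new_results`.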
Diff (whitespace-only indentation changes hidden):

```diff
@@ -92,6 +92,7 @@ class BaseLLM(BaseModel, ABC):
             else:
                 missing_prompts.append(prompt)
                 missing_prompt_idxs.append(i)
+        if len(missing_prompts) > 0:
         self.callback_manager.on_llm_start(
             {"name": self.__class__.__name__}, missing_prompts, verbose=self.verbose
         )
@@ -105,8 +106,11 @@ class BaseLLM(BaseModel, ABC):
             existing_prompts[missing_prompt_idxs[i]] = result
             prompt = prompts[missing_prompt_idxs[i]]
             langchain.llm_cache.update(prompt, llm_string, result)
+            llm_output = new_results.llm_output
+        else:
+            llm_output = {}
         generations = [existing_prompts[i] for i in range(len(prompts))]
-        return LLMResult(generations=generations, llm_output=new_results.llm_output)
+        return LLMResult(generations=generations, llm_output=llm_output)
 
     def get_num_tokens(self, text: str) -> int:
         """Get the number of tokens present in the text."""
```
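For illustration, below is a minimal, self-contained sketch of the fixed control flow. It is not LangChain's actual implementation; `SketchCache` and `sketch_generate` are made-up names, and the fake model call stands in for `self._generate`. It shows why the `else` branch is needed: when every prompt is a cache hit there is no fresh provider response to take `llm_output` from.

```python
# Minimal sketch of the patched caching flow (illustrative names, not LangChain's API).
from typing import Dict, List, Optional, Tuple


class SketchCache:
    """Toy in-memory cache keyed by (prompt, llm_string)."""

    def __init__(self) -> None:
        self._store: Dict[Tuple[str, str], List[str]] = {}

    def lookup(self, prompt: str, llm_string: str) -> Optional[List[str]]:
        return self._store.get((prompt, llm_string))

    def update(self, prompt: str, llm_string: str, result: List[str]) -> None:
        self._store[(prompt, llm_string)] = result


def sketch_generate(prompts: List[str], cache: SketchCache, llm_string: str) -> dict:
    """Mirror the patched logic: only call the model for cache misses."""
    existing: Dict[int, List[str]] = {}
    missing_prompts: List[str] = []
    missing_idxs: List[int] = []
    for i, prompt in enumerate(prompts):
        cached = cache.lookup(prompt, llm_string)
        if isinstance(cached, list):
            existing[i] = cached
        else:
            missing_prompts.append(prompt)
            missing_idxs.append(i)
    if len(missing_prompts) > 0:
        # Stand-in for the real provider call (self._generate in BaseLLM).
        new_generations = [[f"completion for {p!r}"] for p in missing_prompts]
        for i, result in enumerate(new_generations):
            existing[missing_idxs[i]] = result
            cache.update(prompts[missing_idxs[i]], llm_string, result)
        # Only a real provider response carries llm_output (e.g. token usage).
        llm_output = {"token_usage": {"provider_calls": len(missing_prompts)}}
    else:
        # Fully cached: there is no provider response to read llm_output from,
        # so default to an empty dict. This is the case the commit fixes.
        llm_output = {}
    generations = [existing[i] for i in range(len(prompts))]
    return {"generations": generations, "llm_output": llm_output}


cache = SketchCache()
print(sketch_generate(["tell me a joke"], cache, "llm-settings-v1"))  # cache miss
print(sketch_generate(["tell me a joke"], cache, "llm-settings-v1"))  # cache hit
```

Running the sketch twice with the same prompt exercises both branches: the first call goes through the fake model and populates the cache, the second returns entirely from cache with `llm_output == {}`.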