patch: remove usage of llm, chat model __call__ (#20788)

- `llm(prompt)` -> `llm.invoke(prompt)`
- `llm(prompt=prompt)` -> `llm.invoke(prompt)` (same with `messages=`)
- `llm(prompt, callbacks=callbacks)` -> `llm.invoke(prompt,
config={"callbacks": callbacks})`
- `llm(prompt, **kwargs)` -> `llm.invoke(prompt, **kwargs)`
This commit is contained in:
ccurme
2024-04-24 19:39:23 -04:00
committed by GitHub
parent 9b7fb381a4
commit 481d3855dc
181 changed files with 395 additions and 403 deletions

View File

@@ -170,13 +170,13 @@ class Anthropic(LLM, _AnthropicCommon):
# Simplest invocation, automatically wrapped with HUMAN_PROMPT
# and AI_PROMPT.
response = model("What are the biggest risks facing humanity?")
response = model.invoke("What are the biggest risks facing humanity?")
# Or if you want to use the chat mode, build a few-shot-prompt, or
# put words in the Assistant's mouth, use HUMAN_PROMPT and AI_PROMPT:
raw_prompt = "What are the biggest risks facing humanity?"
prompt = f"{anthropic.HUMAN_PROMPT} {prompt}{anthropic.AI_PROMPT}"
response = model(prompt)
response = model.invoke(prompt)
"""
class Config:
@@ -236,7 +236,7 @@ class Anthropic(LLM, _AnthropicCommon):
prompt = "What are the biggest risks facing humanity?"
prompt = f"\n\nHuman: {prompt}\n\nAssistant:"
response = model(prompt)
response = model.invoke(prompt)
"""
if self.streaming: