mirror of
https://github.com/hwchase17/langchain.git
synced 2025-09-08 06:23:20 +00:00
patch: remove usage of llm, chat model __call__ (#20788)
- `llm(prompt)` -> `llm.invoke(prompt)` - `llm(prompt=prompt)` -> `llm.invoke(prompt)` (same with `messages=`) - `llm(prompt, callbacks=callbacks)` -> `llm.invoke(prompt, config={"callbacks": callbacks})` - `llm(prompt, **kwargs)` -> `llm.invoke(prompt, **kwargs)`
This commit is contained in:
@@ -13,7 +13,7 @@ def test_llm_with_callbacks() -> None:
     """Test LLM callbacks."""
     handler = FakeCallbackHandler()
     llm = FakeListLLM(callbacks=[handler], verbose=True, responses=["foo"])
-    output = llm("foo")
+    output = llm.invoke("foo")
     assert output == "foo"
     assert handler.starts == 1
     assert handler.ends == 1
@@ -26,7 +26,7 @@ def test_chat_model_with_v1_callbacks() -> None:
     llm = FakeListChatModel(
         callbacks=[handler], verbose=True, responses=["fake response"]
     )
-    output = llm([HumanMessage(content="foo")])
+    output = llm.invoke([HumanMessage(content="foo")])
     assert output.content == "fake response"
     assert handler.starts == 1
     assert handler.ends == 1
@@ -41,7 +41,7 @@ def test_chat_model_with_v2_callbacks() -> None:
     llm = FakeListChatModel(
         callbacks=[handler], verbose=True, responses=["fake response"]
    )
-    output = llm([HumanMessage(content="foo")])
+    output = llm.invoke([HumanMessage(content="foo")])
    assert output.content == "fake response"
    assert handler.starts == 1
    assert handler.ends == 1
Reference in New Issue
Block a user