Mirror of https://github.com/hwchase17/langchain.git, synced 2025-09-05 04:55:14 +00:00
patch: remove usage of llm, chat model __call__ (#20788)
- `llm(prompt)` -> `llm.invoke(prompt)`
- `llm(prompt=prompt)` -> `llm.invoke(prompt)` (same with `messages=`)
- `llm(prompt, callbacks=callbacks)` -> `llm.invoke(prompt, config={"callbacks": callbacks})`
- `llm(prompt, **kwargs)` -> `llm.invoke(prompt, **kwargs)`
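For illustration, a minimal sketch of the migration described above (not part of this patch; it assumes the `langchain_openai` package and `StdOutCallbackHandler` from `langchain_core.callbacks`, and import paths may differ in older releases):

```python
# Hypothetical example of the __call__ -> invoke migration; not taken from the patch.
from langchain_core.callbacks import StdOutCallbackHandler
from langchain_openai import OpenAI  # assumed import path

llm = OpenAI(temperature=0)

# Old __call__ style (deprecated):
#     output = llm("Say something nice:")
# New style:
output = llm.invoke("Say something nice:")

# Callbacks move from a keyword argument into the run config:
#     llm("Say something nice:", callbacks=[StdOutCallbackHandler()])
output = llm.invoke(
    "Say something nice:",
    config={"callbacks": [StdOutCallbackHandler()]},
)
```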
```diff
@@ -18,7 +18,7 @@ from tests.unit_tests.callbacks.fake_callback_handler import (
 def test_openai_call() -> None:
     """Test valid call to openai."""
     llm = OpenAI()
-    output = llm("Say something nice:")
+    output = llm.invoke("Say something nice:")
     assert isinstance(output, str)
 
 
@@ -34,9 +34,9 @@ def test_openai_stop_valid() -> None:
     """Test openai stop logic on valid configuration."""
     query = "write an ordered list of five items"
     first_llm = OpenAI(stop="3", temperature=0)
-    first_output = first_llm(query)
+    first_output = first_llm.invoke(query)
     second_llm = OpenAI(temperature=0)
-    second_output = second_llm(query, stop=["3"])
+    second_output = second_llm.invoke(query, stop=["3"])
     # Because it stops on new lines, shouldn't return anything
     assert first_output == second_output
 
@@ -45,7 +45,7 @@ def test_openai_stop_error() -> None:
     """Test openai stop logic on bad configuration."""
     llm = OpenAI(stop="3", temperature=0)
     with pytest.raises(ValueError):
-        llm("write an ordered list of five items", stop=["\n"])
+        llm.invoke("write an ordered list of five items", stop=["\n"])
 
 
 def test_saving_loading_llm(tmp_path: Path) -> None:
@@ -158,7 +158,7 @@ def test_openai_streaming_multiple_prompts_error() -> None:
 def test_openai_streaming_call() -> None:
     """Test valid call to openai."""
     llm = OpenAI(max_tokens=10, streaming=True)
-    output = llm("Say foo:")
+    output = llm.invoke("Say foo:")
     assert isinstance(output, str)
 
 
@@ -173,7 +173,7 @@ def test_openai_streaming_callback() -> None:
         callback_manager=callback_manager,
         verbose=True,
     )
-    llm("Write me a sentence with 100 words.")
+    llm.invoke("Write me a sentence with 100 words.")
     assert callback_handler.llm_streams == 10
```