patch: remove usage of llm, chat model __call__ (#20788)
- `llm(prompt)` -> `llm.invoke(prompt)`
- `llm(prompt=prompt)` -> `llm.invoke(prompt)` (same with `messages=`)
- `llm(prompt, callbacks=callbacks)` -> `llm.invoke(prompt, config={"callbacks": callbacks})`
- `llm(prompt, **kwargs)` -> `llm.invoke(prompt, **kwargs)`
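For orientation, a minimal runnable sketch of the same migration, assuming the in-memory fake models exported from `langchain_core.language_models` (variable names are illustrative, not taken from the tests below):

```python
from langchain_core.language_models import FakeListChatModel, FakeListLLM
from langchain_core.messages import HumanMessage

llm = FakeListLLM(responses=["foo"])
chat = FakeListChatModel(responses=["bar"])

# llm("Say foo:")                    -> llm.invoke("Say foo:")
text_out = llm.invoke("Say foo:")  # an LLM still returns a plain string
# chat([HumanMessage(content="Hi")]) -> chat.invoke([HumanMessage(content="Hi")])
msg_out = chat.invoke([HumanMessage(content="Hi")])  # a chat model still returns a BaseMessage
# llm(prompt, callbacks=cbs)         -> llm.invoke(prompt, config={"callbacks": cbs})
# llm(prompt, **provider_kwargs)     -> llm.invoke(prompt, **provider_kwargs)

assert text_out == "foo"
assert msg_out.content == "bar"
```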
@@ -14,7 +14,7 @@ def test_konko_chat_test() -> None:
     """Evaluate basic ChatKonko functionality."""
     chat_instance = ChatKonko(max_tokens=10)
     msg = HumanMessage(content="Hi")
-    chat_response = chat_instance([msg])
+    chat_response = chat_instance.invoke([msg])
     assert isinstance(chat_response, BaseMessage)
     assert isinstance(chat_response.content, str)

@@ -23,7 +23,7 @@ def test_konko_chat_test_openai() -> None:
     """Evaluate basic ChatKonko functionality."""
     chat_instance = ChatKonko(max_tokens=10, model="meta-llama/llama-2-70b-chat")
     msg = HumanMessage(content="Hi")
-    chat_response = chat_instance([msg])
+    chat_response = chat_instance.invoke([msg])
     assert isinstance(chat_response, BaseMessage)
     assert isinstance(chat_response.content, str)

@@ -48,7 +48,7 @@ def test_konko_system_msg_test() -> None:
     chat_instance = ChatKonko(max_tokens=10)
     sys_msg = SystemMessage(content="Initiate user chat.")
     user_msg = HumanMessage(content="Hi there")
-    chat_response = chat_instance([sys_msg, user_msg])
+    chat_response = chat_instance.invoke([sys_msg, user_msg])
     assert isinstance(chat_response, BaseMessage)
     assert isinstance(chat_response.content, str)

@@ -92,7 +92,7 @@ def test_konko_streaming_callback_test() -> None:
         verbose=True,
     )
     msg = HumanMessage(content="Hi")
-    chat_response = chat_instance([msg])
+    chat_response = chat_instance.invoke([msg])
     assert callback_instance.llm_streams > 0
     assert isinstance(chat_response, BaseMessage)

@@ -72,7 +72,7 @@ def test_wasm_chat_without_service_url() -> None:
     messages = [system_message, user_message]

     with pytest.raises(ValueError) as e:
-        chat(messages)
+        chat.invoke(messages)

     assert "Error code: 503" in str(e)
     assert "reason: The IP address or port of the chat service is incorrect." in str(e)
@@ -15,7 +15,7 @@ def test_konko_call() -> None:
         temperature=0.2,
         max_tokens=250,
     )
-    output = llm("Say foo:")
+    output = llm.invoke("Say foo:")

     assert llm._llm_type == "konko"
     assert isinstance(output, str)
@@ -13,7 +13,7 @@ def test_llm_with_callbacks() -> None:
    """Test LLM callbacks."""
     handler = FakeCallbackHandler()
     llm = FakeListLLM(callbacks=[handler], verbose=True, responses=["foo"])
-    output = llm("foo")
+    output = llm.invoke("foo")
     assert output == "foo"
     assert handler.starts == 1
     assert handler.ends == 1
@@ -26,7 +26,7 @@ def test_chat_model_with_v1_callbacks() -> None:
     llm = FakeListChatModel(
         callbacks=[handler], verbose=True, responses=["fake response"]
     )
-    output = llm([HumanMessage(content="foo")])
+    output = llm.invoke([HumanMessage(content="foo")])
     assert output.content == "fake response"
     assert handler.starts == 1
     assert handler.ends == 1
@@ -41,7 +41,7 @@ def test_chat_model_with_v2_callbacks() -> None:
     llm = FakeListChatModel(
         callbacks=[handler], verbose=True, responses=["fake response"]
     )
-    output = llm([HumanMessage(content="foo")])
+    output = llm.invoke([HumanMessage(content="foo")])
     assert output.content == "fake response"
     assert handler.starts == 1
     assert handler.ends == 1
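The callback tests above rely on handlers bound at construction time still firing when the model is driven through `.invoke()`. A hedged sketch of that behavior, using a hypothetical `CountingHandler` in place of the suite's own `FakeCallbackHandler`, and assuming the `FakeListLLM` exported from `langchain_core.language_models`:

```python
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.language_models import FakeListLLM


class CountingHandler(BaseCallbackHandler):
    """Counts LLM start/end callback events."""

    def __init__(self) -> None:
        self.starts = 0
        self.ends = 0

    def on_llm_start(self, serialized, prompts, **kwargs):
        # Fired once per .invoke() call, before generation starts.
        self.starts += 1

    def on_llm_end(self, response, **kwargs):
        # Fired once per .invoke() call, after generation finishes.
        self.ends += 1


handler = CountingHandler()
llm = FakeListLLM(callbacks=[handler], verbose=True, responses=["foo"])

assert llm.invoke("foo") == "foo"
assert handler.starts == 1
assert handler.ends == 1
```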
@@ -64,7 +64,7 @@ def test_gradient_llm_sync(mocker: MockerFixture, setup: dict) -> None:
     assert llm.gradient_workspace_id == _GRADIENT_WORKSPACE_ID
     assert llm.model_id == _MODEL_ID

-    response = llm("Say foo:")
+    response = llm.invoke("Say foo:")
     want = "bar"

     assert response == want
@@ -46,7 +46,7 @@ def test_pass_headers_if_provided(monkeypatch: MonkeyPatch) -> None:

     monkeypatch.setattr(requests, "post", mock_post)

-    llm("Test prompt")
+    llm.invoke("Test prompt")


 def test_handle_if_headers_not_provided(monkeypatch: MonkeyPatch) -> None:
@@ -65,7 +65,7 @@ def test_handle_if_headers_not_provided(monkeypatch: MonkeyPatch) -> None:

     monkeypatch.setattr(requests, "post", mock_post)

-    llm("Test prompt")
+    llm.invoke("Test prompt")


 def test_handle_kwargs_top_level_parameters(monkeypatch: MonkeyPatch) -> None:
@@ -109,7 +109,7 @@ def test_handle_kwargs_top_level_parameters(monkeypatch: MonkeyPatch) -> None:

     monkeypatch.setattr(requests, "post", mock_post)

-    llm("Test prompt", model="test-model", system="Test system prompt")
+    llm.invoke("Test prompt", model="test-model", system="Test system prompt")


 def test_handle_kwargs_with_unknown_param(monkeypatch: MonkeyPatch) -> None:
@@ -157,7 +157,7 @@ def test_handle_kwargs_with_unknown_param(monkeypatch: MonkeyPatch) -> None:

     monkeypatch.setattr(requests, "post", mock_post)

-    llm("Test prompt", unknown="Unknown parameter value", temperature=0.8)
+    llm.invoke("Test prompt", unknown="Unknown parameter value", temperature=0.8)


 def test_handle_kwargs_with_options(monkeypatch: MonkeyPatch) -> None:
@@ -189,7 +189,7 @@ def test_handle_kwargs_with_options(monkeypatch: MonkeyPatch) -> None:

     monkeypatch.setattr(requests, "post", mock_post)

-    llm(
+    llm.invoke(
         "Test prompt",
         model="test-another-model",
         options={"unknown_option": "Unknown option value"},
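These last hunks depend on extra keyword arguments given to `.invoke()` being forwarded to the model just as they were with `__call__`. A small illustrative sketch of that pass-through, using a hypothetical `EchoKwargsLLM` rather than anything from the test suite:

```python
from typing import Any, List, Optional

from langchain_core.language_models import LLM


class EchoKwargsLLM(LLM):
    """Toy LLM that echoes back whatever extra kwargs reach _call()."""

    @property
    def _llm_type(self) -> str:
        return "echo-kwargs"

    def _call(
        self, prompt: str, stop: Optional[List[str]] = None, **kwargs: Any
    ) -> str:
        # Keyword arguments passed to .invoke() (e.g. model=, system=, options=) arrive here.
        return str(sorted(kwargs.items()))


llm = EchoKwargsLLM()
print(llm.invoke("Test prompt", model="test-model", system="Test system prompt"))
# [('model', 'test-model'), ('system', 'Test system prompt')]
```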