patch: remove usage of llm, chat model __call__ (#20788)

- `llm(prompt)` -> `llm.invoke(prompt)`
- `llm(prompt=prompt)` -> `llm.invoke(prompt)` (same with `messages=`)
- `llm(prompt, callbacks=callbacks)` -> `llm.invoke(prompt, config={"callbacks": callbacks})`
- `llm(prompt, **kwargs)` -> `llm.invoke(prompt, **kwargs)`
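
A minimal runnable sketch of the callbacks pattern (illustrative only, not from this diff; `FakeListLLM` and `StdOutCallbackHandler` from `langchain_core` stand in for a real model and handler):

```python
# Sketch of the __call__ -> invoke migration using an offline fake LLM.
from langchain_core.callbacks import StdOutCallbackHandler
from langchain_core.language_models import FakeListLLM

llm = FakeListLLM(responses=["hello"])
handler = StdOutCallbackHandler()
prompt = "Say hello"

# Before (deprecated __call__ style):
#     response = llm(prompt, callbacks=[handler])

# After: callbacks are passed through the RunnableConfig dict on invoke.
response = llm.invoke(prompt, config={"callbacks": [handler]})
assert response == "hello"
```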
Author: ccurme
Date: 2024-04-24 19:39:23 -04:00
Committed by: GitHub
Parent: 9b7fb381a4
Commit: 481d3855dc
181 changed files with 395 additions and 403 deletions

@@ -98,7 +98,7 @@ def test_old_sqlite_llm_caching() -> None:
     with Session(llm_cache.engine) as session, session.begin():
         for item in items:
             session.merge(item)
-    assert llm(prompt) == cached_response
+    assert llm.invoke(prompt) == cached_response
 async def test_chat_model_caching() -> None:
@@ -114,7 +114,7 @@ async def test_chat_model_caching() -> None:
         llm_string=llm._get_llm_string(),
         return_val=[ChatGeneration(message=cached_message)],
     )
-    result = llm(prompt)
+    result = llm.invoke(prompt)
     assert isinstance(result, AIMessage)
     assert result.content == cached_response
@@ -147,8 +147,8 @@ async def test_chat_model_caching_params() -> None:
         llm_string=llm._get_llm_string(functions=[]),
         return_val=[ChatGeneration(message=cached_message)],
     )
-    result = llm(prompt, functions=[])
-    result_no_params = llm(prompt)
+    result = llm.invoke(prompt, functions=[])
+    result_no_params = llm.invoke(prompt)
     assert isinstance(result, AIMessage)
     assert result.content == cached_response
     assert isinstance(result_no_params, AIMessage)
@@ -186,7 +186,7 @@ async def test_llm_cache_clear() -> None:
         return_val=[Generation(text=cached_response)],
     )
     llm_cache.clear()
-    response = llm(prompt)
+    response = llm.invoke(prompt)
     assert response == expected_response
     # async test