Mirror of https://github.com/hwchase17/langchain.git, synced 2025-09-07 22:11:51 +00:00
patch: remove usage of llm, chat model __call__ (#20788)
- `llm(prompt)` -> `llm.invoke(prompt)`
- `llm(prompt=prompt)` -> `llm.invoke(prompt)` (same with `messages=`)
- `llm(prompt, callbacks=callbacks)` -> `llm.invoke(prompt, config={"callbacks": callbacks})`
- `llm(prompt, **kwargs)` -> `llm.invoke(prompt, **kwargs)`
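For context, a minimal sketch of the call-site migration described above, assuming a LangChain LLM such as `LlamaCpp`. The model path and the `StdOutCallbackHandler` are placeholders, and import paths may differ across LangChain versions; this is not part of the patch itself.

```python
# Illustrative sketch of the __call__ -> invoke migration (not part of this patch).
# The model path and the handler are placeholders; import paths may vary by version.
from langchain_community.llms import LlamaCpp
from langchain_core.callbacks import StdOutCallbackHandler

llm = LlamaCpp(model_path="/path/to/model.gguf")  # placeholder path
handler = StdOutCallbackHandler()

# Before (deprecated __call__ style):
#   output = llm("Say foo:")
#   output = llm("Say foo:", callbacks=[handler])

# After:
output = llm.invoke("Say foo:")
output = llm.invoke("Say foo:", config={"callbacks": [handler]})
```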
@@ -35,7 +35,7 @@ def test_llamacpp_inference() -> None:
     """Test valid llama.cpp inference."""
     model_path = get_model()
     llm = LlamaCpp(model_path=model_path)
-    output = llm("Say foo:")
+    output = llm.invoke("Say foo:")
     assert isinstance(output, str)
     assert len(output) > 1

@@ -68,7 +68,7 @@ def test_llamacpp_streaming_callback() -> None:
         verbose=True,
         max_tokens=MAX_TOKENS,
     )
-    llm("Q: Can you count to 10? A:'1, ")
+    llm.invoke("Q: Can you count to 10? A:'1, ")
     assert callback_handler.llm_streams <= MAX_TOKENS + OFF_BY_ONE
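The streaming hunk asserts on an `llm_streams` counter maintained by the test's fake callback handler. A rough stand-in for that handler, assuming LangChain's `BaseCallbackHandler` interface (the class name and usage below are illustrative, not part of the patch):

```python
# Rough stand-in for the fake callback handler used in the streaming test;
# it only shows that each streamed token triggers one on_llm_new_token call,
# which is what the llm_streams counter tallies.
from typing import Any

from langchain_core.callbacks import BaseCallbackHandler


class TokenCountingHandler(BaseCallbackHandler):
    """Counts on_llm_new_token callbacks, like the test's llm_streams field."""

    def __init__(self) -> None:
        self.llm_streams = 0

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        self.llm_streams += 1
```

With the migrated call style this could be wired up as `llm.invoke("Q: Can you count to 10? A:'1, ", config={"callbacks": [TokenCountingHandler()]})`; the `OFF_BY_ONE` slack in the assertion presumably allows for one extra token callback from the underlying bindings.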