mirror of
https://github.com/hwchase17/langchain.git
synced 2025-06-01 12:38:45 +00:00
- `llm(prompt)` -> `llm.invoke(prompt)` - `llm(prompt=prompt` -> `llm.invoke(prompt)` (same with `messages=`) - `llm(prompt, callbacks=callbacks)` -> `llm.invoke(prompt, config={"callbacks": callbacks})` - `llm(prompt, **kwargs)` -> `llm.invoke(prompt, **kwargs)`
26 lines
784 B
Python
"""Test IPEX LLM"""
|
|
from langchain_core.outputs import LLMResult
|
|
|
|
from langchain_community.llms.ipex_llm import IpexLLM
|
|
|
|
|
|
def test_call() -> None:
    """Invoke an ipex-llm model with a prompt and check it returns a string."""
    model = IpexLLM.from_model_id(
        model_id="lmsys/vicuna-7b-v1.5",
        model_kwargs={"temperature": 0, "max_length": 16, "trust_remote_code": True},
    )
    response = model.invoke("Hello!")
    assert isinstance(response, str)
|
|
|
|
|
|
def test_generate() -> None:
    """Call generate() on an ipex-llm model and check the LLMResult shape."""
    model = IpexLLM.from_model_id(
        model_id="lmsys/vicuna-7b-v1.5",
        model_kwargs={"temperature": 0, "max_length": 16, "trust_remote_code": True},
    )
    result = model.generate(["Hello!"])
    assert isinstance(result, LLMResult)
    assert isinstance(result.generations, list)