(OpenAI) Add model_name to LLMResult.llm_output (#1713)
Given that different models have very different latencies and pricing, it is beneficial to pass along information about the model that generated the response. This allows implementing custom callback managers that track usage and cost per model. Addresses https://github.com/hwchase17/langchain/issues/1557.
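As a rough illustration of what this enables, here is a minimal sketch of a callback handler that tallies token usage and estimated cost per model by reading the model_name now included in llm_output. The PerModelUsageHandler name and its price table are hypothetical, and the exact BaseCallbackHandler surface varies across langchain versions:

# Minimal sketch (not part of this commit): a callback handler that uses the
# model_name now present in llm_output to tally usage and cost per model.
# PerModelUsageHandler and the price table are hypothetical examples.
from typing import Any, Dict

from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import LLMResult


class PerModelUsageHandler(BaseCallbackHandler):
    """Accumulate token usage and estimated cost keyed by model_name."""

    def __init__(self, price_per_1k_tokens: Dict[str, float]) -> None:
        self.price_per_1k_tokens = price_per_1k_tokens  # illustrative prices
        self.tokens_by_model: Dict[str, int] = {}

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        output = response.llm_output or {}
        model = output.get("model_name", "unknown")
        tokens = output.get("token_usage", {}).get("total_tokens", 0)
        self.tokens_by_model[model] = self.tokens_by_model.get(model, 0) + tokens

    def estimated_cost(self, model: str) -> float:
        # Cost = tokens used / 1000 * price per 1k tokens for that model.
        return (
            self.tokens_by_model.get(model, 0)
            / 1000
            * self.price_per_1k_tokens.get(model, 0.0)
        )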
@@ -35,6 +35,14 @@ def test_openai_extra_kwargs() -> None:
         OpenAI(foo=3, model_kwargs={"foo": 2})
 
 
+def test_openai_llm_output_contains_model_name() -> None:
+    """Test llm_output contains model_name."""
+    llm = OpenAI(max_tokens=10)
+    llm_result = llm.generate(["Hello, how are you?"])
+    assert llm_result.llm_output is not None
+    assert llm_result.llm_output["model_name"] == llm.model_name
+
+
 def test_openai_stop_valid() -> None:
     """Test openai stop logic on valid configuration."""
     query = "write an ordered list of five items"
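For reference, after this change the llm_output returned by the OpenAI wrapper carries both the token usage and the model name. The snippet below is an illustration only; the token counts and the default model name depend on the langchain version and the API response:

# Illustrative output shape only; exact values vary per call and version.
from langchain.llms import OpenAI

llm = OpenAI(max_tokens=10)
result = llm.generate(["Hello, how are you?"])
print(result.llm_output)
# e.g. {"token_usage": {"prompt_tokens": 6, "completion_tokens": 10,
#       "total_tokens": 16}, "model_name": "text-davinci-003"}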