mirror of
https://github.com/hwchase17/langchain.git
synced 2025-09-18 08:03:36 +00:00
Better custom model handling OpenAICallbackHandler (#4009)
Thanks @maykcaldas for flagging! I think this should resolve #3988. Let me know if you still see issues after the next release.
This commit is contained in:
46
tests/unit_tests/callbacks/test_openai_info.py
Normal file
46
tests/unit_tests/callbacks/test_openai_info.py
Normal file
@@ -0,0 +1,46 @@
|
||||
import pytest
|
||||
|
||||
from langchain.callbacks import OpenAICallbackHandler
|
||||
from langchain.llms.openai import BaseOpenAI
|
||||
from langchain.schema import LLMResult
|
||||
|
||||
|
||||
@pytest.fixture
def handler() -> OpenAICallbackHandler:
    """Provide a fresh OpenAICallbackHandler instance for each test."""
    callback_handler = OpenAICallbackHandler()
    return callback_handler
|
||||
|
||||
|
||||
def test_on_llm_end(handler: OpenAICallbackHandler) -> None:
    """A known (default) OpenAI model name records token counts and a nonzero cost."""
    usage = {
        "prompt_tokens": 2,
        "completion_tokens": 1,
        "total_tokens": 3,
    }
    llm_output = {
        "token_usage": usage,
        # Use the library's default model name so it is present in the pricing table.
        "model_name": BaseOpenAI.__fields__["model_name"].default,
    }
    response = LLMResult(generations=[], llm_output=llm_output)

    handler.on_llm_end(response)

    # One successful request was tallied, with its per-category token totals.
    assert handler.successful_requests == 1
    assert handler.total_tokens == 3
    assert handler.prompt_tokens == 2
    assert handler.completion_tokens == 1
    # The default model has a known price, so some cost accrues.
    assert handler.total_cost > 0
|
||||
|
||||
|
||||
def test_on_llm_end_custom_model(handler: OpenAICallbackHandler) -> None:
    """An unrecognized (custom) model name must not accrue any cost."""
    token_usage = {
        "prompt_tokens": 2,
        "completion_tokens": 1,
        "total_tokens": 3,
    }
    response = LLMResult(
        generations=[],
        llm_output={
            "token_usage": token_usage,
            "model_name": "foo-bar",
        },
    )

    handler.on_llm_end(response)

    # "foo-bar" has no pricing entry, so the handler tracks zero cost.
    assert handler.total_cost == 0
|
Reference in New Issue
Block a user