community[minor]: add bedrock anthropic callback for token usage counting (#19864)
**Description:** add a Bedrock Anthropic callback for token usage counting, modeled on the existing OpenAI callback.

Co-authored-by: Massimiliano Pronesti <massimiliano.pronesti@gmail.com>
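For context, the new callback follows the same context-manager pattern as `get_openai_callback`. A minimal usage sketch, assuming AWS credentials, region, and Bedrock model access are configured in the environment (the model choice here is illustrative, not part of this change):

```python
from langchain_community.callbacks.manager import get_bedrock_anthropic_callback
from langchain_community.chat_models import BedrockChat

# Assumes AWS credentials/region are configured and the model is enabled in Bedrock.
llm = BedrockChat(model_id="anthropic.claude-instant-v1")

with get_bedrock_anthropic_callback() as cb:
    llm.invoke("Tell me a joke")
    # Usage counters accumulate across every call made inside the block.
    print(cb.prompt_tokens, cb.completion_tokens, cb.total_tokens)
    print(cb.successful_requests)
```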
@@ -7,6 +7,7 @@ from langchain_core.outputs import LLMResult
from langchain_core.tracers.langchain import LangChainTracer, wait_for_all_tracers

from langchain_community.callbacks import get_openai_callback
from langchain_community.callbacks.manager import get_bedrock_anthropic_callback
from langchain_community.llms.openai import BaseOpenAI
@@ -77,6 +78,37 @@ def test_callback_manager_configure_context_vars(
    )
    mngr.on_llm_start({}, ["prompt"])[0].on_llm_end(response)

    # The callback handler has been updated
    assert cb.successful_requests == 1
    assert cb.total_tokens == 3
    assert cb.prompt_tokens == 2
    assert cb.completion_tokens == 1
    assert cb.total_cost > 0

    with get_bedrock_anthropic_callback() as cb:
        # This is a new empty callback handler
        assert cb.successful_requests == 0
        assert cb.total_tokens == 0

        # configure adds this bedrock anthropic cb,
        # but doesn't modify the group manager
        mngr = CallbackManager.configure(group_manager)
        assert mngr.handlers == [tracer, cb]
        assert group_manager.handlers == [tracer]

        response = LLMResult(
            generations=[],
            llm_output={
                "usage": {
                    "prompt_tokens": 2,
                    "completion_tokens": 1,
                    "total_tokens": 3,
                },
                "model_id": "anthropic.claude-instant-v1",
            },
        )
        mngr.on_llm_start({}, ["prompt"])[0].on_llm_end(response)

        # The callback handler has been updated
        assert cb.successful_requests == 1
        assert cb.total_tokens == 3
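The test above pins down the handler's contract: on `on_llm_end` it reads token counts from `LLMResult.llm_output["usage"]` (the key Bedrock Anthropic outputs use, where the OpenAI callback reads `"token_usage"`) and accumulates them. A minimal sketch of that accumulation pattern, using a hypothetical standalone handler rather than the actual class added by this PR:

```python
from typing import Any

from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.outputs import LLMResult


class UsageSketchHandler(BaseCallbackHandler):
    """Hypothetical sketch of the accumulation behavior the test asserts."""

    def __init__(self) -> None:
        self.successful_requests = 0
        self.prompt_tokens = 0
        self.completion_tokens = 0
        self.total_tokens = 0

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        # Pull the usage dict out of llm_output, tolerating its absence.
        usage = (response.llm_output or {}).get("usage", {})
        self.successful_requests += 1
        self.prompt_tokens += usage.get("prompt_tokens", 0)
        self.completion_tokens += usage.get("completion_tokens", 0)
        self.total_tokens += usage.get("total_tokens", 0)
```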
@@ -58,3 +58,32 @@ def test_different_models_bedrock(model_id: str) -> None:
    # should not throw an error
    model.invoke("hello there")


def test_bedrock_combine_llm_output() -> None:
    model_id = "anthropic.claude-3-haiku-20240307-v1:0"
    client = MagicMock()
    llm_outputs = [
        {
            "model_id": "anthropic.claude-3-haiku-20240307-v1:0",
            "usage": {
                "completion_tokens": 1,
                "prompt_tokens": 2,
                "total_tokens": 3,
            },
        },
        {
            "model_id": "anthropic.claude-3-haiku-20240307-v1:0",
            "usage": {
                "completion_tokens": 1,
                "prompt_tokens": 2,
                "total_tokens": 3,
            },
        },
    ]
    model = BedrockChat(model_id=model_id, client=client)
    final_output = model._combine_llm_outputs(llm_outputs)
    assert final_output["model_id"] == model_id
    assert final_output["usage"]["completion_tokens"] == 2
    assert final_output["usage"]["prompt_tokens"] == 4
    assert final_output["usage"]["total_tokens"] == 6
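These assertions fix the combining behavior: usage counters are summed key-by-key across the per-generation outputs, and the shared `model_id` is carried through. A rough equivalent of that merge, written as a standalone function rather than the actual `_combine_llm_outputs` implementation:

```python
def combine_usage_sketch(llm_outputs: list[dict]) -> dict:
    """Hypothetical stand-in for the merge the test asserts on."""
    final: dict = {"model_id": None, "usage": {}}
    for output in llm_outputs:
        # Keep the first model_id seen; all outputs share one model here.
        final["model_id"] = final["model_id"] or output.get("model_id")
        # Sum each usage counter across outputs.
        for key, value in output.get("usage", {}).items():
            final["usage"][key] = final["usage"].get(key, 0) + value
    return final
```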