From 7c0459faf2b4e056f70f8988dc54af5b10ec344a Mon Sep 17 00:00:00 2001 From: Jirka Lhotka Date: Fri, 24 May 2024 17:25:17 +0200 Subject: [PATCH] community: Update costs of openai finetuned models (#22124) - **Description:** Update costs of finetuned models and add gpt-3.5-turbo-0125. Source: https://openai.com/api/pricing/ - **Issue:** N/A - **Dependencies:** None --- .../langchain_community/callbacks/openai_info.py | 11 +++++++---- .../tests/unit_tests/callbacks/test_openai_info.py | 5 +++-- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/libs/community/langchain_community/callbacks/openai_info.py b/libs/community/langchain_community/callbacks/openai_info.py index 532f133b269..a551b723ea8 100644 --- a/libs/community/langchain_community/callbacks/openai_info.py +++ b/libs/community/langchain_community/callbacks/openai_info.py @@ -1,4 +1,5 @@ """Callback Handler that prints to std out.""" + import threading from typing import Any, Dict, List @@ -87,13 +88,15 @@ MODEL_COST_PER_1K_TOKENS = { # Fine Tuned input "babbage-002-finetuned": 0.0016, "davinci-002-finetuned": 0.012, - "gpt-3.5-turbo-0613-finetuned": 0.012, - "gpt-3.5-turbo-1106-finetuned": 0.012, + "gpt-3.5-turbo-0613-finetuned": 0.003, + "gpt-3.5-turbo-1106-finetuned": 0.003, + "gpt-3.5-turbo-0125-finetuned": 0.003, # Fine Tuned output "babbage-002-finetuned-completion": 0.0016, "davinci-002-finetuned-completion": 0.012, - "gpt-3.5-turbo-0613-finetuned-completion": 0.016, - "gpt-3.5-turbo-1106-finetuned-completion": 0.016, + "gpt-3.5-turbo-0613-finetuned-completion": 0.006, + "gpt-3.5-turbo-1106-finetuned-completion": 0.006, + "gpt-3.5-turbo-0125-finetuned-completion": 0.006, # Azure Fine Tuned input "babbage-002-azure-finetuned": 0.0004, "davinci-002-azure-finetuned": 0.002, diff --git a/libs/community/tests/unit_tests/callbacks/test_openai_info.py b/libs/community/tests/unit_tests/callbacks/test_openai_info.py index 64d0ae57d15..b139435fc71 100644 ---
a/libs/community/tests/unit_tests/callbacks/test_openai_info.py +++ b/libs/community/tests/unit_tests/callbacks/test_openai_info.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock from uuid import uuid4 +import numpy as np import pytest from langchain_core.outputs import LLMResult @@ -58,7 +59,7 @@ def test_on_llm_end_custom_model(handler: OpenAICallbackHandler) -> None: ("davinci:ft-your-org:custom-model-name-2022-02-15-04-21-04", 0.24), ("ft:babbage-002:your-org:custom-model-name:1abcdefg", 0.0032), ("ft:davinci-002:your-org:custom-model-name:1abcdefg", 0.024), - ("ft:gpt-3.5-turbo-0613:your-org:custom-model-name:1abcdefg", 0.028), + ("ft:gpt-3.5-turbo-0613:your-org:custom-model-name:1abcdefg", 0.009), ("babbage-002.ft-0123456789abcdefghijklmnopqrstuv", 0.0008), ("davinci-002.ft-0123456789abcdefghijklmnopqrstuv", 0.004), ("gpt-35-turbo-0613.ft-0123456789abcdefghijklmnopqrstuv", 0.0035), @@ -79,7 +80,7 @@ def test_on_llm_end_finetuned_model( }, ) handler.on_llm_end(response) - assert handler.total_cost == expected_cost + assert np.isclose(handler.total_cost, expected_cost) @pytest.mark.parametrize(