diff --git a/libs/langchain/langchain/callbacks/openai_info.py b/libs/langchain/langchain/callbacks/openai_info.py
index 25060a73e33..6e9c6e1c7e5 100644
--- a/libs/langchain/langchain/callbacks/openai_info.py
+++ b/libs/langchain/langchain/callbacks/openai_info.py
@@ -65,6 +65,14 @@ MODEL_COST_PER_1K_TOKENS = {
     "babbage-002-finetuned-completion": 0.0016,
     "davinci-002-finetuned-completion": 0.012,
     "gpt-3.5-turbo-0613-finetuned-completion": 0.016,
+    # Azure Fine Tuned input
+    "babbage-002-azure-finetuned": 0.0004,
+    "davinci-002-azure-finetuned": 0.002,
+    "gpt-35-turbo-0613-azure-finetuned": 0.0015,
+    # Azure Fine Tuned output
+    "babbage-002-azure-finetuned-completion": 0.0004,
+    "davinci-002-azure-finetuned-completion": 0.002,
+    "gpt-35-turbo-0613-azure-finetuned-completion": 0.002,
     # Legacy fine-tuned models
     "ada-finetuned-legacy": 0.0016,
     "babbage-finetuned-legacy": 0.0024,
@@ -90,7 +98,9 @@ def standardize_model_name(
     """
     model_name = model_name.lower()
-    if "ft-" in model_name:
+    if ".ft-" in model_name:
+        return model_name.split(".ft-")[0] + "-azure-finetuned"
+    if ":ft-" in model_name:
         return model_name.split(":")[0] + "-finetuned-legacy"
     if "ft:" in model_name:
         return model_name.split(":")[1] + "-finetuned"
diff --git a/libs/langchain/tests/unit_tests/callbacks/test_openai_info.py b/libs/langchain/tests/unit_tests/callbacks/test_openai_info.py
index 68775812421..ba019fbbb6b 100644
--- a/libs/langchain/tests/unit_tests/callbacks/test_openai_info.py
+++ b/libs/langchain/tests/unit_tests/callbacks/test_openai_info.py
@@ -59,6 +59,9 @@ def test_on_llm_end_custom_model(handler: OpenAICallbackHandler) -> None:
         "ft:babbage-002:your-org:custom-model-name:1abcdefg",
         "ft:davinci-002:your-org:custom-model-name:1abcdefg",
         "ft:gpt-3.5-turbo-0613:your-org:custom-model-name:1abcdefg",
+        "babbage-002.ft-0123456789abcdefghijklmnopqrstuv",
+        "davinci-002.ft-0123456789abcdefghijklmnopqrstuv",
+        "gpt-35-turbo-0613.ft-0123456789abcdefghijklmnopqrstuv",
     ],
 )
 def test_on_llm_end_finetuned_model(
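
For reference, below is a minimal, self-contained sketch of the name-normalization behavior the second hunk introduces. The helper name `normalize` and the sample IDs are illustrative stand-ins, not part of the library; the real logic lives in `standardize_model_name` in `openai_info.py`. The key change is that the patch tightens the old `"ft-"` check into separate `".ft-"` (Azure) and `":ft-"` (legacy OpenAI) checks, so Azure fine-tuned deployment names are no longer misclassified as legacy fine-tunes.

```python
# Sketch of the normalization rules after this patch.
# `normalize` is a hypothetical stand-in for standardize_model_name().
def normalize(model_name: str) -> str:
    model_name = model_name.lower()
    if ".ft-" in model_name:
        # Azure fine-tuned deployments, e.g. "babbage-002.ft-<job-id>"
        return model_name.split(".ft-")[0] + "-azure-finetuned"
    if ":ft-" in model_name:
        # Legacy OpenAI fine-tunes, e.g. "ada:ft-your-org-2022-02-15-..."
        return model_name.split(":")[0] + "-finetuned-legacy"
    if "ft:" in model_name:
        # Current OpenAI fine-tunes, e.g. "ft:babbage-002:your-org:name:id"
        return model_name.split(":")[1] + "-finetuned"
    return model_name


# The Azure-style IDs added to the test parametrization map onto the
# new "-azure-finetuned" keys in MODEL_COST_PER_1K_TOKENS:
assert (
    normalize("babbage-002.ft-0123456789abcdefghijklmnopqrstuv")
    == "babbage-002-azure-finetuned"
)
assert (
    normalize("gpt-35-turbo-0613.ft-0123456789abcdefghijklmnopqrstuv")
    == "gpt-35-turbo-0613-azure-finetuned"
)
# Behavior for the existing OpenAI formats is unchanged:
assert (
    normalize("ft:babbage-002:your-org:custom-model-name:1abcdefg")
    == "babbage-002-finetuned"
)
```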