community: Update costs of openai finetuned models (#22124)

- **Description:** Update costs of finetuned models and add
gpt-3.5-turbo-0125. Source: https://openai.com/api/pricing/
  - **Issue:** N/A
  - **Dependencies:** None
This commit is contained in:
Jirka Lhotka 2024-05-24 17:25:17 +02:00 committed by GitHub
parent d3db83abe3
commit 7c0459faf2
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
2 changed files with 10 additions and 6 deletions

View File

@@ -1,4 +1,5 @@
"""Callback Handler that prints to std out."""
import threading
from typing import Any, Dict, List
@@ -87,13 +88,15 @@ MODEL_COST_PER_1K_TOKENS = {
# Fine Tuned input
"babbage-002-finetuned": 0.0016,
"davinci-002-finetuned": 0.012,
"gpt-3.5-turbo-0613-finetuned": 0.012,
"gpt-3.5-turbo-1106-finetuned": 0.012,
"gpt-3.5-turbo-0613-finetuned": 0.003,
"gpt-3.5-turbo-1106-finetuned": 0.003,
"gpt-3.5-turbo-0125-finetuned": 0.003,
# Fine Tuned output
"babbage-002-finetuned-completion": 0.0016,
"davinci-002-finetuned-completion": 0.012,
"gpt-3.5-turbo-0613-finetuned-completion": 0.016,
"gpt-3.5-turbo-1106-finetuned-completion": 0.016,
"gpt-3.5-turbo-0613-finetuned-completion": 0.006,
"gpt-3.5-turbo-1106-finetuned-completion": 0.006,
"gpt-3.5-turbo-0125-finetuned-completion": 0.006,
# Azure Fine Tuned input
"babbage-002-azure-finetuned": 0.0004,
"davinci-002-azure-finetuned": 0.002,

View File

@@ -1,6 +1,7 @@
from unittest.mock import MagicMock
from uuid import uuid4
import numpy as np
import pytest
from langchain_core.outputs import LLMResult
@@ -58,7 +59,7 @@ def test_on_llm_end_custom_model(handler: OpenAICallbackHandler) -> None:
("davinci:ft-your-org:custom-model-name-2022-02-15-04-21-04", 0.24),
("ft:babbage-002:your-org:custom-model-name:1abcdefg", 0.0032),
("ft:davinci-002:your-org:custom-model-name:1abcdefg", 0.024),
("ft:gpt-3.5-turbo-0613:your-org:custom-model-name:1abcdefg", 0.028),
("ft:gpt-3.5-turbo-0613:your-org:custom-model-name:1abcdefg", 0.009),
("babbage-002.ft-0123456789abcdefghijklmnopqrstuv", 0.0008),
("davinci-002.ft-0123456789abcdefghijklmnopqrstuv", 0.004),
("gpt-35-turbo-0613.ft-0123456789abcdefghijklmnopqrstuv", 0.0035),
@@ -79,7 +80,7 @@ def test_on_llm_end_finetuned_model(
},
)
handler.on_llm_end(response)
assert handler.total_cost == expected_cost
assert np.isclose(handler.total_cost, expected_cost)
@pytest.mark.parametrize(