mirror of
https://github.com/hwchase17/langchain.git
synced 2025-06-23 15:19:33 +00:00
community: Update costs of openai finetuned models (#22124)
- **Description:** Update costs of finetuned models and add gpt-3.5-turbo-0125. Source: https://openai.com/api/pricing/ - **Issue:** N/A - **Dependencies:** None
This commit is contained in:
parent
d3db83abe3
commit
7c0459faf2
@@ -1,4 +1,5 @@
|
|||||||
"""Callback Handler that prints to std out."""
|
"""Callback Handler that prints to std out."""
|
||||||
|
|
||||||
import threading
|
import threading
|
||||||
from typing import Any, Dict, List
|
from typing import Any, Dict, List
|
||||||
|
|
||||||
@@ -87,13 +88,15 @@ MODEL_COST_PER_1K_TOKENS = {
|
|||||||
# Fine Tuned input
|
# Fine Tuned input
|
||||||
"babbage-002-finetuned": 0.0016,
|
"babbage-002-finetuned": 0.0016,
|
||||||
"davinci-002-finetuned": 0.012,
|
"davinci-002-finetuned": 0.012,
|
||||||
"gpt-3.5-turbo-0613-finetuned": 0.012,
|
"gpt-3.5-turbo-0613-finetuned": 0.003,
|
||||||
"gpt-3.5-turbo-1106-finetuned": 0.012,
|
"gpt-3.5-turbo-1106-finetuned": 0.003,
|
||||||
|
"gpt-3.5-turbo-0125-finetuned": 0.003,
|
||||||
# Fine Tuned output
|
# Fine Tuned output
|
||||||
"babbage-002-finetuned-completion": 0.0016,
|
"babbage-002-finetuned-completion": 0.0016,
|
||||||
"davinci-002-finetuned-completion": 0.012,
|
"davinci-002-finetuned-completion": 0.012,
|
||||||
"gpt-3.5-turbo-0613-finetuned-completion": 0.016,
|
"gpt-3.5-turbo-0613-finetuned-completion": 0.006,
|
||||||
"gpt-3.5-turbo-1106-finetuned-completion": 0.016,
|
"gpt-3.5-turbo-1106-finetuned-completion": 0.006,
|
||||||
|
"gpt-3.5-turbo-0125-finetuned-completion": 0.006,
|
||||||
# Azure Fine Tuned input
|
# Azure Fine Tuned input
|
||||||
"babbage-002-azure-finetuned": 0.0004,
|
"babbage-002-azure-finetuned": 0.0004,
|
||||||
"davinci-002-azure-finetuned": 0.002,
|
"davinci-002-azure-finetuned": 0.002,
|
||||||
|
@@ -1,6 +1,7 @@
|
|||||||
from unittest.mock import MagicMock
|
from unittest.mock import MagicMock
|
||||||
from uuid import uuid4
|
from uuid import uuid4
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
import pytest
|
import pytest
|
||||||
from langchain_core.outputs import LLMResult
|
from langchain_core.outputs import LLMResult
|
||||||
|
|
||||||
@@ -58,7 +59,7 @@ def test_on_llm_end_custom_model(handler: OpenAICallbackHandler) -> None:
|
|||||||
("davinci:ft-your-org:custom-model-name-2022-02-15-04-21-04", 0.24),
|
("davinci:ft-your-org:custom-model-name-2022-02-15-04-21-04", 0.24),
|
||||||
("ft:babbage-002:your-org:custom-model-name:1abcdefg", 0.0032),
|
("ft:babbage-002:your-org:custom-model-name:1abcdefg", 0.0032),
|
||||||
("ft:davinci-002:your-org:custom-model-name:1abcdefg", 0.024),
|
("ft:davinci-002:your-org:custom-model-name:1abcdefg", 0.024),
|
||||||
("ft:gpt-3.5-turbo-0613:your-org:custom-model-name:1abcdefg", 0.028),
|
("ft:gpt-3.5-turbo-0613:your-org:custom-model-name:1abcdefg", 0.009),
|
||||||
("babbage-002.ft-0123456789abcdefghijklmnopqrstuv", 0.0008),
|
("babbage-002.ft-0123456789abcdefghijklmnopqrstuv", 0.0008),
|
||||||
("davinci-002.ft-0123456789abcdefghijklmnopqrstuv", 0.004),
|
("davinci-002.ft-0123456789abcdefghijklmnopqrstuv", 0.004),
|
||||||
("gpt-35-turbo-0613.ft-0123456789abcdefghijklmnopqrstuv", 0.0035),
|
("gpt-35-turbo-0613.ft-0123456789abcdefghijklmnopqrstuv", 0.0035),
|
||||||
@@ -79,7 +80,7 @@ def test_on_llm_end_finetuned_model(
|
|||||||
},
|
},
|
||||||
)
|
)
|
||||||
handler.on_llm_end(response)
|
handler.on_llm_end(response)
|
||||||
assert handler.total_cost == expected_cost
|
assert np.isclose(handler.total_cost, expected_cost)
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.parametrize(
|
@pytest.mark.parametrize(
|
||||||
|
Loading…
Reference in New Issue
Block a user