mirror of
https://github.com/hwchase17/langchain.git
synced 2025-06-28 17:38:36 +00:00
**Description:** Add cost calculation for fine-tuned models (new and legacy). This is required after OpenAI added new models for fine-tuning and separated the input/output costs for fine-tuned models. I also updated the relevant unit tests. See https://platform.openai.com/docs/guides/fine-tuning for more information. - **Issue:** https://github.com/langchain-ai/langchain/issues/11715 (11715) - **Twitter handle:** @nirkopler
This commit is contained in:
parent
a2840a2b42
commit
d3744175bf
@ -57,10 +57,19 @@ MODEL_COST_PER_1K_TOKENS = {
|
|||||||
"text-davinci-003": 0.02,
|
"text-davinci-003": 0.02,
|
||||||
"text-davinci-002": 0.02,
|
"text-davinci-002": 0.02,
|
||||||
"code-davinci-002": 0.02,
|
"code-davinci-002": 0.02,
|
||||||
"ada-finetuned": 0.0016,
|
# Fine Tuned input
|
||||||
"babbage-finetuned": 0.0024,
|
"babbage-002-finetuned": 0.0016,
|
||||||
"curie-finetuned": 0.012,
|
"davinci-002-finetuned": 0.012,
|
||||||
"davinci-finetuned": 0.12,
|
"gpt-3.5-turbo-0613-finetuned": 0.012,
|
||||||
|
# Fine Tuned output
|
||||||
|
"babbage-002-finetuned-completion": 0.0016,
|
||||||
|
"davinci-002-finetuned-completion": 0.012,
|
||||||
|
"gpt-3.5-turbo-0613-finetuned-completion": 0.016,
|
||||||
|
# Legacy fine-tuned models
|
||||||
|
"ada-finetuned-legacy": 0.0016,
|
||||||
|
"babbage-finetuned-legacy": 0.0024,
|
||||||
|
"curie-finetuned-legacy": 0.012,
|
||||||
|
"davinci-finetuned-legacy": 0.12,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -82,11 +91,14 @@ def standardize_model_name(
|
|||||||
"""
|
"""
|
||||||
model_name = model_name.lower()
|
model_name = model_name.lower()
|
||||||
if "ft-" in model_name:
|
if "ft-" in model_name:
|
||||||
return model_name.split(":")[0] + "-finetuned"
|
return model_name.split(":")[0] + "-finetuned-legacy"
|
||||||
|
if "ft:" in model_name:
|
||||||
|
return model_name.split(":")[1] + "-finetuned"
|
||||||
elif is_completion and (
|
elif is_completion and (
|
||||||
model_name.startswith("gpt-4")
|
model_name.startswith("gpt-4")
|
||||||
or model_name.startswith("gpt-3.5")
|
or model_name.startswith("gpt-3.5")
|
||||||
or model_name.startswith("gpt-35")
|
or model_name.startswith("gpt-35")
|
||||||
|
or ("finetuned" in model_name and "legacy" not in model_name)
|
||||||
):
|
):
|
||||||
return model_name + "-completion"
|
return model_name + "-completion"
|
||||||
else:
|
else:
|
||||||
|
@ -49,7 +49,21 @@ def test_on_llm_end_custom_model(handler: OpenAICallbackHandler) -> None:
|
|||||||
assert handler.total_cost == 0
|
assert handler.total_cost == 0
|
||||||
|
|
||||||
|
|
||||||
def test_on_llm_end_finetuned_model(handler: OpenAICallbackHandler) -> None:
|
@pytest.mark.parametrize(
|
||||||
|
"model_name",
|
||||||
|
[
|
||||||
|
"ada:ft-your-org:custom-model-name-2022-02-15-04-21-04",
|
||||||
|
"babbage:ft-your-org:custom-model-name-2022-02-15-04-21-04",
|
||||||
|
"curie:ft-your-org:custom-model-name-2022-02-15-04-21-04",
|
||||||
|
"davinci:ft-your-org:custom-model-name-2022-02-15-04-21-04",
|
||||||
|
"ft:babbage-002:your-org:custom-model-name:1abcdefg",
|
||||||
|
"ft:davinci-002:your-org:custom-model-name:1abcdefg",
|
||||||
|
"ft:gpt-3.5-turbo-0613:your-org:custom-model-name:1abcdefg",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
def test_on_llm_end_finetuned_model(
|
||||||
|
handler: OpenAICallbackHandler, model_name: str
|
||||||
|
) -> None:
|
||||||
response = LLMResult(
|
response = LLMResult(
|
||||||
generations=[],
|
generations=[],
|
||||||
llm_output={
|
llm_output={
|
||||||
@ -58,7 +72,7 @@ def test_on_llm_end_finetuned_model(handler: OpenAICallbackHandler) -> None:
|
|||||||
"completion_tokens": 1,
|
"completion_tokens": 1,
|
||||||
"total_tokens": 3,
|
"total_tokens": 3,
|
||||||
},
|
},
|
||||||
"model_name": "ada:ft-your-org:custom-model-name-2022-02-15-04-21-04",
|
"model_name": model_name,
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
handler.on_llm_end(response)
|
handler.on_llm_end(response)
|
||||||
|
Loading…
Reference in New Issue
Block a user