core, partners: implement standard tracing params for LLMs (#25410)

ccurme
2024-08-16 13:18:09 -04:00
committed by GitHub
parent 9f0c76bf89
commit b83f1eb0d5
17 changed files with 298 additions and 36 deletions
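The diffs below add unit tests asserting that each LLM integration reports a common set of `ls_*` tracing params from `_get_ls_params()`. As a rough sketch of the shape of that contract (the `LangSmithParams` TypedDict and `FakeProviderLLM` class here are illustrative stand-ins, not code from this commit), a provider could surface the standard fields like so:

from typing import TypedDict


class LangSmithParams(TypedDict, total=False):
    """Standard tracing params shared across LLM integrations (illustrative)."""

    ls_provider: str
    ls_model_type: str
    ls_model_name: str
    ls_temperature: float
    ls_max_tokens: int


class FakeProviderLLM:
    """Hypothetical provider integration; real integrations live in langchain packages."""

    model_name: str = "foo-instruct"
    temperature: float = 0.7
    max_tokens: int = 256

    def _get_ls_params(self, **kwargs: object) -> LangSmithParams:
        # Report the same standard keys that the tests below assert on,
        # so traces are comparable across providers.
        return LangSmithParams(
            ls_provider="fake-provider",
            ls_model_type="llm",
            ls_model_name=self.model_name,
            ls_temperature=self.temperature,
            ls_max_tokens=self.max_tokens,
        )


if __name__ == "__main__":
    print(FakeProviderLLM()._get_ls_params())

Running the sketch prints a dict with the same five keys the tests below assert on.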


@@ -0,0 +1,23 @@
from typing import Any

from langchain_openai import AzureOpenAI


def test_azure_model_param(monkeypatch: Any) -> None:
    monkeypatch.delenv("OPENAI_API_BASE", raising=False)
    llm = AzureOpenAI(
        openai_api_key="secret-api-key",  # type: ignore[call-arg]
        azure_endpoint="endpoint",
        api_version="version",
        azure_deployment="gpt-35-turbo-instruct",
    )

    # Test standard tracing params
    ls_params = llm._get_ls_params()
    assert ls_params == {
        "ls_provider": "azure",
        "ls_model_type": "llm",
        "ls_model_name": "gpt-35-turbo-instruct",
        "ls_temperature": 0.7,
        "ls_max_tokens": 256,
    }
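
Since the Azure test above and the OpenAI test below assert the same key set, a shared check could be factored out. A minimal sketch, assuming a hypothetical STANDARD_KEYS constant and assert_standard_tracing_params helper (neither is part of this commit):

from typing import Any

# Keys mirrored from the assertions in the test hunks above and below.
STANDARD_KEYS = {
    "ls_provider",
    "ls_model_type",
    "ls_model_name",
    "ls_temperature",
    "ls_max_tokens",
}


def assert_standard_tracing_params(llm: Any) -> None:
    """Illustrative reusable check: the LLM reports every standard tracing key."""
    ls_params = llm._get_ls_params()
    missing = STANDARD_KEYS - set(ls_params)
    assert not missing, f"missing standard tracing params: {missing}"
    assert ls_params["ls_model_type"] == "llm"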


@@ -14,6 +14,16 @@ def test_openai_model_param() -> None:
    llm = OpenAI(model_name="foo")  # type: ignore[call-arg]
    assert llm.model_name == "foo"

    # Test standard tracing params
    ls_params = llm._get_ls_params()
    assert ls_params == {
        "ls_provider": "openai",
        "ls_model_type": "llm",
        "ls_model_name": "foo",
        "ls_temperature": 0.7,
        "ls_max_tokens": 256,
    }


def test_openai_model_kwargs() -> None:
    llm = OpenAI(model_kwargs={"foo": "bar"})