Mirror of https://github.com/hwchase17/langchain.git
Synced 2025-08-18 17:11:25 +00:00
xai[patch]: support dedicated structured output feature (#29853)
https://docs.x.ai/docs/guides/structured-outputs

Interface appears identical to OpenAI's.

```python
from langchain.chat_models import init_chat_model
from pydantic import BaseModel


class Joke(BaseModel):
    setup: str
    punchline: str


llm = init_chat_model("xai:grok-2").with_structured_output(
    Joke, method="json_schema"
)
llm.invoke("Tell me a joke about cats.")
```
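For illustration, a minimal sketch of the same call made through the provider class directly (`langchain_xai.ChatXAI`); the model name and environment setup here are assumptions and are not part of the change itself:

```python
from langchain_xai import ChatXAI
from pydantic import BaseModel


class Joke(BaseModel):
    setup: str
    punchline: str


# Assumes XAI_API_KEY is set in the environment; "grok-2" mirrors the example above.
llm = ChatXAI(model="grok-2").with_structured_output(Joke, method="json_schema")
result = llm.invoke("Tell me a joke about cats.")  # expected to be a Joke instance
```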
This commit is contained in:
parent 9d6fcd0bfb
commit 83dcef234d
@@ -348,9 +348,14 @@ class ChatXAI(BaseChatOpenAI):  # type: ignore[override]
             self.client = openai.OpenAI(
                 **client_params, **sync_specific
             ).chat.completions
+            self.root_client = openai.OpenAI(**client_params, **sync_specific)
         if not (self.async_client or None):
             async_specific: dict = {"http_client": self.http_async_client}
             self.async_client = openai.AsyncOpenAI(
                 **client_params, **async_specific
             ).chat.completions
+            self.root_async_client = openai.AsyncOpenAI(
+                **client_params,
+                **async_specific,
+            )
         return self
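For context, a minimal sketch of why the patch keeps a root client alongside the `.chat.completions` handle: the `json_schema` structured-output path typically goes through the OpenAI SDK's `beta.chat.completions.parse`, which is exposed on the full client object rather than on the `.chat.completions` sub-client. The xAI base URL and model name below are assumptions, not taken from the diff.

```python
import openai
from pydantic import BaseModel


class Joke(BaseModel):
    setup: str
    punchline: str


# Assumed xAI endpoint; the diff itself only shows the client wiring.
root_client = openai.OpenAI(api_key="...", base_url="https://api.x.ai/v1")
chat_client = root_client.chat.completions  # plain create() calls go through here

# Structured output needs the root client: parse() lives on
# client.beta.chat.completions, not on the .chat.completions handle above.
completion = root_client.beta.chat.completions.parse(
    model="grok-2",
    messages=[{"role": "user", "content": "Tell me a joke about cats."}],
    response_format=Joke,
)
joke = completion.choices[0].message.parsed  # a Joke instance
```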