docs: Update chat model providers include package information (#20336)

Include package information

parent 56fe4ab382
commit de938a4451
@@ -20,12 +20,13 @@ CHAT_MODEL_FEAT_TABLE_CORRECTION = {
     "ChatMLflowAIGateway": {"_agenerate": False},
     "PromptLayerChatOpenAI": {"_stream": False, "_astream": False},
     "ChatKonko": {"_astream": False, "_agenerate": False},
-    "ChatOpenAI": {"tool_calling": True},
-    "ChatAnthropic": {"tool_calling": True},
-    "ChatMistralAI": {"tool_calling": True},
-    "ChatVertexAI": {"tool_calling": True},
-    "ChatFireworks": {"tool_calling": True},
-    "ChatGroq": {"tool_calling": True},
+    "ChatAnthropic": {"tool_calling": True, "package": "langchain-anthropic"},
+    "ChatMistralAI": {"tool_calling": True, "package": "langchain-mistralai"},
+    "ChatFireworks": {"tool_calling": True, "package": "langchain-fireworks"},
+    "ChatOpenAI": {"tool_calling": True, "package": "langchain-openai"},
+    "ChatVertexAI": {"tool_calling": True, "package": "langchain-google-vertexai"},
+    "ChatGroq": {"tool_calling": "partial", "package": "langchain-groq"},
+    "ChatCohere": {"tool_calling": "partial", "package": "langchain-cohere"},
 }
 
 
@@ -64,7 +65,8 @@ All ChatModels implement the Runnable interface, which comes with default implem
 - *Batch* support defaults to calling the underlying ChatModel in parallel for each input by making use of a thread pool executor (in the sync batch case) or `asyncio.gather` (in the async batch case). The concurrency can be controlled with the `max_concurrency` key in `RunnableConfig`.
 
 Each ChatModel integration can optionally provide native implementations to truly enable async or streaming.
 The table shows, for each integration, which features have been implemented with native support.
+Yellow circles (🟡) indicate partial support - for example, if the model supports tool calling but not tool messages for agents.
 
 {table}
 
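As an aside, the batch behavior described in that paragraph can be exercised directly. A minimal sketch, assuming the langchain-openai package is installed (the model class and inputs are illustrative; any chat model Runnable behaves the same way):

from langchain_openai import ChatOpenAI  # illustrative integration choice

model = ChatOpenAI()

# batch() fans the inputs out in parallel; the "max_concurrency" key in
# the RunnableConfig dict caps how many calls run at once.
results = model.batch(
    ["Tell me a joke", "Name three prime numbers"],
    config={"max_concurrency": 2},
)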
@@ -144,7 +146,14 @@ def get_chat_model_table() -> str:
         for k, v in {**feat_table, **CHAT_MODEL_FEAT_TABLE_CORRECTION}.items()
         if k not in CHAT_MODEL_IGNORE
     }
-    header = ["model", "_agenerate", "_stream", "_astream", "tool_calling"]
+    header = [
+        "model",
+        "_agenerate",
+        "_stream",
+        "_astream",
+        "tool_calling",
+        "package",
+    ]
     title = [
         "Model",
         "Invoke",
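The `{**feat_table, **CHAT_MODEL_FEAT_TABLE_CORRECTION}` merge in this hunk relies on the right-hand dict winning on key collisions, so the hand-written corrections override whatever was scraped. A quick sketch with illustrative values:

feat_table = {"ChatGroq": {"tool_calling": True}}
correction = {"ChatGroq": {"tool_calling": "partial", "package": "langchain-groq"}}

# Later unpackings win, so the correction entry replaces the scraped one.
merged = {**feat_table, **correction}
assert merged["ChatGroq"] == {"tool_calling": "partial", "package": "langchain-groq"}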
@@ -152,10 +161,25 @@ def get_chat_model_table() -> str:
         "Stream",
         "Async stream",
         "Tool calling",
+        "Python Package",
     ]
     rows = [title, [":-"] + [":-:"] * (len(title) - 1)]
     for llm, feats in sorted(final_feats.items()):
-        rows += [[llm, "✅"] + ["✅" if feats.get(h) else "❌" for h in header[1:]]]
+        # Fields are in the order of the header
+        row = [llm, "✅"]
+        for h in header[1:]:
+            value = feats.get(h)
+            index = header.index(h)
+            if h == "package":
+                row.append(value or "langchain-community")
+            else:
+                if value == "partial":
+                    row.append("🟡")
+                elif value is True:
+                    row.append("✅")
+                else:
+                    row.append("❌")
+        rows.append(row)
     return "\n".join(["|".join(row) for row in rows])
 
 
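To see what the new loop emits, here is a self-contained sketch of the same row logic with one illustrative feature entry:

header = ["model", "_agenerate", "_stream", "_astream", "tool_calling", "package"]
feats = {"tool_calling": "partial", "package": "langchain-groq"}

row = ["ChatGroq", "✅"]  # every listed model supports invoke
for h in header[1:]:
    value = feats.get(h)
    if h == "package":
        # Missing package info falls back to the community package.
        row.append(value or "langchain-community")
    elif value == "partial":
        row.append("🟡")
    elif value is True:
        row.append("✅")
    else:
        row.append("❌")

print("|".join(row))  # ChatGroq|✅|❌|❌|❌|🟡|langchain-groq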