In collaboration with @rlouf I built an [outlines](https://dottxt-ai.github.io/outlines/latest/) integration for langchain! I think this is really useful for doing any type of structured output locally. [Dottxt](https://dottxt.co) has spent a lot of effort optimizing this process at a lower level ([outlines-core](https://pypi.org/project/outlines-core/0.1.14/), written in Rust), so I think this is a better alternative to all current approaches in langchain for structured output. It also implements the `.with_structured_output` method, so it should be a drop-in replacement for a lot of applications (see the sketch after this list).

The integration includes:

- **Outlines LLM class**
- **ChatOutlines class**
- **Tutorial Cookbooks**
- **Documentation Page**
- **Validation and error messages**
- **Exposes Outlines Structured output features**
- **Support for multiple backends**
- **Integration and Unit Tests**

Dependencies: `outlines` + additional dependencies (depending on the backend used).

I am not sure whether the unit tests comply with all requirements; if not, I suggest just removing them, since I don't see a useful way to do it differently.

### Quick overview:

Chat Models:

<img width="698" alt="image" src="https://github.com/user-attachments/assets/05a499b9-858c-4397-a9ff-165c2b3e7acc">

Structured Output:

<img width="955" alt="image" src="https://github.com/user-attachments/assets/b9fcac11-d3e5-4698-b1ae-8c4cb3d54c45">

---------

Co-authored-by: Vadym Barda <vadym@langchain.dev>
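As a rough illustration of the interface, here is a minimal sketch assuming the default transformers backend; the model name and the `Person` schema are illustrative examples, not taken from this PR:

```python
# Minimal usage sketch (assumption: default transformers backend; the
# model name and Person schema are illustrative, not part of this PR).
from pydantic import BaseModel

from langchain_community.chat_models import ChatOutlines


class Person(BaseModel):
    name: str
    age: int


chat = ChatOutlines(model="microsoft/Phi-3-mini-4k-instruct")

# Plain chat usage
print(chat.invoke("Tell me a short joke.").content)

# Structured output: generation is constrained so the result
# always parses into a Person instance.
structured_chat = chat.with_structured_output(Person)
print(structured_chat.invoke("Describe a fictional person."))
```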
116 lines · 2.2 KiB · Python
from langchain_core.language_models.llms import BaseLLM

from langchain_community import llms

EXPECT_ALL = [
    "AI21",
    "AlephAlpha",
    "AmazonAPIGateway",
    "Anthropic",
    "Anyscale",
    "Aphrodite",
    "Arcee",
    "Aviary",
    "AzureMLOnlineEndpoint",
    "AzureOpenAI",
    "BaichuanLLM",
    "Banana",
    "Baseten",
    "Beam",
    "Bedrock",
    "CTransformers",
    "CTranslate2",
    "CerebriumAI",
    "ChatGLM",
    "Clarifai",
    "Cohere",
    "Databricks",
    "DeepInfra",
    "DeepSparse",
    "EdenAI",
    "FakeListLLM",
    "Fireworks",
    "ForefrontAI",
    "Friendli",
    "GigaChat",
    "GPT4All",
    "GooglePalm",
    "GooseAI",
    "GradientLLM",
    "HuggingFaceEndpoint",
    "HuggingFaceHub",
    "HuggingFacePipeline",
    "HuggingFaceTextGenInference",
    "HumanInputLLM",
    "IpexLLM",
    "KoboldApiLLM",
    "Konko",
    "LlamaCpp",
    "Llamafile",
    "TextGen",
    "ManifestWrapper",
    "Minimax",
    "Mlflow",
    "MlflowAIGateway",
    "MLXPipeline",
    "Modal",
    "MosaicML",
    "Nebula",
    "OCIModelDeploymentLLM",
    "OCIModelDeploymentTGI",
    "OCIModelDeploymentVLLM",
    "OCIGenAI",
    "NIBittensorLLM",
    "NLPCloud",
    "Ollama",
    "OpenAI",
    "OpenAIChat",
    "OpenLLM",
    "OpenLM",
    "Outlines",
    "PaiEasEndpoint",
    "Petals",
    "PipelineAI",
    "Predibase",
    "PredictionGuard",
    "PromptLayerOpenAI",
    "PromptLayerOpenAIChat",
    "OpaquePrompts",
    "RWKV",
    "Replicate",
    "SagemakerEndpoint",
    "SambaNovaCloud",
    "SambaStudio",
    "SelfHostedHuggingFaceLLM",
    "SelfHostedPipeline",
    "StochasticAI",
    "TitanTakeoff",
    "TitanTakeoffPro",
    "Together",
    "Tongyi",
    "VertexAI",
    "VertexAIModelGarden",
    "VLLM",
    "VLLMOpenAI",
    "WeightOnlyQuantPipeline",
    "Writer",
    "OctoAIEndpoint",
    "Xinference",
    "JavelinAIGateway",
    "QianfanLLMEndpoint",
    "YandexGPT",
    "Yuan2",
    "YiLLM",
    "You",
    "VolcEngineMaasLLM",
    "WatsonxLLM",
    "SparkLLM",
]


def test_all_imports() -> None:
    """Simple test to make sure all things can be imported."""
    for cls in llms.__all__:
        assert issubclass(getattr(llms, cls), BaseLLM)
    assert set(llms.__all__) == set(EXPECT_ALL)
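The test enforces two invariants: every name exported from `langchain_community.llms` must resolve to a `BaseLLM` subclass, and the export set must exactly match `EXPECT_ALL`, which is why this PR adds `"Outlines"` to the list above. The same check can be run interactively; a minimal sketch, assuming `langchain_community` is importable in the current environment:

```python
# Interactive version of the invariant this test enforces (assumes
# langchain_community is installed in the current environment).
from langchain_core.language_models.llms import BaseLLM

from langchain_community import llms

# Every public export should resolve to a BaseLLM subclass...
bad = [n for n in llms.__all__ if not issubclass(getattr(llms, n), BaseLLM)]
print("non-BaseLLM exports:", bad)

# ...and the new Outlines class should be among the exports.
print("Outlines exported:", "Outlines" in llms.__all__)
```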