mirror of
https://github.com/hwchase17/langchain.git
synced 2026-02-21 14:43:07 +00:00
Merge branch 'master' into nc/20dec/runnable-chain
@@ -13,8 +13,8 @@ build:
- python -mvirtualenv $READTHEDOCS_VIRTUALENV_PATH
- python -m pip install --upgrade --no-cache-dir pip setuptools
- python -m pip install --upgrade --no-cache-dir sphinx readthedocs-sphinx-ext
- python -m pip install --exists-action=w --no-cache-dir -r docs/api_reference/requirements.txt
- python -m pip install ./libs/partners/*
- python -m pip install --exists-action=w --no-cache-dir -r docs/api_reference/requirements.txt
- python docs/api_reference/create_api_rst.py
- cat docs/api_reference/conf.py
- python -m sphinx -T -E -b html -d _build/doctrees -c docs/api_reference docs/api_reference $READTHEDOCS_OUTPUT/html -j auto
@@ -14,7 +14,7 @@ There are many ways to contribute to LangChain. Here are some common ways people
- [**Documentation**](./documentation): Help improve our docs, including this one!
- [**Code**](./code): Help us write code, fix bugs, or improve our infrastructure.
- [**Integrations**](./integration): Help us integrate with your favorite vendors and tools.
- [**Integrations**](./integrations): Help us integrate with your favorite vendors and tools.

### 🚩GitHub Issues
456
docs/docs/integrations/chat/huggingface.ipynb
Normal file
@@ -0,0 +1,456 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Hugging Face Chat Wrapper\n",
|
||||
"\n",
|
||||
"This notebook shows how to get started using Hugging Face LLM's as chat models.\n",
|
||||
"\n",
|
||||
"In particular, we will:\n",
|
||||
"1. Utilize the [HuggingFaceTextGenInference](https://github.com/langchain-ai/langchain/blob/master/libs/langchain/langchain/llms/huggingface_text_gen_inference.py), [HuggingFaceEndpoint](https://github.com/langchain-ai/langchain/blob/master/libs/langchain/langchain/llms/huggingface_endpoint.py), or [HuggingFaceHub](https://github.com/langchain-ai/langchain/blob/master/libs/langchain/langchain/llms/huggingface_hub.py) integrations to instantiate an `LLM`.\n",
|
||||
"2. Utilize the `ChatHuggingFace` class to enable any of these LLMs to interface with LangChain's [Chat Messages](https://python.langchain.com/docs/modules/model_io/chat/#messages) abstraction.\n",
|
||||
"3. Demonstrate how to use an open-source LLM to power an `ChatAgent` pipeline\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"> Note: To get started, you'll need to have a [Hugging Face Access Token](https://huggingface.co/docs/hub/security-tokens) saved as an environment variable: `HUGGINGFACEHUB_API_TOKEN`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[33mWARNING: You are using pip version 22.0.4; however, version 23.3.1 is available.\n",
|
||||
"You should consider upgrading via the '/Users/jacoblee/langchain/langchain/libs/langchain/.venv/bin/python -m pip install --upgrade pip' command.\u001b[0m\u001b[33m\n",
|
||||
"\u001b[0mNote: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install -q text-generation transformers google-search-results numexpr langchainhub sentencepiece jinja2"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 1. Instantiate an LLM\n",
|
||||
"\n",
|
||||
"There are three LLM options to choose from."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### `HuggingFaceTextGenInference`"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"/Users/jacoblee/langchain/langchain/libs/langchain/.venv/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
|
||||
" from .autonotebook import tqdm as notebook_tqdm\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"from langchain_community.llms import HuggingFaceTextGenInference\n",
|
||||
"\n",
|
||||
"ENDPOINT_URL = \"<YOUR_ENDPOINT_URL_HERE>\"\n",
|
||||
"HF_TOKEN = os.getenv(\"HUGGINGFACEHUB_API_TOKEN\")\n",
|
||||
"\n",
|
||||
"llm = HuggingFaceTextGenInference(\n",
|
||||
" inference_server_url=ENDPOINT_URL,\n",
|
||||
" max_new_tokens=512,\n",
|
||||
" top_k=50,\n",
|
||||
" temperature=0.1,\n",
|
||||
" repetition_penalty=1.03,\n",
|
||||
" server_kwargs={\n",
|
||||
" \"headers\": {\n",
|
||||
" \"Authorization\": f\"Bearer {HF_TOKEN}\",\n",
|
||||
" \"Content-Type\": \"application/json\",\n",
|
||||
" }\n",
|
||||
" },\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### `HuggingFaceEndpoint`"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.llms import HuggingFaceEndpoint\n",
|
||||
"\n",
|
||||
"ENDPOINT_URL = \"<YOUR_ENDPOINT_URL_HERE>\"\n",
|
||||
"llm = HuggingFaceEndpoint(\n",
|
||||
" endpoint_url=ENDPOINT_URL,\n",
|
||||
" task=\"text-generation\",\n",
|
||||
" model_kwargs={\n",
|
||||
" \"max_new_tokens\": 512,\n",
|
||||
" \"top_k\": 50,\n",
|
||||
" \"temperature\": 0.1,\n",
|
||||
" \"repetition_penalty\": 1.03,\n",
|
||||
" },\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### `HuggingFaceHub`"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"/Users/jacoblee/langchain/langchain/libs/langchain/.venv/lib/python3.10/site-packages/huggingface_hub/utils/_deprecation.py:127: FutureWarning: '__init__' (from 'huggingface_hub.inference_api') is deprecated and will be removed from version '1.0'. `InferenceApi` client is deprecated in favor of the more feature-complete `InferenceClient`. Check out this guide to learn how to convert your script to use it: https://huggingface.co/docs/huggingface_hub/guides/inference#legacy-inferenceapi-client.\n",
|
||||
" warnings.warn(warning_message, FutureWarning)\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.llms import HuggingFaceHub\n",
|
||||
"\n",
|
||||
"llm = HuggingFaceHub(\n",
|
||||
" repo_id=\"HuggingFaceH4/zephyr-7b-beta\",\n",
|
||||
" task=\"text-generation\",\n",
|
||||
" model_kwargs={\n",
|
||||
" \"max_new_tokens\": 512,\n",
|
||||
" \"top_k\": 30,\n",
|
||||
" \"temperature\": 0.1,\n",
|
||||
" \"repetition_penalty\": 1.03,\n",
|
||||
" },\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 2. Instantiate the `ChatHuggingFace` to apply chat templates"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Instantiate the chat model and some messages to pass."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"WARNING! repo_id is not default parameter.\n",
|
||||
" repo_id was transferred to model_kwargs.\n",
|
||||
" Please confirm that repo_id is what you intended.\n",
|
||||
"WARNING! task is not default parameter.\n",
|
||||
" task was transferred to model_kwargs.\n",
|
||||
" Please confirm that task is what you intended.\n",
|
||||
"WARNING! huggingfacehub_api_token is not default parameter.\n",
|
||||
" huggingfacehub_api_token was transferred to model_kwargs.\n",
|
||||
" Please confirm that huggingfacehub_api_token is what you intended.\n",
|
||||
"None of PyTorch, TensorFlow >= 2.0, or Flax have been found. Models won't be available and only tokenizers, configuration and file/data utilities can be used.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.schema import (\n",
|
||||
" HumanMessage,\n",
|
||||
" SystemMessage,\n",
|
||||
")\n",
|
||||
"from langchain_community.chat_models.huggingface import ChatHuggingFace\n",
|
||||
"\n",
|
||||
"messages = [\n",
|
||||
" SystemMessage(content=\"You're a helpful assistant\"),\n",
|
||||
" HumanMessage(\n",
|
||||
" content=\"What happens when an unstoppable force meets an immovable object?\"\n",
|
||||
" ),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"chat_model = ChatHuggingFace(llm=llm)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Inspect which model and corresponding chat template is being used."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'HuggingFaceH4/zephyr-7b-beta'"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chat_model.model_id"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Inspect how the chat messages are formatted for the LLM call."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"<|system|>\\nYou're a helpful assistant</s>\\n<|user|>\\nWhat happens when an unstoppable force meets an immovable object?</s>\\n<|assistant|>\\n\""
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chat_model._to_chat_prompt(messages)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Call the model."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"According to a popular philosophical paradox, when an unstoppable force meets an immovable object, it is impossible to determine which one will prevail because both are defined as being completely unyielding and unmovable. The paradox suggests that the very concepts of \"unstoppable force\" and \"immovable object\" are inherently contradictory, and therefore, it is illogical to imagine a scenario where they would meet and interact. However, in practical terms, it is highly unlikely for such a scenario to occur in the real world, as the concepts of \"unstoppable force\" and \"immovable object\" are often used metaphorically to describe hypothetical situations or abstract concepts, rather than physical objects or forces.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"res = chat_model.invoke(messages)\n",
|
||||
"print(res.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 3. Take it for a spin as an agent!\n",
|
||||
"\n",
|
||||
"Here we'll test out `Zephyr-7B-beta` as a zero-shot ReAct Agent. The example below is taken from [here](https://python.langchain.com/docs/modules/agents/agent_types/react#using-chat-models).\n",
|
||||
"\n",
|
||||
"> Note: To run this section, you'll need to have a [SerpAPI Token](https://serpapi.com/) saved as an environment variable: `SERPAPI_API_KEY`"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain import hub\n",
|
||||
"from langchain.agents import AgentExecutor, load_tools\n",
|
||||
"from langchain.agents.format_scratchpad import format_log_to_str\n",
|
||||
"from langchain.agents.output_parsers import (\n",
|
||||
" ReActJsonSingleInputOutputParser,\n",
|
||||
")\n",
|
||||
"from langchain.tools.render import render_text_description\n",
|
||||
"from langchain.utilities import SerpAPIWrapper"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Configure the agent with a `react-json` style prompt and access to a search engine and calculator."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# setup tools\n",
|
||||
"tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm)\n",
|
||||
"\n",
|
||||
"# setup ReAct style prompt\n",
|
||||
"prompt = hub.pull(\"hwchase17/react-json\")\n",
|
||||
"prompt = prompt.partial(\n",
|
||||
" tools=render_text_description(tools),\n",
|
||||
" tool_names=\", \".join([t.name for t in tools]),\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# define the agent\n",
|
||||
"chat_model_with_stop = chat_model.bind(stop=[\"\\nObservation\"])\n",
|
||||
"agent = (\n",
|
||||
" {\n",
|
||||
" \"input\": lambda x: x[\"input\"],\n",
|
||||
" \"agent_scratchpad\": lambda x: format_log_to_str(x[\"intermediate_steps\"]),\n",
|
||||
" }\n",
|
||||
" | prompt\n",
|
||||
" | chat_model_with_stop\n",
|
||||
" | ReActJsonSingleInputOutputParser()\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# instantiate AgentExecutor\n",
|
||||
"agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3mQuestion: Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\n",
|
||||
"\n",
|
||||
"Thought: I need to use the Search tool to find out who Leo DiCaprio's current girlfriend is. Then, I can use the Calculator tool to raise her current age to the power of 0.43.\n",
|
||||
"\n",
|
||||
"Action:\n",
|
||||
"```\n",
|
||||
"{\n",
|
||||
" \"action\": \"Search\",\n",
|
||||
" \"action_input\": \"leo dicaprio girlfriend\"\n",
|
||||
"}\n",
|
||||
"```\n",
|
||||
"\u001b[0m\u001b[36;1m\u001b[1;3mLeonardo DiCaprio may have found The One in Vittoria Ceretti. “They are in love,” a source exclusively reveals in the latest issue of Us Weekly. “Leo was clearly very proud to be showing Vittoria off and letting everyone see how happy they are together.”\u001b[0m\u001b[32;1m\u001b[1;3mNow that we know Leo DiCaprio's current girlfriend is Vittoria Ceretti, let's find out her current age.\n",
|
||||
"\n",
|
||||
"Action:\n",
|
||||
"```\n",
|
||||
"{\n",
|
||||
" \"action\": \"Search\",\n",
|
||||
" \"action_input\": \"vittoria ceretti age\"\n",
|
||||
"}\n",
|
||||
"```\n",
|
||||
"\u001b[0m\u001b[36;1m\u001b[1;3m25 years\u001b[0m\u001b[32;1m\u001b[1;3mNow that we know Vittoria Ceretti's current age is 25, let's use the Calculator tool to raise it to the power of 0.43.\n",
|
||||
"\n",
|
||||
"Action:\n",
|
||||
"```\n",
|
||||
"{\n",
|
||||
" \"action\": \"Calculator\",\n",
|
||||
" \"action_input\": \"25^0.43\"\n",
|
||||
"}\n",
|
||||
"```\n",
|
||||
"\u001b[0m\u001b[33;1m\u001b[1;3mAnswer: 3.991298452658078\u001b[0m\u001b[32;1m\u001b[1;3mFinal Answer: Vittoria Ceretti, Leo DiCaprio's current girlfriend, when raised to the power of 0.43 is approximately 4.0 rounded to two decimal places. Her current age is 25 years old.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'input': \"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\",\n",
|
||||
" 'output': \"Vittoria Ceretti, Leo DiCaprio's current girlfriend, when raised to the power of 0.43 is approximately 4.0 rounded to two decimal places. Her current age is 25 years old.\"}"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent_executor.invoke(\n",
|
||||
" {\n",
|
||||
" \"input\": \"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\"\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Wahoo! Our open-source 7b parameter Zephyr model was able to:\n",
|
||||
"\n",
|
||||
"1. Plan out a series of actions: `I need to use the Search tool to find out who Leo DiCaprio's current girlfriend is. Then, I can use the Calculator tool to raise her current age to the power of 0.43.`\n",
|
||||
"2. Then execute a search using the SerpAPI tool to find who Leo DiCaprio's current girlfriend is\n",
|
||||
"3. Execute another search to find her age\n",
|
||||
"4. And finally use a calculator tool to calculate her age raised to the power of 0.43\n",
|
||||
"\n",
|
||||
"It's exciting to see how far open-source LLM's can go as general purpose reasoning agents. Give it a try yourself!"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
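For quick reference, here is a condensed sketch of the workflow the notebook above walks through, using the `HuggingFaceHub` variant (it assumes `HUGGINGFACEHUB_API_TOKEN` is set in the environment):

```python
# Condensed recap of the notebook: wrap a Hugging Face LLM as a LangChain chat model.
from langchain.schema import HumanMessage, SystemMessage
from langchain_community.chat_models.huggingface import ChatHuggingFace
from langchain_community.llms import HuggingFaceHub

llm = HuggingFaceHub(
    repo_id="HuggingFaceH4/zephyr-7b-beta",
    task="text-generation",
    model_kwargs={"max_new_tokens": 512, "top_k": 30, "temperature": 0.1},
)

# ChatHuggingFace resolves the model_id and downloads the matching tokenizer,
# so it can apply the model's chat template to LangChain messages.
chat_model = ChatHuggingFace(llm=llm)

messages = [
    SystemMessage(content="You're a helpful assistant"),
    HumanMessage(
        content="What happens when an unstoppable force meets an immovable object?"
    ),
]
print(chat_model.invoke(messages).content)
```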
|
||||
@@ -32,6 +32,7 @@ from langchain_community.chat_models.fireworks import ChatFireworks
|
||||
from langchain_community.chat_models.gigachat import GigaChat
|
||||
from langchain_community.chat_models.google_palm import ChatGooglePalm
|
||||
from langchain_community.chat_models.gpt_router import GPTRouter
|
||||
from langchain_community.chat_models.huggingface import ChatHuggingFace
|
||||
from langchain_community.chat_models.human import HumanInputChatModel
|
||||
from langchain_community.chat_models.hunyuan import ChatHunyuan
|
||||
from langchain_community.chat_models.javelin_ai_gateway import ChatJavelinAIGateway
|
||||
@@ -65,6 +66,7 @@ __all__ = [
|
||||
"ChatOllama",
|
||||
"ChatVertexAI",
|
||||
"JinaChat",
|
||||
"ChatHuggingFace",
|
||||
"HumanInputChatModel",
|
||||
"MiniMaxChat",
|
||||
"ChatAnyscale",
|
||||
|
||||
166
libs/community/langchain_community/chat_models/huggingface.py
Normal file
@@ -0,0 +1,166 @@
|
||||
"""Hugging Face Chat Wrapper."""
|
||||
from typing import Any, List, Optional, Union
|
||||
|
||||
from langchain_core.callbacks.manager import (
|
||||
AsyncCallbackManagerForLLMRun,
|
||||
CallbackManagerForLLMRun,
|
||||
)
|
||||
from langchain_core.language_models.chat_models import BaseChatModel
|
||||
from langchain_core.messages import (
|
||||
AIMessage,
|
||||
BaseMessage,
|
||||
HumanMessage,
|
||||
SystemMessage,
|
||||
)
|
||||
from langchain_core.outputs import (
|
||||
ChatGeneration,
|
||||
ChatResult,
|
||||
LLMResult,
|
||||
)
|
||||
|
||||
from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint
|
||||
from langchain_community.llms.huggingface_hub import HuggingFaceHub
|
||||
from langchain_community.llms.huggingface_text_gen_inference import (
|
||||
HuggingFaceTextGenInference,
|
||||
)
|
||||
|
||||
DEFAULT_SYSTEM_PROMPT = """You are a helpful, respectful, and honest assistant."""
|
||||
|
||||
|
||||
class ChatHuggingFace(BaseChatModel):
|
||||
"""
|
||||
Wrapper for using Hugging Face LLMs as ChatModels.
|
||||
|
||||
Works with `HuggingFaceTextGenInference`, `HuggingFaceEndpoint`,
|
||||
and `HuggingFaceHub` LLMs.
|
||||
|
||||
Upon instantiating this class, the model_id is resolved from the url
|
||||
provided to the LLM, and the appropriate tokenizer is loaded from
|
||||
the HuggingFace Hub.
|
||||
|
||||
Adapted from: https://python.langchain.com/docs/integrations/chat/llama2_chat
|
||||
"""
|
||||
|
||||
llm: Union[HuggingFaceTextGenInference, HuggingFaceEndpoint, HuggingFaceHub]
|
||||
system_message: SystemMessage = SystemMessage(content=DEFAULT_SYSTEM_PROMPT)
|
||||
tokenizer: Any = None
|
||||
model_id: str = None # type: ignore
|
||||
|
||||
def __init__(self, **kwargs: Any):
|
||||
super().__init__(**kwargs)
|
||||
|
||||
from transformers import AutoTokenizer
|
||||
|
||||
self._resolve_model_id()
|
||||
self.tokenizer = (
|
||||
AutoTokenizer.from_pretrained(self.model_id)
|
||||
if self.tokenizer is None
|
||||
else self.tokenizer
|
||||
)
|
||||
|
||||
def _generate(
|
||||
self,
|
||||
messages: List[BaseMessage],
|
||||
stop: Optional[List[str]] = None,
|
||||
run_manager: Optional[CallbackManagerForLLMRun] = None,
|
||||
**kwargs: Any,
|
||||
) -> ChatResult:
|
||||
llm_input = self._to_chat_prompt(messages)
|
||||
llm_result = self.llm._generate(
|
||||
prompts=[llm_input], stop=stop, run_manager=run_manager, **kwargs
|
||||
)
|
||||
return self._to_chat_result(llm_result)
|
||||
|
||||
async def _agenerate(
|
||||
self,
|
||||
messages: List[BaseMessage],
|
||||
stop: Optional[List[str]] = None,
|
||||
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
|
||||
**kwargs: Any,
|
||||
) -> ChatResult:
|
||||
llm_input = self._to_chat_prompt(messages)
|
||||
llm_result = await self.llm._agenerate(
|
||||
prompts=[llm_input], stop=stop, run_manager=run_manager, **kwargs
|
||||
)
|
||||
return self._to_chat_result(llm_result)
|
||||
|
||||
def _to_chat_prompt(
|
||||
self,
|
||||
messages: List[BaseMessage],
|
||||
) -> str:
|
||||
"""Convert a list of messages into a prompt format expected by wrapped LLM."""
|
||||
if not messages:
|
||||
raise ValueError("at least one HumanMessage must be provided")
|
||||
|
||||
if not isinstance(messages[-1], HumanMessage):
|
||||
raise ValueError("last message must be a HumanMessage")
|
||||
|
||||
messages_dicts = [self._to_chatml_format(m) for m in messages]
|
||||
|
||||
return self.tokenizer.apply_chat_template(
|
||||
messages_dicts, tokenize=False, add_generation_prompt=True
|
||||
)
|
||||
|
||||
def _to_chatml_format(self, message: BaseMessage) -> dict:
|
||||
"""Convert LangChain message to ChatML format."""
|
||||
|
||||
if isinstance(message, SystemMessage):
|
||||
role = "system"
|
||||
elif isinstance(message, AIMessage):
|
||||
role = "assistant"
|
||||
elif isinstance(message, HumanMessage):
|
||||
role = "user"
|
||||
else:
|
||||
raise ValueError(f"Unknown message type: {type(message)}")
|
||||
|
||||
return {"role": role, "content": message.content}
|
||||
|
||||
@staticmethod
|
||||
def _to_chat_result(llm_result: LLMResult) -> ChatResult:
|
||||
chat_generations = []
|
||||
|
||||
for g in llm_result.generations[0]:
|
||||
chat_generation = ChatGeneration(
|
||||
message=AIMessage(content=g.text), generation_info=g.generation_info
|
||||
)
|
||||
chat_generations.append(chat_generation)
|
||||
|
||||
return ChatResult(
|
||||
generations=chat_generations, llm_output=llm_result.llm_output
|
||||
)
|
||||
|
||||
def _resolve_model_id(self) -> None:
|
||||
"""Resolve the model_id from the LLM's inference_server_url"""
|
||||
|
||||
from huggingface_hub import list_inference_endpoints
|
||||
|
||||
available_endpoints = list_inference_endpoints("*")
|
||||
|
||||
if isinstance(self.llm, HuggingFaceTextGenInference):
|
||||
endpoint_url = self.llm.inference_server_url
|
||||
|
||||
elif isinstance(self.llm, HuggingFaceEndpoint):
|
||||
endpoint_url = self.llm.endpoint_url
|
||||
|
||||
elif isinstance(self.llm, HuggingFaceHub):
|
||||
# no need to look up model_id for HuggingFaceHub LLM
|
||||
self.model_id = self.llm.repo_id
|
||||
return
|
||||
|
||||
else:
|
||||
raise ValueError(f"Unknown LLM type: {type(self.llm)}")
|
||||
|
||||
for endpoint in available_endpoints:
|
||||
if endpoint.url == endpoint_url:
|
||||
self.model_id = endpoint.repository
|
||||
|
||||
if not self.model_id:
|
||||
raise ValueError(
|
||||
"Failed to resolve model_id"
|
||||
f"Could not find model id for inference server provided: {endpoint_url}"
|
||||
"Make sure that your Hugging Face token has access to the endpoint."
|
||||
)
|
||||
|
||||
@property
|
||||
def _llm_type(self) -> str:
|
||||
return "huggingface-chat-wrapper"
|
||||
@@ -29,21 +29,28 @@ class VertexAIEmbeddings(_VertexAICommon, Embeddings):
|
||||
def validate_environment(cls, values: Dict) -> Dict:
|
||||
"""Validates that the python package exists in environment."""
|
||||
cls._try_init_vertexai(values)
|
||||
if values["model_name"] == "textembedding-gecko-default":
|
||||
logger.warning(
|
||||
"Model_name will become a required arg for VertexAIEmbeddings "
|
||||
"starting from Feb-01-2024. Currently the default is set to "
|
||||
"textembedding-gecko@001"
|
||||
)
|
||||
values["model_name"] = "textembedding-gecko@001"
|
||||
try:
|
||||
from vertexai.language_models import TextEmbeddingModel
|
||||
|
||||
values["client"] = TextEmbeddingModel.from_pretrained(values["model_name"])
|
||||
except ImportError:
|
||||
raise_vertex_import_error()
|
||||
values["client"] = TextEmbeddingModel.from_pretrained(values["model_name"])
|
||||
return values
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
# the default value would be removed after Feb-01-2024
|
||||
model_name: str = "textembedding-gecko-default",
|
||||
project: Optional[str] = None,
|
||||
location: str = "us-central1",
|
||||
request_parallelism: int = 5,
|
||||
max_retries: int = 6,
|
||||
model_name: str = "textembedding-gecko",
|
||||
credentials: Optional[Any] = None,
|
||||
**kwargs: Any,
|
||||
):
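Until the default is removed, passing `model_name` explicitly avoids the deprecation warning added above; a minimal sketch (it assumes Vertex AI credentials are already configured, e.g. via `gcloud auth login`):

```python
from langchain_community.embeddings import VertexAIEmbeddings

embeddings = VertexAIEmbeddings(model_name="textembedding-gecko@001")
vectors = embeddings.embed_documents(["foo bar"])
# per the integration tests later in this diff, each vector has 768 dimensions
```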
|
||||
|
||||
@@ -62,7 +62,7 @@ class SurrealDBStore(VectorStore):
|
||||
self.db = kwargs.pop("db", "database")
|
||||
self.dburl = kwargs.pop("dburl", "ws://localhost:8000/rpc")
|
||||
self.embedding_function = embedding_function
|
||||
self.sdb = Surreal()
|
||||
self.sdb = Surreal(self.dburl)
|
||||
self.kwargs = kwargs
|
||||
|
||||
async def initialize(self) -> None:
|
||||
@@ -103,8 +103,12 @@ class SurrealDBStore(VectorStore):
|
||||
embeddings = self.embedding_function.embed_documents(list(texts))
|
||||
ids = []
|
||||
for idx, text in enumerate(texts):
|
||||
data = {"text": text, "embedding": embeddings[idx]}
|
||||
if metadatas is not None and idx < len(metadatas):
|
||||
data["metadata"] = metadatas[idx]
|
||||
record = await self.sdb.create(
|
||||
self.collection, {"text": text, "embedding": embeddings[idx]}
|
||||
self.collection,
|
||||
data,
|
||||
)
|
||||
ids.append(record[0]["id"])
|
||||
return ids
|
||||
@@ -123,7 +127,16 @@ class SurrealDBStore(VectorStore):
|
||||
Returns:
|
||||
List of ids for the newly inserted documents
|
||||
"""
|
||||
return asyncio.run(self.aadd_texts(texts, metadatas, **kwargs))
|
||||
|
||||
async def _add_texts(
|
||||
texts: Iterable[str],
|
||||
metadatas: Optional[List[dict]] = None,
|
||||
**kwargs: Any,
|
||||
) -> List[str]:
|
||||
await self.initialize()
|
||||
return await self.aadd_texts(texts, metadatas, **kwargs)
|
||||
|
||||
return asyncio.run(_add_texts(texts, metadatas, **kwargs))
|
||||
|
||||
async def adelete(
|
||||
self,
|
||||
@@ -195,7 +208,7 @@ class SurrealDBStore(VectorStore):
|
||||
"k": k,
|
||||
"score_threshold": kwargs.get("score_threshold", 0),
|
||||
}
|
||||
query = """select id, text,
|
||||
query = """select id, text, metadata,
|
||||
vector::similarity::cosine(embedding,{embedding}) as similarity
|
||||
from {collection}
|
||||
where vector::similarity::cosine(embedding,{embedding}) >= {score_threshold}
|
||||
@@ -208,7 +221,10 @@ class SurrealDBStore(VectorStore):
|
||||
|
||||
return [
|
||||
(
|
||||
Document(page_content=result["text"], metadata={"id": result["id"]}),
|
||||
Document(
|
||||
page_content=result["text"],
|
||||
metadata={"id": result["id"], **result["metadata"]},
|
||||
),
|
||||
result["similarity"],
|
||||
)
|
||||
for result in results[0]["result"]
|
||||
@@ -401,7 +417,7 @@ class SurrealDBStore(VectorStore):
|
||||
|
||||
sdb = cls(embedding, **kwargs)
|
||||
await sdb.initialize()
|
||||
await sdb.aadd_texts(texts)
|
||||
await sdb.aadd_texts(texts, metadatas, **kwargs)
|
||||
return sdb
|
||||
|
||||
@classmethod
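A rough sketch of what the metadata handling added above enables; the import path and the standard `VectorStore` method names are assumptions here, and a running SurrealDB instance is required:

```python
from langchain_community.vectorstores.surrealdb import SurrealDBStore  # path assumed


def index_and_query(embedding_function):  # any LangChain Embeddings implementation
    store = SurrealDBStore(embedding_function, dburl="ws://localhost:8000/rpc")
    # metadatas are now persisted alongside text and embedding ...
    store.add_texts(["hello world"], metadatas=[{"source": "greeting"}])
    # ... and surfaced again on the returned Documents
    doc, score = store.similarity_search_with_score("hello", k=1)[0]
    return doc.metadata  # e.g. {"id": ..., "source": "greeting"}
```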
|
||||
|
||||
@@ -5,6 +5,8 @@ pip install google-cloud-aiplatform>=1.35.0
|
||||
Your end-user credentials would be used to make the calls (make sure you've run
|
||||
`gcloud auth login` first).
|
||||
"""
|
||||
import pytest
|
||||
|
||||
from langchain_community.embeddings import VertexAIEmbeddings
|
||||
|
||||
|
||||
@@ -15,6 +17,7 @@ def test_embedding_documents() -> None:
|
||||
assert len(output) == 1
|
||||
assert len(output[0]) == 768
|
||||
assert model.model_name == model.client._model_id
|
||||
assert model.model_name == "textembedding-gecko@001"
|
||||
|
||||
|
||||
def test_embedding_query() -> None:
|
||||
@@ -50,3 +53,15 @@ def test_paginated_texts() -> None:
|
||||
assert len(output) == 8
|
||||
assert len(output[0]) == 768
|
||||
assert model.model_name == model.client._model_id
|
||||
|
||||
|
||||
def test_warning(caplog: pytest.LogCaptureFixture) -> None:
|
||||
_ = VertexAIEmbeddings()
|
||||
assert len(caplog.records) == 1
|
||||
record = caplog.records[0]
|
||||
assert record.levelname == "WARNING"
|
||||
expected_message = (
|
||||
"Model_name will become a required arg for VertexAIEmbeddings starting from "
|
||||
"Feb-01-2024. Currently the default is set to textembedding-gecko@001"
|
||||
)
|
||||
assert record.message == expected_message
|
||||
|
||||
@@ -0,0 +1,11 @@
|
||||
"""Test HuggingFace Chat wrapper."""
|
||||
from importlib import import_module
|
||||
|
||||
|
||||
def test_import_class() -> None:
|
||||
"""Test that the class can be imported."""
|
||||
module_name = "langchain_community.chat_models.huggingface"
|
||||
class_name = "ChatHuggingFace"
|
||||
|
||||
module = import_module(module_name)
|
||||
assert hasattr(module, class_name)
|
||||
@@ -11,6 +11,7 @@ EXPECTED_ALL = [
|
||||
"ChatCohere",
|
||||
"ChatDatabricks",
|
||||
"ChatGooglePalm",
|
||||
"ChatHuggingFace",
|
||||
"ChatMlflow",
|
||||
"ChatMLflowAIGateway",
|
||||
"ChatOllama",
|
||||
|
||||
@@ -2,12 +2,25 @@ from __future__ import annotations
|
||||
|
||||
import re
|
||||
from abc import abstractmethod
|
||||
from typing import List
|
||||
from collections import deque
|
||||
from typing import AsyncIterator, Deque, Iterator, List, TypeVar, Union
|
||||
|
||||
from langchain_core.output_parsers.base import BaseOutputParser
|
||||
from langchain_core.messages import BaseMessage
|
||||
from langchain_core.output_parsers.transform import BaseTransformOutputParser
|
||||
|
||||
T = TypeVar("T")
|
||||
|
||||
|
||||
class ListOutputParser(BaseOutputParser[List[str]]):
|
||||
def droplastn(iter: Iterator[T], n: int) -> Iterator[T]:
|
||||
"""Drop the last n elements of an iterator."""
|
||||
buffer: Deque[T] = deque()
|
||||
for item in iter:
|
||||
buffer.append(item)
|
||||
if len(buffer) > n:
|
||||
yield buffer.popleft()
|
||||
|
||||
|
||||
class ListOutputParser(BaseTransformOutputParser[List[str]]):
|
||||
"""Parse the output of an LLM call to a list."""
|
||||
|
||||
@property
|
||||
@@ -18,6 +31,74 @@ class ListOutputParser(BaseOutputParser[List[str]]):
|
||||
def parse(self, text: str) -> List[str]:
|
||||
"""Parse the output of an LLM call."""
|
||||
|
||||
def parse_iter(self, text: str) -> Iterator[re.Match]:
|
||||
"""Parse the output of an LLM call."""
|
||||
raise NotImplementedError
|
||||
|
||||
def _transform(
|
||||
self, input: Iterator[Union[str, BaseMessage]]
|
||||
) -> Iterator[List[str]]:
|
||||
buffer = ""
|
||||
for chunk in input:
|
||||
if isinstance(chunk, BaseMessage):
|
||||
# extract text
|
||||
chunk_content = chunk.content
|
||||
if not isinstance(chunk_content, str):
|
||||
continue
|
||||
chunk = chunk_content
|
||||
# add current chunk to buffer
|
||||
buffer += chunk
|
||||
# parse buffer into a list of parts
|
||||
try:
|
||||
done_idx = 0
|
||||
# yield only complete parts
|
||||
for m in droplastn(self.parse_iter(buffer), 1):
|
||||
done_idx = m.end()
|
||||
yield [m.group(1)]
|
||||
buffer = buffer[done_idx:]
|
||||
except NotImplementedError:
|
||||
parts = self.parse(buffer)
|
||||
# yield only complete parts
|
||||
if len(parts) > 1:
|
||||
for part in parts[:-1]:
|
||||
yield [part]
|
||||
buffer = parts[-1]
|
||||
# yield the last part
|
||||
for part in self.parse(buffer):
|
||||
yield [part]
|
||||
|
||||
async def _atransform(
|
||||
self, input: AsyncIterator[Union[str, BaseMessage]]
|
||||
) -> AsyncIterator[List[str]]:
|
||||
buffer = ""
|
||||
async for chunk in input:
|
||||
if isinstance(chunk, BaseMessage):
|
||||
# extract text
|
||||
chunk_content = chunk.content
|
||||
if not isinstance(chunk_content, str):
|
||||
continue
|
||||
chunk = chunk_content
|
||||
# add current chunk to buffer
|
||||
buffer += chunk
|
||||
# parse buffer into a list of parts
|
||||
try:
|
||||
done_idx = 0
|
||||
# yield only complete parts
|
||||
for m in droplastn(self.parse_iter(buffer), 1):
|
||||
done_idx = m.end()
|
||||
yield [m.group(1)]
|
||||
buffer = buffer[done_idx:]
|
||||
except NotImplementedError:
|
||||
parts = self.parse(buffer)
|
||||
# yield only complete parts
|
||||
if len(parts) > 1:
|
||||
for part in parts[:-1]:
|
||||
yield [part]
|
||||
buffer = parts[-1]
|
||||
# yield the last part
|
||||
for part in self.parse(buffer):
|
||||
yield [part]
|
||||
|
||||
|
||||
class CommaSeparatedListOutputParser(ListOutputParser):
|
||||
"""Parse the output of an LLM call to a comma-separated list."""
|
||||
@@ -49,6 +130,8 @@ class CommaSeparatedListOutputParser(ListOutputParser):
|
||||
class NumberedListOutputParser(ListOutputParser):
|
||||
"""Parse a numbered list."""
|
||||
|
||||
pattern = r"\d+\.\s([^\n]+)"
|
||||
|
||||
def get_format_instructions(self) -> str:
|
||||
return (
|
||||
"Your response should be a numbered list with each item on a new line. "
|
||||
@@ -57,11 +140,11 @@ class NumberedListOutputParser(ListOutputParser):
|
||||
|
||||
def parse(self, text: str) -> List[str]:
|
||||
"""Parse the output of an LLM call."""
|
||||
pattern = r"\d+\.\s([^\n]+)"
|
||||
return re.findall(self.pattern, text)
|
||||
|
||||
# Extract the text of each item
|
||||
matches = re.findall(pattern, text)
|
||||
return matches
|
||||
def parse_iter(self, text: str) -> Iterator[re.Match]:
|
||||
"""Parse the output of an LLM call."""
|
||||
return re.finditer(self.pattern, text)
|
||||
|
||||
@property
|
||||
def _type(self) -> str:
|
||||
@@ -71,13 +154,18 @@ class NumberedListOutputParser(ListOutputParser):
|
||||
class MarkdownListOutputParser(ListOutputParser):
|
||||
"""Parse a markdown list."""
|
||||
|
||||
pattern = r"-\s([^\n]+)"
|
||||
|
||||
def get_format_instructions(self) -> str:
|
||||
return "Your response should be a markdown list, " "eg: `- foo\n- bar\n- baz`"
|
||||
|
||||
def parse(self, text: str) -> List[str]:
|
||||
"""Parse the output of an LLM call."""
|
||||
pattern = r"-\s([^\n]+)"
|
||||
return re.findall(pattern, text)
|
||||
return re.findall(self.pattern, text)
|
||||
|
||||
def parse_iter(self, text: str) -> Iterator[re.Match]:
|
||||
"""Parse the output of an LLM call."""
|
||||
return re.finditer(self.pattern, text)
|
||||
|
||||
@property
|
||||
def _type(self) -> str:
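The new `_transform`/`parse_iter` machinery means list items can be streamed as they complete. A small usage sketch, mirroring the new unit tests later in this diff:

```python
from langchain_core.output_parsers.list import NumberedListOutputParser

parser = NumberedListOutputParser()

# Simulate an LLM streaming its answer a few characters at a time.
chunks = iter(["1. fo", "o\n\n2. ba", "r\n\n3. baz"])

for partial in parser.transform(chunks):
    print(partial)
# ["foo"], then ["bar"], then ["baz"]: each item is emitted as soon as it is
# complete, because droplastn() holds back only the still-growing last match.
```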
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[tool.poetry]
|
||||
name = "langchain-core"
|
||||
version = "0.1.2"
|
||||
version = "0.1.3"
|
||||
description = "Building applications with LLMs through composability"
|
||||
authors = []
|
||||
license = "MIT"
|
||||
|
||||
268
libs/core/tests/unit_tests/output_parsers/test_list_parser.py
Normal file
@@ -0,0 +1,268 @@
|
||||
from typing import AsyncIterator, Iterable, List, TypeVar, cast
|
||||
|
||||
from langchain_core.output_parsers.list import (
|
||||
CommaSeparatedListOutputParser,
|
||||
MarkdownListOutputParser,
|
||||
NumberedListOutputParser,
|
||||
)
|
||||
from langchain_core.runnables.utils import aadd, add
|
||||
|
||||
|
||||
def test_single_item() -> None:
|
||||
"""Test that a string with a single item is parsed to a list with that item."""
|
||||
parser = CommaSeparatedListOutputParser()
|
||||
text = "foo"
|
||||
expected = ["foo"]
|
||||
|
||||
assert parser.parse(text) == expected
|
||||
assert add(parser.transform(t for t in text)) == expected
|
||||
assert list(parser.transform(t for t in text)) == [[a] for a in expected]
|
||||
assert list(parser.transform(t for t in text.splitlines(keepends=True))) == [
|
||||
[a] for a in expected
|
||||
]
|
||||
assert list(
|
||||
parser.transform(" " + t if i > 0 else t for i, t in enumerate(text.split(" ")))
|
||||
) == [[a] for a in expected]
|
||||
assert list(parser.transform(iter([text]))) == [[a] for a in expected]
|
||||
|
||||
|
||||
def test_multiple_items() -> None:
|
||||
"""Test that a string with multiple comma-separated items is parsed to a list."""
|
||||
parser = CommaSeparatedListOutputParser()
|
||||
text = "foo, bar, baz"
|
||||
expected = ["foo", "bar", "baz"]
|
||||
|
||||
assert parser.parse(text) == expected
|
||||
assert add(parser.transform(t for t in text)) == expected
|
||||
assert list(parser.transform(t for t in text)) == [[a] for a in expected]
|
||||
assert list(parser.transform(t for t in text.splitlines(keepends=True))) == [
|
||||
[a] for a in expected
|
||||
]
|
||||
assert list(
|
||||
parser.transform(" " + t if i > 0 else t for i, t in enumerate(text.split(" ")))
|
||||
) == [[a] for a in expected]
|
||||
assert list(parser.transform(iter([text]))) == [[a] for a in expected]
|
||||
|
||||
|
||||
def test_numbered_list() -> None:
|
||||
parser = NumberedListOutputParser()
|
||||
text1 = (
|
||||
"Your response should be a numbered list with each item on a new line. "
|
||||
"For example: \n\n1. foo\n\n2. bar\n\n3. baz"
|
||||
)
|
||||
|
||||
text2 = "Items:\n\n1. apple\n\n2. banana\n\n3. cherry"
|
||||
|
||||
text3 = "No items in the list."
|
||||
|
||||
for text, expected in [
|
||||
(text1, ["foo", "bar", "baz"]),
|
||||
(text2, ["apple", "banana", "cherry"]),
|
||||
(text3, []),
|
||||
]:
|
||||
expectedlist = [[a] for a in cast(List[str], expected)]
|
||||
assert parser.parse(text) == expected
|
||||
assert add(parser.transform(t for t in text)) == (expected or None)
|
||||
assert list(parser.transform(t for t in text)) == expectedlist
|
||||
assert (
|
||||
list(parser.transform(t for t in text.splitlines(keepends=True)))
|
||||
== expectedlist
|
||||
)
|
||||
assert (
|
||||
list(
|
||||
parser.transform(
|
||||
" " + t if i > 0 else t for i, t in enumerate(text.split(" "))
|
||||
)
|
||||
)
|
||||
== expectedlist
|
||||
)
|
||||
assert list(parser.transform(iter([text]))) == expectedlist
|
||||
|
||||
|
||||
def test_markdown_list() -> None:
|
||||
parser = MarkdownListOutputParser()
|
||||
text1 = (
|
||||
"Your response should be a numbered list with each item on a new line."
|
||||
"For example: \n- foo\n- bar\n- baz"
|
||||
)
|
||||
|
||||
text2 = "Items:\n- apple\n- banana\n- cherry"
|
||||
|
||||
text3 = "No items in the list."
|
||||
|
||||
for text, expected in [
|
||||
(text1, ["foo", "bar", "baz"]),
|
||||
(text2, ["apple", "banana", "cherry"]),
|
||||
(text3, []),
|
||||
]:
|
||||
expectedlist = [[a] for a in cast(List[str], expected)]
|
||||
assert parser.parse(text) == expected
|
||||
assert add(parser.transform(t for t in text)) == (expected or None)
|
||||
assert list(parser.transform(t for t in text)) == expectedlist
|
||||
assert (
|
||||
list(parser.transform(t for t in text.splitlines(keepends=True)))
|
||||
== expectedlist
|
||||
)
|
||||
assert (
|
||||
list(
|
||||
parser.transform(
|
||||
" " + t if i > 0 else t for i, t in enumerate(text.split(" "))
|
||||
)
|
||||
)
|
||||
== expectedlist
|
||||
)
|
||||
assert list(parser.transform(iter([text]))) == expectedlist
|
||||
|
||||
|
||||
T = TypeVar("T")
|
||||
|
||||
|
||||
async def aiter_from_iter(iterable: Iterable[T]) -> AsyncIterator[T]:
|
||||
for item in iterable:
|
||||
yield item
|
||||
|
||||
|
||||
async def test_single_item_async() -> None:
|
||||
"""Test that a string with a single item is parsed to a list with that item."""
|
||||
parser = CommaSeparatedListOutputParser()
|
||||
text = "foo"
|
||||
expected = ["foo"]
|
||||
|
||||
assert await parser.aparse(text) == expected
|
||||
assert await aadd(parser.atransform(aiter_from_iter(t for t in text))) == expected
|
||||
assert [a async for a in parser.atransform(aiter_from_iter(t for t in text))] == [
|
||||
[a] for a in expected
|
||||
]
|
||||
assert [
|
||||
a
|
||||
async for a in parser.atransform(
|
||||
aiter_from_iter(t for t in text.splitlines(keepends=True))
|
||||
)
|
||||
] == [[a] for a in expected]
|
||||
assert [
|
||||
a
|
||||
async for a in parser.atransform(
|
||||
aiter_from_iter(
|
||||
" " + t if i > 0 else t for i, t in enumerate(text.split(" "))
|
||||
)
|
||||
)
|
||||
] == [[a] for a in expected]
|
||||
assert [a async for a in parser.atransform(aiter_from_iter([text]))] == [
|
||||
[a] for a in expected
|
||||
]
|
||||
|
||||
|
||||
async def test_multiple_items_async() -> None:
|
||||
"""Test that a string with multiple comma-separated items is parsed to a list."""
|
||||
parser = CommaSeparatedListOutputParser()
|
||||
text = "foo, bar, baz"
|
||||
expected = ["foo", "bar", "baz"]
|
||||
|
||||
assert await parser.aparse(text) == expected
|
||||
assert await aadd(parser.atransform(aiter_from_iter(t for t in text))) == expected
|
||||
assert [a async for a in parser.atransform(aiter_from_iter(t for t in text))] == [
|
||||
[a] for a in expected
|
||||
]
|
||||
assert [
|
||||
a
|
||||
async for a in parser.atransform(
|
||||
aiter_from_iter(t for t in text.splitlines(keepends=True))
|
||||
)
|
||||
] == [[a] for a in expected]
|
||||
assert [
|
||||
a
|
||||
async for a in parser.atransform(
|
||||
aiter_from_iter(
|
||||
" " + t if i > 0 else t for i, t in enumerate(text.split(" "))
|
||||
)
|
||||
)
|
||||
] == [[a] for a in expected]
|
||||
assert [a async for a in parser.atransform(aiter_from_iter([text]))] == [
|
||||
[a] for a in expected
|
||||
]
|
||||
|
||||
|
||||
async def test_numbered_list_async() -> None:
|
||||
parser = NumberedListOutputParser()
|
||||
text1 = (
|
||||
"Your response should be a numbered list with each item on a new line. "
|
||||
"For example: \n\n1. foo\n\n2. bar\n\n3. baz"
|
||||
)
|
||||
|
||||
text2 = "Items:\n\n1. apple\n\n2. banana\n\n3. cherry"
|
||||
|
||||
text3 = "No items in the list."
|
||||
|
||||
for text, expected in [
|
||||
(text1, ["foo", "bar", "baz"]),
|
||||
(text2, ["apple", "banana", "cherry"]),
|
||||
(text3, []),
|
||||
]:
|
||||
expectedlist = [[a] for a in cast(List[str], expected)]
|
||||
assert await parser.aparse(text) == expected
|
||||
assert await aadd(parser.atransform(aiter_from_iter(t for t in text))) == (
|
||||
expected or None
|
||||
)
|
||||
assert [
|
||||
a async for a in parser.atransform(aiter_from_iter(t for t in text))
|
||||
] == expectedlist
|
||||
assert [
|
||||
a
|
||||
async for a in parser.atransform(
|
||||
aiter_from_iter(t for t in text.splitlines(keepends=True))
|
||||
)
|
||||
] == expectedlist
|
||||
assert [
|
||||
a
|
||||
async for a in parser.atransform(
|
||||
aiter_from_iter(
|
||||
" " + t if i > 0 else t for i, t in enumerate(text.split(" "))
|
||||
)
|
||||
)
|
||||
] == expectedlist
|
||||
assert [
|
||||
a async for a in parser.atransform(aiter_from_iter([text]))
|
||||
] == expectedlist
|
||||
|
||||
|
||||
async def test_markdown_list_async() -> None:
|
||||
parser = MarkdownListOutputParser()
|
||||
text1 = (
|
||||
"Your response should be a numbered list with each item on a new line."
|
||||
"For example: \n- foo\n- bar\n- baz"
|
||||
)
|
||||
|
||||
text2 = "Items:\n- apple\n- banana\n- cherry"
|
||||
|
||||
text3 = "No items in the list."
|
||||
|
||||
for text, expected in [
|
||||
(text1, ["foo", "bar", "baz"]),
|
||||
(text2, ["apple", "banana", "cherry"]),
|
||||
(text3, []),
|
||||
]:
|
||||
expectedlist = [[a] for a in cast(List[str], expected)]
|
||||
assert await parser.aparse(text) == expected
|
||||
assert await aadd(parser.atransform(aiter_from_iter(t for t in text))) == (
|
||||
expected or None
|
||||
)
|
||||
assert [
|
||||
a async for a in parser.atransform(aiter_from_iter(t for t in text))
|
||||
] == expectedlist
|
||||
assert [
|
||||
a
|
||||
async for a in parser.atransform(
|
||||
aiter_from_iter(t for t in text.splitlines(keepends=True))
|
||||
)
|
||||
] == expectedlist
|
||||
assert [
|
||||
a
|
||||
async for a in parser.atransform(
|
||||
aiter_from_iter(
|
||||
" " + t if i > 0 else t for i, t in enumerate(text.split(" "))
|
||||
)
|
||||
)
|
||||
] == expectedlist
|
||||
assert [
|
||||
a async for a in parser.atransform(aiter_from_iter([text]))
|
||||
] == expectedlist
|
||||
@@ -1,13 +1,15 @@
|
||||
import re
|
||||
import xml.etree.ElementTree as ET
|
||||
from typing import Any, Dict, List, Optional
|
||||
from typing import Any, AsyncIterator, Dict, Iterator, List, Optional, Union
|
||||
|
||||
from langchain_core.output_parsers import BaseOutputParser
|
||||
from langchain_core.messages import BaseMessage
|
||||
from langchain_core.output_parsers.transform import BaseTransformOutputParser
|
||||
from langchain_core.runnables.utils import AddableDict
|
||||
|
||||
from langchain.output_parsers.format_instructions import XML_FORMAT_INSTRUCTIONS
|
||||
|
||||
|
||||
class XMLOutputParser(BaseOutputParser):
|
||||
class XMLOutputParser(BaseTransformOutputParser):
|
||||
"""Parse an output using xml format."""
|
||||
|
||||
tags: Optional[List[str]] = None
|
||||
@@ -33,6 +35,70 @@ class XMLOutputParser(BaseOutputParser):
|
||||
else:
|
||||
raise ValueError(f"Could not parse output: {text}")
|
||||
|
||||
def _transform(
|
||||
self, input: Iterator[Union[str, BaseMessage]]
|
||||
) -> Iterator[AddableDict]:
|
||||
parser = ET.XMLPullParser(["start", "end"])
|
||||
current_path: List[str] = []
|
||||
current_path_has_children = False
|
||||
for chunk in input:
|
||||
if isinstance(chunk, BaseMessage):
|
||||
# extract text
|
||||
chunk_content = chunk.content
|
||||
if not isinstance(chunk_content, str):
|
||||
continue
|
||||
chunk = chunk_content
|
||||
# pass chunk to parser
|
||||
parser.feed(chunk)
|
||||
# yield all events
|
||||
for event, elem in parser.read_events():
|
||||
if event == "start":
|
||||
# update current path
|
||||
current_path.append(elem.tag)
|
||||
current_path_has_children = False
|
||||
elif event == "end":
|
||||
# remove last element from current path
|
||||
current_path.pop()
|
||||
# yield element
|
||||
if not current_path_has_children:
|
||||
yield nested_element(current_path, elem)
|
||||
# prevent yielding of parent element
|
||||
current_path_has_children = True
|
||||
# close parser
|
||||
parser.close()
|
||||
|
||||
async def _atransform(
|
||||
self, input: AsyncIterator[Union[str, BaseMessage]]
|
||||
) -> AsyncIterator[AddableDict]:
|
||||
parser = ET.XMLPullParser(["start", "end"])
|
||||
current_path: List[str] = []
|
||||
current_path_has_children = False
|
||||
async for chunk in input:
|
||||
if isinstance(chunk, BaseMessage):
|
||||
# extract text
|
||||
chunk_content = chunk.content
|
||||
if not isinstance(chunk_content, str):
|
||||
continue
|
||||
chunk = chunk_content
|
||||
# pass chunk to parser
|
||||
parser.feed(chunk)
|
||||
# yield all events
|
||||
for event, elem in parser.read_events():
|
||||
if event == "start":
|
||||
# update current path
|
||||
current_path.append(elem.tag)
|
||||
current_path_has_children = False
|
||||
elif event == "end":
|
||||
# remove last element from current path
|
||||
current_path.pop()
|
||||
# yield element
|
||||
if not current_path_has_children:
|
||||
yield nested_element(current_path, elem)
|
||||
# prevent yielding of parent element
|
||||
current_path_has_children = True
|
||||
# close parser
|
||||
parser.close()
|
||||
|
||||
def _root_to_dict(self, root: ET.Element) -> Dict[str, List[Any]]:
|
||||
"""Converts xml tree to python dictionary."""
|
||||
result: Dict[str, List[Any]] = {root.tag: []}
|
||||
@@ -46,3 +112,11 @@ class XMLOutputParser(BaseOutputParser):
|
||||
@property
|
||||
def _type(self) -> str:
|
||||
return "xml"
|
||||
|
||||
|
||||
def nested_element(path: List[str], elem: ET.Element) -> Any:
|
||||
"""Get nested element from path."""
|
||||
if len(path) == 0:
|
||||
return AddableDict({elem.tag: elem.text})
|
||||
else:
|
||||
return AddableDict({path[0]: [nested_element(path[1:], elem)]})
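Similarly, the XML parser can now stream nested partial results. A small sketch of the transform behaviour, in the spirit of the updated XML parser test later in this diff:

```python
from langchain.output_parsers.xml import XMLOutputParser

parser = XMLOutputParser()

# Feed the document one character at a time, as an LLM would stream tokens.
xml = "<foo><bar><baz></baz><baz>slim.shady</baz></bar><baz>tag</baz></foo>"

for chunk in parser.transform(iter(xml)):
    print(chunk)
# Leaf elements are yielded incrementally as nested AddableDicts:
# {'foo': [{'bar': [{'baz': None}]}]}
# {'foo': [{'bar': [{'baz': 'slim.shady'}]}]}
# {'foo': [{'baz': 'tag'}]}
```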
|
||||
|
||||
@@ -1,49 +0,0 @@
|
||||
from langchain.output_parsers.list import (
|
||||
CommaSeparatedListOutputParser,
|
||||
MarkdownListOutputParser,
|
||||
NumberedListOutputParser,
|
||||
)
|
||||
|
||||
|
||||
def test_single_item() -> None:
|
||||
"""Test that a string with a single item is parsed to a list with that item."""
|
||||
parser = CommaSeparatedListOutputParser()
|
||||
assert parser.parse("foo") == ["foo"]
|
||||
|
||||
|
||||
def test_multiple_items() -> None:
|
||||
"""Test that a string with multiple comma-separated items is parsed to a list."""
|
||||
parser = CommaSeparatedListOutputParser()
|
||||
assert parser.parse("foo, bar, baz") == ["foo", "bar", "baz"]
|
||||
|
||||
|
||||
def test_numbered_list() -> None:
|
||||
parser = NumberedListOutputParser()
|
||||
text1 = (
|
||||
"Your response should be a numbered list with each item on a new line. "
|
||||
"For example: \n\n1. foo\n\n2. bar\n\n3. baz"
|
||||
)
|
||||
|
||||
text2 = "Items:\n\n1. apple\n\n2. banana\n\n3. cherry"
|
||||
|
||||
text3 = "No items in the list."
|
||||
|
||||
assert parser.parse(text1) == ["foo", "bar", "baz"]
|
||||
assert parser.parse(text2) == ["apple", "banana", "cherry"]
|
||||
assert parser.parse(text3) == []
|
||||
|
||||
|
||||
def test_markdown_list() -> None:
|
||||
parser = MarkdownListOutputParser()
|
||||
text1 = (
|
||||
"Your response should be a numbered list with each item on a new line."
|
||||
"For example: \n- foo\n- bar\n- baz"
|
||||
)
|
||||
|
||||
text2 = "Items:\n- apple\n- banana\n- cherry"
|
||||
|
||||
text3 = "No items in the list."
|
||||
|
||||
assert parser.parse(text1) == ["foo", "bar", "baz"]
|
||||
assert parser.parse(text2) == ["apple", "banana", "cherry"]
|
||||
assert parser.parse(text3) == []
|
||||
@@ -31,6 +31,11 @@ def test_xml_output_parser(result: str) -> None:
|
||||
|
||||
xml_result = xml_parser.parse(result)
|
||||
assert DEF_RESULT_EXPECTED == xml_result
|
||||
assert list(xml_parser.transform(iter(result))) == [
|
||||
{"foo": [{"bar": [{"baz": None}]}]},
|
||||
{"foo": [{"bar": [{"baz": "slim.shady"}]}]},
|
||||
{"foo": [{"baz": "tag"}]},
|
||||
]
|
||||
|
||||
|
||||
@pytest.mark.parametrize("result", ["foo></foo>", "<foo></foo", "foo></foo", "foofoo"])
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# RAG with Mulitple Indexes (Fusion)
|
||||
# RAG with Multiple Indexes (Fusion)
|
||||
|
||||
A QA application that queries multiple domain-specific retrievers and selects the most relevant documents from across all retrieved results.
|
||||
|
||||
@@ -70,4 +70,4 @@ We can access the template from code with:
|
||||
from langserve.client import RemoteRunnable
|
||||
|
||||
runnable = RemoteRunnable("http://localhost:8000/rag-multi-index-fusion")
|
||||
```
|
||||
```
|
||||
|
||||