Merge branch 'master' into sqldocstore_postgres_compat
commit ab643d236e
@ -54,7 +54,7 @@
|
||||
" \"Enter your Contextual API key: \"\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"# intialize Contextual llm\n",
|
||||
"# initialize Contextual llm\n",
|
||||
"llm = ChatContextual(\n",
|
||||
" model=\"v1\",\n",
|
||||
" api_key=\"\",\n",
|
||||
|
@ -7,39 +7,28 @@
|
||||
"source": [
|
||||
"# MLflow\n",
|
||||
"\n",
|
||||
">[MLflow](https://mlflow.org/) is a versatile, open-source platform for managing workflows and artifacts across the machine learning lifecycle. It has built-in integrations with many popular ML libraries, but can be used with any library, algorithm, or deployment tool. It is designed to be extensible, so you can write plugins to support new workflows, libraries, and tools.\n",
|
||||
">[MLflow](https://mlflow.org/) is a versatile, open-source platform for managing workflows and artifacts across the machine learning and generative AI lifecycle. It has built-in integrations with many popular AI and ML libraries, but can be used with any library, algorithm, or deployment tool.\n",
|
||||
"\n",
|
||||
"In the context of LangChain integration, MLflow provides the following capabilities:\n",
|
||||
"MLflow's [LangChain integration](https://mlflow.org/docs/latest/llms/langchain/autologging.html) provides the following capabilities:\n",
|
||||
"\n",
|
||||
"- **Experiment Tracking**: MLflow tracks and stores artifacts from your LangChain experiments, including models, code, prompts, metrics, and more.\n",
|
||||
"- **Dependency Management**: MLflow automatically records model dependencies, ensuring consistency between development and production environments.\n",
|
||||
"- **Model Evaluation** MLflow offers native capabilities for evaluating LangChain applications.\n",
|
||||
"- **Tracing**: MLflow allows you to visually trace data flows through your LangChain chain, agent, retriever, or other components.\n",
|
||||
"- **[Tracing](https://mlflow.org/docs/latest/llms/langchain/autologging.html)**: Visualize data flows through your LangChain components with one line of code (`mlflow.langchain.autolog()`)\n",
|
||||
"- **[Experiment Tracking](https://mlflow.org/docs/latest/llms/langchain/index.html#experiment-tracking)**: Log artifacts, code, and metrics from your LangChain runs\n",
|
||||
"- **[Model Management](https://mlflow.org/docs/latest/model-registry.html)**: Version and deploy LangChain applications with dependency tracking\n",
|
||||
"- **[Evaluation](https://mlflow.org/docs/latest/llms/langchain/index.html#mlflow-evaluate)**: Measure the performance of your LangChain applications\n",
|
||||
"\n",
|
||||
"**Note**: MLflow tracing is available in MLflow versions 2.14.0 and later.\n",
|
||||
"\n",
|
||||
"**Note**: The tracing capability is only available in MLflow versions 2.14.0 and later.\n",
|
||||
"\n",
|
||||
"This notebook demonstrates how to track your LangChain experiments using MLflow. For more information about this feature and to explore tutorials and examples of using LangChain with MLflow, please refer to the [MLflow documentation for LangChain integration](https://mlflow.org/docs/latest/llms/langchain/index.html)."
|
||||
"This short guide focuses on MLflow's tracing capability for LangChain and LangGraph applications. You'll see how to enable tracing with one line of code and view the execution flow of your applications. For information about MLflow's other capabilities and to explore additional tutorials, please refer to the [MLflow documentation for LangChain](https://mlflow.org/docs/latest/llms/langchain/index.html). If you're new to MLflow, check out the [Getting Started with MLflow](https://mlflow.org/docs/latest/getting-started/index.html) guide."
|
||||
]
|
||||
},
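A minimal sketch of the one-line setup described above (assuming `mlflow>=2.14.0` is installed, since tracing requires it):

```python
import mlflow

# Tracing requires MLflow 2.14.0 or later
print(mlflow.__version__)

# One line enables tracing for every LangChain invocation in this session
mlflow.langchain.autolog()
```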
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e0cbd74b-1542-45a4-a72b-b2eedeffd2e0",
|
||||
"id": "da942c89",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"Install MLflow Python package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7fb27b941602401d91542211134fc71a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install google-search-results num"
|
||||
"To get started with MLflow tracing for LangChain, install the MLflow Python package. We will also use the `langchain-openai` package.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -49,7 +38,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install mlflow -qU"
|
||||
"%pip install mlflow langchain-openai langgraph -qU"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -57,22 +46,12 @@
|
||||
"id": "8e626bb4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This example utilizes the OpenAI LLM. Feel free to skip the command below and proceed with a different LLM if desired."
|
||||
"Next, set the MLflow tracking URI and OpenAI API key."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "ca7bd72f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install langchain-openai -qU"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": 4,
|
||||
"id": "7e87b21d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@ -86,10 +65,12 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "84616d96",
|
||||
"id": "7bc78387",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"To begin, let's create a dedicated MLflow experiment in order track our model and artifacts. While you can opt to skip this step and use the default experiment, we strongly recommend organizing your runs and artifacts into separate experiments to avoid clutter and maintain a clean, structured workflow."
|
||||
"## MLflow Tracing\n",
|
||||
"\n",
|
||||
"MLflow's tracing capability helps you visualize the execution flow of your LangChain applications. Here's how to enable it."
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -101,77 +82,41 @@
|
||||
"source": [
|
||||
"import mlflow\n",
|
||||
"\n",
|
||||
"mlflow.set_experiment(\"LangChain MLflow Integration\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "48accc76",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Overview\n",
|
||||
"# Optional: Set an experiment to organize your traces\n",
|
||||
"mlflow.set_experiment(\"LangChain MLflow Integration\")\n",
|
||||
"\n",
|
||||
"Integrate MLflow with your LangChain Application using one of the following methods:\n",
|
||||
"# Enable tracing\n",
|
||||
"mlflow.langchain.autolog()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "105a77e5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Example: Tracing a LangChain Application\n",
|
||||
"\n",
|
||||
"1. **Autologging**: Enable seamless tracking with the `mlflow.langchain.autolog()` command, our recommended first option for leveraging the LangChain MLflow integration.\n",
|
||||
"2. **Manual Logging**: Use MLflow APIs to log LangChain chains and agents, providing fine-grained control over what to track in your experiment.\n",
|
||||
"3. **Custom Callbacks**: Pass MLflow callbacks manually when invoking chains, allowing for semi-automated customization of your workload, such as tracking specific invocations."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c3f10055",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Scenario 1: MLFlow Autologging"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "71118a27",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"To get started with autologging, simply call `mlflow.langchain.autolog()`. In this example, we set the `log_models` parameter to `True`, which allows the chain definition and its dependency libraries to be recorded as an MLflow model, providing a comprehensive tracking experience."
|
||||
"Here's a complete example showing MLflow tracing with LangChain:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 39,
|
||||
"id": "5b08145f",
|
||||
"execution_count": 4,
|
||||
"id": "99cab27c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import mlflow\n",
|
||||
"\n",
|
||||
"mlflow.langchain.autolog(\n",
|
||||
" # These are optional configurations to control what information should be logged automatically (default: False)\n",
|
||||
" # For the full list of the arguments, refer to https://mlflow.org/docs/latest/llms/langchain/autologging.html#id1\n",
|
||||
" log_models=True,\n",
|
||||
" log_input_examples=True,\n",
|
||||
" log_model_signatures=True,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f0570c18",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Define a Chain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 40,
|
||||
"id": "1b2627ef",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model_name=\"gpt-4o\", temperature=0)\n",
|
||||
"# Enable MLflow tracing\n",
|
||||
"mlflow.langchain.autolog()\n",
|
||||
"\n",
|
||||
"# Create a simple chain\n",
|
||||
"llm = ChatOpenAI(model_name=\"gpt-4o\")\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
@ -181,421 +126,86 @@
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"parser = StrOutputParser()\n",
|
||||
"\n",
|
||||
"chain = prompt | llm | parser"
|
||||
"chain = prompt | llm | StrOutputParser()\n",
|
||||
"\n",
|
||||
"# Run the chain\n",
|
||||
"result = chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"input_language\": \"English\",\n",
|
||||
" \"output_language\": \"German\",\n",
|
||||
" \"input\": \"I love programming.\",\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a5b38bae",
|
||||
"id": "6240cf2d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Invoke the Chain\n",
|
||||
"To view the trace, run `mlflow ui` in your terminal and navigate to the Traces tab in the MLflow UI.\n",
|
||||
"\n",
|
||||
"Note that this step may take a few seconds longer than usual, as MLflow runs several background tasks in the background to log models, traces, and artifacts to the tracking server."
|
||||
"## Example: Tracing a LangGraph Application\n",
|
||||
"\n",
|
||||
"MLflow also supports tracing LangGraph applications:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 41,
|
||||
"id": "a1df4bc8",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Ich liebe das Programmieren.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 41,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"test_input = {\n",
|
||||
" \"input_language\": \"English\",\n",
|
||||
" \"output_language\": \"German\",\n",
|
||||
" \"input\": \"I love programming.\",\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"chain.invoke(test_input)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5173cdd4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Take a moment to explore the MLflow Tracking UI, where you can gain a deeper understanding of what information are being logged.\n",
|
||||
"* **Traces** - Navigate to the \"Traces\" tab in the experiment and click the request ID link of the first row. The displayed trace tree visualizes the call stack of your chain invocation, providing you with a deep insight into how each component is executed within the chain.\n",
|
||||
"* **MLflow Model** - As we set `log_model=True`, MLflow automatically creates an MLflow Run to track your chain definition. Navigate to the newest Run page and open the \"Artifacts\" tab, which lists file artifacts logged as an MLflow Model, including dependencies, input examples, model signatures, and more.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "36179573",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Invoke the Logged Chain\n",
|
||||
"\n",
|
||||
"Next, let's load the model back and verify that we can reproduce the same prediction, ensuring consistency and reliability.\n",
|
||||
"\n",
|
||||
"There are two ways to load the model\n",
|
||||
"1. `mlflow.langchain.load_model(MODEL_URI)` - This loads the model as the original LangChain object.\n",
|
||||
"2. `mlflow.pyfunc.load_model(MODEL_URI)` - This loads the model within the `PythonModel` wrapper and encapsulates the prediction logic with the `predict()` API, which contains additional logic such as schema enforcement."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 42,
|
||||
"id": "a8e39d72",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Ich liebe Programmieren.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 42,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Replace YOUR_RUN_ID with the Run ID displayed on the MLflow UI\n",
|
||||
"loaded_model = mlflow.langchain.load_model(\"runs:/{YOUR_RUN_ID}/model\")\n",
|
||||
"loaded_model.invoke(test_input)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 57,
|
||||
"id": "9619356d",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"['Ich liebe das Programmieren.']"
|
||||
]
|
||||
},
|
||||
"execution_count": 57,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"pyfunc_model = mlflow.pyfunc.load_model(\"runs:/{YOUR_RUN_ID}/model\")\n",
|
||||
"pyfunc_model.predict(test_input)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "eb23a78c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Configure Autologging\n",
|
||||
"\n",
|
||||
"The `mlflow.langchain.autolog()` function offers several parameters that allow for fine-grained control over the artifacts logged to MLflow. For a comprehensive list of available configurations, please refer to the latest [MLflow LangChain Autologging Documentation](https://mlflow.org/docs/latest/llms/langchain/autologging.html)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1bf6bb02",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Scenario 2: Manually Logging an Agent from Code"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6e447a02",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"\n",
|
||||
"#### Prerequisites\n",
|
||||
"\n",
|
||||
"This example uses `SerpAPI`, a search engine API, as a tool for the agent to retrieve Google Search results. LangChain is natively integrated with `SerpAPI`, allowing you to configure the tool for your agent with just one line of code.\n",
|
||||
"\n",
|
||||
"To get started:\n",
|
||||
"\n",
|
||||
"* Install the required Python package via pip: `pip install google-search-results numexpr`.\n",
|
||||
"* Create an account at [SerpAPI's Official Website](https://serpapi.com/) and retrieve an API key.\n",
|
||||
"* Set the API key in the environment variable: `os.environ[\"SERPAPI_API_KEY\"] = \"YOUR_API_KEY\"`\n"
|
||||
]
|
||||
},
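A minimal sketch of that setup (the key value below is a placeholder, not a real credential):

```python
import os

# Placeholder: replace with the API key from your SerpAPI account
os.environ["SERPAPI_API_KEY"] = "YOUR_API_KEY"
```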
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d0c914e3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Define an Agent\n",
|
||||
"\n",
|
||||
"In this example, we will log the agent definition **as code**, rather than directly feeding the Python object and saving it in a serialized format. This approach offers several benefits:\n",
|
||||
"\n",
|
||||
"1. **No serialization required**: By saving the model as code, we avoid the need for serialization, which can be problematic when working with components that don't natively support it. This approach also eliminates the risk of incompatibility issues when deserializing the model in a different environment.\n",
|
||||
"2. **Better transparency**: By inspecting the saved code file, you can gain valuable insights into what the model does. This is in contrast to serialized formats like pickle, where the model's behavior remains opaque until it's loaded back, potentially exposing security risks such as remote code execution.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9190a609",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"First, create a separate `.py` file that defines the agent instance.\n",
|
||||
"\n",
|
||||
"In the interest of time, you can run the following cell to generate a Python file `agent.py`, which contains the agent definition code. In actual dev scenario, you would define it in another notebook or hand-crafted python script."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 64,
|
||||
"id": "62b20e17",
|
||||
"execution_count": 5,
|
||||
"id": "f703c505",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"script_content = \"\"\"\n",
|
||||
"from langchain.agents import AgentType, initialize_agent, load_tools\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"import mlflow\n",
|
||||
"from langchain_core.tools import tool\n",
|
||||
"from langgraph.prebuilt import create_react_agent\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model_name=\"gpt-4o\", temperature=0)\n",
|
||||
"tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm)\n",
|
||||
"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION)\n",
|
||||
"\n",
|
||||
"# IMPORTANT: call set_model() to register the instance to be logged.\n",
|
||||
"mlflow.models.set_model(agent)\n",
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
"with open(\"agent.py\", \"w\") as f:\n",
|
||||
" f.write(script_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "82a21f06",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Log the Agent\n",
|
||||
"\n",
|
||||
"Return to the original notebook and run the following cell to log the agent you've defined in the `agent.py` file.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 51,
|
||||
"id": "cd5b8bcc",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"The agent is successfully logged to MLflow!\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"question = \"How long would it take to drive to the Moon with F1 racing cars?\"\n",
|
||||
"\n",
|
||||
"with mlflow.start_run(run_name=\"search-math-agent\") as run:\n",
|
||||
" info = mlflow.langchain.log_model(\n",
|
||||
" lc_model=\"agent.py\", # Specify the relative code path to the agent definition\n",
|
||||
" artifact_path=\"model\",\n",
|
||||
" input_example=question,\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"print(\"The agent is successfully logged to MLflow!\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b4687052",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now, open the MLflow UI and navigate to the \"Artifacts\" tab in the Run detail page. You should see that the `agent.py` file has been successfully logged, along with other model artifacts, such as dependencies, input examples, and more."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9011db62",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Invoke the Logged Agent\n",
|
||||
"\n",
|
||||
"Now load the agent back and invoke it. There are two ways to load the model"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 53,
|
||||
"id": "b634b69d",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Downloading artifacts: 100%|██████████| 10/10 [00:00<00:00, 331.57it/s]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"['It would take approximately 1194.5 hours to drive to the Moon with an F1 racing car.']"
|
||||
]
|
||||
},
|
||||
"execution_count": 53,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Let's turn on the autologging with default configuration, so we can see the trace for the agent invocation.\n",
|
||||
"# Enable MLflow tracing\n",
|
||||
"mlflow.langchain.autolog()\n",
|
||||
"\n",
|
||||
"# Load the model back\n",
|
||||
"agent = mlflow.pyfunc.load_model(info.model_uri)\n",
|
||||
"\n",
|
||||
"# Invoke\n",
|
||||
"agent.predict(question)"
|
||||
"# Define a tool\n",
|
||||
"@tool\n",
|
||||
"def count_words(text: str) -> str:\n",
|
||||
" \"\"\"Counts the number of words in a text.\"\"\"\n",
|
||||
" word_count = len(text.split())\n",
|
||||
" return f\"This text contains {word_count} words.\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Create a LangGraph agent\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-4o\")\n",
|
||||
"tools = [count_words]\n",
|
||||
"graph = create_react_agent(llm, tools)\n",
|
||||
"\n",
|
||||
"# Run the agent\n",
|
||||
"result = graph.invoke(\n",
|
||||
" {\"messages\": [{\"role\": \"user\", \"content\": \"Write me a 71-word story about a cat.\"}]}\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "30bf6133",
|
||||
"id": "19c2f3ff",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Navigate to the **\"Traces\"** tab in the experiment and click the request ID link of the first row. The trace visualizes how the agent operate multiple tasks within the single prediction call:\n",
|
||||
"1. Determine what subtasks are required to answer the questions.\n",
|
||||
"2. Search for the speed of an F1 racing car.\n",
|
||||
"3. Search for the distance from Earth to Moon.\n",
|
||||
"4. Compute the division using LLM."
|
||||
"To view the trace, run `mlflow ui` in your terminal and navigate to the Traces tab in the MLflow UI."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "cbd10f34",
|
||||
"id": "48accc76",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Scenario 3. Using MLflow Callbacks\n",
|
||||
"## Resources\n",
|
||||
"\n",
|
||||
"**MLflow Callbacks** provide a semi-automated way to track your LangChain application in MLflow. There are two primary callbacks available:\n",
|
||||
"For more information on using MLflow with LangChain, please visit:\n",
|
||||
"\n",
|
||||
"1. **`MlflowLangchainTracer`:** Primarily used for generating traces, available in `mlflow >= 2.14.0`.\n",
|
||||
"2. **`MLflowCallbackHandler`:** Logs metrics and artifacts to the MLflow tracking server."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d013d309",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### MlflowLangchainTracer\n",
|
||||
"\n",
|
||||
"When the chain or agent is invoked with the `MlflowLangchainTracer` callback, MLflow will automatically generate a trace for the call stack and log it to the MLflow tracking server. The outcome is exactly same as `mlflow.langchain.autolog()`, but this is particularly useful when you want to only trace specific invocation. Autologging is applied to all invocation in the same notebook/script, on the other hand."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "46d48044",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from mlflow.langchain.langchain_tracer import MlflowLangchainTracer\n",
|
||||
"\n",
|
||||
"mlflow_tracer = MlflowLangchainTracer()\n",
|
||||
"\n",
|
||||
"# This call generates a trace\n",
|
||||
"chain.invoke(test_input, config={\"callbacks\": [mlflow_tracer]})\n",
|
||||
"\n",
|
||||
"# This call does not generate a trace\n",
|
||||
"chain.invoke(test_input)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "acb6692c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Where to Pass the Callback\n",
|
||||
" LangChain supports two ways of passing callback instances: (1) Request time callbacks - pass them to the `invoke` method or bind with `with_config()` (2) Constructor callbacks - set them in the chain constructor. When using the `MlflowLangchainTracer` as a callback, you **must use request time callbacks**. Setting it in the constructor instead will only apply the callback to the top-level object, preventing it from being propagated to child components, resulting in incomplete traces. For more information on this behavior, please refer to [Callbacks Documentation](https://python.langchain.com/docs/concepts/callbacks) for more details.\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"# OK\n",
|
||||
"chain.invoke(test_input, config={\"callbacks\": [mlflow_tracer]})\n",
|
||||
"chain.with_config(callbacks=[mlflow_tracer])\n",
|
||||
"# NG\n",
|
||||
"chain = TheNameOfSomeChain(callbacks=[mlflow_tracer])\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d6a60ba7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Supported Methods\n",
|
||||
"\n",
|
||||
"`MlflowLangchainTracer` supports the following invocation methods from the [Runnable Interfaces](https://python.langchain.com/v0.1/docs/expression_language/interface/).\n",
|
||||
"- Standard interfaces: `invoke`, `stream`, `batch`\n",
|
||||
"- Async interfaces: `astream`, `ainvoke`, `abatch`, `astream_log`, `astream_events`\n",
|
||||
"\n",
|
||||
"Other methods are not guaranteed to be fully compatible."
|
||||
]
|
||||
},
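For instance, a sketch of tracing an async invocation, assuming the `chain`, `test_input`, and `mlflow_tracer` objects defined earlier in this notebook:

```python
import asyncio


async def main():
    # The traced call logs a trace to the active MLflow experiment
    result = await chain.ainvoke(test_input, config={"callbacks": [mlflow_tracer]})
    print(result)


asyncio.run(main())
```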
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a72e8854",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### MlflowCallbackHandler\n",
|
||||
"\n",
|
||||
"`MlflowCallbackHandler` is a callback handler that resides in the LangChain Community code base.\n",
|
||||
"\n",
|
||||
"This callback can be passed for chain/agent invocation, but it must be explicitly finished by calling the `flush_tracker()` method.\n",
|
||||
"\n",
|
||||
"When a chain is invoked with the callback, it performs the following actions:\n",
|
||||
"\n",
|
||||
"1. Creates a new MLflow Run or retrieves an active one if available within the active MLflow Experiment.\n",
|
||||
"2. Logs metrics such as the number of LLM calls, token usage, and other relevant metrics. If the chain/agent includes LLM call and you have `spacy` library installed, it logs text complexity metrics such as `flesch_kincaid_grade`.\n",
|
||||
"3. Logs internal steps as a JSON file (this is a legacy version of traces).\n",
|
||||
"4. Logs chain input and output as a Pandas Dataframe.\n",
|
||||
"5. Calls the `flush_tracker()` method with a chain/agent instance, logging the chain/agent as an MLflow Model.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b579aae1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.callbacks import MlflowCallbackHandler\n",
|
||||
"\n",
|
||||
"mlflow_callback = MlflowCallbackHandler()\n",
|
||||
"\n",
|
||||
"chain.invoke(\"What is LangChain callback?\", config={\"callbacks\": [mlflow_callback]})\n",
|
||||
"\n",
|
||||
"mlflow_callback.flush_tracker()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "84924e35",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## References\n",
|
||||
"To learn more about the feature and visit tutorials and examples of using LangChain with MLflow, please refer to the [MLflow documentation for LangChain integration](https://mlflow.org/docs/latest/llms/langchain/index.html).\n",
|
||||
"\n",
|
||||
"`MLflow` also provides several [tutorials](https://mlflow.org/docs/latest/llms/langchain/index.html#getting-started-with-the-mlflow-langchain-flavor-tutorials-and-guides) and [examples](https://github.com/mlflow/mlflow/tree/master/examples/langchain) for the `LangChain` integration:\n",
|
||||
"- [Quick Start](https://mlflow.org/docs/latest/llms/langchain/notebooks/langchain-quickstart.html)\n",
|
||||
"- [RAG Tutorial](https://mlflow.org/docs/latest/llms/langchain/notebooks/langchain-retriever.html)\n",
|
||||
"- [Agent Example](https://github.com/mlflow/mlflow/blob/master/examples/langchain/simple_agent.py)"
|
||||
"- [MLflow LangChain Integration Documentation](https://mlflow.org/docs/latest/llms/langchain/index.html)\n",
|
||||
"- [MLflow Tracing Documentation](https://mlflow.org/docs/latest/llms/tracing/index.html)\n",
|
||||
"- [Logging LangChain and LangGraph Models](https://mlflow.org/docs/latest/llms/langchain/index.html#logging-models-from-code)\n",
|
||||
"- [Evaluating LangChain and LangGraph Models](https://mlflow.org/docs/latest/llms/langchain/index.html#how-can-i-evaluate-a-langgraph-agent)"
|
||||
]
|
||||
}
|
||||
],
|
||||
@ -604,9 +214,9 @@
|
||||
"provenance": []
|
||||
},
|
||||
"kernelspec": {
|
||||
"display_name": "tracing",
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "tracing"
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
@ -618,7 +228,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.2"
|
||||
"version": "3.12.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
17
docs/docs/integrations/providers/tavily.mdx
Normal file
@ -0,0 +1,17 @@
|
||||
# Tavily
|
||||
|
||||
[Tavily](https://tavily.com) is a search engine, specifically designed for AI agents.
|
||||
Tavily provides both a search and an extract API, so AI developers can effortlessly integrate their
|
||||
applications with real-time online information. Tavily’s primary mission is to provide factual
|
||||
and reliable information from trusted sources, enhancing the accuracy and reliability of AI
|
||||
generated content and reasoning.
|
||||
|
||||
## Installation and Setup
|
||||
|
||||
```bash
|
||||
pip install langchain-tavily
|
||||
```
|
||||
|
||||
## Tools
|
||||
|
||||
See details on the available tools: [tavily_search](/docs/integrations/tools/tavily_search) and [tavily_extract](/docs/integrations/tools/tavily_extract).
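As a quick, illustrative sketch of using the search tool (the query string is only an example):

```python
from langchain_tavily import TavilySearch

# Instantiate the search tool; see the tool pages above for all parameters
tool = TavilySearch(max_results=5)

# The tool takes a natural language "query" argument
results = tool.invoke({"query": "latest developments in AI agents"})
print(results)
```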
|
35
docs/docs/integrations/providers/zotero.ipynb
Normal file
@ -0,0 +1,35 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Zotero\n",
|
||||
"\n",
|
||||
"[Zotero](https://www.zotero.org/) is an open source reference management system intended for managing bibliographic data and related research materials. You can connect to your personal library, as well as shared group libraries, via the [API](https://www.zotero.org/support/dev/web_api/v3/start). This retriever implementation utilizes [PyZotero](https://github.com/urschrei/pyzotero) to access libraries. \n",
|
||||
"\n",
|
||||
"\n",
|
||||
"## Installation\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"pip install pyzotero\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"## Retriever\n",
|
||||
"\n",
|
||||
"See a [usage example](/docs/integrations/retrievers/zotero).\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"from langchain_zotero_retriever.retrievers import ZoteroRetriever\n",
|
||||
"```"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
260
docs/docs/integrations/retrievers/zotero.ipynb
Normal file
@ -0,0 +1,260 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"metadata": {
|
||||
"vscode": {
|
||||
"languageId": "raw"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Zotero\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e49f1e0d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# ZoteroRetriever\n",
|
||||
"\n",
|
||||
"This will help you getting started with the Zotero [retriever](/docs/concepts/retrievers). For detailed documentation of all ZoteroRetriever features and configurations head to the [Github page](https://github.com/TimBMK/langchain-zotero-retriever).\n",
|
||||
"\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"| Retriever | Source | Package |\n",
|
||||
"| :--- | :--- | :---: |\n",
|
||||
"[ZoteroRetriever](https://github.com/TimBMK/langchain-zotero-retriever) | [Zotero API](https://www.zotero.org/support/dev/web_api/v3/start) | langchain-community |\n",
|
||||
"\n",
|
||||
"## Setup\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "72ee0c4b-9764-423a-9dbf-95129e185210",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If you want to get automated tracing from individual queries, you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"This retriever lives in the `langchain-zotero-retriever` package. We also require the `pyzotero` dependency:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-zotero-retriever pyzotero"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"`ZoteroRetriever` parameters include:\n",
|
||||
"- `k`: Number of results to include (Default: 50)\n",
|
||||
"- `type`: Type of search to perform. \"Top\" retrieves top level Zotero library items, \"items\" returns any Zotero library items. (Default: top)\n",
|
||||
"- `get_fulltext`: Retrieves full texts if they are attached to the items in the library. If False, or no text is attached, returns an empty string as page_content. (Default: True)\n",
|
||||
"- `library_id`: ID of the Zotero library to search. Required to connect to a library.\n",
|
||||
"- `library_type`: Type of library to search. \"user\" for personal library, \"group\" for shared group libraries. (Default: user)\n",
|
||||
"- `api_key`: Zotero API key if not set as an environment variable. Optional, required to access non-public group libraries or your personal library. Fetched automatically if provided as ZOTERO_API_KEY environment variable.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "70cc8e65-2a02-408a-bbc6-8ef649057d82",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_zotero_retriever.retrievers import ZoteroRetriever\n",
|
||||
"\n",
|
||||
"retriever = ZoteroRetriever(\n",
|
||||
" k=10,\n",
|
||||
" library_id=\"2319375\", # a public group library that does not require an API key for access\n",
|
||||
" library_type=\"group\", # set this to \"user\" if you are using a personal library. Personal libraries require an API key\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5c5f2839-4020-424e-9fc9-07777eede442",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Usage\n",
|
||||
"\n",
|
||||
"Apart from the `query`, the retriever provides these additional search parameters:\n",
|
||||
"- `itemType`: Type of item to search for (e.g. \"book\" or \"journalArticle\")\n",
|
||||
"- `tag`: for searching over tags attached to library items (see search syntax for combining multiple tags)\n",
|
||||
"- `qmode`: Search mode to use. Changes what the query searches over. \"everything\" includes full-text content. \"titleCreatorYear\" to search over title, authors and year.\n",
|
||||
"- `since`: Return only objects modified after the specified library version. Defaults to return everything.\n",
|
||||
"\n",
|
||||
"For Search Syntax, see Zotero API Documentation: https://www.zotero.org/support/dev/web_api/v3/basics#search_syntax\n",
|
||||
"\n",
|
||||
"For the full API schema (including available itemTypes) see: https://github.com/zotero/zotero-schema"
|
||||
]
|
||||
},
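As an illustrative sketch combining these parameters (assuming the `retriever` instantiated above; the query and item type are only examples):

```python
# Search journal articles whose full-text content matches the query
docs = retriever.invoke(
    "surveillance capitalism",
    itemType="journalArticle",
    qmode="everything",
)
print(len(docs))
```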
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "51a60dbe-9f2e-4e04-bb62-23968f17164a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query = \"Zuboff\"\n",
|
||||
"\n",
|
||||
"retriever.invoke(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "add7bbb0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tags = [\n",
|
||||
" \"Surveillance\",\n",
|
||||
" \"Digital Capitalism\",\n",
|
||||
"] # note that providing tags as a list will result in a logical AND operation\n",
|
||||
"\n",
|
||||
"retriever.invoke(\"\", tag=tags)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Use within a chain\n",
|
||||
"\n",
|
||||
"Due to the way the Zotero API search operates, directly passing a user question to the ZoteroRetriever will often not return satisfactory results. For use in chains or agentic frameworks, it is recommended to turn the ZoteroRetriever into a [tool](https://python.langchain.com/docs/how_to/custom_tools/#creating-tools-from-functions). This way, the LLM can turn the user query into a more concise search query for the API. Furthermore, this allows the LLM to fill in additional search parameters, such as tag or item type."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "0b19a04d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import List, Optional, Union\n",
|
||||
"\n",
|
||||
"from langchain_core.output_parsers import PydanticToolsParser\n",
|
||||
"from langchain_core.tools import StructuredTool, tool\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def retrieve(\n",
|
||||
" query: str,\n",
|
||||
" itemType: Optional[str],\n",
|
||||
" tag: Optional[Union[str, List[str]]],\n",
|
||||
" qmode: str = \"everything\",\n",
|
||||
" since: Optional[int] = None,\n",
|
||||
"):\n",
|
||||
" retrieved_docs = retriever.invoke(\n",
|
||||
" query, itemType=itemType, tag=tag, qmode=qmode, since=since\n",
|
||||
" )\n",
|
||||
" serialized_docs = \"\\n\\n\".join(\n",
|
||||
" (\n",
|
||||
" f\"Metadata: { {key: doc.metadata[key] for key in doc.metadata if key != 'abstractNote'} }\\n\"\n",
|
||||
" f\"Abstract: {doc.metadata['abstractNote']}\\n\"\n",
|
||||
" )\n",
|
||||
" for doc in retrieved_docs\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" return serialized_docs, retrieved_docs\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"description = \"\"\"Search and return relevant documents from a Zotero library. The following search parameters can be used:\n",
|
||||
"\n",
|
||||
" Args:\n",
|
||||
" query: str: The search query to be used. Try to keep this specific and short, e.g. a specific topic or author name\n",
|
||||
" itemType: Optional. Type of item to search for (e.g. \"book\" or \"journalArticle\"). Multiple types can be passed as a string seperated by \"||\", e.g. \"book || journalArticle\". Defaults to all types.\n",
|
||||
" tag: Optional. For searching over tags attached to library items. If documents tagged with multiple tags are to be retrieved, pass them as a list. If documents with any of the tags are to be retrieved, pass them as a string separated by \"||\", e.g. \"tag1 || tag2\"\n",
|
||||
" qmode: Search mode to use. Changes what the query searches over. \"everything\" includes full-text content. \"titleCreatorYear\" to search over title, authors and year. Defaults to \"everything\".\n",
|
||||
" since: Return only objects modified after the specified library version. Defaults to return everything.\n",
|
||||
" \"\"\"\n",
|
||||
"\n",
|
||||
"retriever_tool = StructuredTool.from_function(\n",
|
||||
" func=retrieve,\n",
|
||||
" name=\"retrieve\",\n",
|
||||
" description=description,\n",
|
||||
" return_direct=True,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-4o-mini-2024-07-18\")\n",
|
||||
"\n",
|
||||
"llm_with_tools = llm.bind_tools([retrieve])\n",
|
||||
"\n",
|
||||
"q = \"What journal articles do I have on Surveillance in the zotero library?\"\n",
|
||||
"\n",
|
||||
"chain = llm_with_tools | PydanticToolsParser(tools=[retrieve])\n",
|
||||
"\n",
|
||||
"chain.invoke(q)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all ZoteroRetriever features and configurations head to the [Github page](https://github.com/TimBMK/langchain-zotero-retriever).\n",
|
||||
"\n",
|
||||
"For detailed documentation on the Zotero API, head to the [Zotero API reference](https://www.zotero.org/support/dev/web_api/v3/start)."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
@ -130,16 +130,11 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents import AgentType, initialize_agent\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"from langgraph.prebuilt import create_react_agent\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(temperature=0)\n",
|
||||
"agent = initialize_agent(\n",
|
||||
" tools=tools,\n",
|
||||
" llm=llm,\n",
|
||||
" verbose=True,\n",
|
||||
" agent=AgentType.OPENAI_FUNCTIONS,\n",
|
||||
")"
|
||||
"agent = create_react_agent(model=llm, tools=tools)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -199,8 +194,10 @@
|
||||
],
|
||||
"source": [
|
||||
"print(\n",
|
||||
" agent.run(\n",
|
||||
" f\"Create an app in the AINetwork Blockchain database with the name {appName}\"\n",
|
||||
" agent.invoke(\n",
|
||||
" {\n",
|
||||
" \"messages\": f\"Create an app in the AINetwork Blockchain database with the name {appName}\"\n",
|
||||
" }\n",
|
||||
" )\n",
|
||||
")"
|
||||
]
|
||||
|
348
docs/docs/integrations/tools/tavily_extract.ipynb
Normal file
File diff suppressed because one or more lines are too long
@ -18,29 +18,29 @@
|
||||
"## Overview\n",
|
||||
"\n",
|
||||
"### Integration details\n",
|
||||
"| Class | Package | Serializable | [JS support](https://js.langchain.com/docs/integrations/tools/tavily_search) | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: |\n",
|
||||
"| [TavilySearchResults](https://python.langchain.com/api_reference/community/tools/langchain_community.tools.tavily_search.tool.TavilySearchResults.html) | [langchain-community](https://python.langchain.com/api_reference/community/index.html) | ❌ | ✅ |  |\n",
|
||||
"| Class | Package | Serializable | [JS support](https://js.langchain.com/docs/integrations/tools/tavily_search) | Package latest |\n",
|
||||
"|:--------------------------------------------------------------|:---------------------------------------------------------------| :---: | :---: | :---: |\n",
|
||||
"| [TavilySearch](https://github.com/tavily-ai/langchain-tavily) | [langchain-tavily](https://pypi.org/project/langchain-tavily/) | ❌ | ❌ |  |\n",
|
||||
"\n",
|
||||
"### Tool features\n",
|
||||
"| [Returns artifact](/docs/how_to/tool_artifacts/) | Native async | Return data | Pricing |\n",
|
||||
"| :---: | :---: | :---: | :---: |\n",
|
||||
"| ✅ | ✅ | Title, URL, content, answer | 1,000 free searches / month | \n",
|
||||
"| [Returns artifact](/docs/how_to/tool_artifacts/) | Native async | Return data | Pricing |\n",
|
||||
"| :---: | :---: |:--------------------------------------------------------:| :---: |\n",
|
||||
"| ❌ | ✅ | title, URL, content snippet, raw_content, answer, images | 1,000 free searches / month |\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"The integration lives in the `langchain-community` package. We also need to install the `tavily-python` package."
|
||||
"The integration lives in the `langchain-tavily` package."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f85b4089",
|
||||
"id": "5536649f-e73d-42f2-8d57-80b7a790b7eb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU \"langchain-community>=0.2.11\" tavily-python"
|
||||
"%pip install -qU langchain-tavily"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -57,7 +57,12 @@
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "e0b178a2-8816-40ca-b57c-ccdd86dde9c9",
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2025-03-19T16:43:32.810957Z",
|
||||
"start_time": "2025-03-19T16:43:15.570Z"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
@ -67,25 +72,6 @@
|
||||
" os.environ[\"TAVILY_API_KEY\"] = getpass.getpass(\"Tavily API key:\\n\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "bc5ab717-fd27-4c59-b912-bdd099541478",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"It's also helpful (but not needed) to set up [LangSmith](https://smith.langchain.com/) for best-in-class observability:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "a6c2f136-6367-4f1f-825d-ae741e1bf281",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1c97218f-f366-479d-8bf7-fe9f2f6df73f",
|
||||
@ -93,29 +79,34 @@
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Here we show how to instantiate an instance of the Tavily search tools, with "
|
||||
"Here we show how to instantiate an instance of the Tavily search tools, with"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": 3,
|
||||
"id": "8b3ddfe9-ca79-494c-a7ab-1f56d9407a64",
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2025-03-19T16:44:04.570451Z",
|
||||
"start_time": "2025-03-19T16:44:04.561713Z"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.tools import TavilySearchResults\n",
|
||||
"from langchain_tavily import TavilySearch\n",
|
||||
"\n",
|
||||
"tool = TavilySearchResults(\n",
|
||||
"tool = TavilySearch(\n",
|
||||
" max_results=5,\n",
|
||||
" search_depth=\"advanced\",\n",
|
||||
" include_answer=True,\n",
|
||||
" include_raw_content=True,\n",
|
||||
" include_images=True,\n",
|
||||
" # include_domains=[...],\n",
|
||||
" # exclude_domains=[...],\n",
|
||||
" # name=\"...\", # overwrite default tool name\n",
|
||||
" # description=\"...\", # overwrite default tool description\n",
|
||||
" # args_schema=..., # overwrite default args_schema: BaseModel\n",
|
||||
" topic=\"general\",\n",
|
||||
" # include_answer=False,\n",
|
||||
" # include_raw_content=False,\n",
|
||||
" # include_images=False,\n",
|
||||
" # include_image_descriptions=False,\n",
|
||||
" # search_depth=\"basic\",\n",
|
||||
" # time_range=\"day\",\n",
|
||||
" # include_domains=None,\n",
|
||||
" # exclude_domains=None\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@ -128,31 +119,57 @@
|
||||
"\n",
|
||||
"### [Invoke directly with args](/docs/concepts/tools)\n",
|
||||
"\n",
|
||||
"The `TavilySearchResults` tool takes a single \"query\" argument, which should be a natural language query:"
|
||||
"The Tavily search tool accepts the following arguments during invocation:\n",
|
||||
"- `query` (required): A natural language search query\n",
|
||||
"- The following arguments can also be set during invokation : `include_images`, `search_depth` , `time_range`, `include_domains`, `exclude_domains`, `include_images`\n",
|
||||
"- For reliability and performance reasons, certain parameters that affect response size cannot be modified during invocation: `include_answer` and `include_raw_content`. These limitations prevent unexpected context window issues and ensure consistent results.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"NOTE: The optional arguments are available for agents to dynamically set, if you set a argument during instantiation and then invoke the tool with a different value, the tool will use the value you passed during invokation."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 4,
|
||||
"id": "65310a8b-eb0c-4d9e-a618-4f4abe2414fc",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'url': 'https://www.theguardian.com/sport/live/2023/jul/16/wimbledon-mens-singles-final-2023-carlos-alcaraz-v-novak-djokovic-live?page=with:block-64b3ff568f08df28470056bf',\n",
|
||||
" 'content': 'Carlos Alcaraz recovered from a set down to topple Djokovic 1-6, 7-6(6), 6-1, 3-6, 6-4 and win his first Wimbledon title in a battle for the ages'},\n",
|
||||
" {'url': 'https://www.nytimes.com/athletic/live-blogs/wimbledon-2024-live-updates-alcaraz-djokovic-mens-final-result/kJJdTKhOgkZo/',\n",
|
||||
" 'content': \"It was Djokovic's first straight-sets defeat at Wimbledon since the 2013 final, when he lost to Andy Murray. Below, The Athletic 's writers, Charlie Eccleshare and Matt Futterman, analyze the ...\"},\n",
|
||||
" {'url': 'https://www.foxsports.com.au/tennis/wimbledon/fk-you-stars-explosion-stuns-wimbledon-as-massive-final-locked-in/news-story/41cf7d28a12845cdab6be4150a22a170',\n",
|
||||
" 'content': 'The last time Djokovic and Wimbledon met was at the French Open in June when the Serb claimed victory in a third round tie which ended at 3:07 in the morning. On Friday, however, Djokovic was ...'},\n",
|
||||
" {'url': 'https://www.cnn.com/2024/07/09/sport/novak-djokovic-wimbledon-crowd-quarterfinals-spt-intl/index.html',\n",
|
||||
" 'content': 'Novak Djokovic produced another impressive performance at Wimbledon on Monday to cruise into the quarterfinals, but the 24-time grand slam champion was far from happy after his win.'},\n",
|
||||
" {'url': 'https://www.cnn.com/2024/07/05/sport/andy-murray-wimbledon-farewell-ceremony-spt-intl/index.html',\n",
|
||||
" 'content': \"It was an emotional night for three-time grand slam champion Andy Murray on Thursday, as the 37-year-old's Wimbledon farewell began with doubles defeat.. Murray will retire from the sport this ...\"}]"
|
||||
"{'query': 'What happened at the last wimbledon',\n",
|
||||
" 'follow_up_questions': None,\n",
|
||||
" 'answer': None,\n",
|
||||
" 'images': [],\n",
|
||||
" 'results': [{'title': \"Andy Murray pulls out of the men's singles draw at his last Wimbledon\",\n",
|
||||
" 'url': 'https://www.nbcnews.com/news/sports/andy-murray-wimbledon-tennis-singles-draw-rcna159912',\n",
|
||||
" 'content': \"NBC News Now LONDON — Andy Murray, one of the last decade's most successful male tennis players, has pulled out of the singles tournament at what is almost certain to be his last Wimbledon, his team confirmed Tuesday. Murray, 37, who has won the Wimbledon singles title twice and the U.S Open once, has been battling to be fit to play at the All England Club for weeks. “Unfortunately, despite working incredibly hard on his recovery since his operation just over a week ago, Andy has taken the very difficult decision not to play the singles this year,” his team said in a statement reported by Sky News. The news caps a glittering career on the ATP singles tour, which placed Murray at No. 1 in the world for 41 weeks.\",\n",
|
||||
" 'score': 0.67527276,\n",
|
||||
" 'raw_content': None},\n",
|
||||
" {'title': 'Andy Murray brought to tears by emotional ceremony as Wimbledon ...',\n",
|
||||
" 'url': 'https://edition.cnn.com/2024/07/05/sport/andy-murray-wimbledon-farewell-ceremony-spt-intl/index.html',\n",
|
||||
" 'content': 'Andy Murray brought to tears by emotional ceremony as Wimbledon farewell begins with doubles defeat | CNN Football Tennis Golf Motorsport US Sports Olympics Climbing Esports Hockey CNN10 About CNN Andy Murray became emotional when speaking on court following his Wimbledon defeat on Thursday. It was an emotional night for three-time grand slam champion Andy Murray on Thursday, as the 37-year-old’s Wimbledon farewell began with doubles defeat. Following a doubles defeat alongside his brother Jamie on Thursday, Murray was moved to tears after a short ceremony on Centre Court in which a montage of his career played out on big screens. Murray watches on as a video montage of his career highlights plays on the big screens at Wimbledon. CNN10 About CNN',\n",
|
||||
" 'score': 0.43482184,\n",
|
||||
" 'raw_content': None},\n",
|
||||
" {'title': 'Wimbledon - Latest News, Headlines and Entertainment from the BBC',\n",
|
||||
" 'url': 'https://www.bbc.co.uk/news/topics/c1kr68g26j9t',\n",
|
||||
" 'content': \"Wimbledon - Latest News, Headlines and Entertainment from the BBC BBC Homepage Search BBC Close menu BBC News BBC Verify World News TV Weather for Wimbledon London London Disabled people share experience of accessible homes London Man's pop-up urinal death may never be explained, family fears London London London London London London London Met PC jailed for assaulting man in hospital bed London London London Man jumped to his death in police station - inquest London Central London YMCA closes after failed injunction Kerr denies 'whiteness as insult' against police Man denies being getaway driver in £1m watch raid About the BBC Contact the BBC BBC emails for you The BBC is not responsible for the content of external sites.\",\n",
|
||||
" 'score': 0.3916624,\n",
|
||||
" 'raw_content': None},\n",
|
||||
" {'title': 'Wimbledon - latest news, breaking stories and comment - The Independent',\n",
|
||||
" 'url': 'https://www.independent.co.uk/topic/wimbledon',\n",
|
||||
" 'content': \"Independent Australian Open champion Jannik Sinner's style draws comparisons to Novak Djokovic Patten wins second grand slam doubles title after Australian Open epic Australian Open: Madison Keys can win her first Slam title and stop Aryna Sabalenka's threepeat Novak Djokovic hits back to beat Carlos Alcaraz in Australian Open thriller Australian Open 2025: Carlos Alcaraz and Jannik Sinner have a real rivalry atop men's tennis Australian Open 2025: Carlos Alcaraz and Jannik Sinner have a real rivalry atop men's tennis Australian Open 2025: Cases involving Jannik Sinner and Iga Swiatek make doping a top topic Australian Open 2025: There really isn't much time off in the offseason for tennis players Jd Sports Discount Code\",\n",
|
||||
" 'score': 0.3539422,\n",
|
||||
" 'raw_content': None},\n",
|
||||
" {'title': 'Novak Djokovic loses to Carlos Alcaraz Wimbledon final',\n",
|
||||
" 'url': 'https://www.wimbledon.com/en_GB/news/articles/2023-07-16/alcaraz_ends_the_djokovic_run.html',\n",
|
||||
" 'content': 'Password* By joining myWimbledon you are confirming you are happy to receive news and information from The All England Lawn Tennis Club regarding The Club, The Championships and The Grounds via email By joining myWimbledon you are confirming you are happy to receive news and information from The All England Lawn Tennis Club regarding The Club, The Championships and The Grounds via email Please enter your email address to update your password: We have sent details on how to update your password to the email address you provided. A verification email with a link to verify your account has been sent to you. Please enter the code sent to your email address below and click SUBMIT to complete the verification.',\n",
|
||||
" 'score': 0.23453853,\n",
|
||||
" 'raw_content': None}],\n",
|
||||
" 'response_time': 1.43}"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@ -181,7 +198,7 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[{\"url\": \"https://www.radiotimes.com/tv/sport/football/euro-2024-location/\", \"content\": \"Euro 2024 host cities. Germany have 10 host cities for Euro 2024, topped by the country's capital Berlin. Berlin. Cologne. Dortmund. Dusseldorf. Frankfurt. Gelsenkirchen. Hamburg.\"}, {\"url\": \"https://www.sportingnews.com/ca/soccer/news/list-euros-host-nations-uefa-european-championship-countries/85f8069d69c9f4\n"
|
||||
"{\"query\": \"euro 2024 host nation\", \"follow_up_questions\": null, \"answer\": null, \"images\": [], \"results\": [{\"title\": \"UEFA Euro 2024 - Wikipedia\", \"url\": \"https://en.wikipedia.org/wiki/UEFA_Euro_2024\", \"content\": \"Tournament details Host country Germany Dates 14 June – 14 July Teams 24 Venue(s) 10 (in 10 host cities) Final positions Champions Spain (4th title) Runners-up England Tournament statisti\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@ -199,69 +216,16 @@
|
||||
"print(tool_msg.content[:400])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
"id": "d8e27be0-1098-4688-8d8c-6e257aae8d56",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'query': str,\n",
|
||||
" 'follow_up_questions': NoneType,\n",
|
||||
" 'answer': str,\n",
|
||||
" 'images': list,\n",
|
||||
" 'results': list,\n",
|
||||
" 'response_time': float}"
|
||||
]
|
||||
},
|
||||
"execution_count": 20,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# The artifact is a dict with richer, raw results\n",
|
||||
"{k: type(v) for k, v in tool_msg.artifact.items()}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 21,
|
||||
"id": "237ca620-ac31-449a-826b-b4f2e265b194",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{\n",
|
||||
" \"query\": \"euro 2024 host nation\",\n",
|
||||
" \"follow_up_questions\": \"None\",\n",
|
||||
" \"answer\": \"Germany will be the host nation for Euro 2024, with the tournament scheduled to take place from June 14 to July 14. The matches will be held in 10 different cities across Germany, including Berlin, Co\",\n",
|
||||
" \"images\": \"['https://i.ytimg.com/vi/3hsX0vLatNw/maxresdefault.jpg', 'https://img.planetafobal.com/2021/10/sedes-uefa-euro-2024-alemania-fg.jpg', 'https://editorial.uefa.com/resources/0274-14fe4fafd0d4-413fc8a7b7\",\n",
|
||||
" \"results\": \"[{'title': 'Where is Euro 2024? Country, host cities and venues', 'url': 'https://www.radiotimes.com/tv/sport/football/euro-2024-location/', 'content': \\\"Euro 2024 host cities. Germany have 10 host cit\",\n",
|
||||
" \"response_time\": \"3.97\"\n",
|
||||
"}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import json\n",
|
||||
"\n",
|
||||
"# Abbreviate the results for demo purposes\n",
|
||||
"print(json.dumps({k: str(v)[:200] for k, v in tool_msg.artifact.items()}, indent=2))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "659f9fbd-6fcf-445f-aa8c-72d8e60154bd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"## Use within an agent\n",
|
||||
"\n",
|
||||
"We can use our tool in a chain by first binding it to a [tool-calling model](/docs/how_to/tool_calling/) and then calling it:\n",
|
||||
"We can use our tools directly with an agent executor by binding the tool to the agent. This gives the agent the ability to dynamically set the available arguments to the Tavily search tool.\n",
|
||||
"\n",
|
||||
"In the below example when we ask the agent to find \"What nation hosted the Euro 2024? Include only wikipedia sources.\" the agent will dynamically set the argments and invoke Tavily search tool : Invoking `tavily_search` with `{'query': 'Euro 2024 host nation', 'include_domains': ['wikipedia.org']`\n",
"\n",
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
"\n",
@@ -270,7 +234,18 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 10,
"id": "b14d41f2",
"metadata": {},
"outputs": [],
"source": [
"if not os.environ.get(\"OPENAI_API_KEY\"):\n",
"    os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OPENAI_API_KEY:\\n\")"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "af3123ad-7a02-40e5-b58e-7d56e23e5830",
"metadata": {},
"outputs": [],
@@ -284,61 +259,73 @@
"llm = init_chat_model(model=\"gpt-4o\", model_provider=\"openai\", temperature=0)"
]
},
{
"cell_type": "markdown",
"id": "1020a506-473b-4e6a-a563-7aaf92c4d183",
"metadata": {},
"source": [
"We will need to install langgraph:"
]
},
{
"cell_type": "code",
"execution_count": 23,
"execution_count": null,
"id": "53ddaebb-dcd0-4e84-89c2-e9a85085f16b",
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langgraph"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "fdbf35b5-3aaf-4947-9ec6-48c21533fb95",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"The last women's singles champion at Wimbledon was Markéta Vondroušová, who won the title in 2023.\", response_metadata={'token_usage': {'completion_tokens': 26, 'prompt_tokens': 802, 'total_tokens': 828}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_4e2b2da518', 'finish_reason': 'stop', 'logprobs': None}, id='run-2bfeec6e-8f04-477e-bf51-9500f18bd514-0', usage_metadata={'input_tokens': 802, 'output_tokens': 26, 'total_tokens': 828})"
]
},
"execution_count": 23,
"metadata": {},
"output_type": "execute_result"
"name": "stdout",
"output_type": "stream",
"text": [
"================================\u001b[1m Human Message \u001b[0m=================================\n",
"\n",
"What nation hosted the Euro 2024? Include only wikipedia sources.\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"Tool Calls:\n",
"  tavily_search (call_yxmR4K2uadsQ8LKoyi8JyoLD)\n",
" Call ID: call_yxmR4K2uadsQ8LKoyi8JyoLD\n",
"  Args:\n",
"    query: Euro 2024 host nation\n",
"    include_domains: ['wikipedia.org']\n",
"=================================\u001b[1m Tool Message \u001b[0m=================================\n",
"Name: tavily_search\n",
"\n",
"{\"query\": \"Euro 2024 host nation\", \"follow_up_questions\": null, \"answer\": null, \"images\": [], \"results\": [{\"title\": \"UEFA Euro 2024 - Wikipedia\", \"url\": \"https://en.wikipedia.org/wiki/UEFA_Euro_2024\", \"content\": \"Tournament details Host country Germany Dates 14 June – 14 July Teams 24 Venue(s) 10 (in 10 host cities) Final positions Champions Spain (4th title) Runners-up England Tournament statistics Matches played 51 Goals scored 117 (2.29 per match) Attendance 2,681,288 (52,574 per match) Top scorer(s) Harry Kane Georges Mikautadze Jamal Musiala Cody Gakpo Ivan Schranz Dani Olmo (3 goals each) Best player(s) Rodri Best young player Lamine Yamal ← 2020 2028 → The 2024 UEFA European Football Championship, commonly referred to as UEFA Euro 2024 (stylised as UEFA EURO 2024) or simply Euro 2024, was the 17th UEFA European Championship, the quadrennial international football championship organised by UEFA for the European men's national teams of their member associations. Germany hosted the tournament, which took place from 14 June to 14 July 2024. The tournament involved 24 teams, with Georgia making their European Championship debut. [4] Host nation Germany were eliminated by Spain in the quarter-finals; Spain went on to win the tournament for a record fourth time after defeating England 2–1 in the final.\", \"score\": 0.9104262, \"raw_content\": null}, {\"title\": \"UEFA Euro 2024 - Simple English Wikipedia, the free encyclopedia\", \"url\": \"https://simple.wikipedia.org/wiki/UEFA_Euro_2024\", \"content\": \"The 2024 UEFA European Football Championship, also known as UEFA Euro 2024 or simply Euro 2024, was the 17th edition of the UEFA European Championship. Germany was hosting the tournament. ... The UEFA Executive Committee voted for the host in a secret ballot, with only a simple majority (more than half of the valid votes) required to determine\", \"score\": 0.81418616, \"raw_content\": null}, {\"title\": \"Championnat d'Europe de football 2024 — Wikipédia\", \"url\": \"https://fr.wikipedia.org/wiki/Championnat_d'Europe_de_football_2024\", \"content\": \"Le Championnat d'Europe de l'UEFA de football 2024 est la 17 e édition du Championnat d'Europe de football, communément abrégé en Euro 2024, compétition organisée par l'UEFA et rassemblant les meilleures équipes nationales masculines européennes. L'Allemagne est désignée pays organisateur de la compétition le 27 septembre 2018. C'est la troisième fois que des matches du Championnat\", \"score\": 0.8055255, \"raw_content\": null}, {\"title\": \"UEFA Euro 2024 bids - Wikipedia\", \"url\": \"https://en.wikipedia.org/wiki/UEFA_Euro_2024_bids\", \"content\": \"The bidding process of UEFA Euro 2024 ended on 27 September 2018 in Nyon, Switzerland, when Germany was announced to be the host. [1] Two bids came before the deadline, 3 March 2017, which were Germany and Turkey as single bids. ... Press agencies revealed on 24 October 2013, that the European football governing body UEFA would have decided on\", \"score\": 0.7882741, \"raw_content\": null}, {\"title\": \"2024 UEFA European Under-19 Championship - Wikipedia\", \"url\": \"https://en.wikipedia.org/wiki/2024_UEFA_European_Under-19_Championship\", \"content\": \"The 2024 UEFA European Under-19 Championship (also known as UEFA Under-19 Euro 2024) was the 21st edition of the UEFA European Under-19 Championship (71st edition if the Under-18 and Junior eras are included), the annual international youth football championship organised by UEFA for the men's under-19 national teams of Europe. Northern Ireland hosted the tournament from 15 to 28 July 2024.\", \"score\": 0.7783298, \"raw_content\": null}], \"response_time\": 1.67}\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"\n",
"The nation that hosted Euro 2024 was Germany. You can find more information on the [Wikipedia page for UEFA Euro 2024](https://en.wikipedia.org/wiki/UEFA_Euro_2024).\n"
]
}
],
"source": [
"import datetime\n",
"from langchain_tavily import TavilySearch\n",
"from langgraph.prebuilt import create_react_agent\n",
"\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import RunnableConfig, chain\n",
"\n",
"today = datetime.datetime.today().strftime(\"%D\")\n",
"prompt = ChatPromptTemplate(\n",
"    [\n",
"        (\"system\", f\"You are a helpful assistant. The date today is {today}.\"),\n",
"        (\"human\", \"{user_input}\"),\n",
"        (\"placeholder\", \"{messages}\"),\n",
"    ]\n",
"# Initialize Tavily Search Tool\n",
"tavily_search_tool = TavilySearch(\n",
"    max_results=5,\n",
"    topic=\"general\",\n",
")\n",
"\n",
"# specifying tool_choice will force the model to call this tool.\n",
"llm_with_tools = llm.bind_tools([tool])\n",
"agent = create_react_agent(llm, [tavily_search_tool])\n",
"\n",
"llm_chain = prompt | llm_with_tools\n",
"user_input = \"What nation hosted the Euro 2024? Include only wikipedia sources.\"\n",
"\n",
"\n",
"@chain\n",
"def tool_chain(user_input: str, config: RunnableConfig):\n",
"    input_ = {\"user_input\": user_input}\n",
"    ai_msg = llm_chain.invoke(input_, config=config)\n",
"    tool_msgs = tool.batch(ai_msg.tool_calls, config=config)\n",
"    return llm_chain.invoke({**input_, \"messages\": [ai_msg, *tool_msgs]}, config=config)\n",
"\n",
"\n",
"tool_chain.invoke(\"who won the last womens singles wimbledon\")"
]
},
{
"cell_type": "markdown",
"id": "fb115693-e89e-40f2-a460-0d0d39a17963",
"metadata": {},
"source": [
"Here's the [LangSmith trace](https://smith.langchain.com/public/b43232c1-b243-4a7f-afeb-5fba8c84ba56/r) for this run."
"for step in agent.stream(\n",
"    {\"messages\": user_input},\n",
"    stream_mode=\"values\",\n",
"):\n",
"    step[\"messages\"][-1].pretty_print()"
]
},
{
@@ -348,15 +335,15 @@
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all TavilySearchResults features and configurations head to the API reference: https://python.langchain.com/api_reference/community/tools/langchain_community.tools.tavily_search.tool.TavilySearchResults.html"
"For detailed documentation of all Tavily Search API features and configurations head to the API reference: https://docs.tavily.com/documentation/api-reference/endpoint/search"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "poetry-venv-311",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "poetry-venv-311"
"name": "python3"
},
"language_info": {
"codemirror_mode": {
@@ -368,7 +355,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
"version": "3.10.4"
}
},
"nbformat": 4,

@@ -505,7 +505,7 @@
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all `AstraDBVectorStore` features and configurations head to the API reference:https://python.langchain.com/api_reference/astradb/vectorstores/langchain_astradb.vectorstores.AstraDBVectorStore.html"
"For detailed documentation of all `AstraDBVectorStore` features and configurations head to the API reference: https://python.langchain.com/api_reference/astradb/vectorstores/langchain_astradb.vectorstores.AstraDBVectorStore.html"
]
}
],

@@ -36,7 +36,7 @@
}
],
"source": [
"%pip install -qU langchain_milvus"
"pip install -qU langchain_milvus"
]
},
{

@@ -113,22 +113,19 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": null,
"id": "979a65bd-742f-4b0d-be1e-c0baae245ec6",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"from langchain_core.documents import Document\n",
"from langchain_postgres import PGVector\n",
"from langchain_postgres.vectorstores import PGVector\n",
"\n",
"# See docker command above to launch a postgres instance with pgvector enabled.\n",
"connection = \"postgresql+psycopg://langchain:langchain@localhost:6024/langchain\"  # Uses psycopg3!\n",
"collection_name = \"my_docs\"\n",
"\n",
"\n",
"vector_store = PGVector(\n",
"    embeddings=embeddings,\n",
"    collection_name=collection_name,\n",
@@ -169,6 +166,8 @@
}
],
"source": [
"from langchain_core.documents import Document\n",
"\n",
"docs = [\n",
"    Document(\n",
"        page_content=\"there are cats in the pond\",\n",

@@ -26,7 +26,7 @@
},
"outputs": [],
"source": [
"%pip install -qU langchain-pinecone pinecone-notebooks"
"pip install -qU langchain-pinecone pinecone-notebooks"
]
},
{
@@ -56,7 +56,6 @@
"source": [
"import getpass\n",
"import os\n",
"import time\n",
"\n",
"from pinecone import Pinecone, ServerlessSpec\n",
"\n",

@@ -32,7 +32,7 @@
},
"outputs": [],
"source": [
"%pip install -qU langchain-qdrant"
"pip install -qU langchain-qdrant"
]
},
{

@@ -1,8 +1,9 @@
"""Loader that uses Playwright to load a page, then uses unstructured to parse html."""

import logging
import os
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, AsyncIterator, Dict, Iterator, List, Optional
from typing import TYPE_CHECKING, AsyncIterator, Dict, Iterator, List, Optional, Union

from langchain_core.documents import Document

@@ -113,6 +114,8 @@ class PlaywrightURLLoader(BaseLoader):
        headless (bool): If True, the browser will run in headless mode.
        proxy (Optional[Dict[str, str]]): If set, the browser will access URLs
            through the specified proxy.
        browser_session (Optional[Union[str, os.PathLike[str]]]): Path to a file with
            browser session data that can be used to restore the browser session.

    Example:
        .. code-block:: python
@@ -137,6 +140,7 @@ class PlaywrightURLLoader(BaseLoader):
        remove_selectors: Optional[List[str]] = None,
        evaluator: Optional[PlaywrightEvaluator] = None,
        proxy: Optional[Dict[str, str]] = None,
        browser_session: Optional[Union[str, os.PathLike[str]]] = None,
    ):
        """Load a list of URLs using Playwright."""
        try:
@@ -151,6 +155,7 @@ class PlaywrightURLLoader(BaseLoader):
        self.continue_on_failure = continue_on_failure
        self.headless = headless
        self.proxy = proxy
        self.browser_session = browser_session

        if remove_selectors and evaluator:
            raise ValueError(
@@ -170,9 +175,20 @@ class PlaywrightURLLoader(BaseLoader):

        with sync_playwright() as p:
            browser = p.chromium.launch(headless=self.headless, proxy=self.proxy)
            context = None

            if self.browser_session:
                if os.path.exists(self.browser_session):
                    context = browser.new_context(storage_state=self.browser_session)
                else:
                    logger.warning(f"Session file not found: {self.browser_session}")

            if context is None:
                context = browser.new_context()

            for url in self.urls:
                try:
                    page = browser.new_page()
                    page = context.new_page()
                    response = page.goto(url)
                    if response is None:
                        raise ValueError(f"page.goto() returned None for url {url}")
@@ -180,6 +196,7 @@ class PlaywrightURLLoader(BaseLoader):
                    page.wait_for_load_state("load")

                    text = self.evaluator.evaluate(page, browser, response)
                    page.close()
                    metadata = {"source": url}
                    yield Document(page_content=text, metadata=metadata)
                except Exception as e:
@@ -211,9 +228,22 @@ class PlaywrightURLLoader(BaseLoader):

        async with async_playwright() as p:
            browser = await p.chromium.launch(headless=self.headless, proxy=self.proxy)
            context = None

            if self.browser_session:
                if os.path.exists(self.browser_session):
                    context = await browser.new_context(
                        storage_state=self.browser_session
                    )
                else:
                    logger.warning(f"Session file not found: {self.browser_session}")

            if context is None:
                context = await browser.new_context()

            for url in self.urls:
                try:
                    page = await browser.new_page()
                    page = await context.new_page()
                    response = await page.goto(url)
                    if response is None:
                        raise ValueError(f"page.goto() returned None for url {url}")
@@ -221,6 +251,7 @@ class PlaywrightURLLoader(BaseLoader):
                    await page.wait_for_load_state("load")

                    text = await self.evaluator.evaluate_async(page, browser, response)
                    await page.close()
                    metadata = {"source": url}
                    yield Document(page_content=text, metadata=metadata)
                except Exception as e:
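# Editor's note: a minimal usage sketch (not part of the diff) of the new
# ``browser_session`` argument added above. The storage-state file "state.json"
# is a hypothetical path, e.g. one previously saved via Playwright's
# ``context.storage_state(path="state.json")``; if it does not exist, the loader
# logs a warning and falls back to a fresh browser context.
#
#     from langchain_community.document_loaders import PlaywrightURLLoader
#
#     loader = PlaywrightURLLoader(
#         urls=["https://example.com"],
#         headless=True,
#         browser_session="state.json",  # restores cookies/localStorage from the file
#     )
#     docs = loader.load()  # every page is opened in the restored context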
@@ -12,6 +12,7 @@ from __future__ import annotations

import base64
import inspect
import json
import math
from collections.abc import Iterable, Sequence
from functools import partial
from typing import (
@@ -1424,3 +1425,80 @@ def _convert_to_openai_tool_calls(tool_calls: list[ToolCall]) -> list[dict]:
        }
        for tool_call in tool_calls
    ]


def count_tokens_approximately(
    messages: Iterable[MessageLikeRepresentation],
    *,
    chars_per_token: float = 4.0,
    extra_tokens_per_message: float = 3.0,
    count_name: bool = True,
) -> int:
    """Approximate the total number of tokens in messages.

    The token count includes stringified message content, role, and (optionally) name.
    - For AI messages, the token count also includes stringified tool calls.
    - For tool messages, the token count also includes the tool call ID.

    Args:
        messages: List of messages to count tokens for.
        chars_per_token: Number of characters per token to use for the approximation.
            Default is 4 (one token corresponds to ~4 chars for common English text).
            You can also specify float values for more fine-grained control.
            See more here: https://platform.openai.com/tokenizer
        extra_tokens_per_message: Number of extra tokens to add per message.
            Default is 3 (special tokens, including beginning/end of message).
            You can also specify float values for more fine-grained control.
            See more here:
            https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
        count_name: Whether to include message names in the count.
            Enabled by default.

    Returns:
        Approximate number of tokens in the messages.

    Note:
        This is a simple approximation that may not match the exact token count
        used by specific models. For accurate counts, use model-specific tokenizers.

    .. versionadded:: 0.3.46
    """
    token_count = 0.0
    for message in convert_to_messages(messages):
        message_chars = 0
        if isinstance(message.content, str):
            message_chars += len(message.content)

        # TODO: add support for approximate counting for image blocks
        else:
            content = repr(message.content)
            message_chars += len(content)

        if (
            isinstance(message, AIMessage)
            # exclude Anthropic format as tool calls are already included in the content
            and not isinstance(message.content, list)
            and message.tool_calls
        ):
            tool_calls_content = repr(message.tool_calls)
            message_chars += len(tool_calls_content)

        if isinstance(message, ToolMessage):
            message_chars += len(message.tool_call_id)

        role = _get_message_openai_role(message)
        message_chars += len(role)

        if message.name and count_name:
            message_chars += len(message.name)

        # NOTE: we're rounding up per message to ensure that
        # individual message token counts add up to the total count
        # for a list of messages
        token_count += math.ceil(message_chars / chars_per_token)

        # add extra tokens per message
        token_count += extra_tokens_per_message

    # round up one more time in case extra_tokens_per_message is a float
    return math.ceil(token_count)
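# Editor's note: a quick illustration (not part of the diff) of the heuristic
# above, assuming the defaults of 4 chars/token and 3 extra tokens per message,
# and that a HumanMessage maps to the 4-character OpenAI role "user":
#
#     from langchain_core.messages import HumanMessage
#     from langchain_core.messages.utils import count_tokens_approximately
#
#     msgs = [HumanMessage(content="Hello world")]
#     # (11 content chars + 4 role chars) / 4 -> ceil(3.75) = 4, plus 3 extra -> 7
#     assert count_tokens_approximately(msgs) == 7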
@@ -17,7 +17,6 @@ from collections.abc import (
    Sequence,
)
from concurrent.futures import FIRST_COMPLETED, wait
from contextvars import copy_context
from functools import wraps
from itertools import groupby, tee
from operator import itemgetter
@@ -47,7 +46,6 @@ from langchain_core.load.serializable import (
)
from langchain_core.runnables.config import (
    RunnableConfig,
    _set_config_context,
    acall_func_with_variable_args,
    call_func_with_variable_args,
    ensure_config,
@@ -58,6 +56,7 @@ from langchain_core.runnables.config import (
    merge_configs,
    patch_config,
    run_in_executor,
    set_config_context,
)
from langchain_core.runnables.graph import Graph
from langchain_core.runnables.utils import (
@@ -1920,19 +1919,18 @@ class Runnable(Generic[Input, Output], ABC):
        )
        try:
            child_config = patch_config(config, callbacks=run_manager.get_child())
            context = copy_context()
            context.run(_set_config_context, child_config)
            output = cast(
                Output,
                context.run(
                    call_func_with_variable_args,  # type: ignore[arg-type]
                    func,  # type: ignore[arg-type]
                    input,  # type: ignore[arg-type]
                    config,
                    run_manager,
                    **kwargs,
                ),
            )
            with set_config_context(child_config) as context:
                output = cast(
                    Output,
                    context.run(
                        call_func_with_variable_args,  # type: ignore[arg-type]
                        func,  # type: ignore[arg-type]
                        input,  # type: ignore[arg-type]
                        config,
                        run_manager,
                        **kwargs,
                    ),
                )
        except BaseException as e:
            run_manager.on_chain_error(e)
            raise
@@ -1970,15 +1968,14 @@ class Runnable(Generic[Input, Output], ABC):
        )
        try:
            child_config = patch_config(config, callbacks=run_manager.get_child())
            context = copy_context()
            context.run(_set_config_context, child_config)
            coro = acall_func_with_variable_args(
                func, input, config, run_manager, **kwargs
            )
            if asyncio_accepts_context():
                output: Output = await asyncio.create_task(coro, context=context)  # type: ignore
            else:
                output = await coro
            with set_config_context(child_config) as context:
                coro = acall_func_with_variable_args(
                    func, input, config, run_manager, **kwargs
                )
                if asyncio_accepts_context():
                    output: Output = await asyncio.create_task(coro, context=context)  # type: ignore
                else:
                    output = await coro
        except BaseException as e:
            await run_manager.on_chain_error(e)
            raise
@@ -2182,49 +2179,50 @@ class Runnable(Generic[Input, Output], ABC):
            kwargs["config"] = child_config
            if accepts_run_manager(transformer):
                kwargs["run_manager"] = run_manager
            context = copy_context()
            context.run(_set_config_context, child_config)
            iterator = context.run(transformer, input_for_transform, **kwargs)  # type: ignore[arg-type]
            if stream_handler := next(
                (
                    cast(_StreamingCallbackHandler, h)
                    for h in run_manager.handlers
                    # instance check OK here, it's a mixin
                    if isinstance(h, _StreamingCallbackHandler)  # type: ignore[misc]
                ),
                None,
            ):
                # populates streamed_output in astream_log() output if needed
                iterator = stream_handler.tap_output_iter(run_manager.run_id, iterator)
            try:
                while True:
                    chunk: Output = context.run(next, iterator)  # type: ignore
                    yield chunk
                    if final_output_supported:
                        if final_output is None:
            with set_config_context(child_config) as context:
                iterator = context.run(transformer, input_for_transform, **kwargs)  # type: ignore[arg-type]
                if stream_handler := next(
                    (
                        cast(_StreamingCallbackHandler, h)
                        for h in run_manager.handlers
                        # instance check OK here, it's a mixin
                        if isinstance(h, _StreamingCallbackHandler)  # type: ignore[misc]
                    ),
                    None,
                ):
                    # populates streamed_output in astream_log() output if needed
                    iterator = stream_handler.tap_output_iter(
                        run_manager.run_id, iterator
                    )
                try:
                    while True:
                        chunk: Output = context.run(next, iterator)  # type: ignore
                        yield chunk
                        if final_output_supported:
                            if final_output is None:
                                final_output = chunk
                            else:
                                try:
                                    final_output = final_output + chunk  # type: ignore
                                except TypeError:
                                    final_output = chunk
                                    final_output_supported = False
                        else:
                            final_output = chunk
                except (StopIteration, GeneratorExit):
                    pass
                for ichunk in input_for_tracing:
                    if final_input_supported:
                        if final_input is None:
                            final_input = ichunk
                        else:
                            try:
                                final_output = final_output + chunk  # type: ignore
                                final_input = final_input + ichunk  # type: ignore
                            except TypeError:
                                final_output = chunk
                                final_output_supported = False
                                final_input = ichunk
                                final_input_supported = False
                    else:
                        final_output = chunk
            except (StopIteration, GeneratorExit):
                pass
            for ichunk in input_for_tracing:
                if final_input_supported:
                    if final_input is None:
                        final_input = ichunk
                    else:
                        try:
                            final_input = final_input + ichunk  # type: ignore
                        except TypeError:
                            final_input = ichunk
                            final_input_supported = False
                else:
                    final_input = ichunk
        except BaseException as e:
            run_manager.on_chain_error(e, inputs=final_input)
            raise
@@ -2283,60 +2281,59 @@ class Runnable(Generic[Input, Output], ABC):
            kwargs["config"] = child_config
            if accepts_run_manager(transformer):
                kwargs["run_manager"] = run_manager
            context = copy_context()
            context.run(_set_config_context, child_config)
            iterator_ = context.run(transformer, input_for_transform, **kwargs)  # type: ignore[arg-type]
            with set_config_context(child_config) as context:
                iterator_ = context.run(transformer, input_for_transform, **kwargs)  # type: ignore[arg-type]

            if stream_handler := next(
                (
                    cast(_StreamingCallbackHandler, h)
                    for h in run_manager.handlers
                    # instance check OK here, it's a mixin
                    if isinstance(h, _StreamingCallbackHandler)  # type: ignore[misc]
                ),
                None,
            ):
                # populates streamed_output in astream_log() output if needed
                iterator = stream_handler.tap_output_aiter(
                    run_manager.run_id, iterator_
                )
            else:
                iterator = iterator_
            try:
                while True:
                    if asyncio_accepts_context():
                        chunk: Output = await asyncio.create_task(  # type: ignore[call-arg]
                            py_anext(iterator),  # type: ignore[arg-type]
                            context=context,
                        )
                    else:
                        chunk = cast(Output, await py_anext(iterator))
                    yield chunk
                    if final_output_supported:
                        if final_output is None:
                if stream_handler := next(
                    (
                        cast(_StreamingCallbackHandler, h)
                        for h in run_manager.handlers
                        # instance check OK here, it's a mixin
                        if isinstance(h, _StreamingCallbackHandler)  # type: ignore[misc]
                    ),
                    None,
                ):
                    # populates streamed_output in astream_log() output if needed
                    iterator = stream_handler.tap_output_aiter(
                        run_manager.run_id, iterator_
                    )
                else:
                    iterator = iterator_
                try:
                    while True:
                        if asyncio_accepts_context():
                            chunk: Output = await asyncio.create_task(  # type: ignore[call-arg]
                                py_anext(iterator),  # type: ignore[arg-type]
                                context=context,
                            )
                        else:
                            chunk = cast(Output, await py_anext(iterator))
                        yield chunk
                        if final_output_supported:
                            if final_output is None:
                                final_output = chunk
                            else:
                                try:
                                    final_output = final_output + chunk  # type: ignore
                                except TypeError:
                                    final_output = chunk
                                    final_output_supported = False
                        else:
                            final_output = chunk
                except StopAsyncIteration:
                    pass
                async for ichunk in input_for_tracing:
                    if final_input_supported:
                        if final_input is None:
                            final_input = ichunk
                        else:
                            try:
                                final_output = final_output + chunk  # type: ignore
                                final_input = final_input + ichunk  # type: ignore[operator]
                            except TypeError:
                                final_output = chunk
                                final_output_supported = False
                                final_input = ichunk
                                final_input_supported = False
                    else:
                        final_output = chunk
            except StopAsyncIteration:
                pass
            async for ichunk in input_for_tracing:
                if final_input_supported:
                    if final_input is None:
                        final_input = ichunk
                    else:
                        try:
                            final_input = final_input + ichunk  # type: ignore[operator]
                        except TypeError:
                            final_input = ichunk
                            final_input_supported = False
                else:
                    final_input = ichunk
        except BaseException as e:
            await run_manager.on_chain_error(e, inputs=final_input)
            raise
@@ -3021,12 +3018,11 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
                config = patch_config(
                    config, callbacks=run_manager.get_child(f"seq:step:{i + 1}")
                )
                context = copy_context()
                context.run(_set_config_context, config)
                if i == 0:
                    input = context.run(step.invoke, input, config, **kwargs)
                else:
                    input = context.run(step.invoke, input, config)
                with set_config_context(config) as context:
                    if i == 0:
                        input = context.run(step.invoke, input, config, **kwargs)
                    else:
                        input = context.run(step.invoke, input, config)
            # finish the root run
        except BaseException as e:
            run_manager.on_chain_error(e)
@@ -3061,17 +3057,16 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
                config = patch_config(
                    config, callbacks=run_manager.get_child(f"seq:step:{i + 1}")
                )
                context = copy_context()
                context.run(_set_config_context, config)
                if i == 0:
                    part = functools.partial(step.ainvoke, input, config, **kwargs)
                else:
                    part = functools.partial(step.ainvoke, input, config)
                if asyncio_accepts_context():
                    input = await asyncio.create_task(part(), context=context)  # type: ignore
                else:
                    input = await asyncio.create_task(part())
            # finish the root run
                with set_config_context(config) as context:
                    if i == 0:
                        part = functools.partial(step.ainvoke, input, config, **kwargs)
                    else:
                        part = functools.partial(step.ainvoke, input, config)
                    if asyncio_accepts_context():
                        input = await asyncio.create_task(part(), context=context)  # type: ignore
                    else:
                        input = await asyncio.create_task(part())
            # finish the root run
        except BaseException as e:
            await run_manager.on_chain_error(e)
            raise
@@ -3713,13 +3708,12 @@ class RunnableParallel(RunnableSerializable[Input, dict[str, Any]]):
                    # mark each step as a child run
                    callbacks=run_manager.get_child(f"map:key:{key}"),
                )
                context = copy_context()
                context.run(_set_config_context, child_config)
                return context.run(
                    step.invoke,
                    input,
                    child_config,
                )
                with set_config_context(child_config) as context:
                    return context.run(
                        step.invoke,
                        input,
                        child_config,
                    )

            # gather results from all steps
            try:
@@ -3764,14 +3758,13 @@ class RunnableParallel(RunnableSerializable[Input, dict[str, Any]]):
                    config,
                    callbacks=run_manager.get_child(f"map:key:{key}"),
                )
                context = copy_context()
                context.run(_set_config_context, child_config)
                if asyncio_accepts_context():
                    return await asyncio.create_task(  # type: ignore
                        step.ainvoke(input, child_config), context=context
                    )
                else:
                    return await asyncio.create_task(step.ainvoke(input, child_config))
                with set_config_context(child_config) as context:
                    if asyncio_accepts_context():
                        return await asyncio.create_task(  # type: ignore
                            step.ainvoke(input, child_config), context=context
                        )
                    else:
                        return await asyncio.create_task(step.ainvoke(input, child_config))

            # gather results from all steps
            try:

@@ -6,7 +6,7 @@ import warnings
from collections.abc import Awaitable, Generator, Iterable, Iterator, Sequence
from concurrent.futures import Executor, Future, ThreadPoolExecutor
from contextlib import contextmanager
from contextvars import ContextVar, copy_context
from contextvars import Context, ContextVar, Token, copy_context
from functools import partial
from typing import TYPE_CHECKING, Any, Callable, Optional, TypeVar, Union, cast

@@ -115,7 +118,10 @@ var_child_runnable_config: ContextVar[RunnableConfig | None] = ContextVar(
)


def _set_config_context(config: RunnableConfig) -> None:
# This is imported and used in langgraph, so don't break.
def _set_config_context(
    config: RunnableConfig,
) -> tuple[Token[Optional[RunnableConfig]], Optional[dict[str, Any]]]:
    """Set the child Runnable config + tracing context.

    Args:
@@ -123,7 +126,8 @@ def _set_config_context(config: RunnableConfig) -> None:
    """
    from langchain_core.tracers.langchain import LangChainTracer

    var_child_runnable_config.set(config)
    config_token = var_child_runnable_config.set(config)
    current_context = None
    if (
        (callbacks := config.get("callbacks"))
        and (
@@ -141,9 +145,39 @@ def _set_config_context(config: RunnableConfig) -> None:
        )
        and (run := tracer.run_map.get(str(parent_run_id)))
    ):
        from langsmith.run_helpers import _set_tracing_context
        from langsmith.run_helpers import _set_tracing_context, get_tracing_context

        current_context = get_tracing_context()
        _set_tracing_context({"parent": run})
    return config_token, current_context


@contextmanager
def set_config_context(config: RunnableConfig) -> Generator[Context, None, None]:
    """Set the child Runnable config + tracing context.

    Args:
        config (RunnableConfig): The config to set.
    """
    from langsmith.run_helpers import _set_tracing_context

    ctx = copy_context()
    config_token, _ = ctx.run(_set_config_context, config)
    try:
        yield ctx
    finally:
        ctx.run(var_child_runnable_config.reset, config_token)
        ctx.run(
            _set_tracing_context,
            {
                "parent": None,
                "project_name": None,
                "tags": None,
                "metadata": None,
                "enabled": None,
                "client": None,
            },
        )
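# Editor's note: a minimal sketch (not part of the diff) of the pattern this
# refactor enables. Call sites replace the manual copy_context() /
# _set_config_context() pair with the context manager, which also resets the
# config var and clears the tracing context on exit; `my_runnable` and `cfg`
# are hypothetical placeholders.
#
#     child_config = patch_config(cfg, callbacks=run_manager.get_child())
#     with set_config_context(child_config) as context:
#         output = context.run(my_runnable.invoke, some_input, child_config)
#     # on exit: var_child_runnable_config is reset via its token and the
#     # LangSmith tracing context is cleared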
def ensure_config(config: Optional[RunnableConfig] = None) -> RunnableConfig:

@@ -2,7 +2,6 @@ import asyncio
import inspect
import typing
from collections.abc import AsyncIterator, Iterator, Sequence
from contextvars import copy_context
from functools import wraps
from typing import (
    TYPE_CHECKING,
@@ -18,12 +17,12 @@ from typing_extensions import override
from langchain_core.runnables.base import Runnable, RunnableSerializable
from langchain_core.runnables.config import (
    RunnableConfig,
    _set_config_context,
    ensure_config,
    get_async_callback_manager_for_config,
    get_callback_manager_for_config,
    get_config_list,
    patch_config,
    set_config_context,
)
from langchain_core.runnables.utils import (
    ConfigurableFieldSpec,
@@ -174,14 +173,13 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
                if self.exception_key and last_error is not None:
                    input[self.exception_key] = last_error
                child_config = patch_config(config, callbacks=run_manager.get_child())
                context = copy_context()
                context.run(_set_config_context, child_config)
                output = context.run(
                    runnable.invoke,
                    input,
                    config,
                    **kwargs,
                )
                with set_config_context(child_config) as context:
                    output = context.run(
                        runnable.invoke,
                        input,
                        config,
                        **kwargs,
                    )
            except self.exceptions_to_handle as e:
                if first_error is None:
                    first_error = e
@@ -228,13 +226,12 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
                if self.exception_key and last_error is not None:
                    input[self.exception_key] = last_error
                child_config = patch_config(config, callbacks=run_manager.get_child())
                context = copy_context()
                context.run(_set_config_context, child_config)
                coro = runnable.ainvoke(input, child_config, **kwargs)
                if asyncio_accepts_context():
                    output = await asyncio.create_task(coro, context=context)  # type: ignore
                else:
                    output = await coro
                with set_config_context(child_config) as context:
                    coro = context.run(runnable.ainvoke, input, config, **kwargs)
                    if asyncio_accepts_context():
                        output = await asyncio.create_task(coro, context=context)  # type: ignore
                    else:
                        output = await coro
            except self.exceptions_to_handle as e:
                if first_error is None:
                    first_error = e
@@ -475,14 +472,13 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
                if self.exception_key and last_error is not None:
                    input[self.exception_key] = last_error
                child_config = patch_config(config, callbacks=run_manager.get_child())
                context = copy_context()
                context.run(_set_config_context, child_config)
                stream = context.run(
                    runnable.stream,
                    input,
                    **kwargs,
                )
                chunk: Output = context.run(next, stream)  # type: ignore
                with set_config_context(child_config) as context:
                    stream = context.run(
                        runnable.stream,
                        input,
                        **kwargs,
                    )
                    chunk: Output = context.run(next, stream)  # type: ignore
            except self.exceptions_to_handle as e:
                first_error = e if first_error is None else first_error
                last_error = e
@@ -539,20 +535,19 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
                if self.exception_key and last_error is not None:
                    input[self.exception_key] = last_error
                child_config = patch_config(config, callbacks=run_manager.get_child())
                context = copy_context()
                context.run(_set_config_context, child_config)
                stream = runnable.astream(
                    input,
                    child_config,
                    **kwargs,
                )
                if asyncio_accepts_context():
                    chunk: Output = await asyncio.create_task(  # type: ignore[call-arg]
                        py_anext(stream),  # type: ignore[arg-type]
                        context=context,
                with set_config_context(child_config) as context:
                    stream = runnable.astream(
                        input,
                        child_config,
                        **kwargs,
                    )
                else:
                    chunk = cast(Output, await py_anext(stream))
                    if asyncio_accepts_context():
                        chunk: Output = await asyncio.create_task(  # type: ignore[call-arg]
                            py_anext(stream),  # type: ignore[arg-type]
                            context=context,
                        )
                    else:
                        chunk = cast(Output, await py_anext(stream))
            except self.exceptions_to_handle as e:
                first_error = e if first_error is None else first_error
                last_error = e

@@ -6,7 +6,6 @@ import inspect
import json
import warnings
from abc import ABC, abstractmethod
from contextvars import copy_context
from inspect import signature
from typing import (
    TYPE_CHECKING,
@@ -52,7 +51,7 @@ from langchain_core.runnables import (
    patch_config,
    run_in_executor,
)
from langchain_core.runnables.config import _set_config_context
from langchain_core.runnables.config import set_config_context
from langchain_core.runnables.utils import asyncio_accepts_context
from langchain_core.utils.function_calling import (
    _parse_google_docstring,
@@ -722,14 +721,15 @@ class ChildTool(BaseTool):
        error_to_raise: Union[Exception, KeyboardInterrupt, None] = None
        try:
            child_config = patch_config(config, callbacks=run_manager.get_child())
            context = copy_context()
            context.run(_set_config_context, child_config)
            tool_args, tool_kwargs = self._to_args_and_kwargs(tool_input, tool_call_id)
            if signature(self._run).parameters.get("run_manager"):
                tool_kwargs = tool_kwargs | {"run_manager": run_manager}
            if config_param := _get_runnable_config_param(self._run):
                tool_kwargs = tool_kwargs | {config_param: config}
            response = context.run(self._run, *tool_args, **tool_kwargs)
            with set_config_context(child_config) as context:
                tool_args, tool_kwargs = self._to_args_and_kwargs(
                    tool_input, tool_call_id
                )
                if signature(self._run).parameters.get("run_manager"):
                    tool_kwargs = tool_kwargs | {"run_manager": run_manager}
                if config_param := _get_runnable_config_param(self._run):
                    tool_kwargs = tool_kwargs | {config_param: config}
                response = context.run(self._run, *tool_args, **tool_kwargs)
            if self.response_format == "content_and_artifact":
                if not isinstance(response, tuple) or len(response) != 2:
                    msg = (
@@ -832,21 +832,20 @@ class ChildTool(BaseTool):
        try:
            tool_args, tool_kwargs = self._to_args_and_kwargs(tool_input, tool_call_id)
            child_config = patch_config(config, callbacks=run_manager.get_child())
            context = copy_context()
            context.run(_set_config_context, child_config)
            func_to_check = (
                self._run if self.__class__._arun is BaseTool._arun else self._arun
            )
            if signature(func_to_check).parameters.get("run_manager"):
                tool_kwargs["run_manager"] = run_manager
            if config_param := _get_runnable_config_param(func_to_check):
                tool_kwargs[config_param] = config
            with set_config_context(child_config) as context:
                func_to_check = (
                    self._run if self.__class__._arun is BaseTool._arun else self._arun
                )
                if signature(func_to_check).parameters.get("run_manager"):
                    tool_kwargs["run_manager"] = run_manager
                if config_param := _get_runnable_config_param(func_to_check):
                    tool_kwargs[config_param] = config

            coro = context.run(self._arun, *tool_args, **tool_kwargs)
            if asyncio_accepts_context():
                response = await asyncio.create_task(coro, context=context)  # type: ignore
            else:
                response = await coro
                coro = self._arun(*tool_args, **tool_kwargs)
                if asyncio_accepts_context():
                    response = await asyncio.create_task(coro, context=context)  # type: ignore
                else:
                    response = await coro
            if self.response_format == "content_and_artifact":
                if not isinstance(response, tuple) or len(response) != 2:
                    msg = (

@@ -89,11 +89,11 @@ def tracing_v2_enabled(
        tags=tags,
        client=client,
    )
    token = tracing_v2_callback_var.set(cb)
    try:
        tracing_v2_callback_var.set(cb)
        yield cb
    finally:
        tracing_v2_callback_var.set(None)
        tracing_v2_callback_var.reset(token)


@contextmanager
@@ -109,9 +109,11 @@ def collect_runs() -> Generator[RunCollectorCallbackHandler, None, None]:
        run_id = runs_cb.traced_runs[0].id
    """
    cb = RunCollectorCallbackHandler()
    run_collector_var.set(cb)
    yield cb
    run_collector_var.set(None)
    token = run_collector_var.set(cb)
    try:
        yield cb
    finally:
        run_collector_var.reset(token)
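# Editor's note: both context managers above now follow the standard ContextVar
# token idiom; a generic sketch of that idiom (placeholder names, not the diff):
#
#     token = some_var.set(value)
#     try:
#         yield value
#     finally:
#         some_var.reset(token)  # restores the previous value, even when nested
#
# Using reset(token) instead of set(None) means a nested use of the same
# context manager restores the outer handler rather than clobbering it.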


def _get_trace_callbacks(

@@ -173,7 +173,13 @@ class LangChainTracer(BaseTracer):
        return chat_model_run

    def _persist_run(self, run: Run) -> None:
        self.latest_run = run
        # We want to free up more memory by avoiding keeping a reference to the
        # whole nested run tree.
        self.latest_run = Run.construct(
            **run.dict(exclude={"child_runs", "inputs", "outputs"}),
            inputs=run.inputs,
            outputs=run.outputs,
        )

    def get_run_url(self) -> str:
        """Get the LangSmith root run URL.

@@ -17,7 +17,7 @@ dependencies = [
    "pydantic<3.0.0,>=2.7.4; python_full_version >= \"3.12.4\"",
]
name = "langchain-core"
version = "0.3.45"
version = "0.3.46"
description = "Building applications with LLMs through composability"
readme = "README.md"

@@ -18,6 +18,7 @@ from langchain_core.messages import (
from langchain_core.messages.utils import (
    convert_to_messages,
    convert_to_openai_messages,
    count_tokens_approximately,
    filter_messages,
    merge_message_runs,
    trim_messages,
@@ -976,3 +977,130 @@ def test_convert_to_openai_messages_developer() -> None:
    ]
    result = convert_to_openai_messages(messages)
    assert result == [{"role": "developer", "content": "a"}] * 2


def test_count_tokens_approximately_empty_messages() -> None:
    # Test with empty message list
    assert count_tokens_approximately([]) == 0

    # Test with empty content
    messages = [HumanMessage(content="")]
    # 4 role chars -> 1 + 3 = 4 tokens
    assert count_tokens_approximately(messages) == 4


def test_count_tokens_approximately_with_names() -> None:
    messages = [
        # 5 chars + 4 role chars -> 3 + 3 = 6 tokens
        # (with name: extra 4 name chars, so total = 4 + 3 = 7 tokens)
        HumanMessage(content="Hello", name="user"),
        # 8 chars + 9 role chars -> 5 + 3 = 8 tokens
        # (with name: extra 9 name chars, so total = 7 + 3 = 10 tokens)
        AIMessage(content="Hi there", name="assistant"),
    ]
    # With names included (default)
    assert count_tokens_approximately(messages) == 17

    # Without names
    without_names = count_tokens_approximately(messages, count_name=False)
    assert without_names == 14


def test_count_tokens_approximately_openai_format() -> None:
    # same as test_count_tokens_approximately_with_names, but in OpenAI format
    messages = [
        {"role": "user", "content": "Hello", "name": "user"},
        {"role": "assistant", "content": "Hi there", "name": "assistant"},
    ]
    # With names included (default)
    assert count_tokens_approximately(messages) == 17

    # Without names
    without_names = count_tokens_approximately(messages, count_name=False)
    assert without_names == 14


def test_count_tokens_approximately_string_content() -> None:
    messages = [
        # 5 chars + 4 role chars -> 3 + 3 = 6 tokens
        HumanMessage(content="Hello"),
        # 8 chars + 9 role chars -> 5 + 3 = 8 tokens
        AIMessage(content="Hi there"),
        # 12 chars + 4 role chars -> 4 + 3 = 7 tokens
        HumanMessage(content="How are you?"),
    ]
    assert count_tokens_approximately(messages) == 21


def test_count_tokens_approximately_list_content() -> None:
    messages = [
        # '[{"foo": "bar"}]' -> 16 chars + 4 role chars -> 5 + 3 = 8 tokens
        HumanMessage(content=[{"foo": "bar"}]),
        # '[{"test": 123}]' -> 15 chars + 9 role chars -> 6 + 3 = 9 tokens
        AIMessage(content=[{"test": 123}]),
    ]
    assert count_tokens_approximately(messages) == 17


def test_count_tokens_approximately_tool_calls() -> None:
    tool_calls = [{"name": "test_tool", "args": {"foo": "bar"}, "id": "1"}]
    messages = [
        # tool calls json -> 79 chars + 9 role chars -> 22 + 3 = 25 tokens
        AIMessage(content="", tool_calls=tool_calls),
        # 15 chars + 4 role chars -> 5 + 3 = 8 tokens
        HumanMessage(content="Regular message"),
    ]
    assert count_tokens_approximately(messages) == 33
    # AI message w/ both content and tool calls
    # 94 chars + 9 role chars -> 26 + 3 = 29 tokens
    messages = [
        AIMessage(content="Regular message", tool_calls=tool_calls),
    ]
    assert count_tokens_approximately(messages) == 29


def test_count_tokens_approximately_custom_token_length() -> None:
    messages = [
        # 11 chars + 4 role chars -> (4 tokens of length 4 / 8 tokens of length 2) + 3
        HumanMessage(content="Hello world"),
        # 7 chars + 9 role chars -> (4 tokens of length 4 / 8 tokens of length 2) + 3
        AIMessage(content="Testing"),
    ]
    assert count_tokens_approximately(messages, chars_per_token=4) == 14
    assert count_tokens_approximately(messages, chars_per_token=2) == 22


def test_count_tokens_approximately_large_message_content() -> None:
    # Test with large content to ensure no issues
    large_text = "x" * 10000
    messages = [HumanMessage(content=large_text)]
    # 10,000 chars + 4 role chars -> 2501 + 3 = 2504 tokens
    assert count_tokens_approximately(messages) == 2504


def test_count_tokens_approximately_large_number_of_messages() -> None:
    # Test with a large number of messages to ensure no issues
|
||||
messages = [HumanMessage(content="x")] * 1_000
|
||||
# 1 chars + 4 role chars -> 2 + 3 = 5 tokens
|
||||
assert count_tokens_approximately(messages) == 5_000
|
||||
|
||||
|
||||
def test_count_tokens_approximately_mixed_content_types() -> None:
|
||||
# Test with a variety of content types in the same message list
|
||||
tool_calls = [{"name": "test_tool", "args": {"foo": "bar"}, "id": "1"}]
|
||||
messages = [
|
||||
# 13 chars + 6 role chars -> 5 + 3 = 8 tokens
|
||||
SystemMessage(content="System prompt"),
|
||||
# '[{"foo": "bar"}]' -> 16 chars + 4 role chars -> 5 + 3 = 8 tokens
|
||||
HumanMessage(content=[{"foo": "bar"}]),
|
||||
# tool calls json -> 79 chars + 9 role chars -> 22 + 3 = 25 tokens
|
||||
AIMessage(content="", tool_calls=tool_calls),
|
||||
# 13 chars + 4 role chars + 9 name chars + 1 tool call ID char ->
|
||||
# 7 + 3 = 10 tokens
|
||||
ToolMessage(content="Tool response", name="test_tool", tool_call_id="1"),
|
||||
]
|
||||
token_count = count_tokens_approximately(messages)
|
||||
assert token_count == 51
|
||||
|
||||
# Ensure that count is consistent if we do one message at a time
|
||||
assert sum(count_tokens_approximately([m]) for m in messages) == token_count
|
||||
|
@ -445,11 +445,12 @@ def test_tree_is_constructed(parent_type: Literal["ls", "lc"]) -> None:
|
||||
metadata={"some_foo": "some_bar"},
|
||||
tags=["afoo"],
|
||||
):
|
||||
if parent_type == "ls":
|
||||
collected: dict[str, RunTree] = {} # noqa
|
||||
collected: dict[str, RunTree] = {} # noqa
|
||||
|
||||
def collect_run(run: RunTree) -> None:
|
||||
collected[str(run.id)] = run
|
||||
def collect_run(run: RunTree) -> None:
|
||||
collected[str(run.id)] = run
|
||||
|
||||
if parent_type == "ls":
|
||||
|
||||
@traceable
|
||||
def parent() -> str:
|
||||
@ -459,7 +460,6 @@ def test_tree_is_constructed(parent_type: Literal["ls", "lc"]) -> None:
|
||||
parent(langsmith_extra={"on_end": collect_run, "run_id": rid}) == "foo"
|
||||
)
|
||||
assert collected
|
||||
run = collected.get(str(rid))
|
||||
|
||||
else:
|
||||
|
||||
@ -468,8 +468,10 @@ def test_tree_is_constructed(parent_type: Literal["ls", "lc"]) -> None:
|
||||
return child.invoke("foo")
|
||||
|
||||
tracer = LangChainTracer()
|
||||
tracer._persist_run = collect_run # type: ignore
|
||||
|
||||
assert parent.invoke(..., {"run_id": rid, "callbacks": [tracer]}) == "foo" # type: ignore
|
||||
run = tracer.latest_run
|
||||
run = collected.get(str(rid))
|
||||
|
||||
assert run is not None
|
||||
assert run.name == "parent"
|
||||
|
@ -1,4 +1,5 @@
version = 1
revision = 1
requires-python = ">=3.9, <4.0"
resolution-markers = [
    "python_full_version >= '3.12.4'",
@ -935,7 +936,7 @@ wheels = [

[[package]]
name = "langchain-core"
version = "0.3.45"
version = "0.3.46"
source = { editable = "." }
dependencies = [
    { name = "jsonpatch" },
@ -1062,7 +1063,7 @@ typing = [

[[package]]
name = "langchain-text-splitters"
version = "0.3.6"
version = "0.3.7"
source = { directory = "../text-splitters" }
dependencies = [
    { name = "langchain-core" },

@ -524,3 +524,11 @@ packages:
  path: .
  repo: dell/powerscale-rag-connector
  provider_page: dell
- name: langchain-tavily
  path: .
  repo: tavily-ai/langchain-tavily
- name: langchain-zotero-retriever
  path: .
  repo: TimBMK/langchain-zotero-retriever
  name_title: Zotero
  provider_page: zotero

@ -125,13 +125,17 @@ def _parse_arguments_from_tool_call(
    if "function" not in raw_tool_call:
        return None
    arguments = raw_tool_call["function"]["arguments"]
    parsed_arguments = {}
    parsed_arguments: dict = {}
    if isinstance(arguments, dict):
        for key, value in arguments.items():
            if isinstance(value, str):
                parsed_arguments[key] = _parse_json_string(
                parsed_value = _parse_json_string(
                    value, skip=True, raw_tool_call=raw_tool_call
                )
                if isinstance(parsed_value, (dict, list)):
                    parsed_arguments[key] = parsed_value
                else:
                    parsed_arguments[key] = value
            else:
                parsed_arguments[key] = value
    else:
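
The net effect of this hunk: a string argument is only replaced by its JSON-parsed value when that value is a dict or list; scalar-looking strings are kept as strings. A quick illustration of why that matters (hypothetical values; stdlib `json.loads` stands in for the `_parse_json_string` helper):

```python
import json

raw = "12345678901234567890123456"  # a long numeric ID the model sent as a string
parsed = json.loads(raw)
assert isinstance(parsed, int)  # parsing silently changes the type

# With the fix, only dict/list results replace the original string:
kept = parsed if isinstance(parsed, (dict, list)) else raw
assert kept == raw
```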
@ -1,10 +1,10 @@
"""Test chat model integration."""

import json
from typing import Dict, Type

from langchain_tests.unit_tests import ChatModelUnitTests

from langchain_ollama.chat_models import ChatOllama
from langchain_ollama.chat_models import ChatOllama, _parse_arguments_from_tool_call


class TestChatOllama(ChatModelUnitTests):
@ -15,3 +15,11 @@ class TestChatOllama(ChatModelUnitTests):
    @property
    def chat_model_params(self) -> Dict:
        return {"model": "llama3-groq-tool-use"}


def test__parse_arguments_from_tool_call() -> None:
    raw_response = '{"model":"sample-model","message":{"role":"assistant","content":"","tool_calls":[{"function":{"name":"get_profile_details","arguments":{"arg_1":"12345678901234567890123456"}}}]},"done":false}'  # noqa: E501
    raw_tool_calls = json.loads(raw_response)["message"]["tool_calls"]
    response = _parse_arguments_from_tool_call(raw_tool_calls[0])
    assert response is not None
    assert isinstance(response["arg_1"], str)

@ -1807,7 +1807,7 @@ class ChatModelIntegrationTests(ChatModelTests):
        images. Otherwise, set the ``supports_image_inputs`` property to False.
        """
        if not self.supports_image_inputs:
            return
            pytest.skip("Model does not support image message.")
        image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
        image_data = base64.b64encode(httpx.get(image_url).content).decode("utf-8")
        message = HumanMessage(
@ -1863,7 +1863,7 @@ class ChatModelIntegrationTests(ChatModelTests):
        False.
        """
        if not self.supports_image_tool_message:
            return
            pytest.skip("Model does not support image tool message.")
        image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
        image_data = base64.b64encode(httpx.get(image_url).content).decode("utf-8")
        messages = [
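
With `return` swapped for `pytest.skip(...)`, an unsupported capability now shows up in the test report as an explicit skip rather than a silent pass. A subclass opts out in the usual way (sketch; `MyChatModel` and its import are placeholders):

```python
from typing import Type

from langchain_core.language_models import BaseChatModel
from langchain_tests.integration_tests import ChatModelIntegrationTests

from my_pkg import MyChatModel  # hypothetical model under test


class TestMyChatModel(ChatModelIntegrationTests):
    @property
    def chat_model_class(self) -> Type[BaseChatModel]:
        return MyChatModel

    @property
    def supports_image_inputs(self) -> bool:
        # Image tests now report "SKIPPED: Model does not support image message."
        return False
```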