Mirror of https://github.com/hwchase17/langchain.git, synced 2026-02-07 01:30:24 +00:00

Compare commits: 2 commits (cc/many_to ... wfh/parent)

| Author | SHA1 | Date |
|---|---|---|
|  | f3168d4d70 |  |
|  | 6d59f2e069 |  |
.github/scripts/check_prerelease_dependencies.py (vendored), 35 lines

@@ -1,35 +0,0 @@
import sys
import tomllib

if __name__ == "__main__":
    # Get the TOML file path from the command line argument
    toml_file = sys.argv[1]

    # read toml file
    with open(toml_file, "rb") as file:
        toml_data = tomllib.load(file)

    # see if we're releasing an rc
    version = toml_data["tool"]["poetry"]["version"]
    releasing_rc = "rc" in version

    # if not, iterate through dependencies and make sure none allow prereleases
    if not releasing_rc:
        dependencies = toml_data["tool"]["poetry"]["dependencies"]
        for lib in dependencies:
            dep_version = dependencies[lib]
            dep_version_string = (
                dep_version["version"] if isinstance(dep_version, dict) else dep_version
            )

            if "rc" in dep_version_string:
                raise ValueError(
                    f"Dependency {lib} has a prerelease version. Please remove this."
                )

            if isinstance(dep_version, dict) and dep_version.get(
                "allow-prereleases", False
            ):
                raise ValueError(
                    f"Dependency {lib} has allow-prereleases set to true. Please remove this."
                )
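To make the check concrete, here is a small, hypothetical illustration of the kind of dependency entry this script rejects (the dict mirrors what `tomllib.load` returns for the Poetry sections; the package name and versions are invented):

```python
# Hypothetical parsed pyproject.toml data; "somelib" and its pins are invented.
toml_data = {
    "tool": {
        "poetry": {
            "version": "0.2.0",  # not an rc release, so dependencies are checked
            "dependencies": {
                "somelib": {"version": "1.0.0rc1", "allow-prereleases": True},
            },
        }
    }
}

for lib, dep in toml_data["tool"]["poetry"]["dependencies"].items():
    dep_version = dep["version"] if isinstance(dep, dict) else dep
    if "rc" in dep_version or (isinstance(dep, dict) and dep.get("allow-prereleases", False)):
        print(f"{lib} would fail the prerelease check above")
```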
.github/scripts/get_min_versions.py (vendored), 19 lines

@@ -1,11 +1,6 @@
import sys

if sys.version_info >= (3, 11):
    import tomllib
else:
    # for python 3.10 and below, which doesnt have stdlib tomllib
    import tomli as tomllib

import tomllib
from packaging.version import parse as parse_version
import re

@@ -17,8 +12,6 @@ MIN_VERSION_LIBS = [
    "SQLAlchemy",
]

SKIP_IF_PULL_REQUEST = ["langchain-core"]


def get_min_version(version: str) -> str:
    # base regex for x.x.x with cases for rc/post/etc
@@ -45,7 +38,7 @@ def get_min_version(version: str) -> str:
    raise ValueError(f"Unrecognized version format: {version}")


def get_min_version_from_toml(toml_path: str, versions_for: str):
def get_min_version_from_toml(toml_path: str):
    # Parse the TOML file
    with open(toml_path, "rb") as file:
        toml_data = tomllib.load(file)
@@ -58,10 +51,6 @@ def get_min_version_from_toml(toml_path: str, versions_for: str):

    # Iterate over the libs in MIN_VERSION_LIBS
    for lib in MIN_VERSION_LIBS:
        if versions_for == "pull_request" and lib in SKIP_IF_PULL_REQUEST:
            # some libs only get checked on release because of simultaneous
            # changes
            continue
        # Check if the lib is present in the dependencies
        if lib in dependencies:
            # Get the version string
@@ -82,10 +71,8 @@ def get_min_version_from_toml(toml_path: str, versions_for: str):
if __name__ == "__main__":
    # Get the TOML file path from the command line argument
    toml_file = sys.argv[1]
    versions_for = sys.argv[2]
    assert versions_for in ["release", "pull_request"]

    # Call the function to get the minimum versions
    min_versions = get_min_version_from_toml(toml_file, versions_for)
    min_versions = get_min_version_from_toml(toml_file)

    print(" ".join([f"{lib}=={version}" for lib, version in min_versions.items()]))
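As a rough sketch of the minimum-version extraction hinted at by the comment above (not the repository's exact regex; it only covers caret, tilde, range, and plain pins):

```python
import re

from packaging.version import parse as parse_version


def min_version_sketch(spec: str) -> str:
    # Pull the first x.y.z-style version (optionally with rc/post/dev suffix) out of the specifier.
    match = re.search(r"\d+(?:\.\d+){0,2}(?:(?:a|b|rc|\.post|\.dev)\d+)?", spec)
    if match is None:
        raise ValueError(f"Unrecognized version format: {spec}")
    return str(parse_version(match.group(0)))


print(min_version_sketch("^0.2.15"))   # -> 0.2.15
print(min_version_sketch(">=1.4,<3"))  # -> 1.4
```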
.github/workflows/_release.yml (vendored), 9 lines

@@ -189,7 +189,7 @@ jobs:
            --extra-index-url https://test.pypi.org/simple/ \
            "$PKG_NAME==$VERSION" || \
          ( \
            sleep 15 && \
            sleep 5 && \
            poetry run pip install \
              --extra-index-url https://test.pypi.org/simple/ \
              "$PKG_NAME==$VERSION" \
@@ -221,17 +221,12 @@
        run: make tests
        working-directory: ${{ inputs.working-directory }}

      - name: Check for prerelease versions
        working-directory: ${{ inputs.working-directory }}
        run: |
          poetry run python $GITHUB_WORKSPACE/.github/scripts/check_prerelease_dependencies.py pyproject.toml

      - name: Get minimum versions
        working-directory: ${{ inputs.working-directory }}
        id: min-version
        run: |
          poetry run pip install packaging
          min_versions="$(poetry run python $GITHUB_WORKSPACE/.github/scripts/get_min_versions.py pyproject.toml release)"
          min_versions="$(poetry run python $GITHUB_WORKSPACE/.github/scripts/get_min_versions.py pyproject.toml)"
          echo "min-versions=$min_versions" >> "$GITHUB_OUTPUT"
          echo "min-versions=$min_versions"
.github/workflows/_test.yml (vendored), 19 lines

@@ -65,22 +65,3 @@ jobs:
          # grep will exit non-zero if the target message isn't found,
          # and `set -e` above will cause the step to fail.
          echo "$STATUS" | grep 'nothing to commit, working tree clean'

      - name: Get minimum versions
        working-directory: ${{ inputs.working-directory }}
        id: min-version
        run: |
          poetry run pip install packaging tomli
          min_versions="$(poetry run python $GITHUB_WORKSPACE/.github/scripts/get_min_versions.py pyproject.toml pull_request)"
          echo "min-versions=$min_versions" >> "$GITHUB_OUTPUT"
          echo "min-versions=$min_versions"

      # Temporarily disabled until we can get the minimum versions working
      # - name: Run unit tests with minimum dependency versions
      #   if: ${{ steps.min-version.outputs.min-versions != '' }}
      #   env:
      #     MIN_VERSIONS: ${{ steps.min-version.outputs.min-versions }}
      #   run: |
      #     poetry run pip install --force-reinstall $MIN_VERSIONS --editable .
      #     make tests
      #   working-directory: ${{ inputs.working-directory }}
@@ -64,7 +64,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install -U langchain openai langchain-chroma langchain-experimental # (newest versions required for multi-modal)"
|
||||
"! pip install -U langchain openai chromadb langchain-experimental # (newest versions required for multi-modal)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -355,7 +355,7 @@
|
||||
"\n",
|
||||
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
|
||||
"from langchain.storage import InMemoryStore\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
|
||||
@@ -37,7 +37,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -U --quiet langchain langchain-chroma langchain-community openai langchain-experimental\n",
|
||||
"%pip install -U --quiet langchain langchain_community openai chromadb langchain-experimental\n",
|
||||
"%pip install --quiet \"unstructured[all-docs]\" pypdf pillow pydantic lxml pillow matplotlib chromadb tiktoken"
|
||||
]
|
||||
},
|
||||
@@ -344,8 +344,8 @@
|
||||
"\n",
|
||||
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
|
||||
"from langchain.storage import InMemoryStore\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.embeddings import VertexAIEmbeddings\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"\n",
|
||||
"\n",
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pip install -U langchain umap-learn scikit-learn langchain_community tiktoken langchain-openai langchainhub langchain-chroma langchain-anthropic"
|
||||
"pip install -U langchain umap-learn scikit-learn langchain_community tiktoken langchain-openai langchainhub chromadb langchain-anthropic"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -645,7 +645,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"\n",
|
||||
"# Initialize all_texts with leaf_texts\n",
|
||||
"all_texts = leaf_texts.copy()\n",
|
||||
|
||||
@@ -58,5 +58,4 @@ Notebook | Description
|
||||
[two_player_dnd.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/two_player_dnd.ipynb) | Simulate a two-player dungeons & dragons game, where a dialogue simulator class is used to coordinate the dialogue between the protagonist and the dungeon master.
|
||||
[wikibase_agent.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/wikibase_agent.ipynb) | Create a simple wikibase agent that utilizes sparql generation, with testing done on http://wikidata.org.
|
||||
[oracleai_demo.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/oracleai_demo.ipynb) | This guide outlines how to utilize Oracle AI Vector Search alongside Langchain for an end-to-end RAG pipeline, providing step-by-step examples. The process includes loading documents from various sources using OracleDocLoader, summarizing them either within or outside the database with OracleSummary, and generating embeddings similarly through OracleEmbeddings. It also covers chunking documents according to specific requirements using Advanced Oracle Capabilities from OracleTextSplitter, and finally, storing and indexing these documents in a Vector Store for querying with OracleVS.
|
||||
[rag-locally-on-intel-cpu.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/rag-locally-on-intel-cpu.ipynb) | Perform Retrieval-Augmented-Generation (RAG) on locally downloaded open-source models using langchain and open source tools and execute it on Intel Xeon CPU. We showed an example of how to apply RAG on Llama 2 model and enable it to answer the queries related to Intel Q1 2024 earnings release.
|
||||
[visual_RAG_vdms.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/visual_RAG_vdms.ipynb) | Performs Visual Retrieval-Augmented-Generation (RAG) using videos and scene descriptions generated by open source models.
|
||||
[rag-locally-on-intel-cpu.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/rag-locally-on-intel-cpu.ipynb) | Perform Retrieval-Augmented-Generation (RAG) on locally downloaded open-source models using langchain and open source tools and execute it on Intel Xeon CPU. We showed an example of how to apply RAG on Llama 2 model and enable it to answer the queries related to Intel Q1 2024 earnings release.
|
||||
@@ -39,7 +39,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install langchain langchain-chroma unstructured[all-docs] pydantic lxml langchainhub"
|
||||
"! pip install langchain unstructured[all-docs] pydantic lxml langchainhub"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -320,7 +320,7 @@
|
||||
"\n",
|
||||
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
|
||||
"from langchain.storage import InMemoryStore\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
|
||||
@@ -59,7 +59,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install langchain langchain-chroma unstructured[all-docs] pydantic lxml"
|
||||
"! pip install langchain unstructured[all-docs] pydantic lxml"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -375,7 +375,7 @@
|
||||
"\n",
|
||||
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
|
||||
"from langchain.storage import InMemoryStore\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
|
||||
@@ -59,7 +59,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install langchain langchain-chroma unstructured[all-docs] pydantic lxml"
|
||||
"! pip install langchain unstructured[all-docs] pydantic lxml"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -378,8 +378,8 @@
|
||||
"\n",
|
||||
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
|
||||
"from langchain.storage import InMemoryStore\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.embeddings import GPT4AllEmbeddings\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"\n",
|
||||
"# The vectorstore to use to index the child chunks\n",
|
||||
|
||||
@@ -19,7 +19,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install -U langchain openai langchain_chroma langchain-experimental # (newest versions required for multi-modal)"
|
||||
"! pip install -U langchain openai chromadb langchain-experimental # (newest versions required for multi-modal)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -132,7 +132,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"baseline = Chroma.from_texts(\n",
|
||||
|
||||
@@ -28,7 +28,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import RetrievalQA\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_openai import OpenAI, OpenAIEmbeddings\n",
|
||||
"from langchain_text_splitters import CharacterTextSplitter\n",
|
||||
"\n",
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install -qU langchain-airbyte langchain_chroma"
|
||||
"%pip install -qU langchain-airbyte"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -123,7 +123,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import tiktoken\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"enc = tiktoken.get_encoding(\"cl100k_base\")\n",
|
||||
|
||||
@@ -39,7 +39,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install langchain docugami==0.0.8 dgml-utils==0.3.0 pydantic langchainhub langchain-chroma hnswlib --upgrade --quiet"
|
||||
"! pip install langchain docugami==0.0.8 dgml-utils==0.3.0 pydantic langchainhub chromadb hnswlib --upgrade --quiet"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -547,7 +547,7 @@
|
||||
"\n",
|
||||
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
|
||||
"from langchain.storage import InMemoryStore\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.vectorstores.chroma import Chroma\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
|
||||
@@ -84,7 +84,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install --quiet pypdf langchain-chroma tiktoken openai \n",
|
||||
"%pip install --quiet pypdf chromadb tiktoken openai \n",
|
||||
"%pip uninstall -y langchain-fireworks\n",
|
||||
"%pip install --editable /mnt/disks/data/langchain/libs/partners/fireworks"
|
||||
]
|
||||
@@ -138,7 +138,7 @@
|
||||
"all_splits = text_splitter.split_documents(data)\n",
|
||||
"\n",
|
||||
"# Add to vectorDB\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_fireworks.embeddings import FireworksEmbeddings\n",
|
||||
"\n",
|
||||
"vectorstore = Chroma.from_documents(\n",
|
||||
|
||||
@@ -170,7 +170,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_text_splitters import CharacterTextSplitter\n",
|
||||
"\n",
|
||||
"with open(\"../../state_of_the_union.txt\") as f:\n",
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install langchain-chroma langchain_community tiktoken langchain-openai langchainhub langchain langgraph"
|
||||
"! pip install langchain_community tiktoken langchain-openai langchainhub chromadb langchain langgraph"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -30,8 +30,8 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.document_loaders import WebBaseLoader\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"urls = [\n",
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install langchain-chroma langchain_community tiktoken langchain-openai langchainhub langchain langgraph tavily-python"
|
||||
"! pip install langchain_community tiktoken langchain-openai langchainhub chromadb langchain langgraph tavily-python"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -77,8 +77,8 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.document_loaders import WebBaseLoader\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"urls = [\n",
|
||||
@@ -180,8 +180,8 @@
|
||||
"from langchain.output_parsers.openai_tools import PydanticToolsParser\n",
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"from langchain.schema import Document\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.tools.tavily_search import TavilySearchResults\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_core.messages import BaseMessage, FunctionMessage\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install langchain-chroma langchain_community tiktoken langchain-openai langchainhub langchain langgraph"
|
||||
"! pip install langchain_community tiktoken langchain-openai langchainhub chromadb langchain langgraph"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -86,8 +86,8 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.document_loaders import WebBaseLoader\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"urls = [\n",
|
||||
@@ -188,7 +188,7 @@
|
||||
"from langchain.output_parsers import PydanticOutputParser\n",
|
||||
"from langchain.output_parsers.openai_tools import PydanticToolsParser\n",
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_core.messages import BaseMessage, FunctionMessage\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
|
||||
|
||||
@@ -58,7 +58,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install -U langchain openai langchain-chroma langchain-experimental # (newest versions required for multi-modal)"
|
||||
"! pip install -U langchain openai chromadb langchain-experimental # (newest versions required for multi-modal)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -187,7 +187,7 @@
|
||||
"\n",
|
||||
"import chromadb\n",
|
||||
"import numpy as np\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_experimental.open_clip import OpenCLIPEmbeddings\n",
|
||||
"from PIL import Image as _PILImage\n",
|
||||
"\n",
|
||||
|
||||
@@ -58,7 +58,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install -U langchain-nomic langchain-chroma langchain-community tiktoken langchain-openai langchain"
|
||||
"! pip install -U langchain-nomic langchain_community tiktoken langchain-openai chromadb langchain"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -167,7 +167,7 @@
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n",
|
||||
"from langchain_nomic import NomicEmbeddings\n",
|
||||
|
||||
@@ -56,7 +56,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install -U langchain-nomic langchain-chroma langchain-community tiktoken langchain-openai langchain # (newest versions required for multi-modal)"
|
||||
"! pip install -U langchain-nomic langchain_community tiktoken langchain-openai chromadb langchain # (newest versions required for multi-modal)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -194,7 +194,7 @@
|
||||
"\n",
|
||||
"import chromadb\n",
|
||||
"import numpy as np\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_nomic import NomicEmbeddings\n",
|
||||
"from PIL import Image as _PILImage\n",
|
||||
"\n",
|
||||
|
||||
@@ -20,8 +20,8 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import RetrievalQA\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.document_loaders import TextLoader\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"from langchain_text_splitters import CharacterTextSplitter"
|
||||
]
|
||||
|
||||
@@ -80,7 +80,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.schema import Document\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"embeddings = OpenAIEmbeddings()"
|
||||
|
||||
@@ -36,10 +36,10 @@
|
||||
"from bs4 import BeautifulSoup as Soup\n",
|
||||
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
|
||||
"from langchain.storage import InMemoryByteStore, LocalFileStore\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.document_loaders.recursive_url_loader import (\n",
|
||||
" RecursiveUrlLoader,\n",
|
||||
")\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"\n",
|
||||
"# For our example, we'll load docs from the web\n",
|
||||
"from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
|
||||
@@ -370,14 +370,13 @@
|
||||
],
|
||||
"source": [
|
||||
"import torch\n",
|
||||
"from langchain_huggingface.llms import HuggingFacePipeline\n",
|
||||
"from optimum.intel.ipex import IPEXModelForCausalLM\n",
|
||||
"from transformers import AutoTokenizer, pipeline\n",
|
||||
"from langchain.llms.huggingface_pipeline import HuggingFacePipeline\n",
|
||||
"from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline\n",
|
||||
"\n",
|
||||
"model_id = \"Intel/neural-chat-7b-v3-3\"\n",
|
||||
"tokenizer = AutoTokenizer.from_pretrained(model_id)\n",
|
||||
"model = IPEXModelForCausalLM.from_pretrained(\n",
|
||||
" model_id, torch_dtype=torch.bfloat16, export=True\n",
|
||||
"model = AutoModelForCausalLM.from_pretrained(\n",
|
||||
" model_id, device_map=\"auto\", torch_dtype=torch.bfloat16\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"pipe = pipeline(\"text-generation\", model=model, tokenizer=tokenizer, max_new_tokens=100)\n",
|
||||
@@ -582,7 +581,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.14"
|
||||
"version": "3.9.18"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -740,7 +740,7 @@ Even this relatively large model will most likely fail to generate more complica
|
||||
|
||||
|
||||
```bash
|
||||
poetry run pip install pyyaml langchain_chroma
|
||||
poetry run pip install pyyaml chromadb
|
||||
import yaml
|
||||
```
|
||||
|
||||
@@ -994,7 +994,7 @@ from langchain.prompts import FewShotPromptTemplate, PromptTemplate
|
||||
from langchain.chains.sql_database.prompt import _sqlite_prompt, PROMPT_SUFFIX
|
||||
from langchain_huggingface import HuggingFaceEmbeddings
|
||||
from langchain.prompts.example_selector.semantic_similarity import SemanticSimilarityExampleSelector
|
||||
from langchain_chroma import Chroma
|
||||
from langchain_community.vectorstores import Chroma
|
||||
|
||||
example_prompt = PromptTemplate(
|
||||
input_variables=["table_info", "input", "sql_cmd", "sql_result", "answer"],
|
||||
|
||||
@@ -22,7 +22,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install --quiet pypdf tiktoken openai langchain-chroma langchain-together"
|
||||
"! pip install --quiet pypdf chromadb tiktoken openai langchain-together"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -45,8 +45,8 @@
|
||||
"all_splits = text_splitter.split_documents(data)\n",
|
||||
"\n",
|
||||
"# Add to vectorDB\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.embeddings import OpenAIEmbeddings\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"\n",
|
||||
"\"\"\"\n",
|
||||
"from langchain_together.embeddings import TogetherEmbeddings\n",
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -236,7 +236,7 @@ This is where information like log-probs and token usage may be stored.

These represent a decision from a language model to call a tool. They are included as part of an `AIMessage` output.
They can be accessed from there with the `.tool_calls` property.

This property returns a list of `ToolCall`s. A `ToolCall` is a dictionary with the following arguments:
This property returns a list of dictionaries. Each dictionary has the following keys:

- `name`: The name of the tool that should be called.
- `args`: The arguments to that tool.
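To make the shape described above concrete, here is a short sketch (the model name and the `multiply` tool are assumptions for illustration; any tool-calling chat model works the same way):

```python
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI

@tool
def multiply(a: int, b: int) -> int:
    """Multiply two integers."""
    return a * b

llm_with_tools = ChatOpenAI(model="gpt-4o-mini").bind_tools([multiply])
ai_msg = llm_with_tools.invoke("What is 3 * 12?")

for tool_call in ai_msg.tool_calls:
    # Each entry is a plain dict with "name", "args", and "id" keys.
    print(tool_call["name"], tool_call["args"], tool_call["id"])
```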
@@ -246,19 +246,14 @@ This property returns a list of `ToolCall`s. A `ToolCall` is a dictionary with t

This represents a system message, which tells the model how to behave. Not every model provider supports this.

#### ToolMessage

This represents the result of a tool call. In addition to `role` and `content`, this message has:

- a `tool_call_id` field which conveys the id of the call to the tool that was called to produce this result.
- an `artifact` field which can be used to pass along arbitrary artifacts of the tool execution which are useful to track but which should not be sent to the model.
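A minimal sketch of constructing such a message directly (the values are invented; the `artifact` field assumes a recent `langchain-core`):

```python
from langchain_core.messages import ToolMessage

msg = ToolMessage(
    content="2 files matched",               # what the model will see
    tool_call_id="call_abc123",              # id of the originating tool call
    artifact={"paths": ["a.txt", "b.txt"]},  # kept for your application, not sent to the model
)
```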
#### (Legacy) FunctionMessage

This is a legacy message type, corresponding to OpenAI's legacy function-calling API. ToolMessage should be used instead to correspond to the updated tool-calling API.
#### FunctionMessage

This represents the result of a function call. In addition to `role` and `content`, this message has a `name` parameter which conveys the name of the function that was called to produce this result.

#### ToolMessage

This represents the result of a tool call. This is distinct from a FunctionMessage in order to match OpenAI's `function` and `tool` message types. In addition to `role` and `content`, this message has a `tool_call_id` parameter which conveys the id of the call to the tool that was called to produce this result.

### Prompt templates
<span data-heading-keywords="prompt,prompttemplate,chatprompttemplate"></span>
@@ -501,87 +496,35 @@ For specifics on how to use retrievers, see the [relevant how-to guides here](/d
### Tools
<span data-heading-keywords="tool,tools"></span>

Tools are utilities designed to be called by a model: their inputs are designed to be generated by models, and their outputs are designed to be passed back to models.
Tools are needed whenever you want a model to control parts of your code or call out to external APIs.
Tools are interfaces that an agent, a chain, or a chat model / LLM can use to interact with the world.

A tool consists of:
A tool consists of the following components:

1. The name of the tool.
2. A description of what the tool does.
3. A JSON schema defining the inputs to the tool.
4. A function (and, optionally, an async variant of the function).
1. The name of the tool
2. A description of what the tool does
3. JSON schema of what the inputs to the tool are
4. The function to call
5. Whether the result of a tool should be returned directly to the user (only relevant for agents)
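As an illustration of those components, the `@tool` decorator derives the name, description, and input schema from a plain function (a sketch; the `multiply` example is invented):

```python
from langchain_core.tools import tool

@tool
def multiply(a: int, b: int) -> int:
    """Multiply two integers."""
    return a * b

print(multiply.name)         # "multiply"
print(multiply.description)  # docstring-derived description
print(multiply.args)         # JSON-schema-like dict describing the "a" and "b" inputs
```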
When a tool is bound to a model, the name, description and JSON schema are provided as context to the model.
Given a list of tools and a set of instructions, a model can request to call one or more tools with specific inputs.
Typical usage may look like the following:
The name, description and JSON schema are provided as context
to the LLM, allowing the LLM to determine how to use the tool
appropriately.

```python
tools = [...] # Define a list of tools
llm_with_tools = llm.bind_tools(tools)
ai_msg = llm_with_tools.invoke("do xyz...") # AIMessage(tool_calls=[ToolCall(...), ...], ...)
```
Given a list of available tools and a prompt, an LLM can request
that one or more tools be invoked with appropriate arguments.

The `AIMessage` returned from the model MAY have `tool_calls` associated with it.
Read [this guide](/docs/concepts/#aimessage) for more information on what the response type may look like.
Generally, when designing tools to be used by a chat model or LLM, it is important to keep in mind the following:

Once the chosen tools are invoked, the results can be passed back to the model so that it can complete whatever task
it's performing.
There are generally two different ways to invoke the tool and pass back the response:
- Chat models that have been fine-tuned for tool calling will be better at tool calling than non-fine-tuned models.
- Non fine-tuned models may not be able to use tools at all, especially if the tools are complex or require multiple tool calls.
- Models will perform better if the tools have well-chosen names, descriptions, and JSON schemas.
- Simpler tools are generally easier for models to use than more complex tools.
#### Invoke with just the arguments
For specifics on how to use tools, see the [relevant how-to guides here](/docs/how_to/#tools).

When you invoke a tool with just the arguments, you will get back the raw tool output (usually a string).
This generally looks like:

```python
# You will want to previously check that the LLM returned tool calls
tool_call = ai_msg.tool_calls[0] # ToolCall(args={...}, id=..., ...)
tool_output = tool.invoke(tool_call["args"])
tool_message = ToolMessage(content=tool_output, tool_call_id=tool_call["id"], name=tool_call["name"])
```

Note that the `content` field will generally be passed back to the model.
If you do not want the raw tool response to be passed to the model, but you still want to keep it around,
you can transform the tool output but also pass it as an artifact (read more about [`ToolMessage.artifact` here](/docs/concepts/#toolmessage))

```python
... # Same code as above
response_for_llm = transform(response)
tool_message = ToolMessage(content=response_for_llm, tool_call_id=tool_call["id"], name=tool_call["name"], artifact=tool_output)
```
#### Invoke with `ToolCall`

The other way to invoke a tool is to call it with the full `ToolCall` that was generated by the model.
When you do this, the tool will return a ToolMessage.
The benefits of this are that you don't have to write the logic yourself to transform the tool output into a ToolMessage.
This generally looks like:

```python
tool_call = ai_msg.tool_calls[0] # ToolCall(args={...}, id=..., ...)
tool_message = tool.invoke(tool_call)
# -> ToolMessage(content="tool result foobar...", tool_call_id=..., name="tool_name")
```

If you are invoking the tool this way and want to include an [artifact](/docs/concepts/#toolmessage) for the ToolMessage, you will need to have the tool return two things.
Read more about [defining tools that return artifacts here](/docs/how_to/tool_artifacts/).
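For the "return two things" pattern mentioned above, here is a sketch of a tool that returns both content and an artifact (this assumes the `response_format="content_and_artifact"` option covered in the linked guide; the search logic is placeholder data):

```python
from langchain_core.tools import tool

@tool(response_format="content_and_artifact")
def search_files(query: str) -> tuple[str, dict]:
    """Search files and return a short summary plus the raw results."""
    raw = {"hits": [f"{query}-result-1", f"{query}-result-2"]}  # placeholder data
    summary = f"Found {len(raw['hits'])} results for {query!r}"
    return summary, raw  # content goes to the model, artifact stays on the ToolMessage
```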
#### Best practices

When designing tools to be used by a model, it is important to keep in mind that:

- Chat models that have explicit [tool-calling APIs](/docs/concepts/#functiontool-calling) will be better at tool calling than non-fine-tuned models.
- Models will perform better if the tools have well-chosen names, descriptions, and JSON schemas. This is another form of prompt engineering.
- Simple, narrowly scoped tools are easier for models to use than complex tools.

#### Related

For specifics on how to use tools, see the [tools how-to guides](/docs/how_to/#tools).

To use a pre-built tool, see the [tool integration docs](/docs/integrations/tools/).
To use an existing pre-built tool, see [here](/docs/integrations/tools/) for a list of pre-built tools.

### Toolkits
<span data-heading-keywords="toolkit,toolkits"></span>

Toolkits are collections of tools that are designed to be used together for specific tasks. They have convenient loading methods.
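For illustration, toolkits expose those tools through a `get_tools()` method; a sketch using `SQLDatabaseToolkit` as one concrete example (the in-memory database and model name are assumptions):

```python
from langchain_community.agent_toolkits import SQLDatabaseToolkit
from langchain_community.utilities import SQLDatabase
from langchain_openai import ChatOpenAI

db = SQLDatabase.from_uri("sqlite:///:memory:")  # empty throwaway database, just for the sketch
toolkit = SQLDatabaseToolkit(db=db, llm=ChatOpenAI(model="gpt-4o-mini"))
tools = toolkit.get_tools()  # the toolkit's tools, ready to bind to a model or agent
print([t.name for t in tools])
```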
@@ -153,7 +153,7 @@
"\n",
" def parse(self, text: str) -> List[str]:\n",
" lines = text.strip().split(\"\\n\")\n",
" return list(filter(None, lines)) # Remove empty lines\n",
" return lines\n",
"\n",
"\n",
"output_parser = LineListOutputParser()\n",
@@ -1,342 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to dispatch custom callback events\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
"This guide assumes familiarity with the following concepts:\n",
|
||||
"\n",
|
||||
"- [Callbacks](/docs/concepts/#callbacks)\n",
|
||||
"- [Custom callback handlers](/docs/how_to/custom_callbacks)\n",
|
||||
"- [Astream Events API](/docs/concepts/#astream_events) the `astream_events` method will surface custom callback events.\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"In some situations, you may want to dipsatch a custom callback event from within a [Runnable](/docs/concepts/#runnable-interface) so it can be surfaced\n",
|
||||
"in a custom callback handler or via the [Astream Events API](/docs/concepts/#astream_events).\n",
|
||||
"\n",
|
||||
"For example, if you have a long running tool with multiple steps, you can dispatch custom events between the steps and use these custom events to monitor progress.\n",
|
||||
"You could also surface these custom events to an end user of your application to show them how the current task is progressing.\n",
|
||||
"\n",
|
||||
"To dispatch a custom event you need to decide on two attributes for the event: the `name` and the `data`.\n",
|
||||
"\n",
|
||||
"| Attribute | Type | Description |\n",
|
||||
"|-----------|------|----------------------------------------------------------------------------------------------------------|\n",
|
||||
"| name | str | A user defined name for the event. |\n",
|
||||
"| data | Any | The data associated with the event. This can be anything, though we suggest making it JSON serializable. |\n",
|
||||
"\n",
|
||||
"\n",
|
||||
":::{.callout-important}\n",
|
||||
"* Dispatching custom callback events requires `langchain-core>=0.2.15`.\n",
|
||||
"* Custom callback events can only be dispatched from within an existing `Runnable`.\n",
|
||||
"* If using `astream_events`, you must use `version='v2'` to see custom events.\n",
|
||||
"* Sending or rendering custom callbacks events in LangSmith is not yet supported.\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"\n",
|
||||
":::caution COMPATIBILITY\n",
|
||||
"LangChain cannot automatically propagate configuration, including callbacks necessary for astream_events(), to child runnables if you are running async code in python<=3.10. This is a common reason why you may fail to see events being emitted from custom runnables or tools.\n",
|
||||
"\n",
|
||||
"If you are running python<=3.10, you will need to manually propagate the `RunnableConfig` object to the child runnable in async environments. For an example of how to manually propagate the config, see the implementation of the `bar` RunnableLambda below.\n",
|
||||
"\n",
|
||||
"If you are running python>=3.11, the `RunnableConfig` will automatically propagate to child runnables in async environment. However, it is still a good idea to propagate the `RunnableConfig` manually if your code may run in other Python versions.\n",
|
||||
":::"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# | output: false\n",
|
||||
"# | echo: false\n",
|
||||
"\n",
|
||||
"%pip install -qU langchain-core"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Astream Events API\n",
|
||||
"\n",
|
||||
"The most useful way to consume custom events is via the [Astream Events API](/docs/concepts/#astream_events).\n",
|
||||
"\n",
|
||||
"We can use the `async` `adispatch_custom_event` API to emit custom events in an async setting. \n",
|
||||
"\n",
|
||||
"\n",
|
||||
":::{.callout-important}\n",
|
||||
"\n",
|
||||
"To see custom events via the astream events API, you need to use the newer `v2` API of `astream_events`.\n",
|
||||
":::"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'event': 'on_chain_start', 'data': {'input': 'hello world'}, 'name': 'foo', 'tags': [], 'run_id': 'f354ffe8-4c22-4881-890a-c1cad038a9a6', 'metadata': {}, 'parent_ids': []}\n",
|
||||
"{'event': 'on_custom_event', 'run_id': 'f354ffe8-4c22-4881-890a-c1cad038a9a6', 'name': 'event1', 'tags': [], 'metadata': {}, 'data': {'x': 'hello world'}, 'parent_ids': []}\n",
|
||||
"{'event': 'on_custom_event', 'run_id': 'f354ffe8-4c22-4881-890a-c1cad038a9a6', 'name': 'event2', 'tags': [], 'metadata': {}, 'data': 5, 'parent_ids': []}\n",
|
||||
"{'event': 'on_chain_stream', 'run_id': 'f354ffe8-4c22-4881-890a-c1cad038a9a6', 'name': 'foo', 'tags': [], 'metadata': {}, 'data': {'chunk': 'hello world'}, 'parent_ids': []}\n",
|
||||
"{'event': 'on_chain_end', 'data': {'output': 'hello world'}, 'run_id': 'f354ffe8-4c22-4881-890a-c1cad038a9a6', 'name': 'foo', 'tags': [], 'metadata': {}, 'parent_ids': []}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.callbacks.manager import (\n",
|
||||
" adispatch_custom_event,\n",
|
||||
")\n",
|
||||
"from langchain_core.runnables import RunnableLambda\n",
|
||||
"from langchain_core.runnables.config import RunnableConfig\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@RunnableLambda\n",
|
||||
"async def foo(x: str) -> str:\n",
|
||||
" await adispatch_custom_event(\"event1\", {\"x\": x})\n",
|
||||
" await adispatch_custom_event(\"event2\", 5)\n",
|
||||
" return x\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"async for event in foo.astream_events(\"hello world\", version=\"v2\"):\n",
|
||||
" print(event)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"In python <= 3.10, you must propagate the config manually!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'event': 'on_chain_start', 'data': {'input': 'hello world'}, 'name': 'bar', 'tags': [], 'run_id': 'c787b09d-698a-41b9-8290-92aaa656f3e7', 'metadata': {}, 'parent_ids': []}\n",
|
||||
"{'event': 'on_custom_event', 'run_id': 'c787b09d-698a-41b9-8290-92aaa656f3e7', 'name': 'event1', 'tags': [], 'metadata': {}, 'data': {'x': 'hello world'}, 'parent_ids': []}\n",
|
||||
"{'event': 'on_custom_event', 'run_id': 'c787b09d-698a-41b9-8290-92aaa656f3e7', 'name': 'event2', 'tags': [], 'metadata': {}, 'data': 5, 'parent_ids': []}\n",
|
||||
"{'event': 'on_chain_stream', 'run_id': 'c787b09d-698a-41b9-8290-92aaa656f3e7', 'name': 'bar', 'tags': [], 'metadata': {}, 'data': {'chunk': 'hello world'}, 'parent_ids': []}\n",
|
||||
"{'event': 'on_chain_end', 'data': {'output': 'hello world'}, 'run_id': 'c787b09d-698a-41b9-8290-92aaa656f3e7', 'name': 'bar', 'tags': [], 'metadata': {}, 'parent_ids': []}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.callbacks.manager import (\n",
|
||||
" adispatch_custom_event,\n",
|
||||
")\n",
|
||||
"from langchain_core.runnables import RunnableLambda\n",
|
||||
"from langchain_core.runnables.config import RunnableConfig\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@RunnableLambda\n",
|
||||
"async def bar(x: str, config: RunnableConfig) -> str:\n",
|
||||
" \"\"\"An example that shows how to manually propagate config.\n",
|
||||
"\n",
|
||||
" You must do this if you're running python<=3.10.\n",
|
||||
" \"\"\"\n",
|
||||
" await adispatch_custom_event(\"event1\", {\"x\": x}, config=config)\n",
|
||||
" await adispatch_custom_event(\"event2\", 5, config=config)\n",
|
||||
" return x\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"async for event in bar.astream_events(\"hello world\", version=\"v2\"):\n",
|
||||
" print(event)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Async Callback Handler\n",
|
||||
"\n",
|
||||
"You can also consume the dispatched event via an async callback handler."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Received event event1 with data: {'x': 1}, with tags: ['foo', 'bar'], with metadata: {} and run_id: a62b84be-7afd-4829-9947-7165df1f37d9\n",
|
||||
"Received event event2 with data: 5, with tags: ['foo', 'bar'], with metadata: {} and run_id: a62b84be-7afd-4829-9947-7165df1f37d9\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"1"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from typing import Any, Dict, List, Optional\n",
|
||||
"from uuid import UUID\n",
|
||||
"\n",
|
||||
"from langchain_core.callbacks import AsyncCallbackHandler\n",
|
||||
"from langchain_core.callbacks.manager import (\n",
|
||||
" adispatch_custom_event,\n",
|
||||
")\n",
|
||||
"from langchain_core.runnables import RunnableLambda\n",
|
||||
"from langchain_core.runnables.config import RunnableConfig\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class AsyncCustomCallbackHandler(AsyncCallbackHandler):\n",
|
||||
" async def on_custom_event(\n",
|
||||
" self,\n",
|
||||
" name: str,\n",
|
||||
" data: Any,\n",
|
||||
" *,\n",
|
||||
" run_id: UUID,\n",
|
||||
" tags: Optional[List[str]] = None,\n",
|
||||
" metadata: Optional[Dict[str, Any]] = None,\n",
|
||||
" **kwargs: Any,\n",
|
||||
" ) -> None:\n",
|
||||
" print(\n",
|
||||
" f\"Received event {name} with data: {data}, with tags: {tags}, with metadata: {metadata} and run_id: {run_id}\"\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@RunnableLambda\n",
|
||||
"async def bar(x: str, config: RunnableConfig) -> str:\n",
|
||||
" \"\"\"An example that shows how to manually propagate config.\n",
|
||||
"\n",
|
||||
" You must do this if you're running python<=3.10.\n",
|
||||
" \"\"\"\n",
|
||||
" await adispatch_custom_event(\"event1\", {\"x\": x}, config=config)\n",
|
||||
" await adispatch_custom_event(\"event2\", 5, config=config)\n",
|
||||
" return x\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"async_handler = AsyncCustomCallbackHandler()\n",
|
||||
"await foo.ainvoke(1, {\"callbacks\": [async_handler], \"tags\": [\"foo\", \"bar\"]})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Sync Callback Handler\n",
|
||||
"\n",
|
||||
"Let's see how to emit custom events in a sync environment using `dispatch_custom_event`.\n",
|
||||
"\n",
|
||||
"You **must** call `dispatch_custom_event` from within an existing `Runnable`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Received event event1 with data: {'x': 1}, with tags: ['foo', 'bar'], with metadata: {} and run_id: 27b5ce33-dc26-4b34-92dd-08a89cb22268\n",
|
||||
"Received event event2 with data: {'x': 1}, with tags: ['foo', 'bar'], with metadata: {} and run_id: 27b5ce33-dc26-4b34-92dd-08a89cb22268\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"1"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from typing import Any, Dict, List, Optional\n",
|
||||
"from uuid import UUID\n",
|
||||
"\n",
|
||||
"from langchain_core.callbacks import BaseCallbackHandler\n",
|
||||
"from langchain_core.callbacks.manager import (\n",
|
||||
" dispatch_custom_event,\n",
|
||||
")\n",
|
||||
"from langchain_core.runnables import RunnableLambda\n",
|
||||
"from langchain_core.runnables.config import RunnableConfig\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class CustomHandler(BaseCallbackHandler):\n",
|
||||
" def on_custom_event(\n",
|
||||
" self,\n",
|
||||
" name: str,\n",
|
||||
" data: Any,\n",
|
||||
" *,\n",
|
||||
" run_id: UUID,\n",
|
||||
" tags: Optional[List[str]] = None,\n",
|
||||
" metadata: Optional[Dict[str, Any]] = None,\n",
|
||||
" **kwargs: Any,\n",
|
||||
" ) -> None:\n",
|
||||
" print(\n",
|
||||
" f\"Received event {name} with data: {data}, with tags: {tags}, with metadata: {metadata} and run_id: {run_id}\"\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@RunnableLambda\n",
|
||||
"def foo(x: int, config: RunnableConfig) -> int:\n",
|
||||
" dispatch_custom_event(\"event1\", {\"x\": x})\n",
|
||||
" dispatch_custom_event(\"event2\", {\"x\": x})\n",
|
||||
" return x\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"handler = CustomHandler()\n",
|
||||
"foo.invoke(1, {\"callbacks\": [handler], \"tags\": [\"foo\", \"bar\"]})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Next steps\n",
|
||||
"\n",
|
||||
"You've seen how to emit custom events, you can check out the more in depth guide for [astream events](/docs/how_to/streaming/#using-stream-events) which is the easiest way to leverage custom events."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
@@ -16,7 +16,7 @@
|
||||
"\n",
|
||||
"Tracking token usage to calculate cost is an important part of putting your app in production. This guide goes over how to obtain this information from your LangChain model calls.\n",
|
||||
"\n",
|
||||
"This guide requires `langchain-openai >= 0.1.9`."
|
||||
"This guide requires `langchain-openai >= 0.1.8`."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -153,7 +153,7 @@
|
||||
"\n",
|
||||
"#### OpenAI\n",
|
||||
"\n",
|
||||
"For example, OpenAI will return a message [chunk](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessageChunk.html) at the end of a stream with token usage information. This behavior is supported by `langchain-openai >= 0.1.9` and can be enabled by setting `stream_usage=True`. This attribute can also be set when `ChatOpenAI` is instantiated.\n",
|
||||
"For example, OpenAI will return a message [chunk](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessageChunk.html) at the end of a stream with token usage information. This behavior is supported by `langchain-openai >= 0.1.8` and can be enabled by setting `stream_usage=True`. This attribute can also be set when `ChatOpenAI` is instantiated.\n",
|
||||
"\n",
|
||||
"```{=mdx}\n",
|
||||
":::note\n",
|
||||
|
||||
@@ -220,57 +220,6 @@
|
||||
"pretty_print_docs(compressed_docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "14002ec8-7ee5-4f91-9315-dd21c3808776",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### `LLMListwiseRerank`\n",
|
||||
"\n",
|
||||
"[LLMListwiseRerank](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.document_compressors.listwise_rerank.LLMListwiseRerank.html) uses [zero-shot listwise document reranking](https://arxiv.org/pdf/2305.02156) and functions similarly to `LLMChainFilter` as a robust but more expensive option. It is recommended to use a more powerful LLM.\n",
|
||||
"\n",
|
||||
"Note that `LLMListwiseRerank` requires a model with the [with_structured_output](/docs/integrations/chat/) method implemented."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "4ab9ee9f-917e-4d6f-9344-eb7f01533228",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Document 1:\n",
|
||||
"\n",
|
||||
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
|
||||
"\n",
|
||||
"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n",
|
||||
"\n",
|
||||
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
|
||||
"\n",
|
||||
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.retrievers.document_compressors import LLMListwiseRerank\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)\n",
|
||||
"\n",
|
||||
"_filter = LLMListwiseRerank.from_llm(llm, top_n=1)\n",
|
||||
"compression_retriever = ContextualCompressionRetriever(\n",
|
||||
" base_compressor=_filter, base_retriever=retriever\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"compressed_docs = compression_retriever.invoke(\n",
|
||||
" \"What did the president say about Ketanji Jackson Brown\"\n",
|
||||
")\n",
|
||||
"pretty_print_docs(compressed_docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7194da42",
|
||||
@@ -346,7 +295,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": 7,
|
||||
"id": "617a1756",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
"id": "9a8bceb3-95bd-4496-bb9e-57655136e070",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to convert Runnables as Tools\n",
|
||||
"# How to use Runnables as Tools\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
@@ -541,7 +541,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
"version": "3.10.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
"id": "5436020b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to create tools\n",
|
||||
"# How to create custom tools\n",
|
||||
"\n",
|
||||
"When constructing an agent, you will need to provide it with a list of `Tool`s that it can use. Besides the actual function that is called, the Tool consists of several components:\n",
|
||||
"\n",
|
||||
|
||||
@@ -44,7 +44,6 @@ This highlights functionality that is core to using LangChain.
|
||||
- [How to: inspect runnables](/docs/how_to/inspect)
|
||||
- [How to: add fallbacks to a runnable](/docs/how_to/fallbacks)
|
||||
- [How to: migrate chains to LCEL](/docs/how_to/migrate_chains)
|
||||
- [How to: pass runtime secrets to a runnable](/docs/how_to/runnable_runtime_secrets)
|
||||
|
||||
## Components
|
||||
|
||||
@@ -186,22 +185,19 @@ Indexing is the process of keeping your vectorstore in-sync with the underlying
|
||||
|
||||
LangChain [Tools](/docs/concepts/#tools) contain a description of the tool (to pass to the language model) as well as the implementation of the function to call. Refer [here](/docs/integrations/tools/) for a list of pre-built tools.
|
||||
|
||||
- [How to: create tools](/docs/how_to/custom_tools)
|
||||
- [How to: use built-in tools and toolkits](/docs/how_to/tools_builtin)
|
||||
- [How to: use chat models to call tools](/docs/how_to/tool_calling)
|
||||
- [How to: pass tool outputs to chat models](/docs/how_to/tool_results_pass_to_model)
|
||||
- [How to: pass run time values to tools](/docs/how_to/tool_runtime)
|
||||
- [How to: add a human-in-the-loop for tools](/docs/how_to/tools_human)
|
||||
- [How to: handle tool errors](/docs/how_to/tools_error)
|
||||
- [How to: force models to call a tool](/docs/how_to/tool_choice)
|
||||
- [How to: disable parallel tool calling](/docs/how_to/tool_calling_parallel)
|
||||
- [How to: access the `RunnableConfig` from a tool](/docs/how_to/tool_configure)
|
||||
- [How to: stream events from a tool](/docs/how_to/tool_stream_events)
|
||||
- [How to: return artifacts from a tool](/docs/how_to/tool_artifacts/)
|
||||
- [How to: create custom tools](/docs/how_to/custom_tools)
|
||||
- [How to: use built-in tools and built-in toolkits](/docs/how_to/tools_builtin)
|
||||
- [How to: convert Runnables to tools](/docs/how_to/convert_runnable_to_tool)
|
||||
- [How to: add ad-hoc tool calling capability to models](/docs/how_to/tools_prompting)
|
||||
- [How to: pass in runtime secrets](/docs/how_to/runnable_runtime_secrets)
|
||||
- [How to: handle large numbers of tools](/docs/how_to/many_tools)
|
||||
- [How to: use chat model to call tools](/docs/how_to/tool_calling)
|
||||
- [How to: pass tool results back to model](/docs/how_to/tool_results_pass_to_model)
|
||||
- [How to: add ad-hoc tool calling capability to LLMs and chat models](/docs/how_to/tools_prompting)
|
||||
- [How to: pass run time values to tools](/docs/how_to/tool_runtime)
|
||||
- [How to: add a human in the loop to tool usage](/docs/how_to/tools_human)
|
||||
- [How to: handle errors when calling tools](/docs/how_to/tools_error)
|
||||
- [How to: disable parallel tool calling](/docs/how_to/tool_choice)
|
||||
- [How to: access the `RunnableConfig` object within a custom tool](/docs/how_to/tool_configure)
|
||||
- [How to: stream events from child runs within a custom tool](/docs/how_to/tool_stream_events)
|
||||
- [How to: return extra artifacts from a tool](/docs/how_to/tool_artifacts/)
|
||||
|
||||
### Multimodal
|
||||
|
||||
@@ -229,7 +225,6 @@ For in depth how-to guides for agents, please check out [LangGraph](https://lang
|
||||
- [How to: pass callbacks into a module constructor](/docs/how_to/callbacks_constructor)
|
||||
- [How to: create custom callback handlers](/docs/how_to/custom_callbacks)
|
||||
- [How to: use callbacks in async environments](/docs/how_to/callbacks_async)
|
||||
- [How to: dispatch custom callback events](/docs/how_to/callbacks_custom_events)
|
||||
|
||||
### Custom
|
||||
|
||||
@@ -242,7 +237,6 @@ All of LangChain components can easily be extended to support your own versions.
|
||||
- [How to: write a custom output parser class](/docs/how_to/output_parser_custom)
|
||||
- [How to: create custom callback handlers](/docs/how_to/custom_callbacks)
|
||||
- [How to: define a custom tool](/docs/how_to/custom_tools)
|
||||
- [How to: dispatch custom callback events](/docs/how_to/callbacks_custom_events)
|
||||
|
||||
### Serialization
|
||||
- [How to: save and load LangChain objects](/docs/how_to/serialization)
|
||||
|
||||
@@ -60,7 +60,7 @@
|
||||
" * document addition by id (`add_documents` method with `ids` argument)\n",
|
||||
" * delete by id (`delete` method with `ids` argument)\n",
|
||||
"\n",
|
||||
"Compatible Vectorstores: `Aerospike`, `AnalyticDB`, `AstraDB`, `AwaDB`, `AzureCosmosDBNoSqlVectorSearch`, `AzureCosmosDBVectorSearch`, `Bagel`, `Cassandra`, `Chroma`, `CouchbaseVectorStore`, `DashVector`, `DatabricksVectorSearch`, `DeepLake`, `Dingo`, `ElasticVectorSearch`, `ElasticsearchStore`, `FAISS`, `HanaDB`, `Milvus`, `MongoDBAtlasVectorSearch`, `MyScale`, `OpenSearchVectorSearch`, `PGVector`, `Pinecone`, `Qdrant`, `Redis`, `Rockset`, `ScaNN`, `SingleStoreDB`, `SupabaseVectorStore`, `SurrealDBStore`, `TimescaleVector`, `Vald`, `VDMS`, `Vearch`, `VespaStore`, `Weaviate`, `Yellowbrick`, `ZepVectorStore`, `TencentVectorDB`, `OpenSearchVectorSearch`.\n",
|
||||
"Compatible Vectorstores: `Aerospike`, `AnalyticDB`, `AstraDB`, `AwaDB`, `AzureCosmosDBNoSqlVectorSearch`, `AzureCosmosDBVectorSearch`, `Bagel`, `Cassandra`, `Chroma`, `CouchbaseVectorStore`, `DashVector`, `DatabricksVectorSearch`, `DeepLake`, `Dingo`, `ElasticVectorSearch`, `ElasticsearchStore`, `FAISS`, `HanaDB`, `Milvus`, `MyScale`, `OpenSearchVectorSearch`, `PGVector`, `Pinecone`, `Qdrant`, `Redis`, `Rockset`, `ScaNN`, `SingleStoreDB`, `SupabaseVectorStore`, `SurrealDBStore`, `TimescaleVector`, `Vald`, `VDMS`, `Vearch`, `VespaStore`, `Weaviate`, `Yellowbrick`, `ZepVectorStore`, `TencentVectorDB`, `OpenSearchVectorSearch`.\n",
|
||||
" \n",
|
||||
"## Caution\n",
|
||||
"\n",
|
||||
|
||||
@@ -1,268 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f05a0e81-372d-477e-a96b-95a9f217a6a4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to handle large numbers of tools\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
"This guide assumes familiarity with the following concepts:\n",
|
||||
"\n",
|
||||
"- [Tools](/docs/concepts#tools)\n",
|
||||
"- [Tool calling](/docs/concepts/#functiontool-calling)\n",
|
||||
"- [Embeddings](/docs/concepts/#embedding-models) and [vector stores](/docs/concepts#vector-stores)\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"Tool calling allows a model to respond to a given prompt by generating output that matches a user-defined schema. Many LLM providers, including [Anthropic](https://www.anthropic.com/), [Cohere](https://cohere.com/), [Google](https://cloud.google.com/vertex-ai), [Mistral](https://mistral.ai/), [OpenAI](https://openai.com/), and others, support variants of a tool calling feature. These features typically allow requests to the LLM to include available tools and their schemas, and for responses to include calls to these tools.\n",
|
||||
"\n",
|
||||
"Importantly, the subset of available tools to call is generally at the discretion of the model (although many providers also enable the user to [specify or constrain the choice of tool](/docs/how_to/tool_choice)). As the number of available tools grows, you may want to limit the scope of the LLM's selection, to decrease token consumption and to help manage sources of error in LLM reasoning.\n",
|
||||
"\n",
|
||||
"Here we will demonstrate how to dynamically adjust the tools available to a model. Bottom line up front: like [RAG](/docs/tutorials/rag) and similar methods, we prefix the model invocation by retrieving over available tools. Although we demonstrate one implementation that searches over tool descriptions, the details of the tool selection can be customized as needed.\n",
|
||||
"\n",
|
||||
"**Note**: this guide uses [OpenAI](/docs/integrations/platforms/openai/) for embeddings, but any LangChain embeddings should suffice."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b7be98fe-c8b7-4979-96db-f030469bce35",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%%capture --no-stderr\n",
|
||||
"%pip install -U langchain-core langchain-openai"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2ea8145f-c468-47c8-9f6a-693877d804fb",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We first instantiate a chat model that supports [tool calling](/docs/how_to/tool_calling/):\n",
|
||||
"\n",
|
||||
"```{=mdx}\n",
|
||||
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
|
||||
"\n",
|
||||
"<ChatModelTabs customVarName=\"llm\" />\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "e3595ba3-1c13-4a35-ab2a-14e433dbbe31",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# | output: false\n",
|
||||
"# | echo: false\n",
|
||||
"\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "84803177-44d6-4cb8-a7f3-16fd02500bc6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's consider a toy example in which we have one tool for each company in the S&P 500 index. Each tool will fetch information, and is parameterized by a single integer representing the year.\n",
|
||||
"\n",
|
||||
"We first construct a registry that associates a unique identifier with a schema for each tool. We will represent the tools using JSON schema, which can be bound directly to [chat models supporting tool calling](/docs/integrations/chat/)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "77cc23d3-9913-4159-8a14-ff7df3523440",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import re\n",
|
||||
"import uuid\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def create_tool(company: str) -> dict:\n",
|
||||
" \"\"\"Create schema for a placeholder tool.\"\"\"\n",
|
||||
" formatted_company = re.sub(r\"[^\\w\\s]\", \"\", company).replace(\" \", \"_\")\n",
|
||||
" return {\n",
|
||||
" \"title\": f\"{formatted_company}_information\",\n",
|
||||
" \"description\": f\"Information about {company}.\",\n",
|
||||
" \"type\": \"object\",\n",
|
||||
" \"properties\": {\"year\": {\"title\": \"year\", \"type\": \"integer\"}},\n",
|
||||
" \"required\": [\"year\"],\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"s_and_p_500_companies = [ # Abbreviated list for demonstration purposes\n",
|
||||
" \"3M\",\n",
|
||||
" \"A.O. Smith\",\n",
|
||||
" \"Abbott\",\n",
|
||||
" \"Accenture\",\n",
|
||||
" \"Advanced Micro Devices\",\n",
|
||||
" \"Yum! Brands\",\n",
|
||||
" \"Zebra Technologies\",\n",
|
||||
" \"Zimmer Biomet\",\n",
|
||||
" \"Zoetis\",\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"tool_registry = {\n",
|
||||
" str(uuid.uuid4()): create_tool(company) for company in s_and_p_500_companies\n",
|
||||
"}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "72c206b0-25e4-4034-999d-0a2442ff2c7b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Next, we create a [vector store](/docs/how_to/vectorstores) that will store embeddings of the tool descriptions. This will allow a user query to be associated to a tool via semantic search. This is a simple solution, and in general the full scope of [retrieval solutions](/docs/concepts#retrieval) are available for this step."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "390d388f-3b5c-4fe0-bd77-f22c0391a2e3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.documents import Document\n",
|
||||
"from langchain_core.vectorstores import InMemoryVectorStore, VectorStore\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"tool_documents = [\n",
|
||||
" Document(page_content=tool[\"description\"], id=id)\n",
|
||||
" for id, tool in tool_registry.items()\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"vector_store = InMemoryVectorStore(embedding=OpenAIEmbeddings())\n",
|
||||
"upsert_response = vector_store.upsert(tool_documents)\n",
|
||||
"assert not upsert_response[\"failed\"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0bd360a3-0a02-4661-87c6-b300e602573b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Finally, we construct our [Runnable](/docs/concepts/#langchain-expression-language-lcel) as follows:\n",
|
||||
"\n",
|
||||
"- We create a `retrieve_tools` runnable that will return tools that are relevant to a user query;\n",
|
||||
"- We create a `get_chat_model` runnable that will receive the query and tools, bind the tools to a chat model, and run it on the query.\n",
|
||||
"\n",
|
||||
"Note that here we leverage the fact that if a [RunnableLambda](/docs/how_to/functions/) returns an instance of Runnable, that instance is [called on its input](/docs/how_to/dynamic_chain). So `get_chat_model` only needs to construct the chat model for the chat model to be called on the input query."
|
||||
]
|
||||
},
|
||||
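As a minimal standalone sketch of the behavior relied on in that note (the function and input below are illustrative, not taken from the notebook):

```python
# If a RunnableLambda returns a Runnable, that Runnable is itself invoked on
# the original input -- the basis for constructing the chat model lazily above.
from langchain_core.runnables import Runnable, RunnableLambda


def shout(text: str) -> Runnable:
    # Return a Runnable rather than a plain value; it will be called on `text`.
    return RunnableLambda(lambda s: s.upper() + "!")


chain = RunnableLambda(shout)
print(chain.invoke("hello"))  # HELLO!
```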
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "01bc976b-eede-4ab2-b924-6eb283732324",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from operator import itemgetter\n",
|
||||
"from typing import List, Mapping\n",
|
||||
"\n",
|
||||
"from langchain_core.runnables import (\n",
|
||||
" Runnable,\n",
|
||||
" RunnableLambda,\n",
|
||||
" RunnablePassthrough,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"async def _retrieve_tools(\n",
|
||||
" input: dict,\n",
|
||||
" vector_store: VectorStore,\n",
|
||||
" tool_registry: Mapping[str, dict],\n",
|
||||
") -> List[dict]:\n",
|
||||
" query = input[\"query\"]\n",
|
||||
" tool_documents = await vector_store.asimilarity_search(query)\n",
|
||||
" return [tool_registry[document.id] for document in tool_documents]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"async def _get_chat_model(input: dict) -> Runnable:\n",
|
||||
" model = llm.bind_tools(input[\"tools\"])\n",
|
||||
" return itemgetter(\"query\") | model\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"retrieve_tools = RunnableLambda(_retrieve_tools).bind(\n",
|
||||
" vector_store=vector_store,\n",
|
||||
" tool_registry=tool_registry,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"get_chat_model = RunnableLambda(_get_chat_model)\n",
|
||||
"\n",
|
||||
"chain = RunnablePassthrough.assign(tools=retrieve_tools) | get_chat_model"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1bf3a0e4-25a5-4cc1-8899-4e93fbd1043d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Invoking the chain, we see that the retriever step is able to recover an appropriate tool, and the LLM is able to translate the user's query into an invocation of the tool:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "cea1d8dc-05b9-46b6-b355-5749da1dd006",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'name': 'Advanced_Micro_Devices_information',\n",
|
||||
" 'args': {'year': 2022},\n",
|
||||
" 'id': 'call_jgQ4Hgt5Svw0YJ9dFpGYinzO',\n",
|
||||
" 'type': 'tool_call'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"response = await chain.ainvoke(\n",
|
||||
" {\"query\": \"Can you give me some information about AMD in 2022?\"}\n",
|
||||
")\n",
|
||||
"response.tool_calls"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4a8e42bc-042a-4872-97c6-5e4e7195f2ac",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"See [LangSmith trace](https://smith.langchain.com/public/0a298c50-1b88-4914-8007-db4c98a4d3e4/r) for the above run."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -1,78 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6fcd2994-0092-4fa3-9bb1-c9c84babadc5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to pass runtime secrets to runnables\n",
|
||||
"\n",
|
||||
":::info Requires `langchain-core >= 0.2.22`\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"We can pass in secrets to our runnables at runtime using the `RunnableConfig`. Specifically we can pass in secrets with a `__` prefix to the `configurable` field. This will ensure that these secrets aren't traced as part of the invocation:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "92e42e91-c277-49de-aa7a-dfb5c993c817",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"7"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.runnables import RunnableConfig\n",
|
||||
"from langchain_core.tools import tool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def foo(x: int, config: RunnableConfig) -> int:\n",
|
||||
" \"\"\"Sum x and a secret int\"\"\"\n",
|
||||
" return x + config[\"configurable\"][\"__top_secret_int\"]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"foo.invoke({\"x\": 5}, {\"configurable\": {\"__top_secret_int\": 2, \"traced_key\": \"bar\"}})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ae3a4fb9-2ce7-46b2-b654-35dff0ae7197",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Looking at the LangSmith trace for this run, we can see that \"traced_key\" was recorded (as part of Metadata) while our secret int was not: https://smith.langchain.com/public/aa7e3289-49ca-422d-a408-f6b927210170/r"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "poetry-venv-311",
|
||||
"language": "python",
|
||||
"name": "poetry-venv-311"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -452,7 +452,7 @@
|
||||
"source": [
|
||||
"#### Generator Functions\n",
|
||||
"\n",
|
||||
"Let's fix the streaming using a generator function that can operate on the **input stream**.\n",
|
||||
"Le'ts fix the streaming using a generator function that can operate on the **input stream**.\n",
|
||||
"\n",
|
||||
":::{.callout-tip}\n",
|
||||
"A generator function (a function that uses `yield`) allows writing code that operates on **input streams**\n",
|
||||
|
||||
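A minimal sketch of such a generator transform, using only `langchain_core` (the word-splitting logic below is illustrative, not the notebook's example):

```python
# A generator function receives an iterator over input chunks and can yield
# outputs as soon as they are ready, instead of waiting for the full input.
from typing import Iterator, List

from langchain_core.runnables import RunnableGenerator


def split_into_words(chunks: Iterator[str]) -> Iterator[List[str]]:
    """Buffer streamed text and emit complete words as they arrive."""
    buffer = ""
    for chunk in chunks:
        buffer += chunk
        while " " in buffer:
            word, buffer = buffer.split(" ", 1)
            yield [word]
    if buffer:
        yield [buffer]


runnable = RunnableGenerator(split_into_words)
for part in runnable.transform(iter(["hello wor", "ld and beyond"])):
    print(part)  # ['hello'], then ['world'], then ['and'], then ['beyond']
```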
@@ -5,12 +5,11 @@
|
||||
"id": "503e36ae-ca62-4f8a-880c-4fe78ff5df93",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to return artifacts from a tool\n",
|
||||
"# How to return extra artifacts from a tool\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"This guide assumes familiarity with the following concepts:\n",
|
||||
"\n",
|
||||
"- [ToolMessage](/docs/concepts/#toolmessage)\n",
|
||||
"- [Tools](/docs/concepts/#tools)\n",
|
||||
"- [Function/tool calling](/docs/concepts/#functiontool-calling)\n",
|
||||
"\n",
|
||||
|
||||
@@ -17,7 +17,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to use chat models to call tools\n",
|
||||
"# How to use a model to call tools\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
@@ -82,24 +82,30 @@
|
||||
"## Passing tools to chat models\n",
|
||||
"\n",
|
||||
"Chat models that support tool calling features implement a `.bind_tools` method, which \n",
|
||||
"receives a list of functions, Pydantic models, or LangChain [tool objects](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.BaseTool.html#langchain_core.tools.BaseTool) \n",
|
||||
"receives a list of LangChain [tool objects](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.BaseTool.html#langchain_core.tools.BaseTool) \n",
|
||||
"and binds them to the chat model in its expected format. Subsequent invocations of the \n",
|
||||
"chat model will include tool schemas in its calls to the LLM.\n",
|
||||
"\n",
|
||||
"For example, below we implement simple tools for arithmetic:"
|
||||
"For example, we can define the schema for custom tools using the `@tool` decorator \n",
|
||||
"on Python functions:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.tools import tool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def add(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Adds a and b.\"\"\"\n",
|
||||
" return a + b\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def multiply(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Multiplies a and b.\"\"\"\n",
|
||||
" return a * b\n",
|
||||
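A minimal sketch of binding these tools and inspecting the resulting tool calls (assuming `langchain-openai` is installed and an API key is configured; the model name is an assumption):

```python
# Bind the tools to a chat model that supports tool calling, then inspect
# the structured tool calls on the returned AIMessage.
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI


@tool
def add(a: int, b: int) -> int:
    """Adds a and b."""
    return a + b


@tool
def multiply(a: int, b: int) -> int:
    """Multiplies a and b."""
    return a * b


llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)  # model name is illustrative
llm_with_tools = llm.bind_tools([add, multiply])

ai_msg = llm_with_tools.invoke("What is 3 * 12?")
print(ai_msg.tool_calls)  # e.g. [{'name': 'multiply', 'args': {'a': 3, 'b': 12}, ...}]
```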
@@ -112,14 +118,12 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"LangChain also implements a `@tool` decorator that allows for further control of the tool schema, such as tool names and argument descriptions. See the how-to guide [here](/docs/how_to/custom_tools/#creating-tools-from-functions) for detail.\n",
|
||||
"\n",
|
||||
"We can also define the schema using [Pydantic](https://docs.pydantic.dev):"
|
||||
"Or below, we define the schema using [Pydantic](https://docs.pydantic.dev):"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -339,7 +343,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.4"
|
||||
"version": "3.10.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -4,13 +4,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to disable parallel tool calling\n",
|
||||
"\n",
|
||||
":::info OpenAI-specific\n",
|
||||
"\n",
|
||||
"This API is currently only supported by OpenAI.\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"### Disabling parallel tool calling (OpenAI only)\n",
|
||||
"\n",
|
||||
"OpenAI tool calling performs tool calling in parallel by default. That means that if we ask a question like \"What is the weather in Tokyo, New York, and Chicago?\" and we have a tool for getting the weather, it will call the tool 3 times in parallel. We can force it to call only a single tool once by using the ``parallel_tool_call`` parameter."
|
||||
]
|
||||
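A minimal sketch of what disabling parallel calls might look like, assuming `langchain-openai` (the tool and model name are illustrative):

```python
# parallel_tool_calls=False asks the model to emit at most one tool call
# per response, even when the question covers several cities.
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI


@tool
def get_weather(city: str) -> str:
    """Look up the current weather for a city."""
    return f"It is sunny in {city}."


llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)  # model name is illustrative
llm_with_tools = llm.bind_tools([get_weather], parallel_tool_calls=False)

ai_msg = llm_with_tools.invoke("What is the weather in Tokyo, New York, and Chicago?")
print(len(ai_msg.tool_calls))  # expected: 1
```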
@@ -105,24 +99,10 @@
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to force models to call a tool\n",
|
||||
"# How to force tool calling behavior\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
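The body of this guide is not shown here; as a minimal sketch, assuming `langchain-openai` (tool and model names are illustrative), forcing a particular tool by name could look like:

```python
# tool_choice forces the named tool (or "any" to require some tool call)
# regardless of the question asked.
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI


@tool
def add(a: int, b: int) -> int:
    """Adds a and b."""
    return a + b


@tool
def multiply(a: int, b: int) -> int:
    """Multiplies a and b."""
    return a * b


llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
llm_forced = llm.bind_tools([add, multiply], tool_choice="multiply")

ai_msg = llm_forced.invoke("What is 2 + 2?")
print(ai_msg.tool_calls)  # the model is forced to call "multiply" here
```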
@@ -125,24 +125,10 @@
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to access the RunnableConfig from a tool\n",
|
||||
"# How to access the RunnableConfig object within a custom tool\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
@@ -110,7 +110,7 @@
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
@@ -124,9 +124,9 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
"version": "3.10.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to pass tool outputs to chat models\n",
|
||||
"# How to pass tool outputs to the model\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"This guide assumes familiarity with the following concepts:\n",
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to pass run time values to tools\n",
|
||||
"# How to pass run time values to a tool\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
@@ -15,25 +15,26 @@
|
||||
"- [How to use a model to call tools](/docs/how_to/tool_calling)\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
":::info Using with LangGraph\n",
|
||||
":::{.callout-info} Supported models\n",
|
||||
"\n",
|
||||
"This how-to guide uses models with native tool calling capability.\n",
|
||||
"You can find a [list of all models that support tool calling](/docs/integrations/chat/).\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
":::{.callout-info} Using with LangGraph\n",
|
||||
"\n",
|
||||
"If you're using LangGraph, please refer to [this how-to guide](https://langchain-ai.github.io/langgraph/how-tos/pass-run-time-values-to-tools/)\n",
|
||||
"which shows how to create an agent that keeps track of a given user's favorite pets.\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
":::caution Added in `langchain-core==0.2.21`\n",
|
||||
"\n",
|
||||
"Must have `langchain-core>=0.2.21` to use this functionality.\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"You may need to bind values to a tool that are only known at runtime. For example, the tool logic may require using the ID of the user who made the request.\n",
|
||||
"\n",
|
||||
"Most of the time, such values should not be controlled by the LLM. In fact, allowing the LLM to control the user ID may lead to a security risk.\n",
|
||||
"\n",
|
||||
"Instead, the LLM should only control the parameters of the tool that are meant to be controlled by the LLM, while other parameters (such as user ID) should be fixed by the application logic.\n",
|
||||
"\n",
|
||||
"This how-to guide shows you how to prevent the model from generating certain tool arguments and injecting them in directly at runtime."
|
||||
"This how-to guide shows a simple design pattern that creates the tool dynamically at run time and binds to them appropriate values."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -56,12 +57,23 @@
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.2.1\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.0\u001b[0m\n",
|
||||
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpython -m pip install --upgrade pip\u001b[0m\n",
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# | output: false\n",
|
||||
"# | echo: false\n",
|
||||
"\n",
|
||||
"# %pip install -qU langchain langchain_openai\n",
|
||||
"%pip install -qU langchain langchain_openai\n",
|
||||
"\n",
|
||||
"import os\n",
|
||||
"from getpass import getpass\n",
|
||||
@@ -78,9 +90,10 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Hiding arguments from the model\n",
|
||||
"# Passing request time information\n",
|
||||
"\n",
|
||||
"We can use the InjectedToolArg annotation to mark certain parameters of our Tool, like `user_id` as being injected at runtime, meaning they shouldn't be generated by the model"
|
||||
"The idea is to create the tool dynamically at request time, and bind to it the appropriate information. For example,\n",
|
||||
"this information may be the user ID as resolved from the request itself."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -91,88 +104,46 @@
|
||||
"source": [
|
||||
"from typing import List\n",
|
||||
"\n",
|
||||
"from langchain_core.tools import InjectedToolArg, tool\n",
|
||||
"from typing_extensions import Annotated\n",
|
||||
"\n",
|
||||
"user_to_pets = {}\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool(parse_docstring=True)\n",
|
||||
"def update_favorite_pets(\n",
|
||||
" pets: List[str], user_id: Annotated[str, InjectedToolArg]\n",
|
||||
") -> None:\n",
|
||||
" \"\"\"Add the list of favorite pets.\n",
|
||||
"\n",
|
||||
" Args:\n",
|
||||
" pets: List of favorite pets to set.\n",
|
||||
" user_id: User's ID.\n",
|
||||
" \"\"\"\n",
|
||||
" user_to_pets[user_id] = pets\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool(parse_docstring=True)\n",
|
||||
"def delete_favorite_pets(user_id: Annotated[str, InjectedToolArg]) -> None:\n",
|
||||
" \"\"\"Delete the list of favorite pets.\n",
|
||||
"\n",
|
||||
" Args:\n",
|
||||
" user_id: User's ID.\n",
|
||||
" \"\"\"\n",
|
||||
" if user_id in user_to_pets:\n",
|
||||
" del user_to_pets[user_id]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool(parse_docstring=True)\n",
|
||||
"def list_favorite_pets(user_id: Annotated[str, InjectedToolArg]) -> None:\n",
|
||||
" \"\"\"List favorite pets if any.\n",
|
||||
"\n",
|
||||
" Args:\n",
|
||||
" user_id: User's ID.\n",
|
||||
" \"\"\"\n",
|
||||
" return user_to_pets.get(user_id, [])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If we look at the input schemas for these tools, we'll see that user_id is still listed:"
|
||||
"from langchain_core.output_parsers import JsonOutputParser\n",
|
||||
"from langchain_core.tools import BaseTool, tool"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'title': 'update_favorite_petsSchema',\n",
|
||||
" 'description': 'Add the list of favorite pets.',\n",
|
||||
" 'type': 'object',\n",
|
||||
" 'properties': {'pets': {'title': 'Pets',\n",
|
||||
" 'description': 'List of favorite pets to set.',\n",
|
||||
" 'type': 'array',\n",
|
||||
" 'items': {'type': 'string'}},\n",
|
||||
" 'user_id': {'title': 'User Id',\n",
|
||||
" 'description': \"User's ID.\",\n",
|
||||
" 'type': 'string'}},\n",
|
||||
" 'required': ['pets', 'user_id']}"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"update_favorite_pets.get_input_schema().schema()"
|
||||
"user_to_pets = {}\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def generate_tools_for_user(user_id: str) -> List[BaseTool]:\n",
|
||||
" \"\"\"Generate a set of tools that have a user id associated with them.\"\"\"\n",
|
||||
"\n",
|
||||
" @tool\n",
|
||||
" def update_favorite_pets(pets: List[str]) -> None:\n",
|
||||
" \"\"\"Add the list of favorite pets.\"\"\"\n",
|
||||
" user_to_pets[user_id] = pets\n",
|
||||
"\n",
|
||||
" @tool\n",
|
||||
" def delete_favorite_pets() -> None:\n",
|
||||
" \"\"\"Delete the list of favorite pets.\"\"\"\n",
|
||||
" if user_id in user_to_pets:\n",
|
||||
" del user_to_pets[user_id]\n",
|
||||
"\n",
|
||||
" @tool\n",
|
||||
" def list_favorite_pets() -> None:\n",
|
||||
" \"\"\"List favorite pets if any.\"\"\"\n",
|
||||
" return user_to_pets.get(user_id, [])\n",
|
||||
"\n",
|
||||
" return [update_favorite_pets, delete_favorite_pets, list_favorite_pets]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"But if we look at the tool call schema, which is what is passed to the model for tool-calling, user_id has been removed:"
|
||||
"Verify that the tools work correctly"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -181,60 +152,46 @@
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'title': 'update_favorite_pets',\n",
|
||||
" 'description': 'Add the list of favorite pets.',\n",
|
||||
" 'type': 'object',\n",
|
||||
" 'properties': {'pets': {'title': 'Pets',\n",
|
||||
" 'description': 'List of favorite pets to set.',\n",
|
||||
" 'type': 'array',\n",
|
||||
" 'items': {'type': 'string'}}},\n",
|
||||
" 'required': ['pets']}"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'eugene': ['cat', 'dog']}\n",
|
||||
"['cat', 'dog']\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"update_favorite_pets.tool_call_schema.schema()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"So when we invoke our tool, we need to pass in user_id:"
|
||||
"update_pets, delete_pets, list_pets = generate_tools_for_user(\"eugene\")\n",
|
||||
"update_pets.invoke({\"pets\": [\"cat\", \"dog\"]})\n",
|
||||
"print(user_to_pets)\n",
|
||||
"print(list_pets.invoke({}))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'123': ['lizard', 'dog']}\n",
|
||||
"['lizard', 'dog']\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"user_id = \"123\"\n",
|
||||
"update_favorite_pets.invoke({\"pets\": [\"lizard\", \"dog\"], \"user_id\": user_id})\n",
|
||||
"print(user_to_pets)\n",
|
||||
"print(list_favorite_pets.invoke({\"user_id\": user_id}))"
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def handle_run_time_request(user_id: str, query: str):\n",
|
||||
" \"\"\"Handle run time request.\"\"\"\n",
|
||||
" tools = generate_tools_for_user(user_id)\n",
|
||||
" llm_with_tools = llm.bind_tools(tools)\n",
|
||||
" prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [(\"system\", \"You are a helpful assistant.\")],\n",
|
||||
" )\n",
|
||||
" chain = prompt | llm_with_tools\n",
|
||||
" return llm_with_tools.invoke(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"But when the model calls the tool, no user_id argument will be generated:"
|
||||
"This code will allow the LLM to invoke the tools, but the LLM is **unaware** of the fact that a **user ID** even exists!"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -247,8 +204,7 @@
|
||||
"text/plain": [
|
||||
"[{'name': 'update_favorite_pets',\n",
|
||||
" 'args': {'pets': ['cats', 'parrots']},\n",
|
||||
" 'id': 'call_W3cn4lZmJlyk8PCrKN4PRwqB',\n",
|
||||
" 'type': 'tool_call'}]"
|
||||
" 'id': 'call_jJvjPXsNbFO5MMgW0q84iqCN'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
@@ -257,349 +213,30 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"tools = [\n",
|
||||
" update_favorite_pets,\n",
|
||||
" delete_favorite_pets,\n",
|
||||
" list_favorite_pets,\n",
|
||||
"]\n",
|
||||
"llm_with_tools = llm.bind_tools(tools)\n",
|
||||
"ai_msg = llm_with_tools.invoke(\"my favorite animals are cats and parrots\")\n",
|
||||
"ai_msg.tool_calls"
|
||||
"ai_message = handle_run_time_request(\n",
|
||||
" \"eugene\", \"my favorite animals are cats and parrots.\"\n",
|
||||
")\n",
|
||||
"ai_message.tool_calls"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Injecting arguments at runtime"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If we want to actually execute our tools using the model-generated tool call, we'll need to inject the user_id ourselves:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'name': 'update_favorite_pets',\n",
|
||||
" 'args': {'pets': ['cats', 'parrots'], 'user_id': '123'},\n",
|
||||
" 'id': 'call_W3cn4lZmJlyk8PCrKN4PRwqB',\n",
|
||||
" 'type': 'tool_call'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from copy import deepcopy\n",
|
||||
":::{.callout-important}\n",
|
||||
"\n",
|
||||
"from langchain_core.runnables import chain\n",
|
||||
"Chat models only output requests to invoke tools, they don't actually invoke the underlying tools.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@chain\n",
|
||||
"def inject_user_id(ai_msg):\n",
|
||||
" tool_calls = []\n",
|
||||
" for tool_call in ai_msg.tool_calls:\n",
|
||||
" tool_call_copy = deepcopy(tool_call)\n",
|
||||
" tool_call_copy[\"args\"][\"user_id\"] = user_id\n",
|
||||
" tool_calls.append(tool_call_copy)\n",
|
||||
" return tool_calls\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"inject_user_id.invoke(ai_msg)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"And now we can chain together our model, injection code, and the actual tools to create a tool-executing chain:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[ToolMessage(content='null', name='update_favorite_pets', tool_call_id='call_HUyF6AihqANzEYxQnTUKxkXj')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"tool_map = {tool.name: tool for tool in tools}\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@chain\n",
|
||||
"def tool_router(tool_call):\n",
|
||||
" return tool_map[tool_call[\"name\"]]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"chain = llm_with_tools | inject_user_id | tool_router.map()\n",
|
||||
"chain.invoke(\"my favorite animals are cats and parrots\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Looking at the user_to_pets dict, we can see that it's been updated to include cats and parrots:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'123': ['cats', 'parrots']}"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"user_to_pets"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Other ways of annotating args\n",
|
||||
"\n",
|
||||
"Here are a few other ways of annotating our tool args:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'title': 'UpdateFavoritePetsSchema',\n",
|
||||
" 'description': 'Update list of favorite pets',\n",
|
||||
" 'type': 'object',\n",
|
||||
" 'properties': {'pets': {'title': 'Pets',\n",
|
||||
" 'description': 'List of favorite pets to set.',\n",
|
||||
" 'type': 'array',\n",
|
||||
" 'items': {'type': 'string'}},\n",
|
||||
" 'user_id': {'title': 'User Id',\n",
|
||||
" 'description': \"User's ID.\",\n",
|
||||
" 'type': 'string'}},\n",
|
||||
" 'required': ['pets', 'user_id']}"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
|
||||
"from langchain_core.tools import BaseTool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class UpdateFavoritePetsSchema(BaseModel):\n",
|
||||
" \"\"\"Update list of favorite pets\"\"\"\n",
|
||||
"\n",
|
||||
" pets: List[str] = Field(..., description=\"List of favorite pets to set.\")\n",
|
||||
" user_id: Annotated[str, InjectedToolArg] = Field(..., description=\"User's ID.\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool(args_schema=UpdateFavoritePetsSchema)\n",
|
||||
"def update_favorite_pets(pets, user_id):\n",
|
||||
" user_to_pets[user_id] = pets\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"update_favorite_pets.get_input_schema().schema()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'title': 'update_favorite_pets',\n",
|
||||
" 'description': 'Update list of favorite pets',\n",
|
||||
" 'type': 'object',\n",
|
||||
" 'properties': {'pets': {'title': 'Pets',\n",
|
||||
" 'description': 'List of favorite pets to set.',\n",
|
||||
" 'type': 'array',\n",
|
||||
" 'items': {'type': 'string'}}},\n",
|
||||
" 'required': ['pets']}"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"update_favorite_pets.tool_call_schema.schema()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'title': 'UpdateFavoritePetsSchema',\n",
|
||||
" 'description': 'Update list of favorite pets',\n",
|
||||
" 'type': 'object',\n",
|
||||
" 'properties': {'pets': {'title': 'Pets',\n",
|
||||
" 'description': 'List of favorite pets to set.',\n",
|
||||
" 'type': 'array',\n",
|
||||
" 'items': {'type': 'string'}},\n",
|
||||
" 'user_id': {'title': 'User Id',\n",
|
||||
" 'description': \"User's ID.\",\n",
|
||||
" 'type': 'string'}},\n",
|
||||
" 'required': ['pets', 'user_id']}"
|
||||
]
|
||||
},
|
||||
"execution_count": 22,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from typing import Optional, Type\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class UpdateFavoritePets(BaseTool):\n",
|
||||
" name: str = \"update_favorite_pets\"\n",
|
||||
" description: str = \"Update list of favorite pets\"\n",
|
||||
" args_schema: Optional[Type[BaseModel]] = UpdateFavoritePetsSchema\n",
|
||||
"\n",
|
||||
" def _run(self, pets, user_id):\n",
|
||||
" user_to_pets[user_id] = pets\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"UpdateFavoritePets().get_input_schema().schema()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 23,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'title': 'update_favorite_pets',\n",
|
||||
" 'description': 'Update list of favorite pets',\n",
|
||||
" 'type': 'object',\n",
|
||||
" 'properties': {'pets': {'title': 'Pets',\n",
|
||||
" 'description': 'List of favorite pets to set.',\n",
|
||||
" 'type': 'array',\n",
|
||||
" 'items': {'type': 'string'}}},\n",
|
||||
" 'required': ['pets']}"
|
||||
]
|
||||
},
|
||||
"execution_count": 23,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"UpdateFavoritePets().tool_call_schema.schema()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 24,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'title': 'update_favorite_petsSchema',\n",
|
||||
" 'description': 'Use the tool.\\n\\nAdd run_manager: Optional[CallbackManagerForToolRun] = None\\nto child implementations to enable tracing.',\n",
|
||||
" 'type': 'object',\n",
|
||||
" 'properties': {'pets': {'title': 'Pets',\n",
|
||||
" 'type': 'array',\n",
|
||||
" 'items': {'type': 'string'}},\n",
|
||||
" 'user_id': {'title': 'User Id', 'type': 'string'}},\n",
|
||||
" 'required': ['pets', 'user_id']}"
|
||||
]
|
||||
},
|
||||
"execution_count": 24,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"class UpdateFavoritePets2(BaseTool):\n",
|
||||
" name: str = \"update_favorite_pets\"\n",
|
||||
" description: str = \"Update list of favorite pets\"\n",
|
||||
"\n",
|
||||
" def _run(self, pets: List[str], user_id: Annotated[str, InjectedToolArg]) -> None:\n",
|
||||
" user_to_pets[user_id] = pets\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"UpdateFavoritePets2().get_input_schema().schema()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 26,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'title': 'update_favorite_pets',\n",
|
||||
" 'description': 'Update list of favorite pets',\n",
|
||||
" 'type': 'object',\n",
|
||||
" 'properties': {'pets': {'title': 'Pets',\n",
|
||||
" 'type': 'array',\n",
|
||||
" 'items': {'type': 'string'}}},\n",
|
||||
" 'required': ['pets']}"
|
||||
]
|
||||
},
|
||||
"execution_count": 26,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"UpdateFavoritePets2().tool_call_schema.schema()"
|
||||
"To see how to invoke the tools, please refer to [how to use a model to call tools](https://python.langchain.com/v0.2/docs/how_to/tool_calling).\n",
|
||||
":::"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "poetry-venv-311",
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "poetry-venv-311"
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
@@ -611,7 +248,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
"version": "3.11.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to stream events from a tool\n",
|
||||
"# How to stream events from child runs within a custom tool\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
@@ -22,11 +22,10 @@
|
||||
"\n",
|
||||
"LangChain cannot automatically propagate configuration, including callbacks necessary for `astream_events()`, to child runnables if you are running `async` code in `python<=3.10`. This is a common reason why you may fail to see events being emitted from custom runnables or tools.\n",
|
||||
"\n",
|
||||
"If you are running python<=3.10, you will need to manually propagate the `RunnableConfig` object to the child runnable in async environments. For an example of how to manually propagate the config, see the implementation of the `bar` RunnableLambda below.\n",
|
||||
"\n",
|
||||
"If you are running python>=3.11, the `RunnableConfig` will automatically propagate to child runnables in async environment. However, it is still a good idea to propagate the `RunnableConfig` manually if your code may run in older Python versions.\n",
|
||||
"If you are running `python>=3.11`, configuration will automatically propagate to child runnables in async environments, and you don't need to access the `RunnableConfig` object for that tool as shown in this guide. However, it is still a good idea if your code may run in other Python versions.\n",
|
||||
"\n",
|
||||
"This guide also requires `langchain-core>=0.2.16`.\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"Say you have a custom tool that calls a chain that condenses its input by prompting a chat model to return only 10 words, then reversing the output. First, define it in a naive way:\n",
|
||||
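A minimal sketch of the manual propagation described above, with illustrative names rather than the notebook's implementation: the tool accepts a `RunnableConfig` and forwards it when invoking the child runnable, so that callbacks and events still reach `astream_events()` on Python 3.10 and below.

```python
# Illustrative only: forward the RunnableConfig to the child runnable so that
# callbacks/events propagate even on python<=3.10 (on >=3.11 this is automatic).
from langchain_core.runnables import RunnableConfig, RunnableLambda
from langchain_core.tools import tool

reverse_words = RunnableLambda(lambda text: " ".join(reversed(text.split())))


@tool
async def reverse_tool(text: str, config: RunnableConfig) -> str:
    """Reverse the word order of the input text."""
    # Pass the config explicitly so the child run is traced and emits events.
    return await reverse_words.ainvoke(text, config=config)


# Events from the child runnable are then visible, e.g. via:
# async for event in reverse_tool.astream_events({"text": "hello world"}, version="v2"): ...
```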
@@ -269,7 +268,6 @@
|
||||
"\n",
|
||||
"- Pass [runtime values to tools](/docs/how_to/tool_runtime)\n",
|
||||
"- Pass [tool results back to a model](/docs/how_to/tool_results_pass_to_model)\n",
|
||||
"- [Dispatch custom callback events](/docs/how_to/callbacks_custom_events)\n",
|
||||
"\n",
|
||||
"You can also check out some more specific uses of tool calling:\n",
|
||||
"\n",
|
||||
@@ -280,7 +278,7 @@
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
@@ -294,9 +292,9 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
"version": "3.10.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
|
||||
@@ -228,7 +228,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
"version": "3.11.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -540,7 +540,7 @@
|
||||
"id": "137662a6"
|
||||
},
|
||||
"source": [
|
||||
"## Example usage within RunnableWithMessageHistory "
|
||||
"## Example usage within a Conversation Chains"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -550,7 +550,7 @@
|
||||
"id": "79efa62d"
|
||||
},
|
||||
"source": [
|
||||
"Like any other integration, ChatNVIDIA is fine to support chat utilities like RunnableWithMessageHistory which is analogous to using `ConversationChain`. Below, we show the [LangChain RunnableWithMessageHistory](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html) example applied to the `mistralai/mixtral-8x22b-instruct-v0.1` model."
|
||||
"Like any other integration, ChatNVIDIA is fine to support chat utilities like conversation buffers by default. Below, we show the [LangChain ConversationBufferMemory](https://python.langchain.com/docs/modules/memory/types/buffer) example applied to the `mistralai/mixtral-8x22b-instruct-v0.1` model."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -572,19 +572,8 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.chat_history import InMemoryChatMessageHistory\n",
|
||||
"from langchain_core.runnables.history import RunnableWithMessageHistory\n",
|
||||
"\n",
|
||||
"# store is a dictionary that maps session IDs to their corresponding chat histories.\n",
|
||||
"store = {} # memory is maintained outside the chain\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# A function that returns the chat history for a given session ID.\n",
|
||||
"def get_session_history(session_id: str) -> InMemoryChatMessageHistory:\n",
|
||||
" if session_id not in store:\n",
|
||||
" store[session_id] = InMemoryChatMessageHistory()\n",
|
||||
" return store[session_id]\n",
|
||||
"\n",
|
||||
"from langchain.chains import ConversationChain\n",
|
||||
"from langchain.memory import ConversationBufferMemory\n",
|
||||
"\n",
|
||||
"chat = ChatNVIDIA(\n",
|
||||
" model=\"mistralai/mixtral-8x22b-instruct-v0.1\",\n",
|
||||
@@ -593,18 +582,24 @@
|
||||
" top_p=1.0,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Define a RunnableConfig object, with a `configurable` key. session_id determines thread\n",
|
||||
"config = {\"configurable\": {\"session_id\": \"1\"}}\n",
|
||||
"\n",
|
||||
"conversation = RunnableWithMessageHistory(\n",
|
||||
" chat,\n",
|
||||
" get_session_history,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"conversation.invoke(\n",
|
||||
" \"Hi I'm Srijan Dubey.\", # input or query\n",
|
||||
" config=config,\n",
|
||||
")"
|
||||
"conversation = ConversationChain(llm=chat, memory=ConversationBufferMemory())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f644ff28",
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/",
|
||||
"height": 268
|
||||
},
|
||||
"id": "f644ff28",
|
||||
"outputId": "bae354cc-2118-4e01-ce20-a717ac94d27d"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"conversation.invoke(\"Hi there!\")[\"response\"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -621,30 +616,26 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"conversation.invoke(\n",
|
||||
" \"I'm doing well! Just having a conversation with an AI.\",\n",
|
||||
" config=config,\n",
|
||||
")"
|
||||
"conversation.invoke(\"I'm doing well! Just having a conversation with an AI.\")[\n",
|
||||
" \"response\"\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "uHIMZxVSVNBC",
|
||||
"id": "LyD1xVKmVSs4",
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/",
|
||||
"height": 284
|
||||
"height": 350
|
||||
},
|
||||
"id": "uHIMZxVSVNBC",
|
||||
"outputId": "79acc89d-a820-4f2c-bac2-afe99da95580"
|
||||
"id": "LyD1xVKmVSs4",
|
||||
"outputId": "a1714513-a8fd-4d14-f974-233e39d5c4f5"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"conversation.invoke(\n",
|
||||
" \"Tell me about yourself.\",\n",
|
||||
" config=config,\n",
|
||||
")"
|
||||
"conversation.invoke(\"Tell me about yourself.\")[\"response\"]"
|
||||
]
|
||||
}
|
||||
],
|
||||
|
||||
@@ -2,7 +2,6 @@
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
@@ -12,7 +11,6 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e49f1e0d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# ChatOllama\n",
|
||||
@@ -25,18 +23,6 @@
|
||||
"\n",
|
||||
"For a complete list of supported models and model variants, see the [Ollama model library](https://github.com/jmorganca/ollama#model-library).\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/v0.2/docs/integrations/chat/ollama) | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| [ChatOllama](https://api.python.langchain.com/en/latest/chat_models/langchain_ollama.chat_models.ChatOllama.html) | [langchain-ollama](https://api.python.langchain.com/en/latest/ollama_api_reference.html) | ✅ | ❌ | ✅ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling/) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| ❌ | ❌ | ✅ | ❌ | ❌ | ❌ | ✅ | ✅ | ❌ | ❌ | \n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"First, follow [these instructions](https://github.com/jmorganca/ollama) to set up and run a local Ollama instance:\n",
|
||||
@@ -54,285 +40,307 @@
|
||||
"* Specify the exact version of the model of interest as such `ollama pull vicuna:13b-v1.5-16k-q4_0` (View the [various tags for the `Vicuna`](https://ollama.ai/library/vicuna/tags) model in this instance)\n",
|
||||
"* To view all pulled models, use `ollama list`\n",
|
||||
"* To chat directly with a model from the command line, use `ollama run <name-of-model>`\n",
|
||||
"* View the [Ollama documentation](https://github.com/jmorganca/ollama) for more commands. Run `ollama help` in the terminal to see available commands too.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "72ee0c4b-9764-423a-9dbf-95129e185210",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
|
||||
"* View the [Ollama documentation](https://github.com/jmorganca/ollama) for more commands. Run `ollama help` in the terminal to see available commands too.\n",
|
||||
"\n",
|
||||
"## Usage\n",
|
||||
"\n",
|
||||
"You can see a full list of supported parameters on the [API reference page](https://api.python.langchain.com/en/latest/llms/langchain.llms.ollama.Ollama.html).\n",
|
||||
"\n",
|
||||
"If you are using a LLaMA `chat` model (e.g., `ollama pull llama3`) then you can use the `ChatOllama` interface.\n",
|
||||
"\n",
|
||||
"This includes [special tokens](https://huggingface.co/blog/llama2#how-to-prompt-llama-2) for system message and user input.\n",
|
||||
"\n",
|
||||
"## Interacting with Models \n",
|
||||
"\n",
|
||||
"Here are a few ways to interact with pulled local models\n",
|
||||
"\n",
|
||||
"#### In the terminal:\n",
|
||||
"\n",
|
||||
"* All of your local models are automatically served on `localhost:11434`\n",
|
||||
"* Run `ollama run <name-of-model>` to start interacting via the command line directly\n",
|
||||
"\n",
|
||||
"#### Via an API\n",
|
||||
"\n",
|
||||
"Send an `application/json` request to the API endpoint of Ollama to interact.\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"curl http://localhost:11434/api/generate -d '{\n",
|
||||
" \"model\": \"llama3\",\n",
|
||||
" \"prompt\":\"Why is the sky blue?\"\n",
|
||||
"}'\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"See the Ollama [API documentation](https://github.com/jmorganca/ollama/blob/main/docs/api.md) for all endpoints.\n",
|
||||
"\n",
|
||||
"#### Via LangChain\n",
|
||||
"\n",
|
||||
"See a typical basic example of using Ollama via the `ChatOllama` chat model in your LangChain application. \n",
|
||||
"\n",
|
||||
"View the [API Reference for ChatOllama](https://api.python.langchain.com/en/latest/chat_models/langchain_community.chat_models.ollama.ChatOllama.html#langchain_community.chat_models.ollama.ChatOllama) for more."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain Ollama integration lives in the `langchain-ollama` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-ollama"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate chat completions:\n",
|
||||
"\n",
|
||||
"- TODO: Update model instantiation with relevant params."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_ollama import ChatOllama\n",
|
||||
"\n",
|
||||
"llm = ChatOllama(\n",
|
||||
" model=\"llama3\",\n",
|
||||
" temperature=0,\n",
|
||||
" # other params...\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2b4f3e15",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invocation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "62e0dbc3",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Je adore le programmation.\\n\\n(Note: \"programmation\" is not commonly used in French, but I translated it as \"le programmation\" to maintain the same grammatical structure and meaning as the original English sentence.)', response_metadata={'model': 'llama3', 'created_at': '2024-07-22T17:43:54.731273Z', 'message': {'role': 'assistant', 'content': ''}, 'done_reason': 'stop', 'done': True, 'total_duration': 11094839375, 'load_duration': 10121854667, 'prompt_eval_count': 36, 'prompt_eval_duration': 146569000, 'eval_count': 46, 'eval_duration': 816593000}, id='run-befccbdc-e1f9-42a9-85cf-e69b926d6b8b-0', usage_metadata={'input_tokens': 36, 'output_tokens': 46, 'total_tokens': 82})"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.messages import AIMessage\n",
|
||||
"\n",
|
||||
"messages = [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"I love programming.\"),\n",
|
||||
"]\n",
|
||||
"ai_msg = llm.invoke(messages)\n",
|
||||
"ai_msg"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Je adore le programmation.\n",
|
||||
"Why did the astronaut break up with his girlfriend?\n",
|
||||
"\n",
|
||||
"(Note: \"programmation\" is not commonly used in French, but I translated it as \"le programmation\" to maintain the same grammatical structure and meaning as the original English sentence.)\n"
|
||||
"Because he needed space!\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(ai_msg.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Ich liebe Programmieren!\\n\\n(Note: \"Ich liebe\" means \"I love\", \"Programmieren\" is the verb for \"programming\")', response_metadata={'model': 'llama3', 'created_at': '2024-07-04T04:22:33.864132Z', 'message': {'role': 'assistant', 'content': ''}, 'done_reason': 'stop', 'done': True, 'total_duration': 1310800083, 'load_duration': 1782000, 'prompt_eval_count': 16, 'prompt_eval_duration': 250199000, 'eval_count': 29, 'eval_duration': 1057192000}, id='run-cbadbe59-2de2-4ec0-a18a-b3220226c3d2-0')"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# LangChain supports many other chat models. Here, we're using Ollama\n",
|
||||
"from langchain_community.chat_models import ChatOllama\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"# supports many more optional parameters. Hover on your `ChatOllama(...)`\n",
|
||||
"# class to view the latest available supported parameters\n",
|
||||
"llm = ChatOllama(model=\"llama3\")\n",
|
||||
"prompt = ChatPromptTemplate.from_template(\"Tell me a short joke about {topic}\")\n",
|
||||
"\n",
|
||||
"chain = prompt | llm\n",
|
||||
"chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"input_language\": \"English\",\n",
|
||||
" \"output_language\": \"German\",\n",
|
||||
" \"input\": \"I love programming.\",\n",
|
||||
" }\n",
|
||||
")"
|
||||
"# using LangChain Expressive Language chain syntax\n",
|
||||
"# learn more about the LCEL on\n",
|
||||
"# /docs/concepts/#langchain-expression-language-lcel\n",
|
||||
"chain = prompt | llm | StrOutputParser()\n",
|
||||
"\n",
|
||||
"# for brevity, response is printed in terminal\n",
|
||||
"# You can use LangServe to deploy your application for\n",
|
||||
"# production\n",
|
||||
"print(chain.invoke({\"topic\": \"Space travel\"}))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0f51345d-0a9d-43f1-8fca-d0662cb8e21b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Tool calling\n",
|
||||
"\n",
|
||||
"We can use [tool calling](https://blog.langchain.dev/improving-core-tool-interfaces-and-docs-in-langchain/) with an LLM [that has been fine-tuned for tool use](https://ollama.com/library/llama3-groq-tool-use): \n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"ollama pull llama3-groq-tool-use\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"We can just pass normal Python functions directly as tools."
|
||||
"LCEL chains, out of the box, provide extra functionalities, such as streaming of responses, and async support"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "5250bceb-1029-41ff-b447-983518704d88",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'name': 'validate_user',\n",
|
||||
" 'args': {'addresses': ['123 Fake St, Boston MA',\n",
|
||||
" '234 Pretend Boulevard, Houston TX'],\n",
|
||||
" 'user_id': 123},\n",
|
||||
" 'id': 'fe2148d3-95fb-48e9-845a-4bfecc1f1f96',\n",
|
||||
" 'type': 'tool_call'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Why\n",
|
||||
" did\n",
|
||||
" the\n",
|
||||
" astronaut\n",
|
||||
" break\n",
|
||||
" up\n",
|
||||
" with\n",
|
||||
" his\n",
|
||||
" girlfriend\n",
|
||||
" before\n",
|
||||
" going\n",
|
||||
" to\n",
|
||||
" Mars\n",
|
||||
"?\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Because\n",
|
||||
" he\n",
|
||||
" needed\n",
|
||||
" space\n",
|
||||
"!\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from typing import List\n",
|
||||
"topic = {\"topic\": \"Space travel\"}\n",
|
||||
"\n",
|
||||
"from langchain_ollama import ChatOllama\n",
|
||||
"from typing_extensions import TypedDict\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def validate_user(user_id: int, addresses: List) -> bool:\n",
|
||||
" \"\"\"Validate user using historical addresses.\n",
|
||||
"\n",
|
||||
" Args:\n",
|
||||
" user_id: (int) the user ID.\n",
|
||||
" addresses: Previous addresses.\n",
|
||||
" \"\"\"\n",
|
||||
" return True\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"llm = ChatOllama(\n",
|
||||
" model=\"llama3-groq-tool-use\",\n",
|
||||
" temperature=0,\n",
|
||||
").bind_tools([validate_user])\n",
|
||||
"\n",
|
||||
"result = llm.invoke(\n",
|
||||
" \"Could you validate user 123? They previously lived at \"\n",
|
||||
" \"123 Fake St in Boston MA and 234 Pretend Boulevard in \"\n",
|
||||
" \"Houston TX.\"\n",
|
||||
")\n",
|
||||
"result.tool_calls"
|
||||
"for chunks in chain.stream(topic):\n",
|
||||
" print(chunks)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2bb034ff-218f-4865-afea-3f5e57d3bdee",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We look at the LangSmith trace to see that the tool call was performed: \n",
|
||||
"For streaming async support, here's an example - all possible via the single chain created above."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"topic = {\"topic\": \"Space travel\"}\n",
|
||||
"\n",
|
||||
"https://smith.langchain.com/public/4169348a-d6be-45df-a7cf-032f6baa4697/r\n",
|
||||
"\n",
|
||||
"In particular, the trace shows how the tool schema was populated."
|
||||
"async for chunks in chain.astream(topic):\n",
|
||||
" print(chunks)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Take a look at the [LangChain Expressive Language (LCEL) Interface](/docs/concepts#interface) for the other available interfaces for use when a chain is created.\n",
|
||||
"\n",
|
||||
"## Building from source\n",
|
||||
"\n",
|
||||
"For up to date instructions on building from source, check the Ollama documentation on [Building from Source](https://github.com/ollama/ollama?tab=readme-ov-file#building)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Extraction\n",
|
||||
" \n",
|
||||
"Use the latest version of Ollama and supply the [`format`](https://github.com/jmorganca/ollama/blob/main/docs/api.md#json-mode) flag. The `format` flag will force the model to produce the response in JSON.\n",
|
||||
"\n",
|
||||
"> **Note:** You can also try out the experimental [OllamaFunctions](/docs/integrations/chat/ollama_functions) wrapper for convenience."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.chat_models import ChatOllama\n",
|
||||
"\n",
|
||||
"llm = ChatOllama(model=\"llama3\", format=\"json\", temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"content='{ \"morning\": \"blue\", \"noon\": \"clear blue\", \"afternoon\": \"hazy yellow\", \"evening\": \"orange-red\" }\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n ' id='run-e893700f-e2d0-4df8-ad86-17525dcee318-0'\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"\n",
|
||||
"messages = [\n",
|
||||
" HumanMessage(\n",
|
||||
" content=\"What color is the sky at different times of the day? Respond using JSON\"\n",
|
||||
" )\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"chat_model_response = llm.invoke(messages)\n",
|
||||
"print(chat_model_response)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"Name: John\n",
|
||||
"Age: 35\n",
|
||||
"Likes: Pizza\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import json\n",
|
||||
"\n",
|
||||
"from langchain_community.chat_models import ChatOllama\n",
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"json_schema = {\n",
|
||||
" \"title\": \"Person\",\n",
|
||||
" \"description\": \"Identifying information about a person.\",\n",
|
||||
" \"type\": \"object\",\n",
|
||||
" \"properties\": {\n",
|
||||
" \"name\": {\"title\": \"Name\", \"description\": \"The person's name\", \"type\": \"string\"},\n",
|
||||
" \"age\": {\"title\": \"Age\", \"description\": \"The person's age\", \"type\": \"integer\"},\n",
|
||||
" \"fav_food\": {\n",
|
||||
" \"title\": \"Fav Food\",\n",
|
||||
" \"description\": \"The person's favorite food\",\n",
|
||||
" \"type\": \"string\",\n",
|
||||
" },\n",
|
||||
" },\n",
|
||||
" \"required\": [\"name\", \"age\"],\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"llm = ChatOllama(model=\"llama2\")\n",
|
||||
"\n",
|
||||
"messages = [\n",
|
||||
" HumanMessage(\n",
|
||||
" content=\"Please tell me about a person using the following JSON schema:\"\n",
|
||||
" ),\n",
|
||||
" HumanMessage(content=\"{dumps}\"),\n",
|
||||
" HumanMessage(\n",
|
||||
" content=\"Now, considering the schema, tell me about a person named John who is 35 years old and loves pizza.\"\n",
|
||||
" ),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(messages)\n",
|
||||
"dumps = json.dumps(json_schema, indent=2)\n",
|
||||
"\n",
|
||||
"chain = prompt | llm | StrOutputParser()\n",
|
||||
"\n",
|
||||
"print(chain.invoke({\"dumps\": dumps}))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4c5e0197",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Multi-modal\n",
|
||||
"\n",
|
||||
"Ollama has support for multi-modal LLMs, such as [bakllava](https://ollama.com/library/bakllava) and [llava](https://ollama.com/library/llava).\n",
|
||||
"Ollama has support for multi-modal LLMs, such as [bakllava](https://ollama.ai/library/bakllava) and [llava](https://ollama.ai/library/llava).\n",
|
||||
"\n",
|
||||
" ollama pull bakllava\n",
|
||||
"Browse the full set of versions for models with `tags`, such as [Llava](https://ollama.ai/library/llava/tags).\n",
|
||||
"\n",
|
||||
"Be sure to update Ollama so that you have the most recent version to support multi-modal."
|
||||
"Download the desired LLM via `ollama pull bakllava`\n",
|
||||
"\n",
|
||||
"Be sure to update Ollama so that you have the most recent version to support multi-modal.\n",
|
||||
"\n",
|
||||
"Check out the typical example of how to use ChatOllama multi-modal support below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "36c9b1c2",
|
||||
"execution_count": 18,
|
||||
"metadata": {
|
||||
"scrolled": true
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"!pip install --upgrade --quiet pillow"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -391,8 +399,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "32b3ba7b",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -404,8 +411,8 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.chat_models import ChatOllama\n",
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"from langchain_ollama import ChatOllama\n",
|
||||
"\n",
|
||||
"llm = ChatOllama(model=\"bakllava\", temperature=0)\n",
|
||||
"\n",
|
||||
@@ -442,12 +449,20 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"## Concurrency Features\n",
|
||||
"\n",
|
||||
"For detailed documentation of all ChatOllama features and configurations head to the API reference: https://api.python.langchain.com/en/latest/chat_models/langchain_ollama.chat_models.ChatOllama.html"
|
||||
"Ollama supports concurrency inference for a single model, and or loading multiple models simulatenously (at least [version 0.1.33](https://github.com/ollama/ollama/releases)).\n",
|
||||
"\n",
|
||||
"Start the Ollama server with:\n",
|
||||
"\n",
|
||||
"* `OLLAMA_NUM_PARALLEL`: Handle multiple requests simultaneously for a single model\n",
|
||||
"* `OLLAMA_MAX_LOADED_MODELS`: Load multiple models simultaneously\n",
|
||||
"\n",
|
||||
"Example: `OLLAMA_NUM_PARALLEL=4 OLLAMA_MAX_LOADED_MODELS=4 ollama serve`\n",
|
||||
"\n",
|
||||
"Learn more about configuring Ollama server in [the official guide](https://github.com/ollama/ollama/blob/main/docs/faq.md#how-do-i-configure-ollama-server)."
|
||||
]
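As an illustrative sketch (assuming the server was started with the variables above and the `llama3` model has been pulled), several prompts can then be sent concurrently from LangChain with the standard `.batch(...)` method:

```python
from langchain_ollama import ChatOllama

llm = ChatOllama(model="llama3")

# .batch() fans the prompts out concurrently; max_concurrency caps how many
# requests hit the server at once, to line up with OLLAMA_NUM_PARALLEL above.
replies = llm.batch(
    ["Why is the sky blue?", "Why is grass green?", "Why is the sun hot?"],
    config={"max_concurrency": 4},
)
for reply in replies:
    print(reply.content)
```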
|
||||
}
|
||||
],
|
||||
@@ -471,5 +486,5 @@
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
|
||||
@@ -6,7 +6,6 @@
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Ollama Functions\n",
|
||||
"sidebar_class_name: hidden\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
@@ -16,16 +15,16 @@
|
||||
"source": [
|
||||
"# OllamaFunctions\n",
|
||||
"\n",
|
||||
":::warning\n",
|
||||
"\n",
|
||||
"This was an experimental wrapper that attempts to bolt-on tool calling support to models that do not natively support it. The [primary Ollama integration](/docs/integrations/chat/ollama/) now supports tool calling, and should be used instead.\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"This notebook shows how to use an experimental wrapper around Ollama that gives it [tool calling capabilities](https://python.langchain.com/v0.2/docs/concepts/#functiontool-calling).\n",
|
||||
"\n",
|
||||
"Note that more powerful and capable models will perform better with complex schema and/or multiple functions. The examples below use llama3 and phi3 models.\n",
|
||||
"For a complete list of supported models and model variants, see the [Ollama model library](https://ollama.ai/library).\n",
|
||||
"\n",
|
||||
":::warning\n",
|
||||
"\n",
|
||||
"This is an experimental wrapper that attempts to bolt-on tool calling support to models that do not natively support it. Use with caution.\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"## Overview\n",
|
||||
"\n",
|
||||
"### Integration details\n",
|
||||
|
||||
@@ -1,484 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6b74f73d-1763-42d0-9c24-8f65f445bb72",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Dedoc\n",
|
||||
"\n",
|
||||
"This sample demonstrates the use of `Dedoc` in combination with `LangChain` as a `DocumentLoader`.\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"\n",
|
||||
"[Dedoc](https://dedoc.readthedocs.io) is an [open-source](https://github.com/ispras/dedoc)\n",
|
||||
"library/service that extracts texts, tables, attached files and document structure\n",
|
||||
"(e.g., titles, list items, etc.) from files of various formats.\n",
|
||||
"\n",
|
||||
"`Dedoc` supports `DOCX`, `XLSX`, `PPTX`, `EML`, `HTML`, `PDF`, images and more.\n",
|
||||
"Full list of supported formats can be found [here](https://dedoc.readthedocs.io/en/latest/#id1).\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | JS support |\n",
|
||||
"|:-----------------------------------------------------------------------------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------------------|:-----:|:------------:|:----------:|\n",
|
||||
"| [DedocFileLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.dedoc.DedocFileLoader.html) | [langchain_community](https://api.python.langchain.com/en/latest/community_api_reference.html) | ❌ | beta | ❌ |\n",
|
||||
"| [DedocPDFLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.pdf.DedocPDFLoader.html) | [langchain_community](https://api.python.langchain.com/en/latest/community_api_reference.html) | ❌ | beta | ❌ | \n",
|
||||
"| [DedocAPIFileLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.dedoc.DedocAPIFileLoader.html) | [langchain_community](https://api.python.langchain.com/en/latest/community_api_reference.html) | ❌ | beta | ❌ | \n",
|
||||
"\n",
|
||||
"\n",
|
||||
"### Loader features\n",
|
||||
"\n",
|
||||
"Methods for lazy loading and async loading are available, but in fact, document loading is executed synchronously.\n",
|
||||
"\n",
|
||||
"| Source | Document Lazy Loading | Async Support |\n",
|
||||
"|:------------------:|:---------------------:|:-------------:| \n",
|
||||
"| DedocFileLoader | ❌ | ❌ |\n",
|
||||
"| DedocPDFLoader | ❌ | ❌ | \n",
|
||||
"| DedocAPIFileLoader | ❌ | ❌ | \n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"* To access `DedocFileLoader` and `DedocPDFLoader` document loaders, you'll need to install the `dedoc` integration package.\n",
|
||||
"* To access `DedocAPIFileLoader`, you'll need to run the `Dedoc` service, e.g. `Docker` container (please see [the documentation](https://dedoc.readthedocs.io/en/latest/getting_started/installation.html#install-and-run-dedoc-using-docker) \n",
|
||||
"for more details):\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"docker pull dedocproject/dedoc\n",
|
||||
"docker run -p 1231:1231\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"`Dedoc` installation instruction is given [here](https://dedoc.readthedocs.io/en/latest/getting_started/installation.html)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "511c109d-a5c3-42ba-914e-5d1b385bc40f",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Install package\n",
|
||||
"%pip install --quiet \"dedoc[torch]\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6820c0e9-d56d-4899-b8c8-374760360e2b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "c1f98cae-71ec-4d60-87fb-96c1a76851d8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import DedocFileLoader\n",
|
||||
"\n",
|
||||
"loader = DedocFileLoader(\"./example_data/state_of_the_union.txt\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5d7bc2b3-73a0-4cd6-8014-cc7184aa9d4a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Load"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "b9097c14-6168-4726-819e-24abb9a63b13",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'\\nMadam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and t'"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"docs = loader.load()\n",
|
||||
"docs[0].page_content[:100]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9ed8bd46-0047-4ccc-b2d6-beb7761f7312",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Lazy Load"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "6ae12d7e-8105-4bbe-9031-0e968475f6bf",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and t\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"docs = loader.lazy_load()\n",
|
||||
"\n",
|
||||
"for doc in docs:\n",
|
||||
" print(doc.page_content[:100])\n",
|
||||
" break"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8772ae40-6239-4751-bb2d-b4a9415c1ad1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed information on configuring and calling `Dedoc` loaders, please see the API references: \n",
|
||||
"\n",
|
||||
"* https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.dedoc.DedocFileLoader.html\n",
|
||||
"* https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.pdf.DedocPDFLoader.html\n",
|
||||
"* https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.dedoc.DedocAPIFileLoader.html"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c4d5e702-0e21-4cad-a4c3-b9b3bff77203",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Loading any file\n",
|
||||
"\n",
|
||||
"For automatic handling of any file in a [supported format](https://dedoc.readthedocs.io/en/latest/#id1),\n",
|
||||
"`DedocFileLoader` can be useful.\n",
|
||||
"The file loader automatically detects the file type with a correct extension.\n",
|
||||
"\n",
|
||||
"File parsing process can be configured through `dedoc_kwargs` during the `DedocFileLoader` class initialization.\n",
|
||||
"Here the basic examples of some options usage are given, \n",
|
||||
"please see the documentation of `DedocFileLoader` and \n",
|
||||
"[dedoc documentation](https://dedoc.readthedocs.io/en/latest/parameters/parameters.html) \n",
|
||||
"to get more details about configuration parameters."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "de97d0ed-d6b1-44e0-b392-1f3d89c762f9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Basic example"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "50ffeeee-db12-4801-b208-7e32ea3d72ad",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'\\nMadam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \\n\\n\\n\\nLast year COVID-19 kept us apart. This year we are finally together again. \\n\\n\\n\\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \\n\\n\\n\\nWith a duty to one another to the American people to '"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import DedocFileLoader\n",
|
||||
"\n",
|
||||
"loader = DedocFileLoader(\"./example_data/state_of_the_union.txt\")\n",
|
||||
"\n",
|
||||
"docs = loader.load()\n",
|
||||
"\n",
|
||||
"docs[0].page_content[:400]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "457e5d4c-a4ee-4f31-ae74-3f75a1bbd0af",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Modes of split\n",
|
||||
"\n",
|
||||
"`DedocFileLoader` supports different types of document splitting into parts (each part is returned separately).\n",
|
||||
"For this purpose, `split` parameter is used with the following options:\n",
|
||||
"* `document` (default value): document text is returned as a single langchain `Document` object (don't split);\n",
|
||||
"* `page`: split document text into pages (works for `PDF`, `DJVU`, `PPTX`, `PPT`, `ODP`);\n",
|
||||
"* `node`: split document text into `Dedoc` tree nodes (title nodes, list item nodes, raw text nodes);\n",
|
||||
"* `line`: split document text into textual lines."
|
||||
]
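The next cell shows page-level splitting; as a complementary, minimal sketch (reusing the text file from the basic example above), line-level splitting might look like:

```python
from langchain_community.document_loaders import DedocFileLoader

# split="line" returns one Document per textual line of the source file
loader = DedocFileLoader(
    "./example_data/state_of_the_union.txt",
    split="line",
)

docs = loader.load()
print(len(docs), docs[0].page_content[:50])
```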
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "eec54d31-ae7a-4a3c-aa10-4ae276b1e4c4",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"2"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"loader = DedocFileLoader(\n",
|
||||
" \"./example_data/layout-parser-paper.pdf\",\n",
|
||||
" split=\"page\",\n",
|
||||
" pages=\":2\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"docs = loader.load()\n",
|
||||
"\n",
|
||||
"len(docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "61e11769-4780-4f77-b10e-27db6936f226",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Handling tables\n",
|
||||
"\n",
|
||||
"`DedocFileLoader` supports tables handling when `with_tables` parameter is \n",
|
||||
"set to `True` during loader initialization (`with_tables=True` by default). \n",
|
||||
"\n",
|
||||
"Tables are not split - each table corresponds to one langchain `Document` object.\n",
|
||||
"For tables, `Document` object has additional `metadata` fields `type=\"table\"` \n",
|
||||
"and `text_as_html` with table `HTML` representation."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "bbeb2f8a-ac5e-4b59-8026-7ea3fc14c928",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"('table',\n",
|
||||
" '<table border=\"1\" style=\"border-collapse: collapse; width: 100%;\">\\n<tbody>\\n<tr>\\n<td colspan=\"1\" rowspan=\"1\">Team</td>\\n<td colspan=\"1\" rowspan=\"1\"> "Payroll (millions)"</td>\\n<td colspan=\"1\" r')"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"loader = DedocFileLoader(\"./example_data/mlb_teams_2012.csv\")\n",
|
||||
"\n",
|
||||
"docs = loader.load()\n",
|
||||
"\n",
|
||||
"docs[1].metadata[\"type\"], docs[1].metadata[\"text_as_html\"][:200]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b4a2b872-2aba-4e4c-8b2f-83a5a81ee1da",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Handling attached files\n",
|
||||
"\n",
|
||||
"`DedocFileLoader` supports attached files handling when `with_attachments` is set \n",
|
||||
"to `True` during loader initialization (`with_attachments=False` by default). \n",
|
||||
"\n",
|
||||
"Attachments are split according to the `split` parameter.\n",
|
||||
"For attachments, langchain `Document` object has an additional metadata \n",
|
||||
"field `type=\"attachment\"`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "bb9d6c1c-e24c-4979-88a0-38d54abd6332",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"('attachment',\n",
|
||||
" '\\nContent-Type\\nmultipart/mixed; boundary=\"0000000000005d654405f082adb7\"\\nDate\\nFri, 23 Dec 2022 12:08:48 -0600\\nFrom\\nMallori Harrell <mallori@unstructured.io>\\nMIME-Version\\n1.0\\nMessage-ID\\n<CAPgNNXSzLVJ-d1OCX_TjFgJU7ugtQrjFybPtAMmmYZzphxNFYg@mail.gmail.com>\\nSubject\\nFake email with attachment\\nTo\\nMallori Harrell <mallori@unstructured.io>')"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"loader = DedocFileLoader(\n",
|
||||
" \"./example_data/fake-email-attachment.eml\",\n",
|
||||
" with_attachments=True,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"docs = loader.load()\n",
|
||||
"\n",
|
||||
"docs[1].metadata[\"type\"], docs[1].page_content"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d435c3f6-703a-4064-8307-ace140de967a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Loading PDF file\n",
|
||||
"\n",
|
||||
"If you want to handle only `PDF` documents, you can use `DedocPDFLoader` with only `PDF` support.\n",
|
||||
"The loader supports the same parameters for document split, tables and attachments extraction.\n",
|
||||
"\n",
|
||||
"`Dedoc` can extract `PDF` with or without a textual layer, \n",
|
||||
"as well as automatically detect its presence and correctness.\n",
|
||||
"Several `PDF` handlers are available, you can use `pdf_with_text_layer` \n",
|
||||
"parameter to choose one of them.\n",
|
||||
"Please see [parameters description](https://dedoc.readthedocs.io/en/latest/parameters/pdf_handling.html) \n",
|
||||
"to get more details.\n",
|
||||
"\n",
|
||||
"For `PDF` without a textual layer, `Tesseract OCR` and its language packages should be installed.\n",
|
||||
"In this case, [the instruction](https://dedoc.readthedocs.io/en/latest/tutorials/add_new_language.html) can be useful."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "0103a7f3-6b5e-4444-8f4d-83dd3724a9af",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'\\n2\\n\\nZ. Shen et al.\\n\\n37], layout detection [38, 22], table detection [26], and scene text detection [4].\\n\\nA generalized learning-based framework dramatically reduces the need for the\\n\\nmanual specification of complicated rules, which is the status quo with traditional\\n\\nmethods. DL has the potential to transform DIA pipelines and benefit a broad\\n\\nspectrum of large-scale document digitization projects.\\n'"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import DedocPDFLoader\n",
|
||||
"\n",
|
||||
"loader = DedocPDFLoader(\n",
|
||||
" \"./example_data/layout-parser-paper.pdf\", pdf_with_text_layer=\"true\", pages=\"2:2\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"docs = loader.load()\n",
|
||||
"\n",
|
||||
"docs[0].page_content[:400]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "13061995-1805-40c2-a77a-a6cd80999e20",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Dedoc API\n",
|
||||
"\n",
|
||||
"If you want to get up and running with less set up, you can use `Dedoc` as a service.\n",
|
||||
"**`DedocAPIFileLoader` can be used without installation of `dedoc` library.**\n",
|
||||
"The loader supports the same parameters as `DedocFileLoader` and\n",
|
||||
"also automatically detects input file types.\n",
|
||||
"\n",
|
||||
"To use `DedocAPIFileLoader`, you should run the `Dedoc` service, e.g. `Docker` container (please see [the documentation](https://dedoc.readthedocs.io/en/latest/getting_started/installation.html#install-and-run-dedoc-using-docker) \n",
|
||||
"for more details):\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"docker pull dedocproject/dedoc\n",
|
||||
"docker run -p 1231:1231\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Please do not use our demo URL `https://dedoc-readme.hf.space` in your code."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "211fc0b5-6080-4974-a6c1-f982bafd87d6",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'\\nMadam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \\n\\n\\n\\nLast year COVID-19 kept us apart. This year we are finally together again. \\n\\n\\n\\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \\n\\n\\n\\nWith a duty to one another to the American people to '"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import DedocAPIFileLoader\n",
|
||||
"\n",
|
||||
"loader = DedocAPIFileLoader(\n",
|
||||
" \"./example_data/state_of_the_union.txt\",\n",
|
||||
" url=\"https://dedoc-readme.hf.space\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"docs = loader.load()\n",
|
||||
"\n",
|
||||
"docs[0].page_content[:400]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "faaff475-5209-436f-bcde-97d58daed05c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.19"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -162,7 +162,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!poetry run pip install --upgrade langchain-openai tiktoken langchain-chroma hnswlib"
|
||||
"!poetry run pip install --upgrade langchain-openai tiktoken chromadb hnswlib"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -211,7 +211,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import RetrievalQA\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.vectorstores.chroma import Chroma\n",
|
||||
"from langchain_openai import OpenAI, OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"embedding = OpenAIEmbeddings()\n",
|
||||
@@ -365,7 +365,7 @@
|
||||
"source": [
|
||||
"from langchain.chains.query_constructor.schema import AttributeInfo\n",
|
||||
"from langchain.retrievers.self_query.base import SelfQueryRetriever\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.vectorstores.chroma import Chroma\n",
|
||||
"\n",
|
||||
"EXCLUDE_KEYS = [\"id\", \"xpath\", \"structure\"]\n",
|
||||
"metadata_field_info = [\n",
|
||||
@@ -540,7 +540,7 @@
|
||||
"source": [
|
||||
"from langchain.retrievers.multi_vector import MultiVectorRetriever, SearchType\n",
|
||||
"from langchain.storage import InMemoryStore\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.vectorstores.chroma import Chroma\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"# The vectorstore to use to index the child chunks\n",
|
||||
|
||||
@@ -316,7 +316,7 @@
|
||||
"id": "eb00a625-a6c9-4766-b3f0-eaed024851c9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Return SPARQL query\n",
|
||||
"## Return SQARQL query\n",
|
||||
"You can return the SPARQL query step from the Sparql QA Chain using the `return_sparql_query` parameter"
|
||||
]
|
||||
},
|
||||
@@ -358,7 +358,7 @@
|
||||
"\u001b[32;1m\u001b[1;3m[]\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"SPARQL query: PREFIX foaf: <http://xmlns.com/foaf/0.1/>\n",
|
||||
"SQARQL query: PREFIX foaf: <http://xmlns.com/foaf/0.1/>\n",
|
||||
"SELECT ?workHomepage\n",
|
||||
"WHERE {\n",
|
||||
" ?person foaf:name \"Tim Berners-Lee\" .\n",
|
||||
@@ -370,7 +370,7 @@
|
||||
],
|
||||
"source": [
|
||||
"result = chain(\"What is Tim Berners-Lee's work homepage?\")\n",
|
||||
"print(f\"SPARQL query: {result['sparql_query']}\")\n",
|
||||
"print(f\"SQARQL query: {result['sparql_query']}\")\n",
|
||||
"print(f\"Final answer: {result['result']}\")"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -33,7 +33,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet transformers"
|
||||
"%pip install --upgrade --quiet transformers --quiet"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -1,21 +1,10 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "67db2992",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Ollama\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9597802c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# OllamaLLM\n",
|
||||
"# Ollama\n",
|
||||
"\n",
|
||||
":::caution\n",
|
||||
"You are currently on a page documenting the use of Ollama models as [text completion models](/docs/concepts/#llms). Many popular Ollama models are [chat completion models](/docs/concepts/#chat-models).\n",
|
||||
@@ -23,35 +12,21 @@
|
||||
"You may be looking for [this page instead](/docs/integrations/chat/ollama/).\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"This page goes over how to use LangChain to interact with `Ollama` models.\n",
|
||||
"[Ollama](https://ollama.ai/) allows you to run open-source large language models, such as Llama 2, locally.\n",
|
||||
"\n",
|
||||
"Ollama bundles model weights, configuration, and data into a single package, defined by a Modelfile. \n",
|
||||
"\n",
|
||||
"It optimizes setup and configuration details, including GPU usage.\n",
|
||||
"\n",
|
||||
"For a complete list of supported models and model variants, see the [Ollama model library](https://github.com/ollama/ollama#model-library).\n",
|
||||
"\n",
|
||||
"## Installation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "59c710c4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# install package\n",
|
||||
"%pip install -U langchain-ollama"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0ee90032",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"First, follow [these instructions](https://github.com/jmorganca/ollama) to set up and run a local Ollama instance:\n",
|
||||
"First, follow [these instructions](https://github.com/ollama/ollama) to set up and run a local Ollama instance:\n",
|
||||
"\n",
|
||||
"* [Download](https://ollama.ai/download) and install Ollama onto the available supported platforms (including Windows Subsystem for Linux)\n",
|
||||
"* Fetch available LLM model via `ollama pull <name-of-model>`\n",
|
||||
" * View a list of available models via the [model library](https://ollama.ai/library)\n",
|
||||
" * e.g., `ollama pull llama3`\n",
|
||||
" * View a list of available models via the [model library](https://ollama.ai/library) and pull to use locally with the command `ollama pull llama3`\n",
|
||||
"* This will download the default tagged version of the model. Typically, the default points to the latest, smallest sized-parameter model.\n",
|
||||
"\n",
|
||||
"> On Mac, the models will be download to `~/.ollama/models`\n",
|
||||
@@ -59,67 +34,194 @@
|
||||
"> On Linux (or WSL), the models will be stored at `/usr/share/ollama/.ollama/models`\n",
|
||||
"\n",
|
||||
"* Specify the exact version of the model of interest as such `ollama pull vicuna:13b-v1.5-16k-q4_0` (View the [various tags for the `Vicuna`](https://ollama.ai/library/vicuna/tags) model in this instance)\n",
|
||||
"* To view all pulled models, use `ollama list`\n",
|
||||
"* To view all pulled models on your local instance, use `ollama list`\n",
|
||||
"* To chat directly with a model from the command line, use `ollama run <name-of-model>`\n",
|
||||
"* View the [Ollama documentation](https://github.com/jmorganca/ollama) for more commands. Run `ollama help` in the terminal to see available commands too.\n",
|
||||
"* View the [Ollama documentation](https://github.com/ollama/ollama) for more commands. \n",
|
||||
"* Run `ollama help` in the terminal to see available commands too.\n",
|
||||
"\n",
|
||||
"## Usage"
|
||||
"## Usage\n",
|
||||
"\n",
|
||||
"You can see a full list of supported parameters on the [API reference page](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.ollama.Ollama.html).\n",
|
||||
"\n",
|
||||
"If you are using a LLaMA `chat` model (e.g., `ollama pull llama3`) then you can use the `ChatOllama` [interface](https://python.langchain.com/v0.2/docs/integrations/chat/ollama/).\n",
|
||||
"\n",
|
||||
"This includes [special tokens](https://ollama.com/library/llama3) for system message and user input.\n",
|
||||
"\n",
|
||||
"## Interacting with Models \n",
|
||||
"\n",
|
||||
"Here are a few ways to interact with pulled local models\n",
|
||||
"\n",
|
||||
"#### In the terminal:\n",
|
||||
"\n",
|
||||
"* All of your local models are automatically served on `localhost:11434`\n",
|
||||
"* Run `ollama run <name-of-model>` to start interacting via the command line directly\n",
|
||||
"\n",
|
||||
"#### Via the API\n",
|
||||
"\n",
|
||||
"Send an `application/json` request to the API endpoint of Ollama to interact.\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"curl http://localhost:11434/api/generate -d '{\n",
|
||||
" \"model\": \"llama3\",\n",
|
||||
" \"prompt\":\"Why is the sky blue?\"\n",
|
||||
"}'\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"See the Ollama [API documentation](https://github.com/ollama/ollama/blob/main/docs/api.md) for all endpoints.\n",
|
||||
"\n",
|
||||
"#### via LangChain\n",
|
||||
"\n",
|
||||
"See a typical basic example of using [Ollama chat model](https://python.langchain.com/v0.2/docs/integrations/chat/ollama/) in your LangChain application."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "035dea0f",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install langchain-community"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'A great start!\\n\\nLangChain is a type of AI model that uses language processing techniques to generate human-like text based on input prompts or chains of reasoning. In other words, it can have a conversation with humans, understanding the context and responding accordingly.\\n\\nHere\\'s a possible breakdown:\\n\\n* \"Lang\" likely refers to its focus on natural language processing (NLP) and linguistic analysis.\\n* \"Chain\" suggests that LangChain is designed to generate text in response to a series of connected ideas or prompts, rather than simply generating random text.\\n\\nSo, what do you think LangChain\\'s capabilities might be?'"
|
||||
"\"Here's one:\\n\\nWhy don't scientists trust atoms?\\n\\nBecause they make up everything!\\n\\nHope that made you smile! Do you want to hear another one?\""
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_ollama.llms import OllamaLLM\n",
|
||||
"from langchain_community.llms import Ollama\n",
|
||||
"\n",
|
||||
"template = \"\"\"Question: {question}\n",
|
||||
"llm = Ollama(\n",
|
||||
" model=\"llama3\"\n",
|
||||
") # assuming you have Ollama installed and have llama3 model pulled with `ollama pull llama3 `\n",
|
||||
"\n",
|
||||
"Answer: Let's think step by step.\"\"\"\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_template(template)\n",
|
||||
"\n",
|
||||
"model = OllamaLLM(model=\"llama3\")\n",
|
||||
"\n",
|
||||
"chain = prompt | model\n",
|
||||
"\n",
|
||||
"chain.invoke({\"question\": \"What is LangChain?\"})"
|
||||
"llm.invoke(\"Tell me a joke\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"To stream tokens, use the `.stream(...)` method:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"S\n",
|
||||
"ure\n",
|
||||
",\n",
|
||||
" here\n",
|
||||
"'\n",
|
||||
"s\n",
|
||||
" one\n",
|
||||
":\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Why\n",
|
||||
" don\n",
|
||||
"'\n",
|
||||
"t\n",
|
||||
" scient\n",
|
||||
"ists\n",
|
||||
" trust\n",
|
||||
" atoms\n",
|
||||
"?\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"B\n",
|
||||
"ecause\n",
|
||||
" they\n",
|
||||
" make\n",
|
||||
" up\n",
|
||||
" everything\n",
|
||||
"!\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"I\n",
|
||||
" hope\n",
|
||||
" you\n",
|
||||
" found\n",
|
||||
" that\n",
|
||||
" am\n",
|
||||
"using\n",
|
||||
"!\n",
|
||||
" Do\n",
|
||||
" you\n",
|
||||
" want\n",
|
||||
" to\n",
|
||||
" hear\n",
|
||||
" another\n",
|
||||
" one\n",
|
||||
"?\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"query = \"Tell me a joke\"\n",
|
||||
"\n",
|
||||
"for chunks in llm.stream(query):\n",
|
||||
" print(chunks)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"To learn more about the LangChain Expressive Language and the available methods on an LLM, see the [LCEL Interface](/docs/concepts#interface)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e2d85456",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Multi-modal\n",
|
||||
"\n",
|
||||
"Ollama has support for multi-modal LLMs, such as [bakllava](https://ollama.com/library/bakllava) and [llava](https://ollama.com/library/llava).\n",
|
||||
"Ollama has support for multi-modal LLMs, such as [bakllava](https://ollama.ai/library/bakllava) and [llava](https://ollama.ai/library/llava).\n",
|
||||
"\n",
|
||||
" ollama pull bakllava\n",
|
||||
"`ollama pull bakllava`\n",
|
||||
"\n",
|
||||
"Be sure to update Ollama so that you have the most recent version to support multi-modal."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.llms import Ollama\n",
|
||||
"\n",
|
||||
"bakllava = Ollama(model=\"bakllava\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "4043e202",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -177,8 +279,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "79aaf863",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -187,24 +288,38 @@
|
||||
"'90%'"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_ollama import OllamaLLM\n",
|
||||
"\n",
|
||||
"llm = OllamaLLM(model=\"bakllava\")\n",
|
||||
"\n",
|
||||
"llm_with_image_context = llm.bind(images=[image_b64])\n",
|
||||
"llm_with_image_context = bakllava.bind(images=[image_b64])\n",
|
||||
"llm_with_image_context.invoke(\"What is the dollar based gross retention rate:\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Concurrency Features\n",
|
||||
"\n",
|
||||
"Ollama supports concurrency inference for a single model, and or loading multiple models simulatenously (at least [version 0.1.33](https://github.com/ollama/ollama/releases)).\n",
|
||||
"\n",
|
||||
"Start the Ollama server with:\n",
|
||||
"\n",
|
||||
"* `OLLAMA_NUM_PARALLEL`: Handle multiple requests simultaneously for a single model\n",
|
||||
"* `OLLAMA_MAX_LOADED_MODELS`: Load multiple models simultaneously\n",
|
||||
"\n",
|
||||
"Example: `OLLAMA_NUM_PARALLEL=4 OLLAMA_MAX_LOADED_MODELS=4 ollama serve`\n",
|
||||
"\n",
|
||||
"Learn more about configuring Ollama server in [the official guide](https://github.com/ollama/ollama/blob/main/docs/faq.md#how-do-i-configure-ollama-server)."
|
||||
]
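As a rough sketch (assuming the server was started with the variables above and the `llama3` model has been pulled), the async API can exercise this parallelism from Python, for example with `asyncio.gather` over `ainvoke`:

```python
import asyncio

from langchain_ollama import OllamaLLM

llm = OllamaLLM(model="llama3")


async def main() -> None:
    # the coroutines run concurrently, so the server can process them in parallel
    answers = await asyncio.gather(
        llm.ainvoke("Why is the sky blue?"),
        llm.ainvoke("Why is grass green?"),
    )
    for answer in answers:
        print(answer)


asyncio.run(main())
```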
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.11.1 64-bit",
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
@@ -218,14 +333,9 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.3"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "e971737741ff4ec9aff7dc6155a1060a59a8a6d52c757dbbe66bf8ee389494b1"
|
||||
}
|
||||
"version": "3.11.8"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
|
||||
@@ -50,8 +50,8 @@
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"from langchain.chains import LLMChain\n",
|
||||
"from langchain_community.llms import PipelineAI\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import PromptTemplate"
|
||||
]
|
||||
},
|
||||
@@ -123,7 +123,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm_chain = prompt | llm | StrOutputParser()"
|
||||
"llm_chain = LLMChain(prompt=prompt, llm=llm)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -142,7 +142,7 @@
|
||||
"source": [
|
||||
"question = \"What NFL team won the Super Bowl in the year Justin Beiber was born?\"\n",
|
||||
"\n",
|
||||
"llm_chain.invoke(question)"
|
||||
"llm_chain.run(question)"
|
||||
]
|
||||
}
|
||||
],
|
||||
|
||||
@@ -88,7 +88,6 @@
|
||||
" \"max_tokens_to_generate\": 1000,\n",
|
||||
" \"temperature\": 0.01,\n",
|
||||
" \"select_expert\": \"llama-2-7b-chat-hf\",\n",
|
||||
" \"process_prompt\": False,\n",
|
||||
" # \"stop_sequences\": '\\\"sequence1\\\",\\\"sequence2\\\"',\n",
|
||||
" # \"repetition_penalty\": 1.0,\n",
|
||||
" # \"top_k\": 50,\n",
|
||||
@@ -117,7 +116,6 @@
|
||||
" \"max_tokens_to_generate\": 1000,\n",
|
||||
" \"temperature\": 0.01,\n",
|
||||
" \"select_expert\": \"llama-2-7b-chat-hf\",\n",
|
||||
" \"process_prompt\": False,\n",
|
||||
" # \"stop_sequences\": '\\\"sequence1\\\",\\\"sequence2\\\"',\n",
|
||||
" # \"repetition_penalty\": 1.0,\n",
|
||||
" # \"top_k\": 50,\n",
|
||||
@@ -177,7 +175,9 @@
|
||||
"import os\n",
|
||||
"\n",
|
||||
"sambastudio_base_url = \"<Your SambaStudio environment URL>\"\n",
|
||||
"sambastudio_base_uri = \"<Your SambaStudio endpoint base URI>\" # optional, \"api/predict/generic\" set as default\n",
|
||||
"sambastudio_base_uri = (\n",
|
||||
" \"<Your SambaStudio endpoint base URI>\" # optional, \"api/predict/nlp\" set as default\n",
|
||||
")\n",
|
||||
"sambastudio_project_id = \"<Your SambaStudio project id>\"\n",
|
||||
"sambastudio_endpoint_id = \"<Your SambaStudio endpoint id>\"\n",
|
||||
"sambastudio_api_key = \"<Your SambaStudio endpoint API key>\"\n",
|
||||
@@ -271,7 +271,6 @@
|
||||
" \"do_sample\": True,\n",
|
||||
" \"max_tokens_to_generate\": 1000,\n",
|
||||
" \"temperature\": 0.01,\n",
|
||||
" \"process_prompt\": False,\n",
|
||||
" \"select_expert\": \"Meta-Llama-3-8B-Instruct\",\n",
|
||||
" # \"repetition_penalty\": 1.0,\n",
|
||||
" # \"top_k\": 50,\n",
|
||||
|
||||
@@ -1,325 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a283d2fd-e26e-4811-a486-d3cf0ecf6749",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Couchbase\n",
|
||||
"> Couchbase is an award-winning distributed NoSQL cloud database that delivers unmatched versatility, performance, scalability, and financial value for all of your cloud, mobile, AI, and edge computing applications. Couchbase embraces AI with coding assistance for developers and vector search for their applications.\n",
|
||||
"\n",
|
||||
"This notebook goes over how to use the `CouchbaseChatMessageHistory` class to store the chat message history in a Couchbase cluster\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ff868a6c-3e17-4c3d-8d32-67b01f4d7bcc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Set Up Couchbase Cluster\n",
|
||||
"To run this demo, you need a Couchbase Cluster. \n",
|
||||
"\n",
|
||||
"You can work with both [Couchbase Capella](https://www.couchbase.com/products/capella/) and your self-managed Couchbase Server."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "41fa85e7-6968-45e4-a445-de305d80f332",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Install Dependencies\n",
|
||||
"`CouchbaseChatMessageHistory` lives inside the `langchain-couchbase` package. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "b744ca05-b8c6-458c-91df-f50ca2c20b3c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet langchain-couchbase"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "41f29205-6452-493b-ba18-8a3b006bcca4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create Couchbase Connection Object\n",
|
||||
"We create a connection to the Couchbase cluster initially and then pass the cluster object to the Vector Store. \n",
|
||||
"\n",
|
||||
"Here, we are connecting using the username and password. You can also connect using any other supported way to your cluster. \n",
|
||||
"\n",
|
||||
"For more information on connecting to the Couchbase cluster, please check the [Python SDK documentation](https://docs.couchbase.com/python-sdk/current/hello-world/start-using-sdk.html#connect)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "f394908e-f5fe-408a-84d7-b97fdebcfa26",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"COUCHBASE_CONNECTION_STRING = (\n",
|
||||
" \"couchbase://localhost\" # or \"couchbases://localhost\" if using TLS\n",
|
||||
")\n",
|
||||
"DB_USERNAME = \"Administrator\"\n",
|
||||
"DB_PASSWORD = \"Password\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "ad4dce21-d80c-465a-b709-fd366ba5ce35",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from datetime import timedelta\n",
|
||||
"\n",
|
||||
"from couchbase.auth import PasswordAuthenticator\n",
|
||||
"from couchbase.cluster import Cluster\n",
|
||||
"from couchbase.options import ClusterOptions\n",
|
||||
"\n",
|
||||
"auth = PasswordAuthenticator(DB_USERNAME, DB_PASSWORD)\n",
|
||||
"options = ClusterOptions(auth)\n",
|
||||
"cluster = Cluster(COUCHBASE_CONNECTION_STRING, options)\n",
|
||||
"\n",
|
||||
"# Wait until the cluster is ready for use.\n",
|
||||
"cluster.wait_until_ready(timedelta(seconds=5))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e3d0210c-e2e6-437a-86f3-7397a1899fef",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We will now set the bucket, scope, and collection names in the Couchbase cluster that we want to use for storing the message history.\n",
|
||||
"\n",
|
||||
"Note that the bucket, scope, and collection need to exist before using them to store the message history."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "e8c7f846-a5c4-4465-a40e-4a9a23ac71bd",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"BUCKET_NAME = \"langchain-testing\"\n",
|
||||
"SCOPE_NAME = \"_default\"\n",
|
||||
"COLLECTION_NAME = \"conversational_cache\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "283959e1-6af7-4768-9211-5b0facc6ef65",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Usage\n",
|
||||
"In order to store the messages, you need the following:\n",
|
||||
"- Couchbase Cluster object: Valid connection to the Couchbase cluster\n",
|
||||
"- bucket_name: Bucket in cluster to store the chat message history\n",
|
||||
"- scope_name: Scope in bucket to store the message history\n",
|
||||
"- collection_name: Collection in scope to store the message history\n",
|
||||
"- session_id: Unique identifier for the session\n",
|
||||
"\n",
|
||||
"Optionally you can configure the following:\n",
|
||||
"- session_id_key: Field in the chat message documents to store the `session_id`\n",
|
||||
"- message_key: Field in the chat message documents to store the message content\n",
|
||||
"- create_index: Used to specify if the index needs to be created on the collection. By default, an index is created on the `message_key` and the `session_id_key` of the documents"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "43c3b2d5-aae2-44a9-9e9f-f10adf054cfa",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_couchbase.chat_message_histories import CouchbaseChatMessageHistory\n",
|
||||
"\n",
|
||||
"message_history = CouchbaseChatMessageHistory(\n",
|
||||
" cluster=cluster,\n",
|
||||
" bucket_name=BUCKET_NAME,\n",
|
||||
" scope_name=SCOPE_NAME,\n",
|
||||
" collection_name=COLLECTION_NAME,\n",
|
||||
" session_id=\"test-session\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"message_history.add_user_message(\"hi!\")\n",
|
||||
"\n",
|
||||
"message_history.add_ai_message(\"how are you doing?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "e7e348ef-79e9-481c-aeef-969ae03dea6a",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[HumanMessage(content='hi!'), AIMessage(content='how are you doing?')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"message_history.messages"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c8b942a7-93fa-4cd9-8414-d047135c2733",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"The chat message history class can be used with [LCEL Runnables](https://python.langchain.com/v0.2/docs/how_to/message_history/)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "8a9f0d91-d1d6-481d-8137-ea11229f485a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
|
||||
"from langchain_core.runnables.history import RunnableWithMessageHistory\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "946d45aa-5a61-49ae-816b-1c3949c56d9a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\"system\", \"You are a helpful assistant.\"),\n",
|
||||
" MessagesPlaceholder(variable_name=\"history\"),\n",
|
||||
" (\"human\", \"{question}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Create the LCEL runnable\n",
|
||||
"chain = prompt | ChatOpenAI()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "20dfd838-b549-42ed-b3ba-ac005f7e024c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain_with_history = RunnableWithMessageHistory(\n",
|
||||
" chain,\n",
|
||||
" lambda session_id: CouchbaseChatMessageHistory(\n",
|
||||
" cluster=cluster,\n",
|
||||
" bucket_name=BUCKET_NAME,\n",
|
||||
" scope_name=SCOPE_NAME,\n",
|
||||
" collection_name=COLLECTION_NAME,\n",
|
||||
" session_id=session_id,\n",
|
||||
" ),\n",
|
||||
" input_messages_key=\"question\",\n",
|
||||
" history_messages_key=\"history\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "17bd09f4-896d-433d-bb9a-369a06e7aa8a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# This is where we configure the session id\n",
|
||||
"config = {\"configurable\": {\"session_id\": \"testing\"}}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "4bda1096-2fc2-40d7-a046-0d5d8e3a8f75",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Hello Bob! How can I assist you today?', response_metadata={'token_usage': {'completion_tokens': 10, 'prompt_tokens': 22, 'total_tokens': 32}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-a0f8a29e-ddf4-4e06-a1fe-cf8c325a2b72-0', usage_metadata={'input_tokens': 22, 'output_tokens': 10, 'total_tokens': 32})"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain_with_history.invoke({\"question\": \"Hi! I'm bob\"}, config=config)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "1cfb31da-51bb-4c5f-909a-b7118b0ae08d",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Your name is Bob.', response_metadata={'token_usage': {'completion_tokens': 5, 'prompt_tokens': 43, 'total_tokens': 48}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-f764a9eb-999e-4042-96b6-fe47b7ae4779-0', usage_metadata={'input_tokens': 43, 'output_tokens': 5, 'total_tokens': 48})"
|
||||
]
|
||||
},
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain_with_history.invoke({\"question\": \"Whats my name\"}, config=config)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.13"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -89,23 +89,3 @@ set_llm_cache(
    )
)
```

## Chat Message History

Use Couchbase as the storage for your chat messages.

See a [usage example](/docs/integrations/memory/couchbase_chat_message_history).

To use the chat message history in your applications:
```python
from langchain_couchbase.chat_message_histories import CouchbaseChatMessageHistory

message_history = CouchbaseChatMessageHistory(
    cluster=cluster,
    bucket_name=BUCKET_NAME,
    scope_name=SCOPE_NAME,
    collection_name=COLLECTION_NAME,
    session_id="test-session",
)

message_history.add_user_message("hi!")
```
@@ -1,56 +0,0 @@
|
||||
# Dedoc
|
||||
|
||||
>[Dedoc](https://dedoc.readthedocs.io) is an [open-source](https://github.com/ispras/dedoc)
|
||||
library/service that extracts texts, tables, attached files and document structure
|
||||
(e.g., titles, list items, etc.) from files of various formats.
|
||||
|
||||
`Dedoc` supports `DOCX`, `XLSX`, `PPTX`, `EML`, `HTML`, `PDF`, images and more.
|
||||
Full list of supported formats can be found [here](https://dedoc.readthedocs.io/en/latest/#id1).
|
||||
|
||||
## Installation and Setup
|
||||
|
||||
### Dedoc library
|
||||
|
||||
You can install `Dedoc` using `pip`.
|
||||
In this case, you will need to install dependencies,
|
||||
please go [here](https://dedoc.readthedocs.io/en/latest/getting_started/installation.html)
|
||||
to get more information.
|
||||
|
||||
```bash
|
||||
pip install dedoc
|
||||
```
|
||||
|
||||
### Dedoc API
|
||||
|
||||
If you are going to use `Dedoc` API, you don't need to install `dedoc` library.
|
||||
In this case, you should run the `Dedoc` service, e.g. `Docker` container (please see
|
||||
[the documentation](https://dedoc.readthedocs.io/en/latest/getting_started/installation.html#install-and-run-dedoc-using-docker)
|
||||
for more details):
|
||||
|
||||
```bash
|
||||
docker pull dedocproject/dedoc
|
||||
docker run -p 1231:1231
|
||||
```
|
||||
|
||||
## Document Loader
|
||||
|
||||
* For handling files of any formats (supported by `Dedoc`), you can use `DedocFileLoader`:
|
||||
|
||||
```python
|
||||
from langchain_community.document_loaders import DedocFileLoader
|
||||
```
|
||||
|
||||
* For handling PDF files (with or without a textual layer), you can use `DedocPDFLoader`:
|
||||
|
||||
```python
|
||||
from langchain_community.document_loaders import DedocPDFLoader
|
||||
```
|
||||
|
||||
* For handling files of any formats without library installation,
|
||||
you can use `Dedoc API` with `DedocAPIFileLoader`:
|
||||
|
||||
```python
|
||||
from langchain_community.document_loaders import DedocAPIFileLoader
|
||||
```
|
||||
|
||||
Please see a [usage example](/docs/integrations/document_loaders/dedoc) for more details.
|
||||
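The page above lists the available loaders but stops at the imports; a minimal load might look like the sketch below. The file name is illustrative, and the `dedoc` library (or a running Dedoc API service for `DedocAPIFileLoader`) must be installed as described above.

```python
from langchain_community.document_loaders import DedocFileLoader

loader = DedocFileLoader("./example.docx")  # hypothetical local file
docs = loader.load()
print(docs[0].page_content[:200])
```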
File diff suppressed because it is too large
@@ -101,7 +101,7 @@
|
||||
" sambastudio_embeddings_project_id=sambastudio_project_id,\n",
|
||||
" sambastudio_embeddings_endpoint_id=sambastudio_endpoint_id,\n",
|
||||
" sambastudio_embeddings_api_key=sambastudio_api_key,\n",
|
||||
" batch_size=32, # set depending on the deployed endpoint configuration\n",
|
||||
" batch_size=32,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -1,174 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# TextEmbed - Embedding Inference Server\n",
|
||||
"\n",
|
||||
"TextEmbed is a high-throughput, low-latency REST API designed for serving vector embeddings. It supports a wide range of sentence-transformer models and frameworks, making it suitable for various applications in natural language processing.\n",
|
||||
"\n",
|
||||
"## Features\n",
|
||||
"\n",
|
||||
"- **High Throughput & Low Latency:** Designed to handle a large number of requests efficiently.\n",
|
||||
"- **Flexible Model Support:** Works with various sentence-transformer models.\n",
|
||||
"- **Scalable:** Easily integrates into larger systems and scales with demand.\n",
|
||||
"- **Batch Processing:** Supports batch processing for better and faster inference.\n",
|
||||
"- **OpenAI Compatible REST API Endpoint:** Provides an OpenAI compatible REST API endpoint.\n",
|
||||
"- **Single Line Command Deployment:** Deploy multiple models via a single command for efficient deployment.\n",
|
||||
"- **Support for Embedding Formats:** Supports binary, float16, and float32 embeddings formats for faster retrieval.\n",
|
||||
"\n",
|
||||
"## Getting Started\n",
|
||||
"\n",
|
||||
"### Prerequisites\n",
|
||||
"\n",
|
||||
"Ensure you have Python 3.10 or higher installed. You will also need to install the required dependencies."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Installation via PyPI\n",
|
||||
"\n",
|
||||
"1. **Install the required dependencies:**\n",
|
||||
"\n",
|
||||
" ```bash\n",
|
||||
" pip install -U textembed\n",
|
||||
" ```\n",
|
||||
"\n",
|
||||
"2. **Start the TextEmbed server with your desired models:**\n",
|
||||
"\n",
|
||||
" ```bash\n",
|
||||
" python -m textembed.server --models sentence-transformers/all-MiniLM-L12-v2 --workers 4 --api-key TextEmbed \n",
|
||||
" ```\n",
|
||||
"\n",
|
||||
"For more information, please read the [documentation](https://github.com/kevaldekivadiya2415/textembed/blob/main/docs/setup.md)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Import"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.embeddings import TextEmbedEmbeddings"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"embeddings = TextEmbedEmbeddings(\n",
|
||||
" model=\"sentence-transformers/all-MiniLM-L12-v2\",\n",
|
||||
" api_url=\"http://0.0.0.0:8000/v1\",\n",
|
||||
" api_key=\"TextEmbed\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Embed your documents"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 23,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Define a list of documents\n",
|
||||
"documents = [\n",
|
||||
" \"Data science involves extracting insights from data.\",\n",
|
||||
" \"Artificial intelligence is transforming various industries.\",\n",
|
||||
" \"Cloud computing provides scalable computing resources over the internet.\",\n",
|
||||
" \"Big data analytics helps in understanding large datasets.\",\n",
|
||||
" \"India has a diverse cultural heritage.\",\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"# Define a query\n",
|
||||
"query = \"What is the cultural heritage of India?\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 24,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Embed all documents\n",
|
||||
"document_embeddings = embeddings.embed_documents(documents)\n",
|
||||
"\n",
|
||||
"# Embed the query\n",
|
||||
"query_embedding = embeddings.embed_query(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 25,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'Data science involves extracting insights from data.': 0.05121298956322118,\n",
|
||||
" 'Artificial intelligence is transforming various industries.': -0.0060612142358469345,\n",
|
||||
" 'Cloud computing provides scalable computing resources over the internet.': -0.04877402795301714,\n",
|
||||
" 'Big data analytics helps in understanding large datasets.': 0.016582168576929422,\n",
|
||||
" 'India has a diverse cultural heritage.': 0.7408992963028144}"
|
||||
]
|
||||
},
|
||||
"execution_count": 25,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Compute Similarity\n",
|
||||
"import numpy as np\n",
|
||||
"\n",
|
||||
"scores = np.array(document_embeddings) @ np.array(query_embedding).T\n",
|
||||
"dict(zip(documents, scores))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "check10",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.14"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
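Since the notebook above already computes dot-product scores between each document and the query, a small continuation can pick the best match; this sketch reuses the `documents`, `document_embeddings`, and `query_embedding` variables defined in the notebook:

```python
import numpy as np

scores = np.array(document_embeddings) @ np.array(query_embedding).T
best = int(np.argmax(scores))
print(documents[best])  # expected: "India has a diverse cultural heritage."
```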
@@ -22,7 +22,7 @@
|
||||
"`InfobipAPIWrapper` uses name parameters where you can provide credentials:\n",
|
||||
"\n",
|
||||
"- `infobip_api_key` - [API Key](https://www.infobip.com/docs/essentials/api-authentication#api-key-header) that you can find in your [developer tools](https://portal.infobip.com/dev/api-keys)\n",
|
||||
"- `infobip_base_url` - [Base url](https://www.infobip.com/docs/essentials/base-url) for Infobip API. You can use the default value `https://api.infobip.com/`.\n",
|
||||
"- `infobip_base_url` - [Base url](https://www.infobip.com/docs/essentials/base-url) for Infobip API. You can use default value `https://api.infobip.com/`.\n",
|
||||
"\n",
|
||||
"You can also provide `infobip_api_key` and `infobip_base_url` as environment variables `INFOBIP_API_KEY` and `INFOBIP_BASE_URL`."
|
||||
]
|
||||
@@ -60,7 +60,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Sending an Email"
|
||||
"## Sending a Email"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -1,183 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7d143c73",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Riza Code Interpreter\n",
|
||||
"\n",
|
||||
"> The Riza Code Interpreter is a WASM-based isolated environment for running Python or JavaScript generated by AI agents.\n",
|
||||
"\n",
|
||||
"In this notebook we'll create an example of an agent that uses Python to solve a problem that an LLM can't solve on its own:\n",
|
||||
"counting the number of 'r's in the word \"strawberry.\"\n",
|
||||
"\n",
|
||||
"Before you get started grab an API key from the [Riza dashboard](https://dashboard.riza.io). For more guides and a full API reference\n",
|
||||
"head over to the [Riza Code Interpreter API documentation](https://docs.riza.io)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "894aa87a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Make sure you have the necessary dependencies installed."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "8265cf7f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet langchain-community rizaio"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e085eb51",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Set up your API keys as an environment variable."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "45ba8936",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%env ANTHROPIC_API_KEY=<your_anthropic_api_key_here>\n",
|
||||
"%env RIZA_API_KEY=<your_riza_api_key_here>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "efe26fd9-6e33-4f5f-b49b-ea74fa6c4915",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.tools.riza.command import ExecPython"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "cd5b952e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents import AgentExecutor, create_tool_calling_agent\n",
|
||||
"from langchain_anthropic import ChatAnthropic\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7bd0b610",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Initialize the `ExecPython` tool."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "32f1543f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tools = [ExecPython()]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "24f952d5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Initialize an agent using Anthropic's Claude Haiku model."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "71831ea8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = ChatAnthropic(model=\"claude-3-haiku-20240307\", temperature=0)\n",
|
||||
"\n",
|
||||
"prompt_template = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant. Make sure to use a tool if you need to solve a problem.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" (\"placeholder\", \"{agent_scratchpad}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"agent = create_tool_calling_agent(llm, tools, prompt_template)\n",
|
||||
"agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 34,
|
||||
"id": "36b24036",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m\n",
|
||||
"Invoking: `riza_exec_python` with `{'code': 'word = \"strawberry\"\\nprint(word.count(\"r\"))'}`\n",
|
||||
"responded: [{'id': 'toolu_01JwPLAAqqCNCjVuEnK8Fgut', 'input': {}, 'name': 'riza_exec_python', 'type': 'tool_use', 'index': 0, 'partial_json': '{\"code\": \"word = \\\\\"strawberry\\\\\"\\\\nprint(word.count(\\\\\"r\\\\\"))\"}'}]\n",
|
||||
"\n",
|
||||
"\u001b[0m\u001b[36;1m\u001b[1;3m3\n",
|
||||
"\u001b[0m\u001b[32;1m\u001b[1;3m[{'text': '\\n\\nThe word \"strawberry\" contains 3 \"r\" characters.', 'type': 'text', 'index': 0}]\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"The word \"strawberry\" contains 3 \"r\" characters.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Ask a tough question\n",
|
||||
"result = agent_executor.invoke({\"input\": \"how many rs are in strawberry?\"})\n",
|
||||
"print(result[\"output\"][0][\"text\"])"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.3"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -11,7 +11,7 @@
|
||||
"source": [
|
||||
"# SQL Database\n",
|
||||
"\n",
|
||||
":::note\n",
|
||||
"::: {.callout-note}\n",
|
||||
"The `SQLDatabase` adapter utility is a wrapper around a database connection.\n",
|
||||
"\n",
|
||||
"For talking to SQL databases, it uses the [SQLAlchemy] Core API .\n",
|
||||
@@ -405,7 +405,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
"version": "3.10.12"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -102,8 +102,8 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent_chain.invoke(\n",
|
||||
" \"What happened today with Microsoft stocks?\",\n",
|
||||
"agent_chain.run(\n",
|
||||
" \"What happens today with Microsoft stocks?\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@@ -147,7 +147,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent_chain.invoke(\n",
|
||||
"agent_chain.run(\n",
|
||||
" \"How does Microsoft feels today comparing with Nvidia?\",\n",
|
||||
")"
|
||||
]
|
||||
@@ -188,7 +188,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"tool.invoke(\"NVDA\")"
|
||||
"tool.run(\"NVDA\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -210,7 +210,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"res = tool.invoke(\"AAPL\")\n",
|
||||
"res = tool.run(\"AAPL\")\n",
|
||||
"print(res)"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -72,7 +72,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"tool.run(\"lex fridman\")"
|
||||
"tool.run(\"lex friedman\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -150,13 +150,7 @@ hide_table_of_contents: true

## Advanced features

The following table shows all the chat model classes that support one or more advanced features.

:::info
While all these LangChain classes support the indicated advanced feature, you may have
to open the provider-specific documentation to learn which hosted models or backends support
the feature.
:::
The following table shows all the chat models that support one or more advanced features.

{table}
@@ -9,7 +9,6 @@ license = "MIT"
|
||||
|
||||
[tool.poetry.urls]
|
||||
"Source Code" = "https://github.com/langchain-ai/langchain/tree/master/libs/partners/__package_name_short__"
|
||||
"Release Notes" = "https://github.com/langchain-ai/langchain/releases?q=tag%3A%22__package_name_short__%3D%3D0%22&expanded=true"
|
||||
|
||||
[tool.poetry.dependencies]
|
||||
python = ">=3.8.1,<4.0"
|
||||
|
||||
@@ -9,7 +9,6 @@ license = "MIT"
|
||||
|
||||
[tool.poetry.urls]
|
||||
"Source Code" = "https://github.com/langchain-ai/langchain/tree/master/libs/cli"
|
||||
"Release Notes" = "https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain-cli%3D%3D0%22&expanded=true"
|
||||
|
||||
[tool.poetry.dependencies]
|
||||
python = ">=3.8.1,<4.0"
|
||||
|
||||
@@ -16,7 +16,6 @@ cloudpickle>=2.0.0
|
||||
cohere>=4,<6
|
||||
databricks-vectorsearch>=0.21,<0.22
|
||||
datasets>=2.15.0,<3
|
||||
dedoc>=2.2.6,<3
|
||||
dgml-utils>=0.3.0,<0.4
|
||||
elasticsearch>=8.12.0,<9
|
||||
esprima>=4.0.1,<5
|
||||
@@ -26,7 +25,6 @@ fireworks-ai>=0.9.0,<0.10
|
||||
friendli-client>=1.2.4,<2
|
||||
geopandas>=0.13.1
|
||||
gitpython>=3.1.32,<4
|
||||
gliner>=0.2.7
|
||||
google-cloud-documentai>=2.20.1,<3
|
||||
gql>=3.4.1,<4
|
||||
gradientai>=1.4.0,<2
|
||||
@@ -39,7 +37,6 @@ javelin-sdk>=0.1.8,<0.2
|
||||
jinja2>=3,<4
|
||||
jq>=1.4.1,<2
|
||||
jsonschema>1
|
||||
keybert>=0.8.5
|
||||
lxml>=4.9.3,<6.0
|
||||
markdownify>=0.11.6,<0.12
|
||||
motor>=3.3.1,<4
|
||||
@@ -63,7 +60,7 @@ psychicapi>=0.8.0,<0.9
|
||||
py-trello>=0.19.0,<0.20
|
||||
pyjwt>=2.8.0,<3
|
||||
pymupdf>=1.22.3,<2
|
||||
pypdf>=3.4.0,<5
|
||||
pypdf>=3.4.0,<4
|
||||
pypdfium2>=4.10.0,<5
|
||||
pyspark>=3.4.0,<4
|
||||
rank-bm25>=0.2.2,<0.3
|
||||
|
||||
@@ -292,21 +292,17 @@ def _create_api_controller_agent(
|
||||
)
|
||||
if "DELETE" in allowed_operations:
|
||||
delete_llm_chain = LLMChain(llm=llm, prompt=PARSING_DELETE_PROMPT)
|
||||
tools.append(
|
||||
RequestsDeleteToolWithParsing( # type: ignore[call-arg]
|
||||
requests_wrapper=requests_wrapper,
|
||||
llm_chain=delete_llm_chain,
|
||||
allow_dangerous_requests=allow_dangerous_requests,
|
||||
)
|
||||
RequestsDeleteToolWithParsing( # type: ignore[call-arg]
|
||||
requests_wrapper=requests_wrapper,
|
||||
llm_chain=delete_llm_chain,
|
||||
allow_dangerous_requests=allow_dangerous_requests,
|
||||
)
|
||||
if "PATCH" in allowed_operations:
|
||||
patch_llm_chain = LLMChain(llm=llm, prompt=PARSING_PATCH_PROMPT)
|
||||
tools.append(
|
||||
RequestsPatchToolWithParsing( # type: ignore[call-arg]
|
||||
requests_wrapper=requests_wrapper,
|
||||
llm_chain=patch_llm_chain,
|
||||
allow_dangerous_requests=allow_dangerous_requests,
|
||||
)
|
||||
RequestsPatchToolWithParsing( # type: ignore[call-arg]
|
||||
requests_wrapper=requests_wrapper,
|
||||
llm_chain=patch_llm_chain,
|
||||
allow_dangerous_requests=allow_dangerous_requests,
|
||||
)
|
||||
if not tools:
|
||||
raise ValueError("Tools not found")
|
||||
|
||||
@@ -25,7 +25,6 @@ from langchain_core.prompts.chat import (
|
||||
from langchain_community.agent_toolkits.sql.prompt import (
|
||||
SQL_FUNCTIONS_SUFFIX,
|
||||
SQL_PREFIX,
|
||||
SQL_SUFFIX,
|
||||
)
|
||||
from langchain_community.agent_toolkits.sql.toolkit import SQLDatabaseToolkit
|
||||
from langchain_community.tools.sql_database.tool import (
|
||||
@@ -141,9 +140,8 @@ def create_sql_agent(
|
||||
toolkit = toolkit or SQLDatabaseToolkit(llm=llm, db=db) # type: ignore[arg-type]
|
||||
agent_type = agent_type or AgentType.ZERO_SHOT_REACT_DESCRIPTION
|
||||
tools = toolkit.get_tools() + list(extra_tools)
|
||||
if prefix is None:
|
||||
prefix = SQL_PREFIX
|
||||
if prompt is None:
|
||||
prefix = prefix or SQL_PREFIX
|
||||
prefix = prefix.format(dialect=toolkit.dialect, top_k=top_k)
|
||||
else:
|
||||
if "top_k" in prompt.input_variables:
|
||||
@@ -172,10 +170,10 @@ def create_sql_agent(
|
||||
)
|
||||
template = "\n\n".join(
|
||||
[
|
||||
prefix,
|
||||
react_prompt.PREFIX,
|
||||
"{tools}",
|
||||
format_instructions,
|
||||
suffix or SQL_SUFFIX,
|
||||
react_prompt.SUFFIX,
|
||||
]
|
||||
)
|
||||
prompt = PromptTemplate.from_template(template)
|
||||
|
||||
@@ -8,12 +8,6 @@ from langchain_core.messages import AIMessage
|
||||
from langchain_core.outputs import ChatGeneration, LLMResult
|
||||
|
||||
MODEL_COST_PER_1K_TOKENS = {
|
||||
# GPT-4o-mini input
|
||||
"gpt-4o-mini": 0.00015,
|
||||
"gpt-4o-mini-2024-07-18": 0.00015,
|
||||
# GPT-4o-mini output
|
||||
"gpt-4o-mini-completion": 0.0006,
|
||||
"gpt-4o-mini-2024-07-18-completion": 0.0006,
|
||||
# GPT-4o input
|
||||
"gpt-4o": 0.005,
|
||||
"gpt-4o-2024-05-13": 0.005,
|
||||
|
||||
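The new `gpt-4o-mini` entries follow the callback's per-1K-token convention, with `-completion` keys holding output pricing. A quick illustration of how such a table is applied (the token counts below are made up):

```python
MODEL_COST_PER_1K_TOKENS = {
    "gpt-4o-mini": 0.00015,            # input, USD per 1K tokens
    "gpt-4o-mini-completion": 0.0006,  # output, USD per 1K tokens
}

prompt_tokens, completion_tokens = 22_000, 4_000
cost = (
    prompt_tokens / 1000 * MODEL_COST_PER_1K_TOKENS["gpt-4o-mini"]
    + completion_tokens / 1000 * MODEL_COST_PER_1K_TOKENS["gpt-4o-mini-completion"]
)
print(f"${cost:.4f}")  # -> $0.0057
```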
@@ -24,7 +24,6 @@ from langchain_core.output_parsers import (
|
||||
from langchain_core.prompts import BasePromptTemplate
|
||||
from langchain_core.pydantic_v1 import BaseModel
|
||||
from langchain_core.runnables import Runnable
|
||||
from langchain_core.utils.pydantic import is_basemodel_subclass
|
||||
|
||||
from langchain_community.output_parsers.ernie_functions import (
|
||||
JsonOutputFunctionsParser,
|
||||
@@ -95,7 +94,7 @@ def _get_python_function_arguments(function: Callable, arg_descriptions: dict) -
|
||||
for arg, arg_type in annotations.items():
|
||||
if arg == "return":
|
||||
continue
|
||||
if isinstance(arg_type, type) and is_basemodel_subclass(arg_type):
|
||||
if isinstance(arg_type, type) and issubclass(arg_type, BaseModel):
|
||||
# Mypy error:
|
||||
# "type" has no attribute "schema"
|
||||
properties[arg] = arg_type.schema() # type: ignore[attr-defined]
|
||||
@@ -157,7 +156,7 @@ def convert_to_ernie_function(
|
||||
"""
|
||||
if isinstance(function, dict):
|
||||
return function
|
||||
elif isinstance(function, type) and is_basemodel_subclass(function):
|
||||
elif isinstance(function, type) and issubclass(function, BaseModel):
|
||||
return cast(Dict, convert_pydantic_to_ernie_function(function))
|
||||
elif callable(function):
|
||||
return convert_python_function_to_ernie_function(function)
|
||||
@@ -186,7 +185,7 @@ def get_ernie_output_parser(
|
||||
only the function arguments and not the function name.
|
||||
"""
|
||||
function_names = [convert_to_ernie_function(f)["name"] for f in functions]
|
||||
if isinstance(functions[0], type) and is_basemodel_subclass(functions[0]):
|
||||
if isinstance(functions[0], type) and issubclass(functions[0], BaseModel):
|
||||
if len(functions) > 1:
|
||||
pydantic_schema: Union[Dict, Type[BaseModel]] = {
|
||||
name: fn for name, fn in zip(function_names, functions)
|
||||
|
||||
@@ -311,15 +311,12 @@ class GraphCypherQAChain(Chain):
|
||||
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
|
||||
callbacks = _run_manager.get_child()
|
||||
question = inputs[self.input_key]
|
||||
args = {
|
||||
"question": question,
|
||||
"schema": self.graph_schema,
|
||||
}
|
||||
args.update(inputs)
|
||||
|
||||
intermediate_steps: List = []
|
||||
|
||||
generated_cypher = self.cypher_generation_chain.run(args, callbacks=callbacks)
|
||||
generated_cypher = self.cypher_generation_chain.run(
|
||||
{"question": question, "schema": self.graph_schema}, callbacks=callbacks
|
||||
)
|
||||
|
||||
# Extract Cypher code if it is wrapped in backticks
|
||||
generated_cypher = extract_cypher(generated_cypher)
|
||||
|
||||
@@ -124,11 +124,6 @@ class PebbloRetrievalQA(Chain):
|
||||
),
|
||||
"doc": doc.page_content,
|
||||
"vector_db": self.retriever.vectorstore.__class__.__name__,
|
||||
**(
|
||||
{"pb_checksum": doc.metadata.get("pb_checksum")}
|
||||
if doc.metadata.get("pb_checksum")
|
||||
else {}
|
||||
),
|
||||
}
|
||||
for doc in docs
|
||||
if isinstance(doc, Document)
|
||||
@@ -462,24 +457,25 @@ class PebbloRetrievalQA(Chain):
|
||||
if self.api_key:
|
||||
if self.classifier_location == "local":
|
||||
if pebblo_resp:
|
||||
resp = json.loads(pebblo_resp.text)
|
||||
if resp:
|
||||
payload["response"].update(
|
||||
resp.get("retrieval_data", {}).get("response", {})
|
||||
)
|
||||
payload["response"].pop("data")
|
||||
payload["prompt"].update(
|
||||
resp.get("retrieval_data", {}).get("prompt", {})
|
||||
)
|
||||
payload["prompt"].pop("data")
|
||||
context = payload["context"]
|
||||
for context_data in context:
|
||||
context_data.pop("doc")
|
||||
payload["context"] = context
|
||||
payload["response"] = (
|
||||
json.loads(pebblo_resp.text)
|
||||
.get("retrieval_data", {})
|
||||
.get("response", {})
|
||||
)
|
||||
payload["context"] = (
|
||||
json.loads(pebblo_resp.text)
|
||||
.get("retrieval_data", {})
|
||||
.get("context", [])
|
||||
)
|
||||
payload["prompt"] = (
|
||||
json.loads(pebblo_resp.text)
|
||||
.get("retrieval_data", {})
|
||||
.get("prompt", {})
|
||||
)
|
||||
else:
|
||||
payload["response"] = {}
|
||||
payload["prompt"] = {}
|
||||
payload["context"] = []
|
||||
payload["response"] = None
|
||||
payload["context"] = None
|
||||
payload["prompt"] = None
|
||||
headers.update({"x-api-key": self.api_key})
|
||||
pebblo_cloud_url = f"{PEBBLO_CLOUD_URL}{PROMPT_URL}"
|
||||
try:
|
||||
|
||||
@@ -129,7 +129,6 @@ class Context(BaseModel):
|
||||
retrieved_from: Optional[str]
|
||||
doc: Optional[str]
|
||||
vector_db: str
|
||||
pb_checksum: Optional[str]
|
||||
|
||||
|
||||
class Prompt(BaseModel):
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import json
|
||||
from pathlib import Path
|
||||
from typing import List, Optional
|
||||
from typing import List
|
||||
|
||||
from langchain_core.chat_history import (
|
||||
BaseChatMessageHistory,
|
||||
@@ -11,33 +11,21 @@ from langchain_core.messages import BaseMessage, messages_from_dict, messages_to
|
||||
class FileChatMessageHistory(BaseChatMessageHistory):
|
||||
"""Chat message history that stores history in a local file."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
file_path: str,
|
||||
*,
|
||||
encoding: Optional[str] = None,
|
||||
ensure_ascii: bool = True,
|
||||
) -> None:
|
||||
def __init__(self, file_path: str) -> None:
|
||||
"""Initialize the file path for the chat history.
|
||||
|
||||
Args:
|
||||
file_path: The path to the local file to store the chat history.
|
||||
encoding: The encoding to use for file operations. Defaults to None.
|
||||
ensure_ascii: If True, escape non-ASCII in JSON. Defaults to True.
|
||||
"""
|
||||
self.file_path = Path(file_path)
|
||||
self.encoding = encoding
|
||||
self.ensure_ascii = ensure_ascii
|
||||
|
||||
if not self.file_path.exists():
|
||||
self.file_path.touch()
|
||||
self.file_path.write_text(
|
||||
json.dumps([], ensure_ascii=self.ensure_ascii), encoding=self.encoding
|
||||
)
|
||||
self.file_path.write_text(json.dumps([]))
|
||||
|
||||
@property
|
||||
def messages(self) -> List[BaseMessage]: # type: ignore
|
||||
"""Retrieve the messages from the local file"""
|
||||
items = json.loads(self.file_path.read_text(encoding=self.encoding))
|
||||
items = json.loads(self.file_path.read_text())
|
||||
messages = messages_from_dict(items)
|
||||
return messages
|
||||
|
||||
@@ -45,12 +33,8 @@ class FileChatMessageHistory(BaseChatMessageHistory):
|
||||
"""Append the message to the record in the local file"""
|
||||
messages = messages_to_dict(self.messages)
|
||||
messages.append(messages_to_dict([message])[0])
|
||||
self.file_path.write_text(
|
||||
json.dumps(messages, ensure_ascii=self.ensure_ascii), encoding=self.encoding
|
||||
)
|
||||
self.file_path.write_text(json.dumps(messages))
|
||||
|
||||
def clear(self) -> None:
|
||||
"""Clear session memory from the local file"""
|
||||
self.file_path.write_text(
|
||||
json.dumps([], ensure_ascii=self.ensure_ascii), encoding=self.encoding
|
||||
)
|
||||
self.file_path.write_text(json.dumps([]))
|
||||
|
||||
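The hunk above adds keyword-only `encoding` and `ensure_ascii` options so histories with non-ASCII content can be written unescaped. A small sketch of a caller using the new signature shown in the diff:

```python
from langchain_community.chat_message_histories import FileChatMessageHistory

# Keep non-ASCII characters readable on disk instead of \uXXXX escapes.
history = FileChatMessageHistory(
    "chat_history.json",
    encoding="utf-8",
    ensure_ascii=False,
)
history.add_user_message("你好!")
print(history.messages)
```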
@@ -15,43 +15,7 @@ logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class RedisChatMessageHistory(BaseChatMessageHistory):
|
||||
"""Chat message history stored in a Redis database.
|
||||
|
||||
Setup:
|
||||
Install ``redis`` python package.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
pip install redis
|
||||
|
||||
Instantiate:
|
||||
.. code-block:: python
|
||||
|
||||
from langchain_community.chat_message_histories import RedisChatMessageHistory
|
||||
|
||||
history = RedisChatMessageHistory(
|
||||
session_id = "your-session-id",
|
||||
url="redis://your-host:your-port:your-database", # redis://localhost:6379/0
|
||||
)
|
||||
|
||||
Add and retrieve messages:
|
||||
.. code-block:: python
|
||||
|
||||
# Add single message
|
||||
history.add_message(message)
|
||||
|
||||
# Add batch messages
|
||||
history.add_messages([message1, message2, message3, ...])
|
||||
|
||||
# Add human message
|
||||
history.add_user_message(human_message)
|
||||
|
||||
# Add ai message
|
||||
history.add_ai_message(ai_message)
|
||||
|
||||
# Retrieve messages
|
||||
messages = history.messages
|
||||
""" # noqa: E501
|
||||
"""Chat message history stored in a Redis database."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
@@ -60,18 +24,6 @@ class RedisChatMessageHistory(BaseChatMessageHistory):
|
||||
key_prefix: str = "message_store:",
|
||||
ttl: Optional[int] = None,
|
||||
):
|
||||
"""Initialize with a RedisChatMessageHistory instance.
|
||||
|
||||
Args:
|
||||
session_id: str
|
||||
The ID for single chat session. Used to form keys with `key_prefix`.
|
||||
url: Optional[str]
|
||||
String parameter configuration for connecting to the redis.
|
||||
key_prefix: Optional[str]
|
||||
The prefix of the key, combined with `session id` to form the key.
|
||||
ttl: Optional[int]
|
||||
Set the expiration time of `key`, the unit is seconds.
|
||||
"""
|
||||
try:
|
||||
import redis
|
||||
except ImportError:
|
||||
|
||||
@@ -32,6 +32,7 @@ from sqlalchemy.engine import Engine
|
||||
from sqlalchemy.ext.asyncio import (
|
||||
AsyncEngine,
|
||||
AsyncSession,
|
||||
async_sessionmaker,
|
||||
create_async_engine,
|
||||
)
|
||||
from sqlalchemy.orm import (
|
||||
@@ -43,12 +44,6 @@ from sqlalchemy.orm import (
|
||||
sessionmaker,
|
||||
)
|
||||
|
||||
try:
|
||||
from sqlalchemy.ext.asyncio import async_sessionmaker
|
||||
except ImportError:
|
||||
# dummy for sqlalchemy < 2
|
||||
async_sessionmaker = type("async_sessionmaker", (type,), {}) # type: ignore
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
|
||||
@@ -106,145 +106,10 @@ async def aconnect_httpx_sse(
|
||||
|
||||
|
||||
class ChatBaichuan(BaseChatModel):
|
||||
"""Baichuan chat model integration.
|
||||
"""Baichuan chat models API by Baichuan Intelligent Technology.
|
||||
|
||||
Setup:
|
||||
To use, you should have the environment variable``BAICHUAN_API_KEY`` set with
|
||||
your API KEY.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
export BAICHUAN_API_KEY="your-api-key"
|
||||
|
||||
Key init args — completion params:
|
||||
model: Optional[str]
|
||||
Name of Baichuan model to use.
|
||||
max_tokens: Optional[int]
|
||||
Max number of tokens to generate.
|
||||
streaming: Optional[bool]
|
||||
Whether to stream the results or not.
|
||||
temperature: Optional[float]
|
||||
Sampling temperature.
|
||||
top_p: Optional[float]
|
||||
What probability mass to use.
|
||||
top_k: Optional[int]
|
||||
What search sampling control to use.
|
||||
|
||||
Key init args — client params:
|
||||
api_key: Optional[str]
|
||||
MiniMax API key. If not passed in will be read from env var BAICHUAN_API_KEY.
|
||||
base_url: Optional[str]
|
||||
Base URL for API requests.
|
||||
|
||||
See full list of supported init args and their descriptions in the params section.
|
||||
|
||||
Instantiate:
|
||||
.. code-block:: python
|
||||
|
||||
from langchain_community.chat_models import ChatBaichuan
|
||||
|
||||
chat = ChatBaichuan(
|
||||
api_key=api_key,
|
||||
model='Baichuan4',
|
||||
# temperature=...,
|
||||
# other params...
|
||||
)
|
||||
|
||||
Invoke:
|
||||
.. code-block:: python
|
||||
|
||||
messages = [
|
||||
("system", "你是一名专业的翻译家,可以将用户的中文翻译为英文。"),
|
||||
("human", "我喜欢编程。"),
|
||||
]
|
||||
chat.invoke(messages)
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
AIMessage(
|
||||
content='I enjoy programming.',
|
||||
response_metadata={
|
||||
'token_usage': {
|
||||
'prompt_tokens': 93,
|
||||
'completion_tokens': 5,
|
||||
'total_tokens': 98
|
||||
},
|
||||
'model': 'Baichuan4'
|
||||
},
|
||||
id='run-944ff552-6a93-44cf-a861-4e4d849746f9-0'
|
||||
)
|
||||
|
||||
Stream:
|
||||
.. code-block:: python
|
||||
|
||||
for chunk in chat.stream(messages):
|
||||
print(chunk)
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
content='I' id='run-f99fcd6f-dd31-46d5-be8f-0b6a22bf77d8'
|
||||
content=' enjoy programming.' id='run-f99fcd6f-dd31-46d5-be8f-0b6a22bf77d8
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
stream = chat.stream(messages)
|
||||
full = next(stream)
|
||||
for chunk in stream:
|
||||
full += chunk
|
||||
full
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
AIMessageChunk(
|
||||
content='I like programming.',
|
||||
id='run-74689970-dc31-461d-b729-3b6aa93508d2'
|
||||
)
|
||||
|
||||
Async:
|
||||
.. code-block:: python
|
||||
|
||||
await chat.ainvoke(messages)
|
||||
|
||||
# stream
|
||||
# async for chunk in chat.astream(messages):
|
||||
# print(chunk)
|
||||
|
||||
# batch
|
||||
# await chat.abatch([messages])
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
AIMessage(
|
||||
content='I enjoy programming.',
|
||||
response_metadata={
|
||||
'token_usage': {
|
||||
'prompt_tokens': 93,
|
||||
'completion_tokens': 5,
|
||||
'total_tokens': 98
|
||||
},
|
||||
'model': 'Baichuan4'
|
||||
},
|
||||
id='run-952509ed-9154-4ff9-b187-e616d7ddfbba-0'
|
||||
)
|
||||
|
||||
Response metadata
|
||||
.. code-block:: python
|
||||
|
||||
ai_msg = chat.invoke(messages)
|
||||
ai_msg.response_metadata
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
{
|
||||
'token_usage': {
|
||||
'prompt_tokens': 93,
|
||||
'completion_tokens': 5,
|
||||
'total_tokens': 98
|
||||
},
|
||||
'model': 'Baichuan4'
|
||||
}
|
||||
|
||||
""" # noqa: E501
|
||||
For more information, see https://platform.baichuan-ai.com/docs/api
|
||||
"""
|
||||
|
||||
@property
|
||||
def lc_secrets(self) -> Dict[str, str]:
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
import json
|
||||
import logging
|
||||
import uuid
|
||||
from operator import itemgetter
|
||||
@@ -40,17 +39,11 @@ from langchain_core.output_parsers.openai_tools import (
|
||||
PydanticToolsParser,
|
||||
)
|
||||
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
|
||||
from langchain_core.pydantic_v1 import (
|
||||
BaseModel,
|
||||
Field,
|
||||
SecretStr,
|
||||
root_validator,
|
||||
)
|
||||
from langchain_core.pydantic_v1 import BaseModel, Field, SecretStr, root_validator
|
||||
from langchain_core.runnables import Runnable, RunnableMap, RunnablePassthrough
|
||||
from langchain_core.tools import BaseTool
|
||||
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
|
||||
from langchain_core.utils.function_calling import convert_to_openai_tool
|
||||
from langchain_core.utils.pydantic import is_basemodel_subclass
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -72,7 +65,7 @@ def convert_message_to_dict(message: BaseMessage) -> dict:
|
||||
elif isinstance(message, (FunctionMessage, ToolMessage)):
|
||||
message_dict = {
|
||||
"role": "function",
|
||||
"content": _create_tool_content(message.content),
|
||||
"content": message.content,
|
||||
"name": message.name or message.additional_kwargs.get("name"),
|
||||
}
|
||||
else:
|
||||
@@ -81,20 +74,6 @@ def convert_message_to_dict(message: BaseMessage) -> dict:
|
||||
return message_dict
|
||||
|
||||
|
||||
def _create_tool_content(content: Union[str, List[Union[str, Dict[Any, Any]]]]) -> str:
|
||||
"""Convert tool content to dict scheme."""
|
||||
if isinstance(content, str):
|
||||
try:
|
||||
if isinstance(json.loads(content), dict):
|
||||
return content
|
||||
else:
|
||||
return json.dumps({"tool_result": content})
|
||||
except json.JSONDecodeError:
|
||||
return json.dumps({"tool_result": content})
|
||||
else:
|
||||
return json.dumps({"tool_result": content})
|
||||
|
||||
|
||||
def _convert_dict_to_message(_dict: Mapping[str, Any]) -> AIMessage:
|
||||
content = _dict.get("result", "") or ""
|
||||
additional_kwargs: Mapping[str, Any] = {}
|
||||
@@ -130,9 +109,9 @@ def _convert_dict_to_message(_dict: Mapping[str, Any]) -> AIMessage:
|
||||
content=content,
|
||||
additional_kwargs=msg_additional_kwargs,
|
||||
usage_metadata=UsageMetadata(
|
||||
input_tokens=usage.get("prompt_tokens", 0),
|
||||
output_tokens=usage.get("completion_tokens", 0),
|
||||
total_tokens=usage.get("total_tokens", 0),
|
||||
input_tokens=usage.prompt_tokens,
|
||||
output_tokens=usage.completion_tokens,
|
||||
total_tokens=usage.total_tokens,
|
||||
),
|
||||
)
|
||||
|
||||
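The hunk above switches from attribute access on `usage` to `dict.get` with defaults, so parsing no longer fails when the API response omits a usage field. A minimal illustration, assuming `usage` arrives as a plain dict:

```python
from langchain_core.messages.ai import UsageMetadata

usage = {"prompt_tokens": 16, "completion_tokens": 4}  # hypothetical payload, no total_tokens
meta = UsageMetadata(
    input_tokens=usage.get("prompt_tokens", 0),
    output_tokens=usage.get("completion_tokens", 0),
    total_tokens=usage.get("total_tokens", 0),
)
print(meta)  # {'input_tokens': 16, 'output_tokens': 4, 'total_tokens': 0}
```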
@@ -361,13 +340,13 @@ class QianfanChatEndpoint(BaseChatModel):
|
||||
In the case of other model, passing these params will not affect the result.
|
||||
"""
|
||||
|
||||
model: str = "ERNIE-Lite-8K"
|
||||
model: str = "ERNIE-Bot-turbo"
|
||||
"""Model name.
|
||||
you could get from https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu
|
||||
|
||||
preset models are mapping to an endpoint.
|
||||
`model` will be ignored if `endpoint` is set.
|
||||
Default is ERNIE-Lite-8K.
|
||||
Default is ERNIE-Bot-turbo.
|
||||
"""
|
||||
|
||||
endpoint: Optional[str] = None
|
||||
@@ -775,7 +754,7 @@ class QianfanChatEndpoint(BaseChatModel):
|
||||
""" # noqa: E501
|
||||
if kwargs:
|
||||
raise ValueError(f"Received unsupported arguments {kwargs}")
|
||||
is_pydantic_schema = isinstance(schema, type) and is_basemodel_subclass(schema)
|
||||
is_pydantic_schema = isinstance(schema, type) and issubclass(schema, BaseModel)
|
||||
llm = self.bind_tools([schema])
|
||||
if is_pydantic_schema:
|
||||
output_parser: OutputParserLike = PydanticToolsParser(
|
||||
|
||||
@@ -57,7 +57,6 @@ from langchain_core.runnables import Runnable, RunnableMap, RunnablePassthrough
|
||||
from langchain_core.tools import BaseTool
|
||||
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
|
||||
from langchain_core.utils.function_calling import convert_to_openai_tool
|
||||
from langchain_core.utils.pydantic import is_basemodel_subclass
|
||||
|
||||
from langchain_community.utilities.requests import Requests
|
||||
|
||||
@@ -444,7 +443,7 @@ class ChatEdenAI(BaseChatModel):
|
||||
if kwargs:
|
||||
raise ValueError(f"Received unsupported arguments {kwargs}")
|
||||
llm = self.bind_tools([schema], tool_choice="required")
|
||||
if isinstance(schema, type) and is_basemodel_subclass(schema):
|
||||
if isinstance(schema, type) and issubclass(schema, BaseModel):
|
||||
output_parser: OutputParserLike = PydanticToolsParser(
|
||||
tools=[schema], first_tool_only=True
|
||||
)
|
||||
|
||||
@@ -8,7 +8,7 @@ from typing import TYPE_CHECKING, Dict, Optional, Set
|
||||
|
||||
from langchain_core.messages import BaseMessage
|
||||
from langchain_core.pydantic_v1 import Field, root_validator
|
||||
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
|
||||
from langchain_core.utils import get_from_dict_or_env
|
||||
|
||||
from langchain_community.adapters.openai import convert_message_to_dict
|
||||
from langchain_community.chat_models.openai import (
|
||||
@@ -79,12 +79,10 @@ class ChatEverlyAI(ChatOpenAI):
|
||||
@root_validator(pre=True)
|
||||
def validate_environment_override(cls, values: dict) -> dict:
|
||||
"""Validate that api key and python package exists in environment."""
|
||||
values["openai_api_key"] = convert_to_secret_str(
|
||||
get_from_dict_or_env(
|
||||
values,
|
||||
"everlyai_api_key",
|
||||
"EVERLYAI_API_KEY",
|
||||
)
|
||||
values["openai_api_key"] = get_from_dict_or_env(
|
||||
values,
|
||||
"everlyai_api_key",
|
||||
"EVERLYAI_API_KEY",
|
||||
)
|
||||
values["openai_api_base"] = DEFAULT_API_BASE
|
||||
|
||||
|
||||
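The change above wraps the resolved EverlyAI key in `convert_to_secret_str`, storing it as a `SecretStr` so it is masked in reprs and logs. A minimal illustration of that behaviour (the key below is obviously fake):

```python
from langchain_core.utils import convert_to_secret_str

key = convert_to_secret_str("ev-not-a-real-key")
print(key)                     # **********
print(key.get_secret_value())  # ev-not-a-real-key
```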
@@ -46,15 +46,10 @@ from langchain_core.output_parsers.openai_tools import (
|
||||
parse_tool_call,
|
||||
)
|
||||
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
|
||||
from langchain_core.pydantic_v1 import (
|
||||
BaseModel,
|
||||
Field,
|
||||
root_validator,
|
||||
)
|
||||
from langchain_core.pydantic_v1 import BaseModel, Field, root_validator
|
||||
from langchain_core.runnables import Runnable, RunnableMap, RunnablePassthrough
|
||||
from langchain_core.tools import BaseTool
|
||||
from langchain_core.utils.function_calling import convert_to_openai_tool
|
||||
from langchain_core.utils.pydantic import is_basemodel_subclass
|
||||
|
||||
|
||||
class ChatLlamaCpp(BaseChatModel):
|
||||
@@ -530,7 +525,7 @@ class ChatLlamaCpp(BaseChatModel):
|
||||
|
||||
if kwargs:
|
||||
raise ValueError(f"Received unsupported arguments {kwargs}")
|
||||
is_pydantic_schema = isinstance(schema, type) and is_basemodel_subclass(schema)
|
||||
is_pydantic_schema = isinstance(schema, type) and issubclass(schema, BaseModel)
|
||||
if schema is None:
|
||||
raise ValueError(
|
||||
"schema must be specified when method is 'function_calling'. "
|
||||
|
||||
@@ -92,116 +92,34 @@ def _convert_delta_to_message_chunk(


class ChatSparkLLM(BaseChatModel):
"""IFlyTek Spark chat model integration.
"""iFlyTek Spark large language model.

Setup:
To use, you should have the environment variables ``IFLYTEK_SPARK_API_KEY``,
``IFLYTEK_SPARK_API_SECRET`` and ``IFLYTEK_SPARK_APP_ID``.
To use, you should pass `app_id`, `api_key`, `api_secret`
as a named parameter to the constructor OR set environment
variables ``IFLYTEK_SPARK_APP_ID``, ``IFLYTEK_SPARK_API_KEY`` and
``IFLYTEK_SPARK_API_SECRET``

Key init args — completion params:
model: Optional[str]
Name of IFLYTEK SPARK model to use.
temperature: Optional[float]
Sampling temperature.
top_k: Optional[float]
What search sampling control to use.
streaming: Optional[bool]
Whether to stream the results or not.

Key init args — client params:
api_key: Optional[str]
IFLYTEK SPARK API KEY. If not passed in, will be read from env var IFLYTEK_SPARK_API_KEY.
api_secret: Optional[str]
IFLYTEK SPARK API SECRET. If not passed in, will be read from env var IFLYTEK_SPARK_API_SECRET.
api_url: Optional[str]
Base URL for API requests.
timeout: Optional[int]
Timeout for requests.

See full list of supported init args and their descriptions in the params section.

Instantiate:
Example:
.. code-block:: python

from langchain_community.chat_models import ChatSparkLLM
client = ChatSparkLLM(
spark_app_id="<app_id>",
spark_api_key="<api_key>",
spark_api_secret="<api_secret>"
)

chat = ChatSparkLLM(
api_key=api_key,
api_secret=ak,
model='Spark4.0 Ultra',
# temperature=...,
# other params...
)

Invoke:
.. code-block:: python

messages = [
("system", "你是一名专业的翻译家,可以将用户的中文翻译为英文。"),  # "You are a professional translator: translate the user's Chinese into English."
("human", "我喜欢编程。"),  # "I like programming."
]
chat.invoke(messages)

.. code-block:: python

AIMessage(
content='I like programming.',
response_metadata={
'token_usage': {
'question_tokens': 3,
'prompt_tokens': 16,
'completion_tokens': 4,
'total_tokens': 20
}
},
id='run-af8b3531-7bf7-47f0-bfe8-9262cb2a9d47-0'
)

Stream:
.. code-block:: python

for chunk in chat.stream(messages):
print(chunk)

.. code-block:: python

content='I' id='run-fdbb57c2-2d32-4516-b894-6c5a67605d83'
content=' like programming' id='run-fdbb57c2-2d32-4516-b894-6c5a67605d83'
content='.' id='run-fdbb57c2-2d32-4516-b894-6c5a67605d83'

.. code-block:: python

stream = chat.stream(messages)
full = next(stream)
for chunk in stream:
full += chunk
full

.. code-block:: python

AIMessageChunk(
content='I like programming.',
id='run-aca2fa82-c2e4-4835-b7e2-865ddd3c46cb'
)

Response metadata:
.. code-block:: python

ai_msg = chat.invoke(messages)
ai_msg.response_metadata

.. code-block:: python

{
'token_usage': {
'question_tokens': 3,
'prompt_tokens': 16,
'completion_tokens': 4,
'total_tokens': 20
}
}

""" # noqa: E501
Extra info:
1. Get app_id, api_key, api_secret from the iFlyTek Open Platform Console:
https://console.xfyun.cn/services/bm35
2. By default, iFlyTek Spark LLM V3.5 is invoked.
If you need to invoke other versions, please configure the corresponding
parameters (spark_api_url and spark_llm_domain) according to the document:
https://www.xfyun.cn/doc/spark/Web.html
3. It is necessary to ensure that the app_id used has a license for
the corresponding model version.
4. If you encounter problems during use, try getting help at:
https://console.xfyun.cn/workorder/commit
"""

@classmethod
def is_lc_serializable(cls) -> bool:
@@ -339,7 +257,7 @@ class ChatSparkLLM(BaseChatModel):
[_convert_message_to_dict(m) for m in messages],
self.spark_user_id,
self.model_kwargs,
streaming=True,
self.streaming,
)
for content in self.client.subscribe(timeout=self.request_timeout):
if "data" not in content:
@@ -356,10 +274,9 @@ class ChatSparkLLM(BaseChatModel):
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> ChatResult:
if stream or self.streaming:
if self.streaming:
stream_iter = self._stream(
messages=messages, stop=stop, run_manager=run_manager, **kwargs
)

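The Setup note above lists two configuration paths; as a quick illustration, here is a minimal sketch of the environment-variable path (the variable names come from that note, everything else is assumed rather than taken from this diff):

.. code-block:: python

    import os

    from langchain_community.chat_models import ChatSparkLLM

    # Credentials supplied via the environment instead of constructor arguments.
    os.environ["IFLYTEK_SPARK_APP_ID"] = "<app_id>"
    os.environ["IFLYTEK_SPARK_API_KEY"] = "<api_key>"
    os.environ["IFLYTEK_SPARK_API_SECRET"] = "<api_secret>"

    chat = ChatSparkLLM()  # picks up the three variables above
    print(chat.invoke("我喜欢编程。").content)  # expected: "I like programming."
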
@@ -53,16 +53,11 @@ from langchain_core.outputs import (
ChatGenerationChunk,
ChatResult,
)
from langchain_core.pydantic_v1 import (
BaseModel,
Field,
SecretStr,
)
from langchain_core.pydantic_v1 import BaseModel, Field, SecretStr
from langchain_core.runnables import Runnable, RunnableMap, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from langchain_core.utils.function_calling import convert_to_openai_tool
from langchain_core.utils.pydantic import is_basemodel_subclass
from requests.exceptions import HTTPError
from tenacity import (
before_sleep_log,
@@ -870,7 +865,7 @@ class ChatTongyi(BaseChatModel):
"""
if kwargs:
raise ValueError(f"Received unsupported arguments {kwargs}")
is_pydantic_schema = isinstance(schema, type) and is_basemodel_subclass(schema)
is_pydantic_schema = isinstance(schema, type) and issubclass(schema, BaseModel)
llm = self.bind_tools([schema])
if is_pydantic_schema:
output_parser: OutputParserLike = PydanticToolsParser(

@@ -142,10 +142,6 @@ if TYPE_CHECKING:
from langchain_community.document_loaders.dataframe import (
DataFrameLoader,
)
from langchain_community.document_loaders.dedoc import (
DedocAPIFileLoader,
DedocFileLoader,
)
from langchain_community.document_loaders.diffbot import (
DiffbotLoader,
)
@@ -344,7 +340,6 @@ if TYPE_CHECKING:
)
from langchain_community.document_loaders.pdf import (
AmazonTextractPDFLoader,
DedocPDFLoader,
MathpixPDFLoader,
OnlinePDFLoader,
PagedPDFSplitter,
@@ -575,9 +570,6 @@ _module_lookup = {
"CubeSemanticLoader": "langchain_community.document_loaders.cube_semantic",
"DataFrameLoader": "langchain_community.document_loaders.dataframe",
"DatadogLogsLoader": "langchain_community.document_loaders.datadog_logs",
"DedocAPIFileLoader": "langchain_community.document_loaders.dedoc",
"DedocFileLoader": "langchain_community.document_loaders.dedoc",
"DedocPDFLoader": "langchain_community.document_loaders.pdf",
"DiffbotLoader": "langchain_community.document_loaders.diffbot",
"DirectoryLoader": "langchain_community.document_loaders.directory",
"DiscordChatLoader": "langchain_community.document_loaders.discord",
@@ -779,9 +771,6 @@ __all__ = [
"CubeSemanticLoader",
"DataFrameLoader",
"DatadogLogsLoader",
"DedocAPIFileLoader",
"DedocFileLoader",
"DedocPDFLoader",
"DiffbotLoader",
"DirectoryLoader",
"DiscordChatLoader",

@@ -41,7 +41,6 @@ class BlackboardLoader(WebBaseLoader):
basic_auth: Optional[Tuple[str, str]] = None,
cookies: Optional[dict] = None,
continue_on_failure: bool = False,
show_progress: bool = True,
):
"""Initialize with blackboard course url.

@@ -57,15 +56,12 @@ class BlackboardLoader(WebBaseLoader):
occurs loading a url, emitting a warning instead of raising an
exception. Setting this to True makes the loader more robust, but also
may result in missing data. Default: False
show_progress: whether to show a progress bar while loading. Default: True

Raises:
ValueError: If blackboard course url is invalid.
"""
super().__init__(
web_paths=(blackboard_course_url),
continue_on_failure=continue_on_failure,
show_progress=show_progress,
web_paths=(blackboard_course_url), continue_on_failure=continue_on_failure
)
# Get base url
try:

@@ -1,546 +0,0 @@
|
||||
import html
|
||||
import json
|
||||
import os
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import (
|
||||
Dict,
|
||||
Iterator,
|
||||
Optional,
|
||||
Tuple,
|
||||
Union,
|
||||
)
|
||||
|
||||
from langchain_core.documents import Document
|
||||
|
||||
from langchain_community.document_loaders.base import BaseLoader
|
||||
|
||||
|
||||
class DedocBaseLoader(BaseLoader, ABC):
|
||||
"""
|
||||
Base Loader that uses `dedoc` (https://dedoc.readthedocs.io).
|
||||
|
||||
Loader enables extracting text, tables and attached files from the given file:
|
||||
* `Text` can be split by pages, `dedoc` tree nodes, textual lines
|
||||
(according to the `split` parameter).
|
||||
* `Attached files` (when with_attachments=True)
|
||||
are split according to the `split` parameter.
|
||||
For attachments, langchain Document object has an additional metadata field
|
||||
`type`="attachment".
|
||||
* `Tables` (when with_tables=True) are not split - each table corresponds to one
|
||||
langchain Document object.
|
||||
For tables, Document object has additional metadata fields `type`="table"
|
||||
and `text_as_html` with table HTML representation.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
file_path: str,
|
||||
*,
|
||||
split: str = "document",
|
||||
with_tables: bool = True,
|
||||
with_attachments: Union[str, bool] = False,
|
||||
recursion_deep_attachments: int = 10,
|
||||
pdf_with_text_layer: str = "auto_tabby",
|
||||
language: str = "rus+eng",
|
||||
pages: str = ":",
|
||||
is_one_column_document: str = "auto",
|
||||
document_orientation: str = "auto",
|
||||
need_header_footer_analysis: Union[str, bool] = False,
|
||||
need_binarization: Union[str, bool] = False,
|
||||
need_pdf_table_analysis: Union[str, bool] = True,
|
||||
delimiter: Optional[str] = None,
|
||||
encoding: Optional[str] = None,
|
||||
) -> None:
|
||||
"""
|
||||
Initialize with file path and parsing parameters.
|
||||
|
||||
Args:
|
||||
file_path: path to the file for processing
|
||||
split: type of document splitting into parts (each part is returned
|
||||
separately), default value "document"
|
||||
"document": document text is returned as a single langchain Document
|
||||
object (don't split)
|
||||
"page": split document text into pages (works for PDF, DJVU, PPTX, PPT,
|
||||
ODP)
|
||||
"node": split document text into tree nodes (title nodes, list item
|
||||
nodes, raw text nodes)
|
||||
"line": split document text into lines
|
||||
with_tables: add tables to the result - each table is returned as a single
|
||||
langchain Document object
|
||||
|
||||
Parameters used for document parsing via `dedoc`
|
||||
(https://dedoc.readthedocs.io/en/latest/parameters/parameters.html):
|
||||
|
||||
with_attachments: enable attached files extraction
|
||||
recursion_deep_attachments: recursion level for attached files
|
||||
extraction, works only when with_attachments==True
|
||||
pdf_with_text_layer: type of handler for parsing PDF documents,
|
||||
available options
|
||||
["true", "false", "tabby", "auto", "auto_tabby" (default)]
|
||||
language: language of the document for PDF without a textual layer and
|
||||
images, available options ["eng", "rus", "rus+eng" (default)],
|
||||
the list of languages can be extended, please see
|
||||
https://dedoc.readthedocs.io/en/latest/tutorials/add_new_language.html
|
||||
pages: page slice to define the reading range for parsing PDF documents
|
||||
is_one_column_document: detect number of columns for PDF without
|
||||
a textual layer and images, available options
|
||||
["true", "false", "auto" (default)]
|
||||
document_orientation: fix document orientation (90, 180, 270 degrees)
|
||||
for PDF without a textual layer and images, available options
|
||||
["auto" (default), "no_change"]
|
||||
need_header_footer_analysis: remove headers and footers from the output
|
||||
result for parsing PDF and images
|
||||
need_binarization: clean pages background (binarize) for PDF without a
|
||||
textual layer and images
|
||||
need_pdf_table_analysis: parse tables for PDF without a textual layer
|
||||
and images
|
||||
delimiter: column separator for CSV, TSV files
|
||||
encoding: encoding of TXT, CSV, TSV
|
||||
"""
|
||||
self.parsing_parameters = {
|
||||
key: value
|
||||
for key, value in locals().items()
|
||||
if key not in {"self", "file_path", "split", "with_tables"}
|
||||
}
|
||||
self.valid_split_values = {"document", "page", "node", "line"}
|
||||
if split not in self.valid_split_values:
|
||||
raise ValueError(
|
||||
f"Got {split} for `split`, but should be one of "
|
||||
f"`{self.valid_split_values}`"
|
||||
)
|
||||
self.split = split
|
||||
self.with_tables = with_tables
|
||||
self.file_path = file_path
|
||||
|
||||
structure_type = "tree" if self.split == "node" else "linear"
|
||||
self.parsing_parameters["structure_type"] = structure_type
|
||||
self.parsing_parameters["need_content_analysis"] = with_attachments
|
||||
|
||||
def lazy_load(self) -> Iterator[Document]:
|
||||
"""Lazily load documents."""
|
||||
import tempfile
|
||||
|
||||
try:
|
||||
from dedoc import DedocManager
|
||||
except ImportError:
|
||||
raise ImportError(
|
||||
"`dedoc` package not found, please install it with `pip install dedoc`"
|
||||
)
|
||||
dedoc_manager = DedocManager(manager_config=self._make_config())
|
||||
dedoc_manager.config["logger"].disabled = True
|
||||
|
||||
with tempfile.TemporaryDirectory() as tmpdir:
|
||||
document_tree = dedoc_manager.parse(
|
||||
file_path=self.file_path,
|
||||
parameters={**self.parsing_parameters, "attachments_dir": tmpdir},
|
||||
)
|
||||
yield from self._split_document(
|
||||
document_tree=document_tree.to_api_schema().dict(), split=self.split
|
||||
)
|
||||
|
||||
@abstractmethod
|
||||
def _make_config(self) -> dict:
|
||||
"""
|
||||
Make configuration for DedocManager according to the file extension and
|
||||
parsing parameters.
|
||||
"""
|
||||
pass
|
||||
|
||||
def _json2txt(self, paragraph: dict) -> str:
|
||||
"""Get text (recursively) of the document tree node."""
|
||||
subparagraphs_text = "\n".join(
|
||||
[
|
||||
self._json2txt(subparagraph)
|
||||
for subparagraph in paragraph["subparagraphs"]
|
||||
]
|
||||
)
|
||||
text = (
|
||||
f"{paragraph['text']}\n{subparagraphs_text}"
|
||||
if subparagraphs_text
|
||||
else paragraph["text"]
|
||||
)
|
||||
return text
|
||||
|
||||
def _parse_subparagraphs(
|
||||
self, document_tree: dict, document_metadata: dict
|
||||
) -> Iterator[Document]:
|
||||
"""Parse recursively document tree obtained by `dedoc`."""
|
||||
if len(document_tree["subparagraphs"]) > 0:
|
||||
for subparagraph in document_tree["subparagraphs"]:
|
||||
yield from self._parse_subparagraphs(
|
||||
document_tree=subparagraph, document_metadata=document_metadata
|
||||
)
|
||||
else:
|
||||
yield Document(
|
||||
page_content=document_tree["text"],
|
||||
metadata={**document_metadata, **document_tree["metadata"]},
|
||||
)
|
||||
|
||||
def _split_document(
|
||||
self,
|
||||
document_tree: dict,
|
||||
split: str,
|
||||
additional_metadata: Optional[dict] = None,
|
||||
) -> Iterator[Document]:
|
||||
"""Split document into parts according to the `split` parameter."""
|
||||
document_metadata = document_tree["metadata"]
|
||||
if additional_metadata:
|
||||
document_metadata = {**document_metadata, **additional_metadata}
|
||||
|
||||
if split == "document":
|
||||
text = self._json2txt(paragraph=document_tree["content"]["structure"])
|
||||
yield Document(page_content=text, metadata=document_metadata)
|
||||
|
||||
elif split == "page":
|
||||
nodes = document_tree["content"]["structure"]["subparagraphs"]
|
||||
page_id = nodes[0]["metadata"]["page_id"]
|
||||
page_text = ""
|
||||
|
||||
for node in nodes:
|
||||
if node["metadata"]["page_id"] == page_id:
|
||||
page_text += self._json2txt(node)
|
||||
else:
|
||||
yield Document(
|
||||
page_content=page_text,
|
||||
metadata={**document_metadata, "page_id": page_id},
|
||||
)
|
||||
page_id = node["metadata"]["page_id"]
|
||||
page_text = self._json2txt(node)
|
||||
|
||||
yield Document(
|
||||
page_content=page_text,
|
||||
metadata={**document_metadata, "page_id": page_id},
|
||||
)
|
||||
|
||||
elif split == "line":
|
||||
for node in document_tree["content"]["structure"]["subparagraphs"]:
|
||||
line_metadata = node["metadata"]
|
||||
yield Document(
|
||||
page_content=self._json2txt(node),
|
||||
metadata={**document_metadata, **line_metadata},
|
||||
)
|
||||
|
||||
elif split == "node":
|
||||
yield from self._parse_subparagraphs(
|
||||
document_tree=document_tree["content"]["structure"],
|
||||
document_metadata=document_metadata,
|
||||
)
|
||||
|
||||
else:
|
||||
raise ValueError(
|
||||
f"Got {split} for `split`, but should be one of "
|
||||
f"`{self.valid_split_values}`"
|
||||
)
|
||||
|
||||
if self.with_tables:
|
||||
for table in document_tree["content"]["tables"]:
|
||||
table_text, table_html = self._get_table(table)
|
||||
yield Document(
|
||||
page_content=table_text,
|
||||
metadata={
|
||||
**table["metadata"],
|
||||
"type": "table",
|
||||
"text_as_html": table_html,
|
||||
},
|
||||
)
|
||||
|
||||
for attachment in document_tree["attachments"]:
|
||||
yield from self._split_document(
|
||||
document_tree=attachment,
|
||||
split=self.split,
|
||||
additional_metadata={"type": "attachment"},
|
||||
)
|
||||
|
||||
def _get_table(self, table: dict) -> Tuple[str, str]:
|
||||
"""Get text and HTML representation of the table."""
|
||||
table_text = ""
|
||||
for row in table["cells"]:
|
||||
for cell in row:
|
||||
table_text += " ".join(line["text"] for line in cell["lines"])
|
||||
table_text += "\t"
|
||||
table_text += "\n"
|
||||
|
||||
table_html = (
|
||||
'<table border="1" style="border-collapse: collapse; width: 100%;'
|
||||
'">\n<tbody>\n'
|
||||
)
|
||||
for row in table["cells"]:
|
||||
table_html += "<tr>\n"
|
||||
for cell in row:
|
||||
cell_text = "\n".join(line["text"] for line in cell["lines"])
|
||||
cell_text = html.escape(cell_text)
|
||||
table_html += "<td"
|
||||
if cell["invisible"]:
|
||||
table_html += ' style="display: none" '
|
||||
table_html += (
|
||||
f' colspan="{cell["colspan"]}" rowspan='
|
||||
f'"{cell["rowspan"]}">{cell_text}</td>\n'
|
||||
)
|
||||
table_html += "</tr>\n"
|
||||
table_html += "</tbody>\n</table>"
|
||||
|
||||
return table_text, table_html
|
||||
|
||||
|
||||
class DedocFileLoader(DedocBaseLoader):
|
||||
"""
|
||||
DedocFileLoader document loader integration to load files using `dedoc`.
|
||||
|
||||
The file loader automatically detects the file type (with the correct extension).
|
||||
The list of supported file types is given at
|
||||
https://dedoc.readthedocs.io/en/latest/index.html#id1.
|
||||
Please see the documentation of DedocBaseLoader to get more details.
|
||||
|
||||
Setup:
|
||||
Install ``dedoc`` package.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
pip install -U dedoc
|
||||
|
||||
Instantiate:
|
||||
.. code-block:: python
|
||||
|
||||
from langchain_community.document_loaders import DedocFileLoader
|
||||
|
||||
loader = DedocFileLoader(
|
||||
file_path="example.pdf",
|
||||
# split=...,
|
||||
# with_tables=...,
|
||||
# pdf_with_text_layer=...,
|
||||
# pages=...,
|
||||
# ...
|
||||
)
|
||||
|
||||
Load:
|
||||
.. code-block:: python
|
||||
|
||||
docs = loader.load()
|
||||
print(docs[0].page_content[:100])
|
||||
print(docs[0].metadata)
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
Some text
|
||||
{
|
||||
'file_name': 'example.pdf',
|
||||
'file_type': 'application/pdf',
|
||||
# ...
|
||||
}
|
||||
|
||||
Lazy load:
|
||||
.. code-block:: python
|
||||
|
||||
docs = []
|
||||
docs_lazy = loader.lazy_load()
|
||||
|
||||
for doc in docs_lazy:
|
||||
docs.append(doc)
|
||||
print(docs[0].page_content[:100])
|
||||
print(docs[0].metadata)
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
Some text
|
||||
{
|
||||
'file_name': 'example.pdf',
|
||||
'file_type': 'application/pdf',
|
||||
# ...
|
||||
}
|
||||
"""
|
||||
|
||||
def _make_config(self) -> dict:
|
||||
from dedoc.utils.langchain import make_manager_config
|
||||
|
||||
return make_manager_config(
|
||||
file_path=self.file_path,
|
||||
parsing_params=self.parsing_parameters,
|
||||
split=self.split,
|
||||
)
|
||||
|
||||
|
||||
class DedocAPIFileLoader(DedocBaseLoader):
|
||||
"""
|
||||
Load files using `dedoc` API.
|
||||
The file loader automatically detects the file type (even with the wrong extension).
|
||||
By default, the loader makes a call to the locally hosted `dedoc` API.
|
||||
More information about `dedoc` API can be found in `dedoc` documentation:
|
||||
https://dedoc.readthedocs.io/en/latest/dedoc_api_usage/api.html
|
||||
|
||||
Please see the documentation of DedocBaseLoader to get more details.
|
||||
|
||||
Setup:
|
||||
You don't need to install `dedoc` library for using this loader.
|
||||
Instead, the `dedoc` API needs to be run.
|
||||
You may use Docker container for this purpose.
|
||||
Please see `dedoc` documentation for more details:
|
||||
https://dedoc.readthedocs.io/en/latest/getting_started/installation.html#install-and-run-dedoc-using-docker
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
docker pull dedocproject/dedoc
|
||||
docker run -p 1231:1231
|
||||
|
||||
Instantiate:
|
||||
.. code-block:: python
|
||||
|
||||
from langchain_community.document_loaders import DedocAPIFileLoader
|
||||
|
||||
loader = DedocAPIFileLoader(
|
||||
file_path="example.pdf",
|
||||
# url=...,
|
||||
# split=...,
|
||||
# with_tables=...,
|
||||
# pdf_with_text_layer=...,
|
||||
# pages=...,
|
||||
# ...
|
||||
)
|
||||
|
||||
Load:
|
||||
.. code-block:: python
|
||||
|
||||
docs = loader.load()
|
||||
print(docs[0].page_content[:100])
|
||||
print(docs[0].metadata)
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
Some text
|
||||
{
|
||||
'file_name': 'example.pdf',
|
||||
'file_type': 'application/pdf',
|
||||
# ...
|
||||
}
|
||||
|
||||
Lazy load:
|
||||
.. code-block:: python
|
||||
|
||||
docs = []
|
||||
docs_lazy = loader.lazy_load()
|
||||
|
||||
for doc in docs_lazy:
|
||||
docs.append(doc)
|
||||
print(docs[0].page_content[:100])
|
||||
print(docs[0].metadata)
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
Some text
|
||||
{
|
||||
'file_name': 'example.pdf',
|
||||
'file_type': 'application/pdf',
|
||||
# ...
|
||||
}
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
file_path: str,
|
||||
*,
|
||||
url: str = "http://0.0.0.0:1231",
|
||||
split: str = "document",
|
||||
with_tables: bool = True,
|
||||
with_attachments: Union[str, bool] = False,
|
||||
recursion_deep_attachments: int = 10,
|
||||
pdf_with_text_layer: str = "auto_tabby",
|
||||
language: str = "rus+eng",
|
||||
pages: str = ":",
|
||||
is_one_column_document: str = "auto",
|
||||
document_orientation: str = "auto",
|
||||
need_header_footer_analysis: Union[str, bool] = False,
|
||||
need_binarization: Union[str, bool] = False,
|
||||
need_pdf_table_analysis: Union[str, bool] = True,
|
||||
delimiter: Optional[str] = None,
|
||||
encoding: Optional[str] = None,
|
||||
) -> None:
|
||||
"""Initialize with file path, API url and parsing parameters.
|
||||
|
||||
Args:
|
||||
file_path: path to the file for processing
|
||||
url: URL to call `dedoc` API
|
||||
split: type of document splitting into parts (each part is returned
|
||||
separately), default value "document"
|
||||
"document": document is returned as a single langchain Document object
|
||||
(don't split)
|
||||
"page": split document into pages (works for PDF, DJVU, PPTX, PPT, ODP)
|
||||
"node": split document into tree nodes (title nodes, list item nodes,
|
||||
raw text nodes)
|
||||
"line": split document into lines
|
||||
with_tables: add tables to the result - each table is returned as a single
|
||||
langchain Document object
|
||||
|
||||
Parameters used for document parsing via `dedoc`
|
||||
(https://dedoc.readthedocs.io/en/latest/parameters/parameters.html):
|
||||
|
||||
with_attachments: enable attached files extraction
|
||||
recursion_deep_attachments: recursion level for attached files
|
||||
extraction, works only when with_attachments==True
|
||||
pdf_with_text_layer: type of handler for parsing PDF documents,
|
||||
available options
|
||||
["true", "false", "tabby", "auto", "auto_tabby" (default)]
|
||||
language: language of the document for PDF without a textual layer and
|
||||
images, available options ["eng", "rus", "rus+eng" (default)],
|
||||
the list of languages can be extended, please see
|
||||
https://dedoc.readthedocs.io/en/latest/tutorials/add_new_language.html
|
||||
pages: page slice to define the reading range for parsing PDF documents
|
||||
is_one_column_document: detect number of columns for PDF without
|
||||
a textual layer and images, available options
|
||||
["true", "false", "auto" (default)]
|
||||
document_orientation: fix document orientation (90, 180, 270 degrees)
|
||||
for PDF without a textual layer and images, available options
|
||||
["auto" (default), "no_change"]
|
||||
need_header_footer_analysis: remove headers and footers from the output
|
||||
result for parsing PDF and images
|
||||
need_binarization: clean pages background (binarize) for PDF without a
|
||||
textual layer and images
|
||||
need_pdf_table_analysis: parse tables for PDF without a textual layer
|
||||
and images
|
||||
delimiter: column separator for CSV, TSV files
|
||||
encoding: encoding of TXT, CSV, TSV
|
||||
"""
|
||||
super().__init__(
|
||||
file_path=file_path,
|
||||
split=split,
|
||||
with_tables=with_tables,
|
||||
with_attachments=with_attachments,
|
||||
recursion_deep_attachments=recursion_deep_attachments,
|
||||
pdf_with_text_layer=pdf_with_text_layer,
|
||||
language=language,
|
||||
pages=pages,
|
||||
is_one_column_document=is_one_column_document,
|
||||
document_orientation=document_orientation,
|
||||
need_header_footer_analysis=need_header_footer_analysis,
|
||||
need_binarization=need_binarization,
|
||||
need_pdf_table_analysis=need_pdf_table_analysis,
|
||||
delimiter=delimiter,
|
||||
encoding=encoding,
|
||||
)
|
||||
self.url = url
|
||||
self.parsing_parameters["return_format"] = "json"
|
||||
|
||||
def lazy_load(self) -> Iterator[Document]:
|
||||
"""Lazily load documents."""
|
||||
doc_tree = self._send_file(
|
||||
url=self.url, file_path=self.file_path, parameters=self.parsing_parameters
|
||||
)
|
||||
yield from self._split_document(document_tree=doc_tree, split=self.split)
|
||||
|
||||
def _make_config(self) -> dict:
|
||||
return {}
|
||||
|
||||
def _send_file(
|
||||
self, url: str, file_path: str, parameters: dict
|
||||
) -> Dict[str, Union[list, dict, str]]:
|
||||
"""Send POST-request to `dedoc` API and return the results"""
|
||||
import requests
|
||||
|
||||
file_name = os.path.basename(file_path)
|
||||
with open(file_path, "rb") as file:
|
||||
files = {"file": (file_name, file)}
|
||||
r = requests.post(f"{url}/upload", files=files, data=parameters)
|
||||
|
||||
if r.status_code != 200:
|
||||
raise ValueError(f"Error during file handling: {r.content.decode()}")
|
||||
|
||||
result = json.loads(r.content.decode())
|
||||
return result
|
||||
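Pulling the Dedoc loader pieces above together, here is a minimal sketch of how the `split` and `with_tables` options described in the docstrings surface in the loaded documents (the file name is hypothetical and the ``dedoc`` package is assumed to be installed):

.. code-block:: python

    from langchain_community.document_loaders import DedocFileLoader

    # split="line" yields one Document per textual line; with_tables=True adds one
    # Document per table, tagged with metadata["type"] == "table" and an HTML
    # rendering under metadata["text_as_html"].
    loader = DedocFileLoader("report.docx", split="line", with_tables=True)

    for doc in loader.lazy_load():
        kind = doc.metadata.get("type", "text")
        print(kind, doc.page_content[:40])
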
@@ -20,7 +20,6 @@ class GitbookLoader(WebBaseLoader):
base_url: Optional[str] = None,
content_selector: str = "main",
continue_on_failure: bool = False,
show_progress: bool = True,
):
"""Initialize with web page and whether to load all paths.

@@ -37,7 +36,6 @@ class GitbookLoader(WebBaseLoader):
occurs loading a url, emitting a warning instead of raising an
exception. Setting this to True makes the loader more robust, but also
may result in missing data. Default: False
show_progress: whether to show a progress bar while loading. Default: True
"""
self.base_url = base_url or web_page
if self.base_url.endswith("/"):
@@ -45,11 +43,7 @@ class GitbookLoader(WebBaseLoader):
if load_all_paths:
# set web_path to the sitemap if we want to crawl all paths
web_page = f"{self.base_url}/sitemap.xml"
super().__init__(
web_paths=(web_page,),
continue_on_failure=continue_on_failure,
show_progress=show_progress,
)
super().__init__(web_paths=(web_page,), continue_on_failure=continue_on_failure)
self.load_all_paths = load_all_paths
self.content_selector = content_selector


@@ -7,6 +7,7 @@
# 4. For service accounts visit
# https://cloud.google.com/iam/docs/service-accounts-create

import os
from pathlib import Path
from typing import Any, Dict, List, Optional, Sequence, Union

@@ -107,13 +108,7 @@ class GoogleDriveLoader(BaseLoader, BaseModel):
return v

def _load_credentials(self) -> Any:
"""Load credentials.
The order of loading credentials:
1. Service account key if file exists
2. Token path (for OAuth Client) if file exists
3. Credentials path (for OAuth Client) if file exists
4. Default credentials; if no credentials are found, raise DefaultCredentialsError
"""
"""Load credentials."""
# Adapted from https://developers.google.com/drive/api/v3/quickstart/python
try:
from google.auth import default
@@ -131,31 +126,30 @@ class GoogleDriveLoader(BaseLoader, BaseModel):
)

creds = None
# From service account
if self.service_account_key.exists():
return service_account.Credentials.from_service_account_file(
str(self.service_account_key), scopes=SCOPES
)

# From Oauth Client
if self.token_path.exists():
creds = Credentials.from_authorized_user_file(str(self.token_path), SCOPES)

if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
elif self.credentials_path.exists():
elif "GOOGLE_APPLICATION_CREDENTIALS" not in os.environ:
creds, project = default()
creds = creds.with_scopes(SCOPES)
# no need to write to file
if creds:
return creds
else:
flow = InstalledAppFlow.from_client_secrets_file(
str(self.credentials_path), SCOPES
)
creds = flow.run_local_server(port=0)
if creds:
with open(self.token_path, "w") as token:
token.write(creds.to_json())

# From Application Default Credentials
if not creds:
creds, _ = default(scopes=SCOPES)
with open(self.token_path, "w") as token:
token.write(creds.to_json())

return creds

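The removed docstring above spells out a four-step resolution order for GoogleDriveLoader credentials; the following is a self-contained sketch of that order (paths, scope, and the helper name are illustrative, not part of this diff):

.. code-block:: python

    from pathlib import Path

    from google.auth import default
    from google.auth.transport.requests import Request
    from google.oauth2 import service_account
    from google.oauth2.credentials import Credentials
    from google_auth_oauthlib.flow import InstalledAppFlow

    SCOPES = ["https://www.googleapis.com/auth/drive.readonly"]


    def load_drive_credentials(
        service_account_key: Path, token_path: Path, credentials_path: Path
    ):
        # 1. Service account key, if the file exists.
        if service_account_key.exists():
            return service_account.Credentials.from_service_account_file(
                str(service_account_key), scopes=SCOPES
            )
        # 2. Previously saved OAuth token, refreshed when expired.
        creds = None
        if token_path.exists():
            creds = Credentials.from_authorized_user_file(str(token_path), SCOPES)
            if creds.expired and creds.refresh_token:
                creds.refresh(Request())
        # 3. OAuth client secrets, running the local browser flow.
        if (not creds or not creds.valid) and credentials_path.exists():
            flow = InstalledAppFlow.from_client_secrets_file(str(credentials_path), SCOPES)
            creds = flow.run_local_server(port=0)
        # 4. Application Default Credentials as the final fallback.
        if not creds:
            creds, _ = default(scopes=SCOPES)
        return creds
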
@@ -6,7 +6,6 @@ import warnings
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Any,
|
||||
Dict,
|
||||
Iterable,
|
||||
Iterator,
|
||||
Mapping,
|
||||
@@ -28,7 +27,6 @@ if TYPE_CHECKING:
|
||||
import pdfplumber.page
|
||||
import pypdf._page
|
||||
import pypdfium2._helpers.page
|
||||
from pypdf import PageObject
|
||||
from textractor.data.text_linearization_config import TextLinearizationConfig
|
||||
|
||||
|
||||
@@ -85,17 +83,10 @@ class PyPDFParser(BaseBlobParser):
|
||||
"""Load `PDF` using `pypdf`"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
password: Optional[Union[str, bytes]] = None,
|
||||
extract_images: bool = False,
|
||||
*,
|
||||
extraction_mode: str = "plain",
|
||||
extraction_kwargs: Optional[Dict[str, Any]] = None,
|
||||
self, password: Optional[Union[str, bytes]] = None, extract_images: bool = False
|
||||
):
|
||||
self.password = password
|
||||
self.extract_images = extract_images
|
||||
self.extraction_mode = extraction_mode
|
||||
self.extraction_kwargs = extraction_kwargs or {}
|
||||
|
||||
def lazy_parse(self, blob: Blob) -> Iterator[Document]: # type: ignore[valid-type]
|
||||
"""Lazily parse the blob."""
|
||||
@@ -107,23 +98,11 @@ class PyPDFParser(BaseBlobParser):
|
||||
"`pip install pypdf`"
|
||||
)
|
||||
|
||||
def _extract_text_from_page(page: "PageObject") -> str:
|
||||
"""
|
||||
Extract text from image given the version of pypdf.
|
||||
"""
|
||||
if pypdf.__version__.startswith("3"):
|
||||
return page.extract_text()
|
||||
else:
|
||||
return page.extract_text(
|
||||
extraction_mode=self.extraction_mode, **self.extraction_kwargs
|
||||
)
|
||||
|
||||
with blob.as_bytes_io() as pdf_file_obj: # type: ignore[attr-defined]
|
||||
pdf_reader = pypdf.PdfReader(pdf_file_obj, password=self.password)
|
||||
|
||||
yield from [
|
||||
Document(
|
||||
page_content=_extract_text_from_page(page=page)
|
||||
page_content=page.extract_text()
|
||||
+ self._extract_images_from_page(page),
|
||||
metadata={"source": blob.source, "page": page_number}, # type: ignore[attr-defined]
|
||||
)
|
||||
|
||||
@@ -26,7 +26,6 @@ from langchain_core.utils import get_from_dict_or_env
|
||||
|
||||
from langchain_community.document_loaders.base import BaseLoader
|
||||
from langchain_community.document_loaders.blob_loaders import Blob
|
||||
from langchain_community.document_loaders.dedoc import DedocBaseLoader
|
||||
from langchain_community.document_loaders.parsers.pdf import (
|
||||
AmazonTextractPDFParser,
|
||||
DocumentIntelligenceParser,
|
||||
@@ -172,9 +171,6 @@ class PyPDFLoader(BasePDFLoader):
|
||||
password: Optional[Union[str, bytes]] = None,
|
||||
headers: Optional[Dict] = None,
|
||||
extract_images: bool = False,
|
||||
*,
|
||||
extraction_mode: str = "plain",
|
||||
extraction_kwargs: Optional[Dict] = None,
|
||||
) -> None:
|
||||
"""Initialize with a file path."""
|
||||
try:
|
||||
@@ -184,12 +180,7 @@ class PyPDFLoader(BasePDFLoader):
|
||||
"pypdf package not found, please install it with " "`pip install pypdf`"
|
||||
)
|
||||
super().__init__(file_path, headers=headers)
|
||||
self.parser = PyPDFParser(
|
||||
password=password,
|
||||
extract_images=extract_images,
|
||||
extraction_mode=extraction_mode,
|
||||
extraction_kwargs=extraction_kwargs,
|
||||
)
|
||||
self.parser = PyPDFParser(password=password, extract_images=extract_images)
|
||||
|
||||
def lazy_load(
|
||||
self,
|
||||
@@ -739,104 +730,6 @@ class AmazonTextractPDFLoader(BasePDFLoader):
|
||||
raise ValueError(f"unsupported mime type: {blob.mimetype}") # type: ignore[attr-defined]
|
||||
|
||||
|
||||
class DedocPDFLoader(DedocBaseLoader):
|
||||
"""
|
||||
DedocPDFLoader document loader integration to load PDF files using `dedoc`.
|
||||
The file loader can automatically detect the correctness of a textual layer in the
|
||||
PDF document.
|
||||
Note that `__init__` method supports parameters that differ from ones of
|
||||
DedocBaseLoader.
|
||||
|
||||
Setup:
|
||||
Install ``dedoc`` package.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
pip install -U dedoc
|
||||
|
||||
Instantiate:
|
||||
.. code-block:: python
|
||||
|
||||
from langchain_community.document_loaders import DedocPDFLoader
|
||||
|
||||
loader = DedocPDFLoader(
|
||||
file_path="example.pdf",
|
||||
# split=...,
|
||||
# with_tables=...,
|
||||
# pdf_with_text_layer=...,
|
||||
# pages=...,
|
||||
# ...
|
||||
)
|
||||
|
||||
Load:
|
||||
.. code-block:: python
|
||||
|
||||
docs = loader.load()
|
||||
print(docs[0].page_content[:100])
|
||||
print(docs[0].metadata)
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
Some text
|
||||
{
|
||||
'file_name': 'example.pdf',
|
||||
'file_type': 'application/pdf',
|
||||
# ...
|
||||
}
|
||||
|
||||
Lazy load:
|
||||
.. code-block:: python
|
||||
|
||||
docs = []
|
||||
docs_lazy = loader.lazy_load()
|
||||
|
||||
for doc in docs_lazy:
|
||||
docs.append(doc)
|
||||
print(docs[0].page_content[:100])
|
||||
print(docs[0].metadata)
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
Some text
|
||||
{
|
||||
'file_name': 'example.pdf',
|
||||
'file_type': 'application/pdf',
|
||||
# ...
|
||||
}
|
||||
|
||||
Parameters used for document parsing via `dedoc`
|
||||
(https://dedoc.readthedocs.io/en/latest/parameters/pdf_handling.html):
|
||||
|
||||
with_attachments: enable attached files extraction
|
||||
recursion_deep_attachments: recursion level for attached files extraction,
|
||||
works only when with_attachments==True
|
||||
pdf_with_text_layer: type of handler for parsing, available options
|
||||
["true", "false", "tabby", "auto", "auto_tabby" (default)]
|
||||
language: language of the document for PDF without a textual layer,
|
||||
available options ["eng", "rus", "rus+eng" (default)], the list of
|
||||
languages can be extended, please see
|
||||
https://dedoc.readthedocs.io/en/latest/tutorials/add_new_language.html
|
||||
pages: page slice to define the reading range for parsing
|
||||
is_one_column_document: detect number of columns for PDF without a textual
|
||||
layer, available options ["true", "false", "auto" (default)]
|
||||
document_orientation: fix document orientation (90, 180, 270 degrees) for PDF
|
||||
without a textual layer, available options ["auto" (default), "no_change"]
|
||||
need_header_footer_analysis: remove headers and footers from the output result
|
||||
need_binarization: clean pages background (binarize) for PDF without a textual
|
||||
layer
|
||||
need_pdf_table_analysis: parse tables for PDF without a textual layer
|
||||
"""
|
||||
|
||||
def _make_config(self) -> dict:
|
||||
from dedoc.utils.langchain import make_manager_pdf_config
|
||||
|
||||
return make_manager_pdf_config(
|
||||
file_path=self.file_path,
|
||||
parsing_params=self.parsing_parameters,
|
||||
split=self.split,
|
||||
)
|
||||
|
||||
|
||||
class DocumentIntelligenceLoader(BasePDFLoader):
|
||||
"""Load a PDF with Azure Document Intelligence"""
|
||||
|
||||
|
||||
@@ -5,7 +5,7 @@ import logging
|
||||
import os
|
||||
import uuid
|
||||
from http import HTTPStatus
|
||||
from typing import Any, Dict, Iterator, List, Optional
|
||||
from typing import Any, Dict, Iterator, List, Optional, Union
|
||||
|
||||
import requests # type: ignore
|
||||
from langchain_core.documents import Document
|
||||
@@ -61,7 +61,7 @@ class PebbloSafeLoader(BaseLoader):
|
||||
self.source_path = get_loader_full_path(self.loader)
|
||||
self.source_owner = PebbloSafeLoader.get_file_owner_from_path(self.source_path)
|
||||
self.docs: List[Document] = []
|
||||
self.docs_with_id: List[IndexedDocument] = []
|
||||
self.docs_with_id: Union[List[IndexedDocument], List[Document], List] = []
|
||||
loader_name = str(type(self.loader)).split(".")[-1].split("'")[0]
|
||||
self.source_type = get_loader_type(loader_name)
|
||||
self.source_path_size = self.get_source_size(self.source_path)
|
||||
@@ -89,13 +89,17 @@ class PebbloSafeLoader(BaseLoader):
|
||||
list: Documents fetched from load method of the wrapped `loader`.
|
||||
"""
|
||||
self.docs = self.loader.load()
|
||||
# Add pebblo-specific metadata to docs
|
||||
self._add_pebblo_specific_metadata()
|
||||
if not self.load_semantic:
|
||||
self._classify_doc(self.docs, loading_end=True)
|
||||
return self.docs
|
||||
self.docs_with_id = self._index_docs()
|
||||
classified_docs = self._classify_doc(loading_end=True)
|
||||
self._add_pebblo_specific_metadata(classified_docs)
|
||||
if self.load_semantic:
|
||||
self.docs = self._add_semantic_to_docs(classified_docs)
|
||||
else:
|
||||
self.docs = self._unindex_docs() # type: ignore
|
||||
classified_docs = self._classify_doc(self.docs_with_id, loading_end=True)
|
||||
self.docs_with_id = self._add_semantic_to_docs(
|
||||
self.docs_with_id, classified_docs
|
||||
)
|
||||
self.docs = self._unindex_docs(self.docs_with_id) # type: ignore
|
||||
return self.docs
|
||||
|
||||
def lazy_load(self) -> Iterator[Document]:
|
||||
@@ -121,14 +125,19 @@ class PebbloSafeLoader(BaseLoader):
|
||||
self.docs = []
|
||||
break
|
||||
self.docs = list((doc,))
|
||||
self.docs_with_id = self._index_docs()
|
||||
classified_doc = self._classify_doc()
|
||||
self._add_pebblo_specific_metadata(classified_doc)
|
||||
if self.load_semantic:
|
||||
self.docs = self._add_semantic_to_docs(classified_doc)
|
||||
# Add pebblo-specific metadata to docs
|
||||
self._add_pebblo_specific_metadata()
|
||||
if not self.load_semantic:
|
||||
self._classify_doc(self.docs, loading_end=True)
|
||||
yield self.docs[0]
|
||||
else:
|
||||
self.docs = self._unindex_docs()
|
||||
yield self.docs[0]
|
||||
self.docs_with_id = self._index_docs()
|
||||
classified_doc = self._classify_doc(self.docs)
|
||||
self.docs_with_id = self._add_semantic_to_docs(
|
||||
self.docs_with_id, classified_doc
|
||||
)
|
||||
self.docs = self._unindex_docs(self.docs_with_id) # type: ignore
|
||||
yield self.docs[0]
|
||||
|
||||
@classmethod
|
||||
def set_discover_sent(cls) -> None:
|
||||
@@ -138,12 +147,13 @@ class PebbloSafeLoader(BaseLoader):
|
||||
def set_loader_sent(cls) -> None:
|
||||
cls._loader_sent = True
|
||||
|
||||
def _classify_doc(self, loading_end: bool = False) -> dict:
|
||||
def _classify_doc(self, loaded_docs: list, loading_end: bool = False) -> list:
|
||||
"""Send documents fetched from loader to pebblo-server. Then send
|
||||
classified documents to Daxa cloud (if api_key is present). Internal method.
|
||||
|
||||
Args:
|
||||
|
||||
loaded_docs (list): List of documents fetched from loader's load operation.
|
||||
loading_end (bool, optional): Flag indicating the halt of data
|
||||
loading by loader. Defaults to False.
|
||||
"""
|
||||
@@ -153,8 +163,9 @@ class PebbloSafeLoader(BaseLoader):
|
||||
}
|
||||
if loading_end is True:
|
||||
PebbloSafeLoader.set_loader_sent()
|
||||
doc_content = [doc.dict() for doc in self.docs_with_id]
|
||||
doc_content = [doc.dict() for doc in loaded_docs]
|
||||
docs = []
|
||||
classified_docs = []
|
||||
for doc in doc_content:
|
||||
doc_metadata = doc.get("metadata", {})
|
||||
doc_authorized_identities = doc_metadata.get("authorized_identities", [])
|
||||
@@ -172,12 +183,12 @@ class PebbloSafeLoader(BaseLoader):
|
||||
page_content = str(doc.get("page_content"))
|
||||
page_content_size = self.calculate_content_size(page_content)
|
||||
self.source_aggregate_size += page_content_size
|
||||
doc_id = doc.get("pb_id", None) or 0
|
||||
doc_id = doc.get("id", None) or 0
|
||||
docs.append(
|
||||
{
|
||||
"doc": page_content,
|
||||
"source_path": doc_source_path,
|
||||
"pb_id": doc_id,
|
||||
"id": doc_id,
|
||||
"last_modified": doc.get("metadata", {}).get("last_modified"),
|
||||
"file_owner": doc_source_owner,
|
||||
**(
|
||||
@@ -210,7 +221,6 @@ class PebbloSafeLoader(BaseLoader):
|
||||
self.source_aggregate_size
|
||||
)
|
||||
payload = Doc(**payload).dict(exclude_unset=True)
|
||||
classified_docs = {}
|
||||
# Raw payload to be sent to classifier
|
||||
if self.classifier_location == "local":
|
||||
load_doc_url = f"{self.classifier_url}{LOADER_DOC_URL}"
|
||||
@@ -218,10 +228,7 @@ class PebbloSafeLoader(BaseLoader):
|
||||
pebblo_resp = requests.post(
|
||||
load_doc_url, headers=headers, json=payload, timeout=300
|
||||
)
|
||||
|
||||
# Updating the structure of pebblo response docs for efficient searching
|
||||
for classified_doc in json.loads(pebblo_resp.text).get("docs", []):
|
||||
classified_docs.update({classified_doc["pb_id"]: classified_doc})
|
||||
classified_docs = json.loads(pebblo_resp.text).get("docs", None)
|
||||
if pebblo_resp.status_code not in [
|
||||
HTTPStatus.OK,
|
||||
HTTPStatus.BAD_GATEWAY,
|
||||
@@ -250,21 +257,7 @@ class PebbloSafeLoader(BaseLoader):
|
||||
|
||||
if self.api_key:
|
||||
if self.classifier_location == "local":
|
||||
docs = payload["docs"]
|
||||
for doc_data in docs:
|
||||
classified_data = classified_docs.get(doc_data["pb_id"], {})
|
||||
doc_data.update(
|
||||
{
|
||||
"pb_checksum": classified_data.get("pb_checksum", None),
|
||||
"loader_source_path": classified_data.get(
|
||||
"loader_source_path", None
|
||||
),
|
||||
"entities": classified_data.get("entities", {}),
|
||||
"topics": classified_data.get("topics", {}),
|
||||
}
|
||||
)
|
||||
doc_data.pop("doc")
|
||||
|
||||
payload["docs"] = classified_docs
|
||||
headers.update({"x-api-key": self.api_key})
|
||||
pebblo_cloud_url = f"{PEBBLO_CLOUD_URL}{LOADER_DOC_URL}"
|
||||
try:
|
||||
@@ -460,29 +453,33 @@ class PebbloSafeLoader(BaseLoader):
|
||||
List[IndexedDocument]: A list of IndexedDocument objects with unique IDs.
|
||||
"""
|
||||
docs_with_id = [
|
||||
IndexedDocument(pb_id=str(i), **doc.dict())
|
||||
IndexedDocument(id=hex(i)[2:], **doc.dict())
|
||||
for i, doc in enumerate(self.docs)
|
||||
]
|
||||
return docs_with_id
|
||||
|
||||
def _add_semantic_to_docs(self, classified_docs: Dict) -> List[Document]:
|
||||
def _add_semantic_to_docs(
|
||||
self, docs_with_id: List[IndexedDocument], classified_docs: List[dict]
|
||||
) -> List[Document]:
|
||||
"""
|
||||
Adds semantic metadata to the given list of documents.
|
||||
|
||||
Args:
|
||||
classified_docs (Dict): A dictionary of dictionaries containing the
|
||||
classified documents with pb_id as key.
|
||||
docs_with_id (List[IndexedDocument]): A list of IndexedDocument objects
|
||||
containing the documents with their IDs.
|
||||
classified_docs (List[dict]): A list of dictionaries containing the
|
||||
classified documents.
|
||||
|
||||
Returns:
|
||||
List[Document]: A list of Document objects with added semantic metadata.
|
||||
"""
|
||||
indexed_docs = {
|
||||
doc.pb_id: Document(page_content=doc.page_content, metadata=doc.metadata)
|
||||
for doc in self.docs_with_id
|
||||
doc.id: Document(page_content=doc.page_content, metadata=doc.metadata)
|
||||
for doc in docs_with_id
|
||||
}
|
||||
|
||||
for classified_doc in classified_docs.values():
|
||||
doc_id = classified_doc.get("pb_id")
|
||||
for classified_doc in classified_docs:
|
||||
doc_id = classified_doc.get("id")
|
||||
if doc_id in indexed_docs:
|
||||
self._add_semantic_to_doc(indexed_docs[doc_id], classified_doc)
|
||||
|
||||
@@ -490,16 +487,19 @@ class PebbloSafeLoader(BaseLoader):
|
||||
|
||||
return semantic_metadata_docs
|
||||
|
||||
def _unindex_docs(self) -> List[Document]:
|
||||
def _unindex_docs(self, docs_with_id: List[IndexedDocument]) -> List[Document]:
|
||||
"""
|
||||
Converts a list of IndexedDocument objects to a list of Document objects.
|
||||
|
||||
Args:
|
||||
docs_with_id (List[IndexedDocument]): A list of IndexedDocument objects.
|
||||
|
||||
Returns:
|
||||
List[Document]: A list of Document objects.
|
||||
"""
|
||||
docs = [
|
||||
Document(page_content=doc.page_content, metadata=doc.metadata)
|
||||
for i, doc in enumerate(self.docs_with_id)
|
||||
for i, doc in enumerate(docs_with_id)
|
||||
]
|
||||
return docs
|
||||
|
||||
@@ -522,16 +522,12 @@ class PebbloSafeLoader(BaseLoader):
|
||||
)
|
||||
return doc
|
||||
|
||||
def _add_pebblo_specific_metadata(self, classified_docs: dict) -> None:
|
||||
def _add_pebblo_specific_metadata(self) -> None:
|
||||
"""Add Pebblo specific metadata to documents."""
|
||||
for doc in self.docs_with_id:
|
||||
for doc in self.docs:
|
||||
doc_metadata = doc.metadata
|
||||
doc_metadata["full_path"] = get_full_path(
|
||||
doc_metadata.get(
|
||||
"full_path", doc_metadata.get("source", self.source_path)
|
||||
)
|
||||
)
|
||||
doc_metadata["pb_id"] = doc.pb_id
|
||||
doc_metadata["pb_checksum"] = classified_docs.get(doc.pb_id, {}).get(
|
||||
"pb_checksum", None
|
||||
)
Some files were not shown because too many files have changed in this diff.