Mirror of https://github.com/hwchase17/langchain.git (synced 2026-02-10 11:10:23 +00:00)

Compare commits: 147 commits (langchain= ... langchain-)
Commits (SHA1):

8f3c052db1, 29a3b3a711, 20fe4deea0, 3a55f4bfe9, fea9ff3831, b55f6105c6, 4585eaef1b, f337f3ed36, 22175738ac, 12c3454fd9,
e271965d1e, b9bea36dd4, da06d4d7af, 5f73c836a6, 597be7d501, 379803751e, ad18afc3ec, 464a525a5a, 0f45ac4088, ac41c97d21,
aaf788b7cb, 47ae06698f, 03881c6743, 2d6b0bf3e3, ee3955c68c, 325068bb53, bff6ca78a2, 6878bc39b5, 55e66aa40c, 9b7db08184,
8691a5a37f, 4919d5d6df, 918e1c8a93, 58def6e34d, e787532479, e80b0932ee, 9e06991aae, a14e02ab33, 378db2e1a5, a197a8e184,
0bb54ab9f0, f47b4edcc2, 837a3d400b, 20b72a044c, 70c71efcab, a5a3d28776, 2a70a07aad, 5ac936a284, 3c4652c906, 2c6b9e8771,
1639ccfd15, ab036c1a4c, 3dce2e1d35, c48e99e7f2, 8a140ee77c, df357f82ca, 236e957abb, 199e64d372, 1f01c0fd98, 884f76e05a,
a45337ea07, 1318d534af, 10e3982b59, 721f709dec, 02f0a29293, dcba7df2fe, 0f7569ddbc, 5ade0187d0, 0f6737cbfe, 7ab82eb8cc,
37b89fb7fc, 40c02cedaf, cecd875cdc, 0c6a3fdd6b, d98b830e4b, 6b08a33fa4, 947628311b, c1d1fc13c2, 74e3d796f1, 7b28359719,
5e48f35fba, 838464de25, f4ee3c8a22, 50cb0a03bc, 842065a9cc, 27ad6a4bb3, dda9438e87, 604dfe2d99, f101c759ed, 372c27f2e5,
6a45bf9554, f5856680fe, 07715f815b, 020cc1cf3e, 9aae8ef416, 06f47678ae, 9c3da11910, 5affbada61, f9d64d22e5, 3691701d58,
ef049769f0, cd19ba9a07, 83f3d95ffa, b5acb91080, f99369a54c, 242b085be7, c3308f31bc, c50dd79512, aade9bfde5, 0ee6ed76ca,
62b6965d2a, ef22ebe431, f62b323108, b2bc15e640, 61ea7bf60b, 4c651ba13a, 334fc1ed1c, ba74341eee, 3adf710f1d, 07c5c60f63,
aade1550c6, 63c60a31f0, 242de9aa5e, 916b813107, 1c65529fd7, 6182a402f1, 0dec72cab0, 570566b858, f9baaae3ec, 4da1df568a,
96ccba9c27, a4c101ae97, c5a07e2dd8, 80f3d48195, 7d83189b19, eb26b5535a, 96bac8e20d, 034a8c7c1b, a402de3dae, a47f69a120,
cc2cbfabfc, 9e4a0e76f6, 81639243e2, 61976a4147, b5360e2e5f, 4cf67084d3, bcb5f354ad
19  .github/scripts/get_min_versions.py (vendored)
@@ -1,6 +1,11 @@
 import sys

-import tomllib
+if sys.version_info >= (3, 11):
+    import tomllib
+else:
+    # for python 3.10 and below, which doesnt have stdlib tomllib
+    import tomli as tomllib

 from packaging.version import parse as parse_version
 import re

@@ -12,6 +17,8 @@ MIN_VERSION_LIBS = [
     "SQLAlchemy",
 ]

+SKIP_IF_PULL_REQUEST = ["langchain-core"]
+

 def get_min_version(version: str) -> str:
     # base regex for x.x.x with cases for rc/post/etc
@@ -38,7 +45,7 @@ def get_min_version(version: str) -> str:
         raise ValueError(f"Unrecognized version format: {version}")


-def get_min_version_from_toml(toml_path: str):
+def get_min_version_from_toml(toml_path: str, versions_for: str):
     # Parse the TOML file
     with open(toml_path, "rb") as file:
         toml_data = tomllib.load(file)
@@ -51,6 +58,10 @@ def get_min_version_from_toml(toml_path: str):

     # Iterate over the libs in MIN_VERSION_LIBS
     for lib in MIN_VERSION_LIBS:
+        if versions_for == "pull_request" and lib in SKIP_IF_PULL_REQUEST:
+            # some libs only get checked on release because of simultaneous
+            # changes
+            continue
         # Check if the lib is present in the dependencies
         if lib in dependencies:
             # Get the version string
@@ -71,8 +82,10 @@ def get_min_version_from_toml(toml_path: str):
 if __name__ == "__main__":
     # Get the TOML file path from the command line argument
     toml_file = sys.argv[1]
+    versions_for = sys.argv[2]
+    assert versions_for in ["release", "pull_request"]

     # Call the function to get the minimum versions
-    min_versions = get_min_version_from_toml(toml_file)
+    min_versions = get_min_version_from_toml(toml_file, versions_for)

     print(" ".join([f"{lib}=={version}" for lib, version in min_versions.items()]))
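In CI the script is now run with two positional arguments, as the workflow diffs below show. As a rough sketch of the new call signature in isolation, assuming the file were importable as a module (the import path is an assumption for illustration only):

```python
# Hypothetical import: assumes .github/scripts is on sys.path; in CI the file is
# executed as "python get_min_versions.py pyproject.toml <release|pull_request>".
from get_min_versions import get_min_version_from_toml

# "pull_request" skips libs listed in SKIP_IF_PULL_REQUEST (e.g. langchain-core);
# "release" checks every lib in MIN_VERSION_LIBS.
min_versions = get_min_version_from_toml("pyproject.toml", versions_for="pull_request")

# Same shape as the script's own output: space-separated pip specifiers.
print(" ".join(f"{lib}=={version}" for lib, version in min_versions.items()))
```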
4  .github/workflows/_release.yml (vendored)
@@ -189,7 +189,7 @@ jobs:
             --extra-index-url https://test.pypi.org/simple/ \
             "$PKG_NAME==$VERSION" || \
           ( \
-            sleep 5 && \
+            sleep 15 && \
             poetry run pip install \
               --extra-index-url https://test.pypi.org/simple/ \
               "$PKG_NAME==$VERSION" \
@@ -231,7 +231,7 @@ jobs:
         id: min-version
         run: |
           poetry run pip install packaging
-          min_versions="$(poetry run python $GITHUB_WORKSPACE/.github/scripts/get_min_versions.py pyproject.toml)"
+          min_versions="$(poetry run python $GITHUB_WORKSPACE/.github/scripts/get_min_versions.py pyproject.toml release)"
           echo "min-versions=$min_versions" >> "$GITHUB_OUTPUT"
           echo "min-versions=$min_versions"
19  .github/workflows/_test.yml (vendored)
@@ -65,3 +65,22 @@ jobs:
           # grep will exit non-zero if the target message isn't found,
           # and `set -e` above will cause the step to fail.
           echo "$STATUS" | grep 'nothing to commit, working tree clean'
+
+      - name: Get minimum versions
+        working-directory: ${{ inputs.working-directory }}
+        id: min-version
+        run: |
+          poetry run pip install packaging tomli
+          min_versions="$(poetry run python $GITHUB_WORKSPACE/.github/scripts/get_min_versions.py pyproject.toml pull_request)"
+          echo "min-versions=$min_versions" >> "$GITHUB_OUTPUT"
+          echo "min-versions=$min_versions"
+
+      # Temporarily disabled until we can get the minimum versions working
+      # - name: Run unit tests with minimum dependency versions
+      #   if: ${{ steps.min-version.outputs.min-versions != '' }}
+      #   env:
+      #     MIN_VERSIONS: ${{ steps.min-version.outputs.min-versions }}
+      #   run: |
+      #     poetry run pip install --force-reinstall $MIN_VERSIONS --editable .
+      #     make tests
+      #   working-directory: ${{ inputs.working-directory }}
@@ -64,7 +64,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "! pip install -U langchain openai chromadb langchain-experimental # (newest versions required for multi-modal)"
+    "! pip install -U langchain openai langchain-chroma langchain-experimental # (newest versions required for multi-modal)"
    ]
   },
   {
@@ -355,7 +355,7 @@
     "\n",
     "from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
     "from langchain.storage import InMemoryStore\n",
-    "from langchain_community.vectorstores import Chroma\n",
+    "from langchain_chroma import Chroma\n",
     "from langchain_core.documents import Document\n",
     "from langchain_openai import OpenAIEmbeddings\n",
     "\n",
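The two cells above are representative of the rest of this comparison: `chromadb` is dropped from the install lines and `Chroma` is imported from the new `langchain-chroma` partner package instead of `langchain_community`. A minimal sketch of the updated usage, with an illustrative text and query that are not taken from the notebooks:

```python
from langchain_chroma import Chroma
from langchain_openai import OpenAIEmbeddings

# Build a small in-memory collection; the sample text and query are made up.
vectorstore = Chroma.from_texts(
    ["LangChain ships Chroma support in the langchain-chroma package."],
    embedding=OpenAIEmbeddings(),
)
docs = vectorstore.similarity_search("Which package provides Chroma support?", k=1)
print(docs[0].page_content)
```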
@@ -37,7 +37,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -U --quiet langchain langchain_community openai chromadb langchain-experimental\n",
|
||||
"%pip install -U --quiet langchain langchain-chroma langchain-community openai langchain-experimental\n",
|
||||
"%pip install --quiet \"unstructured[all-docs]\" pypdf pillow pydantic lxml pillow matplotlib chromadb tiktoken"
|
||||
]
|
||||
},
|
||||
@@ -344,8 +344,8 @@
|
||||
"\n",
|
||||
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
|
||||
"from langchain.storage import InMemoryStore\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.embeddings import VertexAIEmbeddings\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"\n",
|
||||
"\n",
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pip install -U langchain umap-learn scikit-learn langchain_community tiktoken langchain-openai langchainhub chromadb langchain-anthropic"
|
||||
"pip install -U langchain umap-learn scikit-learn langchain_community tiktoken langchain-openai langchainhub langchain-chroma langchain-anthropic"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -645,7 +645,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"\n",
|
||||
"# Initialize all_texts with leaf_texts\n",
|
||||
"all_texts = leaf_texts.copy()\n",
|
||||
|
||||
@@ -58,4 +58,5 @@ Notebook | Description
|
||||
[two_player_dnd.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/two_player_dnd.ipynb) | Simulate a two-player dungeons & dragons game, where a dialogue simulator class is used to coordinate the dialogue between the protagonist and the dungeon master.
|
||||
[wikibase_agent.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/wikibase_agent.ipynb) | Create a simple wikibase agent that utilizes sparql generation, with testing done on http://wikidata.org.
|
||||
[oracleai_demo.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/oracleai_demo.ipynb) | This guide outlines how to utilize Oracle AI Vector Search alongside Langchain for an end-to-end RAG pipeline, providing step-by-step examples. The process includes loading documents from various sources using OracleDocLoader, summarizing them either within or outside the database with OracleSummary, and generating embeddings similarly through OracleEmbeddings. It also covers chunking documents according to specific requirements using Advanced Oracle Capabilities from OracleTextSplitter, and finally, storing and indexing these documents in a Vector Store for querying with OracleVS.
|
||||
[rag-locally-on-intel-cpu.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/rag-locally-on-intel-cpu.ipynb) | Perform Retrieval-Augmented-Generation (RAG) on locally downloaded open-source models using langchain and open source tools and execute it on Intel Xeon CPU. We showed an example of how to apply RAG on Llama 2 model and enable it to answer the queries related to Intel Q1 2024 earnings release.
|
||||
[rag-locally-on-intel-cpu.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/rag-locally-on-intel-cpu.ipynb) | Perform Retrieval-Augmented-Generation (RAG) on locally downloaded open-source models using langchain and open source tools and execute it on Intel Xeon CPU. We showed an example of how to apply RAG on Llama 2 model and enable it to answer the queries related to Intel Q1 2024 earnings release.
|
||||
[visual_RAG_vdms.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/visual_RAG_vdms.ipynb) | Performs Visual Retrieval-Augmented-Generation (RAG) using videos and scene descriptions generated by open source models.
|
||||
@@ -39,7 +39,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install langchain unstructured[all-docs] pydantic lxml langchainhub"
|
||||
"! pip install langchain langchain-chroma unstructured[all-docs] pydantic lxml langchainhub"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -320,7 +320,7 @@
|
||||
"\n",
|
||||
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
|
||||
"from langchain.storage import InMemoryStore\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
|
||||
@@ -59,7 +59,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install langchain unstructured[all-docs] pydantic lxml"
|
||||
"! pip install langchain langchain-chroma unstructured[all-docs] pydantic lxml"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -375,7 +375,7 @@
|
||||
"\n",
|
||||
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
|
||||
"from langchain.storage import InMemoryStore\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
|
||||
@@ -59,7 +59,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install langchain unstructured[all-docs] pydantic lxml"
|
||||
"! pip install langchain langchain-chroma unstructured[all-docs] pydantic lxml"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -378,8 +378,8 @@
|
||||
"\n",
|
||||
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
|
||||
"from langchain.storage import InMemoryStore\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.embeddings import GPT4AllEmbeddings\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"\n",
|
||||
"# The vectorstore to use to index the child chunks\n",
|
||||
|
||||
@@ -19,7 +19,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install -U langchain openai chromadb langchain-experimental # (newest versions required for multi-modal)"
|
||||
"! pip install -U langchain openai langchain_chroma langchain-experimental # (newest versions required for multi-modal)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -132,7 +132,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"baseline = Chroma.from_texts(\n",
|
||||
|
||||
@@ -28,7 +28,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import RetrievalQA\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_openai import OpenAI, OpenAIEmbeddings\n",
|
||||
"from langchain_text_splitters import CharacterTextSplitter\n",
|
||||
"\n",
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install -qU langchain-airbyte"
|
||||
"%pip install -qU langchain-airbyte langchain_chroma"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -123,7 +123,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import tiktoken\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"enc = tiktoken.get_encoding(\"cl100k_base\")\n",
|
||||
|
||||
@@ -39,7 +39,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install langchain docugami==0.0.8 dgml-utils==0.3.0 pydantic langchainhub chromadb hnswlib --upgrade --quiet"
|
||||
"! pip install langchain docugami==0.0.8 dgml-utils==0.3.0 pydantic langchainhub langchain-chroma hnswlib --upgrade --quiet"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -547,7 +547,7 @@
|
||||
"\n",
|
||||
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
|
||||
"from langchain.storage import InMemoryStore\n",
|
||||
"from langchain_community.vectorstores.chroma import Chroma\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
|
||||
@@ -84,7 +84,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install --quiet pypdf chromadb tiktoken openai \n",
|
||||
"%pip install --quiet pypdf langchain-chroma tiktoken openai \n",
|
||||
"%pip uninstall -y langchain-fireworks\n",
|
||||
"%pip install --editable /mnt/disks/data/langchain/libs/partners/fireworks"
|
||||
]
|
||||
@@ -138,7 +138,7 @@
|
||||
"all_splits = text_splitter.split_documents(data)\n",
|
||||
"\n",
|
||||
"# Add to vectorDB\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_fireworks.embeddings import FireworksEmbeddings\n",
|
||||
"\n",
|
||||
"vectorstore = Chroma.from_documents(\n",
|
||||
|
||||
@@ -170,7 +170,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_text_splitters import CharacterTextSplitter\n",
|
||||
"\n",
|
||||
"with open(\"../../state_of_the_union.txt\") as f:\n",
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install langchain_community tiktoken langchain-openai langchainhub chromadb langchain langgraph"
|
||||
"! pip install langchain-chroma langchain_community tiktoken langchain-openai langchainhub langchain langgraph"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -30,8 +30,8 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.document_loaders import WebBaseLoader\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"urls = [\n",
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install langchain_community tiktoken langchain-openai langchainhub chromadb langchain langgraph tavily-python"
|
||||
"! pip install langchain-chroma langchain_community tiktoken langchain-openai langchainhub langchain langgraph tavily-python"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -77,8 +77,8 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.document_loaders import WebBaseLoader\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"urls = [\n",
|
||||
@@ -180,8 +180,8 @@
|
||||
"from langchain.output_parsers.openai_tools import PydanticToolsParser\n",
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"from langchain.schema import Document\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.tools.tavily_search import TavilySearchResults\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_core.messages import BaseMessage, FunctionMessage\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install langchain_community tiktoken langchain-openai langchainhub chromadb langchain langgraph"
|
||||
"! pip install langchain-chroma langchain_community tiktoken langchain-openai langchainhub langchain langgraph"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -86,8 +86,8 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.document_loaders import WebBaseLoader\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"urls = [\n",
|
||||
@@ -188,7 +188,7 @@
|
||||
"from langchain.output_parsers import PydanticOutputParser\n",
|
||||
"from langchain.output_parsers.openai_tools import PydanticToolsParser\n",
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_core.messages import BaseMessage, FunctionMessage\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
|
||||
|
||||
@@ -58,7 +58,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install -U langchain openai chromadb langchain-experimental # (newest versions required for multi-modal)"
|
||||
"! pip install -U langchain openai langchain-chroma langchain-experimental # (newest versions required for multi-modal)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -187,7 +187,7 @@
|
||||
"\n",
|
||||
"import chromadb\n",
|
||||
"import numpy as np\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_experimental.open_clip import OpenCLIPEmbeddings\n",
|
||||
"from PIL import Image as _PILImage\n",
|
||||
"\n",
|
||||
|
||||
@@ -58,7 +58,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install -U langchain-nomic langchain_community tiktoken langchain-openai chromadb langchain"
|
||||
"! pip install -U langchain-nomic langchain-chroma langchain-community tiktoken langchain-openai langchain"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -167,7 +167,7 @@
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n",
|
||||
"from langchain_nomic import NomicEmbeddings\n",
|
||||
|
||||
@@ -56,7 +56,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install -U langchain-nomic langchain_community tiktoken langchain-openai chromadb langchain # (newest versions required for multi-modal)"
|
||||
"! pip install -U langchain-nomic langchain-chroma langchain-community tiktoken langchain-openai langchain # (newest versions required for multi-modal)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -194,7 +194,7 @@
|
||||
"\n",
|
||||
"import chromadb\n",
|
||||
"import numpy as np\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_nomic import NomicEmbeddings\n",
|
||||
"from PIL import Image as _PILImage\n",
|
||||
"\n",
|
||||
|
||||
@@ -20,8 +20,8 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import RetrievalQA\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.document_loaders import TextLoader\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"from langchain_text_splitters import CharacterTextSplitter"
|
||||
]
|
||||
|
||||
@@ -80,7 +80,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.schema import Document\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"embeddings = OpenAIEmbeddings()"
|
||||
|
||||
@@ -36,10 +36,10 @@
|
||||
"from bs4 import BeautifulSoup as Soup\n",
|
||||
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
|
||||
"from langchain.storage import InMemoryByteStore, LocalFileStore\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.document_loaders.recursive_url_loader import (\n",
|
||||
" RecursiveUrlLoader,\n",
|
||||
")\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"\n",
|
||||
"# For our example, we'll load docs from the web\n",
|
||||
"from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
|
||||
@@ -370,13 +370,14 @@
|
||||
],
|
||||
"source": [
|
||||
"import torch\n",
|
||||
"from langchain.llms.huggingface_pipeline import HuggingFacePipeline\n",
|
||||
"from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline\n",
|
||||
"from langchain_huggingface.llms import HuggingFacePipeline\n",
|
||||
"from optimum.intel.ipex import IPEXModelForCausalLM\n",
|
||||
"from transformers import AutoTokenizer, pipeline\n",
|
||||
"\n",
|
||||
"model_id = \"Intel/neural-chat-7b-v3-3\"\n",
|
||||
"tokenizer = AutoTokenizer.from_pretrained(model_id)\n",
|
||||
"model = AutoModelForCausalLM.from_pretrained(\n",
|
||||
" model_id, device_map=\"auto\", torch_dtype=torch.bfloat16\n",
|
||||
"model = IPEXModelForCausalLM.from_pretrained(\n",
|
||||
" model_id, torch_dtype=torch.bfloat16, export=True\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"pipe = pipeline(\"text-generation\", model=model, tokenizer=tokenizer, max_new_tokens=100)\n",
|
||||
@@ -581,7 +582,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.18"
|
||||
"version": "3.10.14"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -740,7 +740,7 @@ Even this relatively large model will most likely fail to generate more complica
|
||||
|
||||
|
||||
```bash
|
||||
poetry run pip install pyyaml chromadb
|
||||
poetry run pip install pyyaml langchain_chroma
|
||||
import yaml
|
||||
```
|
||||
|
||||
@@ -994,7 +994,7 @@ from langchain.prompts import FewShotPromptTemplate, PromptTemplate
|
||||
from langchain.chains.sql_database.prompt import _sqlite_prompt, PROMPT_SUFFIX
|
||||
from langchain_huggingface import HuggingFaceEmbeddings
|
||||
from langchain.prompts.example_selector.semantic_similarity import SemanticSimilarityExampleSelector
|
||||
from langchain_community.vectorstores import Chroma
|
||||
from langchain_chroma import Chroma
|
||||
|
||||
example_prompt = PromptTemplate(
|
||||
input_variables=["table_info", "input", "sql_cmd", "sql_result", "answer"],
|
||||
|
||||
@@ -22,7 +22,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install --quiet pypdf chromadb tiktoken openai langchain-together"
|
||||
"! pip install --quiet pypdf tiktoken openai langchain-chroma langchain-together"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -45,8 +45,8 @@
|
||||
"all_splits = text_splitter.split_documents(data)\n",
|
||||
"\n",
|
||||
"# Add to vectorDB\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.embeddings import OpenAIEmbeddings\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"\n",
|
||||
"\"\"\"\n",
|
||||
"from langchain_together.embeddings import TogetherEmbeddings\n",
|
||||
|
||||
677  cookbook/visual_RAG_vdms.ipynb (new file). File diff suppressed because one or more lines are too long.
@@ -38,6 +38,8 @@ generate-files:
	$(PYTHON) scripts/model_feat_table.py $(INTERMEDIATE_DIR)
	$(PYTHON) scripts/tool_feat_table.py $(INTERMEDIATE_DIR)
	$(PYTHON) scripts/document_loader_feat_table.py $(INTERMEDIATE_DIR)
	$(PYTHON) scripts/copy_templates.py $(INTERMEDIATE_DIR)
@@ -236,7 +236,7 @@ This is where information like log-probs and token usage may be stored.
 These represent a decision from an language model to call a tool. They are included as part of an `AIMessage` output.
 They can be accessed from there with the `.tool_calls` property.

-This property returns a list of dictionaries. Each dictionary has the following keys:
+This property returns a list of `ToolCall`s. A `ToolCall` is a dictionary with the following arguments:

 - `name`: The name of the tool that should be called.
 - `args`: The arguments to that tool.
@@ -246,13 +246,18 @@ This property returns a list of dictionaries. Each dictionary has the following

 This represents a system message, which tells the model how to behave. Not every model provider supports this.

-#### FunctionMessage
-
-This represents the result of a function call. In addition to `role` and `content`, this message has a `name` parameter which conveys the name of the function that was called to produce this result.
-
 #### ToolMessage

-This represents the result of a tool call. This is distinct from a FunctionMessage in order to match OpenAI's `function` and `tool` message types. In addition to `role` and `content`, this message has a `tool_call_id` parameter which conveys the id of the call to the tool that was called to produce this result.
+This represents the result of a tool call. In addition to `role` and `content`, this message has:
+
+- a `tool_call_id` field which conveys the id of the call to the tool that was called to produce this result.
+- an `artifact` field which can be used to pass along arbitrary artifacts of the tool execution which are useful to track but which should not be sent to the model.
+
+#### (Legacy) FunctionMessage
+
+This is a legacy message type, corresponding to OpenAI's legacy function-calling API. ToolMessage should be used instead to correspond to the updated tool-calling API.
+
+This represents the result of a function call. In addition to `role` and `content`, this message has a `name` parameter which conveys the name of the function that was called to produce this result.


 ### Prompt templates
@@ -496,35 +501,87 @@ For specifics on how to use retrievers, see the [relevant how-to guides here](/d
 ### Tools
 <span data-heading-keywords="tool,tools"></span>

-Tools are interfaces that an agent, a chain, or a chat model / LLM can use to interact with the world.
+Tools are utilities designed to be called by a model: their inputs are designed to be generated by models, and their outputs are designed to be passed back to models.
+Tools are needed whenever you want a model to control parts of your code or call out to external APIs.

-A tool consists of the following components:
+A tool consists of:

-1. The name of the tool
-2. A description of what the tool does
-3. JSON schema of what the inputs to the tool are
-4. The function to call
-5. Whether the result of a tool should be returned directly to the user (only relevant for agents)
+1. The name of the tool.
+2. A description of what the tool does.
+3. A JSON schema defining the inputs to the tool.
+4. A function (and, optionally, an async variant of the function).

-The name, description and JSON schema are provided as context
-to the LLM, allowing the LLM to determine how to use the tool
-appropriately.
+When a tool is bound to a model, the name, description and JSON schema are provided as context to the model.
+Given a list of tools and a set of instructions, a model can request to call one or more tools with specific inputs.
+Typical usage may look like the following:

-Given a list of available tools and a prompt, an LLM can request
-that one or more tools be invoked with appropriate arguments.
+```python
+tools = [...] # Define a list of tools
+llm_with_tools = llm.bind_tools(tools)
+ai_msg = llm_with_tools.invoke("do xyz...") # AIMessage(tool_calls=[ToolCall(...), ...], ...)
+```

-Generally, when designing tools to be used by a chat model or LLM, it is important to keep in mind the following:
+The `AIMessage` returned from the model MAY have `tool_calls` associated with it.
+Read [this guide](/docs/concepts/#aimessage) for more information on what the response type may look like.

-- Chat models that have been fine-tuned for tool calling will be better at tool calling than non-fine-tuned models.
-- Non fine-tuned models may not be able to use tools at all, especially if the tools are complex or require multiple tool calls.
-- Models will perform better if the tools have well-chosen names, descriptions, and JSON schemas.
-- Simpler tools are generally easier for models to use than more complex tools.
+Once the chosen tools are invoked, the results can be passed back to the model so that it can complete whatever task
+it's performing.
+There are generally two different ways to invoke the tool and pass back the response:

-For specifics on how to use tools, see the [relevant how-to guides here](/docs/how_to/#tools).
+#### Invoke with just the arguments

-To use an existing pre-built tool, see [here](/docs/integrations/tools/) for a list of pre-built tools.
+When you invoke a tool with just the arguments, you will get back the raw tool output (usually a string).
+This generally looks like:

+```python
+# You will want to previously check that the LLM returned tool calls
+tool_call = ai_msg.tool_calls[0] # ToolCall(args={...}, id=..., ...)
+tool_output = tool.invoke(tool_call["args"])
+tool_message = ToolMessage(content=tool_output, tool_call_id=tool_call["id"], name=tool_call["name"])
+```

+Note that the `content` field will generally be passed back to the model.
+If you do not want the raw tool response to be passed to the model, but you still want to keep it around,
+you can transform the tool output but also pass it as an artifact (read more about [`ToolMessage.artifact` here](/docs/concepts/#toolmessage))

+```python
+... # Same code as above
+response_for_llm = transform(response)
+tool_message = ToolMessage(content=response_for_llm, tool_call_id=tool_call["id"], name=tool_call["name"], artifact=tool_output)
+```

+#### Invoke with `ToolCall`

+The other way to invoke a tool is to call it with the full `ToolCall` that was generated by the model.
+When you do this, the tool will return a ToolMessage.
+The benefits of this are that you don't have to write the logic yourself to transform the tool output into a ToolMessage.
+This generally looks like:

+```python
+tool_call = ai_msg.tool_calls[0] # ToolCall(args={...}, id=..., ...)
+tool_message = tool.invoke(tool_call)
+# -> ToolMessage(content="tool result foobar...", tool_call_id=..., name="tool_name")
+```

+If you are invoking the tool this way and want to include an [artifact](/docs/concepts/#toolmessage) for the ToolMessage, you will need to have the tool return two things.
+Read more about [defining tools that return artifacts here](/docs/how_to/tool_artifacts/).

+#### Best practices

+When designing tools to be used by a model, it is important to keep in mind that:

+- Chat models that have explicit [tool-calling APIs](/docs/concepts/#functiontool-calling) will be better at tool calling than non-fine-tuned models.
+- Models will perform better if the tools have well-chosen names, descriptions, and JSON schemas. This another form of prompt engineering.
+- Simple, narrowly scoped tools are easier for models to use than complex tools.

+#### Related

+For specifics on how to use tools, see the [tools how-to guides](/docs/how_to/#tools).

+To use a pre-built tool, see the [tool integration docs](/docs/integrations/tools/).

 ### Toolkits
 <span data-heading-keywords="toolkit,toolkits"></span>

 Toolkits are collections of tools that are designed to be used together for specific tasks. They have convenient loading methods.
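Putting the pieces of the rewritten tools section together, a hedged end-to-end sketch of the flow it describes. The `multiply` tool and the OpenAI model are illustrative choices rather than anything mandated by the text, and it assumes a recent `langchain-core` where invoking a tool with a full `ToolCall` returns a `ToolMessage`:

```python
from langchain_core.messages import HumanMessage
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI


@tool
def multiply(a: int, b: int) -> int:
    """Multiplies a and b."""
    return a * b


llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
llm_with_tools = llm.bind_tools([multiply])

messages = [HumanMessage("What is 3 * 12?")]
ai_msg = llm_with_tools.invoke(messages)  # may contain tool_calls
messages.append(ai_msg)

# Invoke each tool with the full ToolCall so a ToolMessage comes back directly.
for tool_call in ai_msg.tool_calls:
    messages.append(multiply.invoke(tool_call))

# Pass the results back so the model can finish the task.
final = llm_with_tools.invoke(messages)
print(final.content)
```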
@@ -153,7 +153,7 @@
|
||||
"\n",
|
||||
" def parse(self, text: str) -> List[str]:\n",
|
||||
" lines = text.strip().split(\"\\n\")\n",
|
||||
" return lines\n",
|
||||
" return list(filter(None, lines)) # Remove empty lines\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"output_parser = LineListOutputParser()\n",
|
||||
|
||||
@@ -16,7 +16,7 @@
|
||||
"\n",
|
||||
"Tracking token usage to calculate cost is an important part of putting your app in production. This guide goes over how to obtain this information from your LangChain model calls.\n",
|
||||
"\n",
|
||||
"This guide requires `langchain-openai >= 0.1.8`."
|
||||
"This guide requires `langchain-openai >= 0.1.9`."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -153,7 +153,7 @@
|
||||
"\n",
|
||||
"#### OpenAI\n",
|
||||
"\n",
|
||||
"For example, OpenAI will return a message [chunk](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessageChunk.html) at the end of a stream with token usage information. This behavior is supported by `langchain-openai >= 0.1.8` and can be enabled by setting `stream_usage=True`. This attribute can also be set when `ChatOpenAI` is instantiated.\n",
|
||||
"For example, OpenAI will return a message [chunk](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessageChunk.html) at the end of a stream with token usage information. This behavior is supported by `langchain-openai >= 0.1.9` and can be enabled by setting `stream_usage=True`. This attribute can also be set when `ChatOpenAI` is instantiated.\n",
|
||||
"\n",
|
||||
"```{=mdx}\n",
|
||||
":::note\n",
|
||||
|
||||
@@ -220,6 +220,57 @@
|
||||
"pretty_print_docs(compressed_docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "14002ec8-7ee5-4f91-9315-dd21c3808776",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### `LLMListwiseRerank`\n",
|
||||
"\n",
|
||||
"[LLMListwiseRerank](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.document_compressors.listwise_rerank.LLMListwiseRerank.html) uses [zero-shot listwise document reranking](https://arxiv.org/pdf/2305.02156) and functions similarly to `LLMChainFilter` as a robust but more expensive option. It is recommended to use a more powerful LLM.\n",
|
||||
"\n",
|
||||
"Note that `LLMListwiseRerank` requires a model with the [with_structured_output](/docs/integrations/chat/) method implemented."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "4ab9ee9f-917e-4d6f-9344-eb7f01533228",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Document 1:\n",
|
||||
"\n",
|
||||
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
|
||||
"\n",
|
||||
"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n",
|
||||
"\n",
|
||||
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
|
||||
"\n",
|
||||
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.retrievers.document_compressors import LLMListwiseRerank\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)\n",
|
||||
"\n",
|
||||
"_filter = LLMListwiseRerank.from_llm(llm, top_n=1)\n",
|
||||
"compression_retriever = ContextualCompressionRetriever(\n",
|
||||
" base_compressor=_filter, base_retriever=retriever\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"compressed_docs = compression_retriever.invoke(\n",
|
||||
" \"What did the president say about Ketanji Jackson Brown\"\n",
|
||||
")\n",
|
||||
"pretty_print_docs(compressed_docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7194da42",
|
||||
@@ -295,7 +346,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"execution_count": 8,
|
||||
"id": "617a1756",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
"id": "9a8bceb3-95bd-4496-bb9e-57655136e070",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to use Runnables as Tools\n",
|
||||
"# How to convert Runnables as Tools\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
@@ -267,9 +267,9 @@
|
||||
"We first instantiate a chat model that supports [tool calling](/docs/how_to/tool_calling/):\n",
|
||||
"\n",
|
||||
"```{=mdx}\n",
|
||||
"<ChatModelTabs\n",
|
||||
" customVarName=\"llm\"\n",
|
||||
"/>\n",
|
||||
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
|
||||
"\n",
|
||||
"<ChatModelTabs customVarName=\"llm\" />\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
"id": "5436020b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to create custom tools\n",
|
||||
"# How to create tools\n",
|
||||
"\n",
|
||||
"When constructing an agent, you will need to provide it with a list of `Tool`s that it can use. Besides the actual function that is called, the Tool consists of several components:\n",
|
||||
"\n",
|
||||
|
||||
@@ -44,6 +44,7 @@ This highlights functionality that is core to using LangChain.
 - [How to: inspect runnables](/docs/how_to/inspect)
 - [How to: add fallbacks to a runnable](/docs/how_to/fallbacks)
 - [How to: migrate chains to LCEL](/docs/how_to/migrate_chains)
+- [How to: pass runtime secrets to a runnable](/docs/how_to/runnable_runtime_secrets)

 ## Components

@@ -80,7 +81,6 @@ These are the core building blocks you can use when building applications.
 - [How to: stream a response back](/docs/how_to/chat_streaming)
 - [How to: track token usage](/docs/how_to/chat_token_usage_tracking)
 - [How to: track response metadata across providers](/docs/how_to/response_metadata)
 - [How to: let your end users choose their model](/docs/how_to/chat_models_universal_init/)
-- [How to: use chat model to call tools](/docs/how_to/tool_calling)
 - [How to: stream tool calls](/docs/how_to/tool_streaming)
 - [How to: few shot prompt tool behavior](/docs/how_to/tools_few_shot)

@@ -185,19 +185,21 @@ Indexing is the process of keeping your vectorstore in-sync with the underlying

 LangChain [Tools](/docs/concepts/#tools) contain a description of the tool (to pass to the language model) as well as the implementation of the function to call. Refer [here](/docs/integrations/tools/) for a list of pre-buit tools.

-- [How to: create custom tools](/docs/how_to/custom_tools)
-- [How to: use built-in tools and built-in toolkits](/docs/how_to/tools_builtin)
-- [How to: convert Runnables to tools](/docs/how_to/convert_runnable_to_tool)
-- [How to: use chat model to call tools](/docs/how_to/tool_calling)
-- [How to: pass tool results back to model](/docs/how_to/tool_results_pass_to_model)
-- [How to: add ad-hoc tool calling capability to LLMs and chat models](/docs/how_to/tools_prompting)
+- [How to: create tools](/docs/how_to/custom_tools)
+- [How to: use built-in tools and toolkits](/docs/how_to/tools_builtin)
+- [How to: use chat models to call tools](/docs/how_to/tool_calling)
+- [How to: pass tool outputs to chat models](/docs/how_to/tool_results_pass_to_model)
 - [How to: pass run time values to tools](/docs/how_to/tool_runtime)
-- [How to: add a human in the loop to tool usage](/docs/how_to/tools_human)
-- [How to: handle errors when calling tools](/docs/how_to/tools_error)
-- [How to: disable parallel tool calling](/docs/how_to/tool_choice)
-- [How to: access the `RunnableConfig` object within a custom tool](/docs/how_to/tool_configure)
-- [How to: stream events from child runs within a custom tool](/docs/how_to/tool_stream_events)
-- [How to: return extra artifacts from a tool](/docs/how_to/tool_artifacts/)
+- [How to: add a human-in-the-loop for tools](/docs/how_to/tools_human)
+- [How to: handle tool errors](/docs/how_to/tools_error)
+- [How to: force models to call a tool](/docs/how_to/tool_choice)
+- [How to: disable parallel tool calling](/docs/how_to/tool_calling_parallel)
+- [How to: access the `RunnableConfig` from a tool](/docs/how_to/tool_configure)
+- [How to: stream events from a tool](/docs/how_to/tool_stream_events)
+- [How to: return artifacts from a tool](/docs/how_to/tool_artifacts/)
+- [How to: convert Runnables to tools](/docs/how_to/convert_runnable_to_tool)
+- [How to: add ad-hoc tool calling capability to models](/docs/how_to/tools_prompting)
+- [How to: pass in runtime secrets](/docs/how_to/runnable_runtime_secrets)

 ### Multimodal
@@ -60,7 +60,7 @@
|
||||
" * document addition by id (`add_documents` method with `ids` argument)\n",
|
||||
" * delete by id (`delete` method with `ids` argument)\n",
|
||||
"\n",
|
||||
"Compatible Vectorstores: `Aerospike`, `AnalyticDB`, `AstraDB`, `AwaDB`, `AzureCosmosDBNoSqlVectorSearch`, `AzureCosmosDBVectorSearch`, `Bagel`, `Cassandra`, `Chroma`, `CouchbaseVectorStore`, `DashVector`, `DatabricksVectorSearch`, `DeepLake`, `Dingo`, `ElasticVectorSearch`, `ElasticsearchStore`, `FAISS`, `HanaDB`, `Milvus`, `MyScale`, `OpenSearchVectorSearch`, `PGVector`, `Pinecone`, `Qdrant`, `Redis`, `Rockset`, `ScaNN`, `SingleStoreDB`, `SupabaseVectorStore`, `SurrealDBStore`, `TimescaleVector`, `Vald`, `VDMS`, `Vearch`, `VespaStore`, `Weaviate`, `Yellowbrick`, `ZepVectorStore`, `TencentVectorDB`, `OpenSearchVectorSearch`.\n",
|
||||
"Compatible Vectorstores: `Aerospike`, `AnalyticDB`, `AstraDB`, `AwaDB`, `AzureCosmosDBNoSqlVectorSearch`, `AzureCosmosDBVectorSearch`, `Bagel`, `Cassandra`, `Chroma`, `CouchbaseVectorStore`, `DashVector`, `DatabricksVectorSearch`, `DeepLake`, `Dingo`, `ElasticVectorSearch`, `ElasticsearchStore`, `FAISS`, `HanaDB`, `Milvus`, `MongoDBAtlasVectorSearch`, `MyScale`, `OpenSearchVectorSearch`, `PGVector`, `Pinecone`, `Qdrant`, `Redis`, `Rockset`, `ScaNN`, `SingleStoreDB`, `SupabaseVectorStore`, `SurrealDBStore`, `TimescaleVector`, `Vald`, `VDMS`, `Vearch`, `VespaStore`, `Weaviate`, `Yellowbrick`, `ZepVectorStore`, `TencentVectorDB`, `OpenSearchVectorSearch`.\n",
|
||||
" \n",
|
||||
"## Caution\n",
|
||||
"\n",
|
||||
|
||||
@@ -284,17 +284,17 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": 1,
|
||||
"id": "173e1a9c-2a18-4669-b0de-136f39197786",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"Arr, matey! I be sailin' the high seas with me crew, searchin' for buried treasure and adventure! How be ye doin' on this fine day?\""
|
||||
"\"Arrr, I be doin' well, me heartie! Just sailin' the high seas in search of treasure and adventure. How be ye?\""
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -316,14 +316,20 @@
|
||||
"\n",
|
||||
"history = InMemoryChatMessageHistory()\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def get_history():\n",
|
||||
" return history\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"chain = prompt | ChatOpenAI() | StrOutputParser()\n",
|
||||
"\n",
|
||||
"wrapped_chain = RunnableWithMessageHistory(chain, lambda x: history)\n",
|
||||
"wrapped_chain = RunnableWithMessageHistory(\n",
|
||||
" chain,\n",
|
||||
" get_history,\n",
|
||||
" history_messages_key=\"chat_history\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"wrapped_chain.invoke(\n",
|
||||
" {\"input\": \"how are you?\"},\n",
|
||||
" config={\"configurable\": {\"session_id\": \"42\"}},\n",
|
||||
")"
|
||||
"wrapped_chain.invoke({\"input\": \"how are you?\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -340,17 +346,17 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"execution_count": 2,
|
||||
"id": "4e05994f-1fbc-4699-bf2e-62cb0e4deeb8",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"Ahoy there! What be ye wantin' from this old pirate?\", response_metadata={'token_usage': {'completion_tokens': 15, 'prompt_tokens': 29, 'total_tokens': 44}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-1846d5f5-0dda-43b6-bb49-864e541f9c29-0', usage_metadata={'input_tokens': 29, 'output_tokens': 15, 'total_tokens': 44})"
|
||||
"'Ahoy matey! What can this old pirate do for ye today?'"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -370,9 +376,16 @@
|
||||
"\n",
|
||||
"chain = prompt | ChatOpenAI() | StrOutputParser()\n",
|
||||
"\n",
|
||||
"wrapped_chain = RunnableWithMessageHistory(chain, get_session_history)\n",
|
||||
"wrapped_chain = RunnableWithMessageHistory(\n",
|
||||
" chain,\n",
|
||||
" get_session_history,\n",
|
||||
" history_messages_key=\"chat_history\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"wrapped_chain.invoke(\"Hello!\", config={\"configurable\": {\"session_id\": \"abc123\"}})"
|
||||
"wrapped_chain.invoke(\n",
|
||||
" {\"input\": \"Hello!\"},\n",
|
||||
" config={\"configurable\": {\"session_id\": \"abc123\"}},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -790,7 +803,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.5"
|
||||
"version": "3.10.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
78
docs/docs/how_to/runnable_runtime_secrets.ipynb
Normal file
78
docs/docs/how_to/runnable_runtime_secrets.ipynb
Normal file
@@ -0,0 +1,78 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6fcd2994-0092-4fa3-9bb1-c9c84babadc5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to pass runtime secrets to runnables\n",
|
||||
"\n",
|
||||
":::info Requires `langchain-core >= 0.2.22`\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"We can pass in secrets to our runnables at runtime using the `RunnableConfig`. Specifically we can pass in secrets with a `__` prefix to the `configurable` field. This will ensure that these secrets aren't traced as part of the invocation:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "92e42e91-c277-49de-aa7a-dfb5c993c817",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"7"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.runnables import RunnableConfig\n",
|
||||
"from langchain_core.tools import tool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def foo(x: int, config: RunnableConfig) -> int:\n",
|
||||
" \"\"\"Sum x and a secret int\"\"\"\n",
|
||||
" return x + config[\"configurable\"][\"__top_secret_int\"]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"foo.invoke({\"x\": 5}, {\"configurable\": {\"__top_secret_int\": 2, \"traced_key\": \"bar\"}})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ae3a4fb9-2ce7-46b2-b654-35dff0ae7197",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Looking at the LangSmith trace for this run, we can see that \"traced_key\" was recorded (as part of Metadata) while our secret int was not: https://smith.langchain.com/public/aa7e3289-49ca-422d-a408-f6b927210170/r"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "poetry-venv-311",
|
||||
"language": "python",
|
||||
"name": "poetry-venv-311"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -452,7 +452,7 @@
|
||||
"source": [
|
||||
"#### Generator Functions\n",
|
||||
"\n",
|
||||
"Le'ts fix the streaming using a generator function that can operate on the **input stream**.\n",
|
||||
"Let's fix the streaming using a generator function that can operate on the **input stream**.\n",
|
||||
"\n",
|
||||
":::{.callout-tip}\n",
|
||||
"A generator function (a function that uses `yield`) allows writing code that operates on **input streams**\n",
|
||||
|
||||
@@ -5,11 +5,12 @@
|
||||
"id": "503e36ae-ca62-4f8a-880c-4fe78ff5df93",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to return extra artifacts from a tool\n",
|
||||
"# How to return artifacts from a tool\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"This guide assumes familiarity with the following concepts:\n",
|
||||
"\n",
|
||||
"- [ToolMessage](/docs/concepts/#toolmessage)\n",
|
||||
"- [Tools](/docs/concepts/#tools)\n",
|
||||
"- [Function/tool calling](/docs/concepts/#functiontool-calling)\n",
|
||||
"\n",
|
||||
|
||||
@@ -17,7 +17,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to use a model to call tools\n",
|
||||
"# How to use chat models to call tools\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
@@ -82,30 +82,24 @@
|
||||
"## Passing tools to chat models\n",
|
||||
"\n",
|
||||
"Chat models that support tool calling features implement a `.bind_tools` method, which \n",
|
||||
"receives a list of LangChain [tool objects](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.BaseTool.html#langchain_core.tools.BaseTool) \n",
|
||||
"receives a list of functions, Pydantic models, or LangChain [tool objects](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.BaseTool.html#langchain_core.tools.BaseTool) \n",
|
||||
"and binds them to the chat model in its expected format. Subsequent invocations of the \n",
|
||||
"chat model will include tool schemas in its calls to the LLM.\n",
|
||||
"\n",
|
||||
"For example, we can define the schema for custom tools using the `@tool` decorator \n",
|
||||
"on Python functions:"
|
||||
"For example, below we implement simple tools for arithmetic:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.tools import tool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def add(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Adds a and b.\"\"\"\n",
|
||||
" return a + b\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def multiply(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Multiplies a and b.\"\"\"\n",
|
||||
" return a * b\n",
|
||||
@@ -118,12 +112,14 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Or below, we define the schema using [Pydantic](https://docs.pydantic.dev):"
|
||||
"LangChain also implements a `@tool` decorator that allows for further control of the tool schema, such as tool names and argument descriptions. See the how-to guide [here](/docs/how_to/custom_tools/#creating-tools-from-functions) for detail.\n",
|
||||
"\n",
|
||||
"We can also define the schema using [Pydantic](https://docs.pydantic.dev):"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -343,7 +339,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.5"
|
||||
"version": "3.10.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -4,7 +4,13 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Disabling parallel tool calling (OpenAI only)\n",
|
||||
"# How to disable parallel tool calling\n",
|
||||
"\n",
|
||||
":::info OpenAI-specific\n",
|
||||
"\n",
|
||||
"This API is currently only supported by OpenAI.\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"OpenAI tool calling performs tool calling in parallel by default. That means that if we ask a question like \"What is the weather in Tokyo, New York, and Chicago?\" and we have a tool for getting the weather, it will call the tool 3 times in parallel. We can force it to call only a single tool once by using the ``parallel_tool_call`` parameter."
|
||||
]
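A minimal sketch of disabling parallel tool calls, assuming `langchain-openai` is installed; the weather tool and model name are illustrative:

```python
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI


@tool
def get_weather(city: str) -> str:
    """Get the weather for a city."""
    return f"It is sunny in {city}."


llm = ChatOpenAI(model="gpt-4o-mini")

# with parallel_tool_calls=False the model emits at most one tool call per turn
llm_with_tools = llm.bind_tools([get_weather], parallel_tool_calls=False)

msg = llm_with_tools.invoke("What is the weather in Tokyo, New York, and Chicago?")
print(len(msg.tool_calls))  # expect 1 rather than 3
```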
|
||||
@@ -99,10 +105,24 @@
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to force tool calling behavior\n",
|
||||
"# How to force models to call a tool\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
@@ -125,10 +125,24 @@
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to access the RunnableConfig object within a custom tool\n",
|
||||
"# How to access the RunnableConfig from a tool\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
@@ -110,7 +110,7 @@
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
@@ -124,9 +124,9 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.5"
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to pass tool outputs to the model\n",
|
||||
"# How to pass tool outputs to chat models\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"This guide assumes familiarity with the following concepts:\n",
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to pass run time values to a tool\n",
|
||||
"# How to pass run time values to tools\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
@@ -15,26 +15,25 @@
|
||||
"- [How to use a model to call tools](/docs/how_to/tool_calling)\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
":::{.callout-info} Supported models\n",
|
||||
"\n",
|
||||
"This how-to guide uses models with native tool calling capability.\n",
|
||||
"You can find a [list of all models that support tool calling](/docs/integrations/chat/).\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
":::{.callout-info} Using with LangGraph\n",
|
||||
":::info Using with LangGraph\n",
|
||||
"\n",
|
||||
"If you're using LangGraph, please refer to [this how-to guide](https://langchain-ai.github.io/langgraph/how-tos/pass-run-time-values-to-tools/)\n",
|
||||
"which shows how to create an agent that keeps track of a given user's favorite pets.\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
":::caution Added in `langchain-core==0.2.21`\n",
|
||||
"\n",
|
||||
"Must have `langchain-core>=0.2.21` to use this functionality.\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"You may need to bind values to a tool that are only known at runtime. For example, the tool logic may require using the ID of the user who made the request.\n",
|
||||
"\n",
|
||||
"Most of the time, such values should not be controlled by the LLM. In fact, allowing the LLM to control the user ID may lead to a security risk.\n",
|
||||
"\n",
|
||||
"Instead, the LLM should only control the parameters of the tool that are meant to be controlled by the LLM, while other parameters (such as user ID) should be fixed by the application logic.\n",
|
||||
"\n",
|
||||
"This how-to guide shows a simple design pattern that creates the tool dynamically at run time and binds to them appropriate values."
|
||||
"This how-to guide shows you how to prevent the model from generating certain tool arguments and injecting them in directly at runtime."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -57,23 +56,12 @@
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.2.1\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.0\u001b[0m\n",
|
||||
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpython -m pip install --upgrade pip\u001b[0m\n",
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# | output: false\n",
|
||||
"# | echo: false\n",
|
||||
"\n",
|
||||
"%pip install -qU langchain langchain_openai\n",
|
||||
"# %pip install -qU langchain langchain_openai\n",
|
||||
"\n",
|
||||
"import os\n",
|
||||
"from getpass import getpass\n",
|
||||
@@ -90,10 +78,9 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Passing request time information\n",
|
||||
"## Hiding arguments from the model\n",
|
||||
"\n",
|
||||
"The idea is to create the tool dynamically at request time, and bind to it the appropriate information. For example,\n",
|
||||
"this information may be the user ID as resolved from the request itself."
|
||||
"We can use the InjectedToolArg annotation to mark certain parameters of our Tool, like `user_id` as being injected at runtime, meaning they shouldn't be generated by the model"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -104,46 +91,88 @@
|
||||
"source": [
|
||||
"from typing import List\n",
|
||||
"\n",
|
||||
"from langchain_core.output_parsers import JsonOutputParser\n",
|
||||
"from langchain_core.tools import BaseTool, tool"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.tools import InjectedToolArg, tool\n",
|
||||
"from typing_extensions import Annotated\n",
|
||||
"\n",
|
||||
"user_to_pets = {}\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def generate_tools_for_user(user_id: str) -> List[BaseTool]:\n",
|
||||
" \"\"\"Generate a set of tools that have a user id associated with them.\"\"\"\n",
|
||||
"@tool(parse_docstring=True)\n",
|
||||
"def update_favorite_pets(\n",
|
||||
" pets: List[str], user_id: Annotated[str, InjectedToolArg]\n",
|
||||
") -> None:\n",
|
||||
" \"\"\"Add the list of favorite pets.\n",
|
||||
"\n",
|
||||
" @tool\n",
|
||||
" def update_favorite_pets(pets: List[str]) -> None:\n",
|
||||
" \"\"\"Add the list of favorite pets.\"\"\"\n",
|
||||
" user_to_pets[user_id] = pets\n",
|
||||
" Args:\n",
|
||||
" pets: List of favorite pets to set.\n",
|
||||
" user_id: User's ID.\n",
|
||||
" \"\"\"\n",
|
||||
" user_to_pets[user_id] = pets\n",
|
||||
"\n",
|
||||
" @tool\n",
|
||||
" def delete_favorite_pets() -> None:\n",
|
||||
" \"\"\"Delete the list of favorite pets.\"\"\"\n",
|
||||
" if user_id in user_to_pets:\n",
|
||||
" del user_to_pets[user_id]\n",
|
||||
"\n",
|
||||
" @tool\n",
|
||||
" def list_favorite_pets() -> None:\n",
|
||||
" \"\"\"List favorite pets if any.\"\"\"\n",
|
||||
" return user_to_pets.get(user_id, [])\n",
|
||||
"@tool(parse_docstring=True)\n",
|
||||
"def delete_favorite_pets(user_id: Annotated[str, InjectedToolArg]) -> None:\n",
|
||||
" \"\"\"Delete the list of favorite pets.\n",
|
||||
"\n",
|
||||
" return [update_favorite_pets, delete_favorite_pets, list_favorite_pets]"
|
||||
" Args:\n",
|
||||
" user_id: User's ID.\n",
|
||||
" \"\"\"\n",
|
||||
" if user_id in user_to_pets:\n",
|
||||
" del user_to_pets[user_id]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool(parse_docstring=True)\n",
|
||||
"def list_favorite_pets(user_id: Annotated[str, InjectedToolArg]) -> None:\n",
|
||||
" \"\"\"List favorite pets if any.\n",
|
||||
"\n",
|
||||
" Args:\n",
|
||||
" user_id: User's ID.\n",
|
||||
" \"\"\"\n",
|
||||
" return user_to_pets.get(user_id, [])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Verify that the tools work correctly"
|
||||
"If we look at the input schemas for these tools, we'll see that user_id is still listed:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'title': 'update_favorite_petsSchema',\n",
|
||||
" 'description': 'Add the list of favorite pets.',\n",
|
||||
" 'type': 'object',\n",
|
||||
" 'properties': {'pets': {'title': 'Pets',\n",
|
||||
" 'description': 'List of favorite pets to set.',\n",
|
||||
" 'type': 'array',\n",
|
||||
" 'items': {'type': 'string'}},\n",
|
||||
" 'user_id': {'title': 'User Id',\n",
|
||||
" 'description': \"User's ID.\",\n",
|
||||
" 'type': 'string'}},\n",
|
||||
" 'required': ['pets', 'user_id']}"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"update_favorite_pets.get_input_schema().schema()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"But if we look at the tool call schema, which is what is passed to the model for tool-calling, user_id has been removed:"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -152,46 +181,60 @@
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'eugene': ['cat', 'dog']}\n",
|
||||
"['cat', 'dog']\n"
|
||||
]
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'title': 'update_favorite_pets',\n",
|
||||
" 'description': 'Add the list of favorite pets.',\n",
|
||||
" 'type': 'object',\n",
|
||||
" 'properties': {'pets': {'title': 'Pets',\n",
|
||||
" 'description': 'List of favorite pets to set.',\n",
|
||||
" 'type': 'array',\n",
|
||||
" 'items': {'type': 'string'}}},\n",
|
||||
" 'required': ['pets']}"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"update_pets, delete_pets, list_pets = generate_tools_for_user(\"eugene\")\n",
|
||||
"update_pets.invoke({\"pets\": [\"cat\", \"dog\"]})\n",
|
||||
"print(user_to_pets)\n",
|
||||
"print(list_pets.invoke({}))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def handle_run_time_request(user_id: str, query: str):\n",
|
||||
" \"\"\"Handle run time request.\"\"\"\n",
|
||||
" tools = generate_tools_for_user(user_id)\n",
|
||||
" llm_with_tools = llm.bind_tools(tools)\n",
|
||||
" prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [(\"system\", \"You are a helpful assistant.\")],\n",
|
||||
" )\n",
|
||||
" chain = prompt | llm_with_tools\n",
|
||||
" return llm_with_tools.invoke(query)"
|
||||
"update_favorite_pets.tool_call_schema.schema()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This code will allow the LLM to invoke the tools, but the LLM is **unaware** of the fact that a **user ID** even exists!"
|
||||
"So when we invoke our tool, we need to pass in user_id:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'123': ['lizard', 'dog']}\n",
|
||||
"['lizard', 'dog']\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"user_id = \"123\"\n",
|
||||
"update_favorite_pets.invoke({\"pets\": [\"lizard\", \"dog\"], \"user_id\": user_id})\n",
|
||||
"print(user_to_pets)\n",
|
||||
"print(list_favorite_pets.invoke({\"user_id\": user_id}))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"But when the model calls the tool, no user_id argument will be generated:"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -204,7 +247,8 @@
|
||||
"text/plain": [
|
||||
"[{'name': 'update_favorite_pets',\n",
|
||||
" 'args': {'pets': ['cats', 'parrots']},\n",
|
||||
" 'id': 'call_jJvjPXsNbFO5MMgW0q84iqCN'}]"
|
||||
" 'id': 'call_W3cn4lZmJlyk8PCrKN4PRwqB',\n",
|
||||
" 'type': 'tool_call'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
@@ -213,30 +257,349 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"ai_message = handle_run_time_request(\n",
|
||||
" \"eugene\", \"my favorite animals are cats and parrots.\"\n",
|
||||
")\n",
|
||||
"ai_message.tool_calls"
|
||||
"tools = [\n",
|
||||
" update_favorite_pets,\n",
|
||||
" delete_favorite_pets,\n",
|
||||
" list_favorite_pets,\n",
|
||||
"]\n",
|
||||
"llm_with_tools = llm.bind_tools(tools)\n",
|
||||
"ai_msg = llm_with_tools.invoke(\"my favorite animals are cats and parrots\")\n",
|
||||
"ai_msg.tool_calls"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
":::{.callout-important}\n",
|
||||
"## Injecting arguments at runtime"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If we want to actually execute our tools using the model-generated tool call, we'll need to inject the user_id ourselves:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'name': 'update_favorite_pets',\n",
|
||||
" 'args': {'pets': ['cats', 'parrots'], 'user_id': '123'},\n",
|
||||
" 'id': 'call_W3cn4lZmJlyk8PCrKN4PRwqB',\n",
|
||||
" 'type': 'tool_call'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from copy import deepcopy\n",
|
||||
"\n",
|
||||
"Chat models only output requests to invoke tools, they don't actually invoke the underlying tools.\n",
|
||||
"from langchain_core.runnables import chain\n",
|
||||
"\n",
|
||||
"To see how to invoke the tools, please refer to [how to use a model to call tools](https://python.langchain.com/v0.2/docs/how_to/tool_calling).\n",
|
||||
":::"
|
||||
"\n",
|
||||
"@chain\n",
|
||||
"def inject_user_id(ai_msg):\n",
|
||||
" tool_calls = []\n",
|
||||
" for tool_call in ai_msg.tool_calls:\n",
|
||||
" tool_call_copy = deepcopy(tool_call)\n",
|
||||
" tool_call_copy[\"args\"][\"user_id\"] = user_id\n",
|
||||
" tool_calls.append(tool_call_copy)\n",
|
||||
" return tool_calls\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"inject_user_id.invoke(ai_msg)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"And now we can chain together our model, injection code, and the actual tools to create a tool-executing chain:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[ToolMessage(content='null', name='update_favorite_pets', tool_call_id='call_HUyF6AihqANzEYxQnTUKxkXj')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"tool_map = {tool.name: tool for tool in tools}\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@chain\n",
|
||||
"def tool_router(tool_call):\n",
|
||||
" return tool_map[tool_call[\"name\"]]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"chain = llm_with_tools | inject_user_id | tool_router.map()\n",
|
||||
"chain.invoke(\"my favorite animals are cats and parrots\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Looking at the user_to_pets dict, we can see that it's been updated to include cats and parrots:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'123': ['cats', 'parrots']}"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"user_to_pets"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Other ways of annotating args\n",
|
||||
"\n",
|
||||
"Here are a few other ways of annotating our tool args:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'title': 'UpdateFavoritePetsSchema',\n",
|
||||
" 'description': 'Update list of favorite pets',\n",
|
||||
" 'type': 'object',\n",
|
||||
" 'properties': {'pets': {'title': 'Pets',\n",
|
||||
" 'description': 'List of favorite pets to set.',\n",
|
||||
" 'type': 'array',\n",
|
||||
" 'items': {'type': 'string'}},\n",
|
||||
" 'user_id': {'title': 'User Id',\n",
|
||||
" 'description': \"User's ID.\",\n",
|
||||
" 'type': 'string'}},\n",
|
||||
" 'required': ['pets', 'user_id']}"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
|
||||
"from langchain_core.tools import BaseTool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class UpdateFavoritePetsSchema(BaseModel):\n",
|
||||
" \"\"\"Update list of favorite pets\"\"\"\n",
|
||||
"\n",
|
||||
" pets: List[str] = Field(..., description=\"List of favorite pets to set.\")\n",
|
||||
" user_id: Annotated[str, InjectedToolArg] = Field(..., description=\"User's ID.\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool(args_schema=UpdateFavoritePetsSchema)\n",
|
||||
"def update_favorite_pets(pets, user_id):\n",
|
||||
" user_to_pets[user_id] = pets\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"update_favorite_pets.get_input_schema().schema()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'title': 'update_favorite_pets',\n",
|
||||
" 'description': 'Update list of favorite pets',\n",
|
||||
" 'type': 'object',\n",
|
||||
" 'properties': {'pets': {'title': 'Pets',\n",
|
||||
" 'description': 'List of favorite pets to set.',\n",
|
||||
" 'type': 'array',\n",
|
||||
" 'items': {'type': 'string'}}},\n",
|
||||
" 'required': ['pets']}"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"update_favorite_pets.tool_call_schema.schema()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'title': 'UpdateFavoritePetsSchema',\n",
|
||||
" 'description': 'Update list of favorite pets',\n",
|
||||
" 'type': 'object',\n",
|
||||
" 'properties': {'pets': {'title': 'Pets',\n",
|
||||
" 'description': 'List of favorite pets to set.',\n",
|
||||
" 'type': 'array',\n",
|
||||
" 'items': {'type': 'string'}},\n",
|
||||
" 'user_id': {'title': 'User Id',\n",
|
||||
" 'description': \"User's ID.\",\n",
|
||||
" 'type': 'string'}},\n",
|
||||
" 'required': ['pets', 'user_id']}"
|
||||
]
|
||||
},
|
||||
"execution_count": 22,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from typing import Optional, Type\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class UpdateFavoritePets(BaseTool):\n",
|
||||
" name: str = \"update_favorite_pets\"\n",
|
||||
" description: str = \"Update list of favorite pets\"\n",
|
||||
" args_schema: Optional[Type[BaseModel]] = UpdateFavoritePetsSchema\n",
|
||||
"\n",
|
||||
" def _run(self, pets, user_id):\n",
|
||||
" user_to_pets[user_id] = pets\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"UpdateFavoritePets().get_input_schema().schema()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 23,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'title': 'update_favorite_pets',\n",
|
||||
" 'description': 'Update list of favorite pets',\n",
|
||||
" 'type': 'object',\n",
|
||||
" 'properties': {'pets': {'title': 'Pets',\n",
|
||||
" 'description': 'List of favorite pets to set.',\n",
|
||||
" 'type': 'array',\n",
|
||||
" 'items': {'type': 'string'}}},\n",
|
||||
" 'required': ['pets']}"
|
||||
]
|
||||
},
|
||||
"execution_count": 23,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"UpdateFavoritePets().tool_call_schema.schema()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 24,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'title': 'update_favorite_petsSchema',\n",
|
||||
" 'description': 'Use the tool.\\n\\nAdd run_manager: Optional[CallbackManagerForToolRun] = None\\nto child implementations to enable tracing.',\n",
|
||||
" 'type': 'object',\n",
|
||||
" 'properties': {'pets': {'title': 'Pets',\n",
|
||||
" 'type': 'array',\n",
|
||||
" 'items': {'type': 'string'}},\n",
|
||||
" 'user_id': {'title': 'User Id', 'type': 'string'}},\n",
|
||||
" 'required': ['pets', 'user_id']}"
|
||||
]
|
||||
},
|
||||
"execution_count": 24,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"class UpdateFavoritePets2(BaseTool):\n",
|
||||
" name: str = \"update_favorite_pets\"\n",
|
||||
" description: str = \"Update list of favorite pets\"\n",
|
||||
"\n",
|
||||
" def _run(self, pets: List[str], user_id: Annotated[str, InjectedToolArg]) -> None:\n",
|
||||
" user_to_pets[user_id] = pets\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"UpdateFavoritePets2().get_input_schema().schema()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 26,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'title': 'update_favorite_pets',\n",
|
||||
" 'description': 'Update list of favorite pets',\n",
|
||||
" 'type': 'object',\n",
|
||||
" 'properties': {'pets': {'title': 'Pets',\n",
|
||||
" 'type': 'array',\n",
|
||||
" 'items': {'type': 'string'}}},\n",
|
||||
" 'required': ['pets']}"
|
||||
]
|
||||
},
|
||||
"execution_count": 26,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"UpdateFavoritePets2().tool_call_schema.schema()"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"display_name": "poetry-venv-311",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
"name": "poetry-venv-311"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
@@ -248,7 +611,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to stream events from child runs within a custom tool\n",
|
||||
"# How to stream events from a tool\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
@@ -294,7 +294,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -228,7 +228,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -540,7 +540,7 @@
|
||||
"id": "137662a6"
|
||||
},
|
||||
"source": [
|
||||
"## Example usage within a Conversation Chains"
|
||||
"## Example usage within RunnableWithMessageHistory "
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -550,7 +550,7 @@
|
||||
"id": "79efa62d"
|
||||
},
|
||||
"source": [
|
||||
"Like any other integration, ChatNVIDIA is fine to support chat utilities like conversation buffers by default. Below, we show the [LangChain ConversationBufferMemory](https://python.langchain.com/docs/modules/memory/types/buffer) example applied to the `mistralai/mixtral-8x22b-instruct-v0.1` model."
|
||||
"Like any other integration, ChatNVIDIA is fine to support chat utilities like RunnableWithMessageHistory which is analogous to using `ConversationChain`. Below, we show the [LangChain RunnableWithMessageHistory](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html) example applied to the `mistralai/mixtral-8x22b-instruct-v0.1` model."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -572,8 +572,19 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import ConversationChain\n",
|
||||
"from langchain.memory import ConversationBufferMemory\n",
|
||||
"from langchain_core.chat_history import InMemoryChatMessageHistory\n",
|
||||
"from langchain_core.runnables.history import RunnableWithMessageHistory\n",
|
||||
"\n",
|
||||
"# store is a dictionary that maps session IDs to their corresponding chat histories.\n",
|
||||
"store = {} # memory is maintained outside the chain\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# A function that returns the chat history for a given session ID.\n",
|
||||
"def get_session_history(session_id: str) -> InMemoryChatMessageHistory:\n",
|
||||
" if session_id not in store:\n",
|
||||
" store[session_id] = InMemoryChatMessageHistory()\n",
|
||||
" return store[session_id]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"chat = ChatNVIDIA(\n",
|
||||
" model=\"mistralai/mixtral-8x22b-instruct-v0.1\",\n",
|
||||
@@ -582,24 +593,18 @@
|
||||
" top_p=1.0,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"conversation = ConversationChain(llm=chat, memory=ConversationBufferMemory())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f644ff28",
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/",
|
||||
"height": 268
|
||||
},
|
||||
"id": "f644ff28",
|
||||
"outputId": "bae354cc-2118-4e01-ce20-a717ac94d27d"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"conversation.invoke(\"Hi there!\")[\"response\"]"
|
||||
"# Define a RunnableConfig object, with a `configurable` key. session_id determines thread\n",
|
||||
"config = {\"configurable\": {\"session_id\": \"1\"}}\n",
|
||||
"\n",
|
||||
"conversation = RunnableWithMessageHistory(\n",
|
||||
" chat,\n",
|
||||
" get_session_history,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"conversation.invoke(\n",
|
||||
" \"Hi I'm Srijan Dubey.\", # input or query\n",
|
||||
" config=config,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -616,26 +621,30 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"conversation.invoke(\"I'm doing well! Just having a conversation with an AI.\")[\n",
|
||||
" \"response\"\n",
|
||||
"]"
|
||||
"conversation.invoke(\n",
|
||||
" \"I'm doing well! Just having a conversation with an AI.\",\n",
|
||||
" config=config,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "LyD1xVKmVSs4",
|
||||
"id": "uHIMZxVSVNBC",
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/",
|
||||
"height": 350
|
||||
"height": 284
|
||||
},
|
||||
"id": "LyD1xVKmVSs4",
|
||||
"outputId": "a1714513-a8fd-4d14-f974-233e39d5c4f5"
|
||||
"id": "uHIMZxVSVNBC",
|
||||
"outputId": "79acc89d-a820-4f2c-bac2-afe99da95580"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"conversation.invoke(\"Tell me about yourself.\")[\"response\"]"
|
||||
"conversation.invoke(\n",
|
||||
" \"Tell me about yourself.\",\n",
|
||||
" config=config,\n",
|
||||
")"
|
||||
]
|
||||
}
|
||||
],
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
@@ -11,6 +12,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e49f1e0d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# ChatOllama\n",
|
||||
@@ -23,6 +25,18 @@
|
||||
"\n",
|
||||
"For a complete list of supported models and model variants, see the [Ollama model library](https://github.com/jmorganca/ollama#model-library).\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/v0.2/docs/integrations/chat/ollama) | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| [ChatOllama](https://api.python.langchain.com/en/latest/chat_models/langchain_ollama.chat_models.ChatOllama.html) | [langchain-ollama](https://api.python.langchain.com/en/latest/ollama_api_reference.html) | ✅ | ❌ | ✅ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling/) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| ❌ | ❌ | ✅ | ❌ | ❌ | ❌ | ✅ | ✅ | ❌ | ❌ | \n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"First, follow [these instructions](https://github.com/jmorganca/ollama) to set up and run a local Ollama instance:\n",
|
||||
@@ -40,307 +54,285 @@
|
||||
"* Specify the exact version of the model of interest as such `ollama pull vicuna:13b-v1.5-16k-q4_0` (View the [various tags for the `Vicuna`](https://ollama.ai/library/vicuna/tags) model in this instance)\n",
|
||||
"* To view all pulled models, use `ollama list`\n",
|
||||
"* To chat directly with a model from the command line, use `ollama run <name-of-model>`\n",
|
||||
"* View the [Ollama documentation](https://github.com/jmorganca/ollama) for more commands. Run `ollama help` in the terminal to see available commands too.\n",
|
||||
"\n",
|
||||
"## Usage\n",
|
||||
"\n",
|
||||
"You can see a full list of supported parameters on the [API reference page](https://api.python.langchain.com/en/latest/llms/langchain.llms.ollama.Ollama.html).\n",
|
||||
"\n",
|
||||
"If you are using a LLaMA `chat` model (e.g., `ollama pull llama3`) then you can use the `ChatOllama` interface.\n",
|
||||
"\n",
|
||||
"This includes [special tokens](https://huggingface.co/blog/llama2#how-to-prompt-llama-2) for system message and user input.\n",
|
||||
"\n",
|
||||
"## Interacting with Models \n",
|
||||
"\n",
|
||||
"Here are a few ways to interact with pulled local models\n",
|
||||
"\n",
|
||||
"#### In the terminal:\n",
|
||||
"\n",
|
||||
"* All of your local models are automatically served on `localhost:11434`\n",
|
||||
"* Run `ollama run <name-of-model>` to start interacting via the command line directly\n",
|
||||
"\n",
|
||||
"#### Via an API\n",
|
||||
"\n",
|
||||
"Send an `application/json` request to the API endpoint of Ollama to interact.\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"curl http://localhost:11434/api/generate -d '{\n",
|
||||
" \"model\": \"llama3\",\n",
|
||||
" \"prompt\":\"Why is the sky blue?\"\n",
|
||||
"}'\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"See the Ollama [API documentation](https://github.com/jmorganca/ollama/blob/main/docs/api.md) for all endpoints.\n",
|
||||
"\n",
|
||||
"#### Via LangChain\n",
|
||||
"\n",
|
||||
"See a typical basic example of using Ollama via the `ChatOllama` chat model in your LangChain application. \n",
|
||||
"\n",
|
||||
"View the [API Reference for ChatOllama](https://api.python.langchain.com/en/latest/chat_models/langchain_community.chat_models.ollama.ChatOllama.html#langchain_community.chat_models.ollama.ChatOllama) for more."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Why did the astronaut break up with his girlfriend?\n",
|
||||
"\n",
|
||||
"Because he needed space!\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# LangChain supports many other chat models. Here, we're using Ollama\n",
|
||||
"from langchain_community.chat_models import ChatOllama\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"# supports many more optional parameters. Hover on your `ChatOllama(...)`\n",
|
||||
"# class to view the latest available supported parameters\n",
|
||||
"llm = ChatOllama(model=\"llama3\")\n",
|
||||
"prompt = ChatPromptTemplate.from_template(\"Tell me a short joke about {topic}\")\n",
|
||||
"\n",
|
||||
"# using LangChain Expressive Language chain syntax\n",
|
||||
"# learn more about the LCEL on\n",
|
||||
"# /docs/concepts/#langchain-expression-language-lcel\n",
|
||||
"chain = prompt | llm | StrOutputParser()\n",
|
||||
"\n",
|
||||
"# for brevity, response is printed in terminal\n",
|
||||
"# You can use LangServe to deploy your application for\n",
|
||||
"# production\n",
|
||||
"print(chain.invoke({\"topic\": \"Space travel\"}))"
|
||||
"* View the [Ollama documentation](https://github.com/jmorganca/ollama) for more commands. Run `ollama help` in the terminal to see available commands too.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "72ee0c4b-9764-423a-9dbf-95129e185210",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"LCEL chains, out of the box, provide extra functionalities, such as streaming of responses, and async support"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Why\n",
|
||||
" did\n",
|
||||
" the\n",
|
||||
" astronaut\n",
|
||||
" break\n",
|
||||
" up\n",
|
||||
" with\n",
|
||||
" his\n",
|
||||
" girlfriend\n",
|
||||
" before\n",
|
||||
" going\n",
|
||||
" to\n",
|
||||
" Mars\n",
|
||||
"?\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Because\n",
|
||||
" he\n",
|
||||
" needed\n",
|
||||
" space\n",
|
||||
"!\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"topic = {\"topic\": \"Space travel\"}\n",
|
||||
"\n",
|
||||
"for chunks in chain.stream(topic):\n",
|
||||
" print(chunks)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For streaming async support, here's an example - all possible via the single chain created above."
|
||||
"If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"topic = {\"topic\": \"Space travel\"}\n",
|
||||
"\n",
|
||||
"async for chunks in chain.astream(topic):\n",
|
||||
" print(chunks)"
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Take a look at the [LangChain Expressive Language (LCEL) Interface](/docs/concepts#interface) for the other available interfaces for use when a chain is created.\n",
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"## Building from source\n",
|
||||
"\n",
|
||||
"For up to date instructions on building from source, check the Ollama documentation on [Building from Source](https://github.com/ollama/ollama?tab=readme-ov-file#building)"
|
||||
"The LangChain Ollama integration lives in the `langchain-ollama` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-ollama"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Extraction\n",
|
||||
" \n",
|
||||
"Use the latest version of Ollama and supply the [`format`](https://github.com/jmorganca/ollama/blob/main/docs/api.md#json-mode) flag. The `format` flag will force the model to produce the response in JSON.\n",
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"> **Note:** You can also try out the experimental [OllamaFunctions](/docs/integrations/chat/ollama_functions) wrapper for convenience."
|
||||
"Now we can instantiate our model object and generate chat completions:\n",
|
||||
"\n",
|
||||
"- TODO: Update model instantiation with relevant params."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_ollama import ChatOllama\n",
|
||||
"\n",
|
||||
"llm = ChatOllama(\n",
|
||||
" model=\"llama3\",\n",
|
||||
" temperature=0,\n",
|
||||
" # other params...\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2b4f3e15",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invocation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.chat_models import ChatOllama\n",
|
||||
"\n",
|
||||
"llm = ChatOllama(model=\"llama3\", format=\"json\", temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"id": "62e0dbc3",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"content='{ \"morning\": \"blue\", \"noon\": \"clear blue\", \"afternoon\": \"hazy yellow\", \"evening\": \"orange-red\" }\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n ' id='run-e893700f-e2d0-4df8-ad86-17525dcee318-0'\n"
|
||||
]
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Je adore le programmation.\\n\\n(Note: \"programmation\" is not commonly used in French, but I translated it as \"le programmation\" to maintain the same grammatical structure and meaning as the original English sentence.)', response_metadata={'model': 'llama3', 'created_at': '2024-07-22T17:43:54.731273Z', 'message': {'role': 'assistant', 'content': ''}, 'done_reason': 'stop', 'done': True, 'total_duration': 11094839375, 'load_duration': 10121854667, 'prompt_eval_count': 36, 'prompt_eval_duration': 146569000, 'eval_count': 46, 'eval_duration': 816593000}, id='run-befccbdc-e1f9-42a9-85cf-e69b926d6b8b-0', usage_metadata={'input_tokens': 36, 'output_tokens': 46, 'total_tokens': 82})"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"from langchain_core.messages import AIMessage\n",
|
||||
"\n",
|
||||
"messages = [\n",
|
||||
" HumanMessage(\n",
|
||||
" content=\"What color is the sky at different times of the day? Respond using JSON\"\n",
|
||||
" )\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"I love programming.\"),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"chat_model_response = llm.invoke(messages)\n",
|
||||
"print(chat_model_response)"
|
||||
"ai_msg = llm.invoke(messages)\n",
|
||||
"ai_msg"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": 5,
|
||||
"id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Je adore le programmation.\n",
|
||||
"\n",
|
||||
"Name: John\n",
|
||||
"Age: 35\n",
|
||||
"Likes: Pizza\n"
|
||||
"(Note: \"programmation\" is not commonly used in French, but I translated it as \"le programmation\" to maintain the same grammatical structure and meaning as the original English sentence.)\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import json\n",
|
||||
"\n",
|
||||
"from langchain_community.chat_models import ChatOllama\n",
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"json_schema = {\n",
|
||||
" \"title\": \"Person\",\n",
|
||||
" \"description\": \"Identifying information about a person.\",\n",
|
||||
" \"type\": \"object\",\n",
|
||||
" \"properties\": {\n",
|
||||
" \"name\": {\"title\": \"Name\", \"description\": \"The person's name\", \"type\": \"string\"},\n",
|
||||
" \"age\": {\"title\": \"Age\", \"description\": \"The person's age\", \"type\": \"integer\"},\n",
|
||||
" \"fav_food\": {\n",
|
||||
" \"title\": \"Fav Food\",\n",
|
||||
" \"description\": \"The person's favorite food\",\n",
|
||||
" \"type\": \"string\",\n",
|
||||
" },\n",
|
||||
" },\n",
|
||||
" \"required\": [\"name\", \"age\"],\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"llm = ChatOllama(model=\"llama2\")\n",
|
||||
"\n",
|
||||
"messages = [\n",
|
||||
" HumanMessage(\n",
|
||||
" content=\"Please tell me about a person using the following JSON schema:\"\n",
|
||||
" ),\n",
|
||||
" HumanMessage(content=\"{dumps}\"),\n",
|
||||
" HumanMessage(\n",
|
||||
" content=\"Now, considering the schema, tell me about a person named John who is 35 years old and loves pizza.\"\n",
|
||||
" ),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(messages)\n",
|
||||
"dumps = json.dumps(json_schema, indent=2)\n",
|
||||
"\n",
|
||||
"chain = prompt | llm | StrOutputParser()\n",
|
||||
"\n",
|
||||
"print(chain.invoke({\"dumps\": dumps}))"
|
||||
"print(ai_msg.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Ich liebe Programmieren!\\n\\n(Note: \"Ich liebe\" means \"I love\", \"Programmieren\" is the verb for \"programming\")', response_metadata={'model': 'llama3', 'created_at': '2024-07-04T04:22:33.864132Z', 'message': {'role': 'assistant', 'content': ''}, 'done_reason': 'stop', 'done': True, 'total_duration': 1310800083, 'load_duration': 1782000, 'prompt_eval_count': 16, 'prompt_eval_duration': 250199000, 'eval_count': 29, 'eval_duration': 1057192000}, id='run-cbadbe59-2de2-4ec0-a18a-b3220226c3d2-0')"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | llm\n",
|
||||
"chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"input_language\": \"English\",\n",
|
||||
" \"output_language\": \"German\",\n",
|
||||
" \"input\": \"I love programming.\",\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0f51345d-0a9d-43f1-8fca-d0662cb8e21b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Tool calling\n",
|
||||
"\n",
|
||||
"We can use [tool calling](https://blog.langchain.dev/improving-core-tool-interfaces-and-docs-in-langchain/) with an LLM [that has been fine-tuned for tool use](https://ollama.com/library/llama3-groq-tool-use): \n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"ollama pull llama3-groq-tool-use\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"We can just pass normal Python functions directly as tools."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "5250bceb-1029-41ff-b447-983518704d88",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'name': 'validate_user',\n",
|
||||
" 'args': {'addresses': ['123 Fake St, Boston MA',\n",
|
||||
" '234 Pretend Boulevard, Houston TX'],\n",
|
||||
" 'user_id': 123},\n",
|
||||
" 'id': 'fe2148d3-95fb-48e9-845a-4bfecc1f1f96',\n",
|
||||
" 'type': 'tool_call'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from typing import List\n",
|
||||
"\n",
|
||||
"from langchain_ollama import ChatOllama\n",
|
||||
"from typing_extensions import TypedDict\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def validate_user(user_id: int, addresses: List) -> bool:\n",
|
||||
" \"\"\"Validate user using historical addresses.\n",
|
||||
"\n",
|
||||
" Args:\n",
|
||||
" user_id: (int) the user ID.\n",
|
||||
" addresses: Previous addresses.\n",
|
||||
" \"\"\"\n",
|
||||
" return True\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"llm = ChatOllama(\n",
|
||||
" model=\"llama3-groq-tool-use\",\n",
|
||||
" temperature=0,\n",
|
||||
").bind_tools([validate_user])\n",
|
||||
"\n",
|
||||
"result = llm.invoke(\n",
|
||||
" \"Could you validate user 123? They previously lived at \"\n",
|
||||
" \"123 Fake St in Boston MA and 234 Pretend Boulevard in \"\n",
|
||||
" \"Houston TX.\"\n",
|
||||
")\n",
|
||||
"result.tool_calls"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2bb034ff-218f-4865-afea-3f5e57d3bdee",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We look at the LangSmith trace to see that the tool call was performed: \n",
|
||||
"\n",
|
||||
"https://smith.langchain.com/public/4169348a-d6be-45df-a7cf-032f6baa4697/r\n",
|
||||
"\n",
|
||||
"In particular, the trace shows how the tool schema was populated."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4c5e0197",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Multi-modal\n",
|
||||
"\n",
|
||||
"Ollama has support for multi-modal LLMs, such as [bakllava](https://ollama.ai/library/bakllava) and [llava](https://ollama.ai/library/llava).\n",
|
||||
"Ollama has support for multi-modal LLMs, such as [bakllava](https://ollama.com/library/bakllava) and [llava](https://ollama.com/library/llava).\n",
|
||||
"\n",
|
||||
"Browse the full set of versions for models with `tags`, such as [Llava](https://ollama.ai/library/llava/tags).\n",
|
||||
" ollama pull bakllava\n",
|
||||
"\n",
|
||||
"Download the desired LLM via `ollama pull bakllava`\n",
|
||||
"\n",
|
||||
"Be sure to update Ollama so that you have the most recent version to support multi-modal.\n",
|
||||
"\n",
|
||||
"Check out the typical example of how to use ChatOllama multi-modal support below:"
|
||||
"Be sure to update Ollama so that you have the most recent version to support multi-modal."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"metadata": {
|
||||
"scrolled": true
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"!pip install --upgrade --quiet pillow"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": 11,
|
||||
"id": "36c9b1c2",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -399,7 +391,8 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"execution_count": 12,
|
||||
"id": "32b3ba7b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -411,8 +404,8 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.chat_models import ChatOllama\n",
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"from langchain_ollama import ChatOllama\n",
|
||||
"\n",
|
||||
"llm = ChatOllama(model=\"bakllava\", temperature=0)\n",
|
||||
"\n",
|
||||
@@ -449,20 +442,12 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Concurrency Features\n",
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"Ollama supports concurrency inference for a single model, and or loading multiple models simulatenously (at least [version 0.1.33](https://github.com/ollama/ollama/releases)).\n",
|
||||
"\n",
|
||||
"Start the Ollama server with:\n",
|
||||
"\n",
|
||||
"* `OLLAMA_NUM_PARALLEL`: Handle multiple requests simultaneously for a single model\n",
|
||||
"* `OLLAMA_MAX_LOADED_MODELS`: Load multiple models simultaneously\n",
|
||||
"\n",
|
||||
"Example: `OLLAMA_NUM_PARALLEL=4 OLLAMA_MAX_LOADED_MODELS=4 ollama serve`\n",
|
||||
"\n",
|
||||
"Learn more about configuring Ollama server in [the official guide](https://github.com/ollama/ollama/blob/main/docs/faq.md#how-do-i-configure-ollama-server)."
|
||||
"For detailed documentation of all ChatOllama features and configurations head to the API reference: https://api.python.langchain.com/en/latest/chat_models/langchain_ollama.chat_models.ChatOllama.html"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -486,5 +471,5 @@
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Ollama Functions\n",
|
||||
"sidebar_class_name: hidden\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
@@ -15,16 +16,16 @@
|
||||
"source": [
|
||||
"# OllamaFunctions\n",
|
||||
"\n",
|
||||
":::warning\n",
|
||||
"\n",
|
||||
"This was an experimental wrapper that attempts to bolt-on tool calling support to models that do not natively support it. The [primary Ollama integration](/docs/integrations/chat/ollama/) now supports tool calling, and should be used instead.\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"This notebook shows how to use an experimental wrapper around Ollama that gives it [tool calling capabilities](https://python.langchain.com/v0.2/docs/concepts/#functiontool-calling).\n",
|
||||
"\n",
|
||||
"Note that more powerful and capable models will perform better with complex schema and/or multiple functions. The examples below use llama3 and phi3 models.\n",
|
||||
"For a complete list of supported models and model variants, see the [Ollama model library](https://ollama.ai/library).\n",
|
||||
"\n",
|
||||
":::warning\n",
|
||||
"\n",
|
||||
"This is an experimental wrapper that attempts to bolt-on tool calling support to models that do not natively support it. Use with caution.\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"## Overview\n",
|
||||
"\n",
|
||||
"### Integration details\n",
|
||||
|
||||
@@ -82,9 +82,9 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# By default it will use the model which was deployed through the platform\n",
|
||||
"# in my case it will is \"claude-3-haiku\"\n",
|
||||
"# in my case it will is \"gpt-4o\"\n",
|
||||
"\n",
|
||||
"chat = ChatPremAI(project_id=8)"
|
||||
"chat = ChatPremAI(project_id=1234, model_name=\"gpt-4o\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -107,7 +107,7 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"I am an artificial intelligence created by Anthropic. I'm here to help with a wide variety of tasks, from research and analysis to creative projects and open-ended conversation. I have general knowledge and capabilities, but I'm not a real person - I'm an AI assistant. Please let me know if you have any other questions!\n"
|
||||
"I am an AI language model created by OpenAI, designed to assist with answering questions and providing information based on the context provided. How can I help you today?\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -133,7 +133,7 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"I am an artificial intelligence created by Anthropic. My purpose is to assist and converse with humans in a friendly and helpful way. I have a broad knowledge base that I can use to provide information, answer questions, and engage in discussions on a wide range of topics. Please let me know if you have any other questions - I'm here to help!\")"
|
||||
"AIMessage(content=\"I'm your friendly assistant! How can I help you today?\", response_metadata={'document_chunks': [{'repository_id': 1985, 'document_id': 1306, 'chunk_id': 173899, 'document_name': '[D] Difference between sparse and dense informati…', 'similarity_score': 0.3209080100059509, 'content': \"with the difference or anywhere\\nwhere I can read about it?\\n\\n\\n 17 9\\n\\n\\n u/ScotiabankCanada • Promoted\\n\\n\\n Accelerate your study permit process\\n with Scotiabank's Student GIC\\n Program. We're here to help you tur…\\n\\n\\n startright.scotiabank.com Learn More\\n\\n\\n Add a Comment\\n\\n\\nSort by: Best\\n\\n\\n DinosParkour • 1y ago\\n\\n\\n Dense Retrieval (DR) m\"}]}, id='run-510bbd0e-3f8f-4095-9b1f-c2d29fd89719-0')"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
@@ -160,10 +160,18 @@
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"/home/anindya/prem/langchain/libs/community/langchain_community/chat_models/premai.py:355: UserWarning: WARNING: Parameter top_p is not supported in kwargs.\n",
|
||||
" warnings.warn(f\"WARNING: Parameter {key} is not supported in kwargs.\")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='I am an artificial intelligence created by Anthropic')"
|
||||
"AIMessage(content=\"Hello! I'm your friendly assistant. How can I\", response_metadata={'document_chunks': [{'repository_id': 1985, 'document_id': 1306, 'chunk_id': 173899, 'document_name': '[D] Difference between sparse and dense informati…', 'similarity_score': 0.3209080100059509, 'content': \"with the difference or anywhere\\nwhere I can read about it?\\n\\n\\n 17 9\\n\\n\\n u/ScotiabankCanada • Promoted\\n\\n\\n Accelerate your study permit process\\n with Scotiabank's Student GIC\\n Program. We're here to help you tur…\\n\\n\\n startright.scotiabank.com Learn More\\n\\n\\n Add a Comment\\n\\n\\nSort by: Best\\n\\n\\n DinosParkour • 1y ago\\n\\n\\n Dense Retrieval (DR) m\"}]}, id='run-c4b06b98-4161-4cca-8495-fd2fc98fa8f8-0')"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
@@ -195,13 +203,13 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query = \"what is the diameter of individual Galaxy\"\n",
|
||||
"query = \"Which models are used for dense retrieval\"\n",
|
||||
"repository_ids = [\n",
|
||||
" 1991,\n",
|
||||
" 1985,\n",
|
||||
"]\n",
|
||||
"repositories = dict(ids=repository_ids, similarity_threshold=0.3, limit=3)"
|
||||
]
|
||||
@@ -219,9 +227,34 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Dense retrieval models typically include:\n",
|
||||
"\n",
|
||||
"1. **BERT-based Models**: Such as DPR (Dense Passage Retrieval) which uses BERT for encoding queries and passages.\n",
|
||||
"2. **ColBERT**: A model that combines BERT with late interaction mechanisms.\n",
|
||||
"3. **ANCE (Approximate Nearest Neighbor Negative Contrastive Estimation)**: Uses BERT and focuses on efficient retrieval.\n",
|
||||
"4. **TCT-ColBERT**: A variant of ColBERT that uses a two-tower\n",
|
||||
"{\n",
|
||||
" \"document_chunks\": [\n",
|
||||
" {\n",
|
||||
" \"repository_id\": 1985,\n",
|
||||
" \"document_id\": 1306,\n",
|
||||
" \"chunk_id\": 173899,\n",
|
||||
" \"document_name\": \"[D] Difference between sparse and dense informati\\u2026\",\n",
|
||||
" \"similarity_score\": 0.3209080100059509,\n",
|
||||
" \"content\": \"with the difference or anywhere\\nwhere I can read about it?\\n\\n\\n 17 9\\n\\n\\n u/ScotiabankCanada \\u2022 Promoted\\n\\n\\n Accelerate your study permit process\\n with Scotiabank's Student GIC\\n Program. We're here to help you tur\\u2026\\n\\n\\n startright.scotiabank.com Learn More\\n\\n\\n Add a Comment\\n\\n\\nSort by: Best\\n\\n\\n DinosParkour \\u2022 1y ago\\n\\n\\n Dense Retrieval (DR) m\"\n",
|
||||
" }\n",
|
||||
" ]\n",
|
||||
"}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import json\n",
|
||||
"\n",
|
||||
@@ -262,7 +295,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -288,7 +321,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"template_id = \"78069ce8-xxxxx-xxxxx-xxxx-xxx\"\n",
|
||||
"response = chat.invoke([human_message], template_id=template_id)\n",
|
||||
"response = chat.invoke([human_messages], template_id=template_id)\n",
|
||||
"print(response.content)"
|
||||
]
|
||||
},
|
||||
@@ -310,14 +343,14 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"execution_count": 17,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Hello! As an AI language model, I don't have feelings or a physical state, but I'm functioning properly and ready to assist you with any questions or tasks you might have. How can I help you today?"
|
||||
"It looks like your message got cut off. If you need information about Dense Retrieval (DR) or any other topic, please provide more details or clarify your question."
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -338,14 +371,14 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": 18,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Hello! As an AI language model, I don't have feelings or a physical form, but I'm functioning properly and ready to assist you. How can I help you today?"
|
||||
"Woof! 🐾 How can I help you today? Want to play fetch or maybe go for a walk 🐶🦴"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -365,6 +398,275 @@
|
||||
" sys.stdout.write(chunk.content)\n",
|
||||
" sys.stdout.flush()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Tool/Function Calling\n",
|
||||
"\n",
|
||||
"LangChain PremAI supports tool/function calling. Tool/function calling allows a model to respond to a given prompt by generating output that matches a user-defined schema. \n",
|
||||
"\n",
|
||||
"- You can learn all about tool calling in details [in our documentation here](https://docs.premai.io/get-started/function-calling).\n",
|
||||
"- You can learn more about langchain tool calling in [this part of the docs](https://python.langchain.com/v0.1/docs/modules/model_io/chat/function_calling).\n",
|
||||
"\n",
|
||||
"**NOTE:**\n",
|
||||
"The current version of LangChain ChatPremAI do not support function/tool calling with streaming support. Streaming support along with function calling will come soon. \n",
|
||||
"\n",
|
||||
"#### Passing tools to model\n",
|
||||
"\n",
|
||||
"In order to pass tools and let the LLM choose the tool it needs to call, we need to pass a tool schema. A tool schema is the function definition along with proper docstring on what does the function do, what each argument of the function is etc. Below are some simple arithmetic functions with their schema. \n",
|
||||
"\n",
|
||||
"**NOTE:** When defining function/tool schema, do not forget to add information around the function arguments, otherwise it would throw error."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
|
||||
"from langchain_core.tools import tool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Define the schema for function arguments\n",
|
||||
"class OperationInput(BaseModel):\n",
|
||||
" a: int = Field(description=\"First number\")\n",
|
||||
" b: int = Field(description=\"Second number\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Now define the function where schema for argument will be OperationInput\n",
|
||||
"@tool(\"add\", args_schema=OperationInput, return_direct=True)\n",
|
||||
"def add(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Adds a and b.\n",
|
||||
"\n",
|
||||
" Args:\n",
|
||||
" a: first int\n",
|
||||
" b: second int\n",
|
||||
" \"\"\"\n",
|
||||
" return a + b\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool(\"multiply\", args_schema=OperationInput, return_direct=True)\n",
|
||||
"def multiply(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Multiplies a and b.\n",
|
||||
"\n",
|
||||
" Args:\n",
|
||||
" a: first int\n",
|
||||
" b: second int\n",
|
||||
" \"\"\"\n",
|
||||
" return a * b"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Binding tool schemas with our LLM\n",
|
||||
"\n",
|
||||
"We will now use the `bind_tools` method to convert our above functions to a \"tool\" and binding it with the model. This means we are going to pass these tool informations everytime we invoke the model. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tools = [add, multiply]\n",
|
||||
"llm_with_tools = chat.bind_tools(tools)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"After this, we get the response from the model which is now binded with the tools. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 21,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query = \"What is 3 * 12? Also, what is 11 + 49?\"\n",
|
||||
"\n",
|
||||
"messages = [HumanMessage(query)]\n",
|
||||
"ai_msg = llm_with_tools.invoke(messages)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"As we can see, when our chat model is binded with tools, then based on the given prompt, it calls the correct set of the tools and sequentially. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 27,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'name': 'multiply',\n",
|
||||
" 'args': {'a': 3, 'b': 12},\n",
|
||||
" 'id': 'call_A9FL20u12lz6TpOLaiS6rFa8'},\n",
|
||||
" {'name': 'add',\n",
|
||||
" 'args': {'a': 11, 'b': 49},\n",
|
||||
" 'id': 'call_MPKYGLHbf39csJIyb5BZ9xIk'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 27,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"ai_msg.tool_calls"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We append this message shown above to the LLM which acts as a context and makes the LLM aware that what all functions it has called. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 23,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"messages.append(ai_msg)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Since tool calling happens into two phases, where:\n",
|
||||
"\n",
|
||||
"1. in our first call, we gathered all the tools that the LLM decided to tool, so that it can get the result as an added context to give more accurate and hallucination free result. \n",
|
||||
"\n",
|
||||
"2. in our second call, we will parse those set of tools decided by LLM and run them (in our case it will be the functions we defined, with the LLM's extracted arguments) and pass this result to the LLM"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 24,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.messages import ToolMessage\n",
|
||||
"\n",
|
||||
"for tool_call in ai_msg.tool_calls:\n",
|
||||
" selected_tool = {\"add\": add, \"multiply\": multiply}[tool_call[\"name\"].lower()]\n",
|
||||
" tool_output = selected_tool.invoke(tool_call[\"args\"])\n",
|
||||
" messages.append(ToolMessage(tool_output, tool_call_id=tool_call[\"id\"]))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Finally, we call the LLM (binded with the tools) with the function response added in it's context. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 28,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"The final answers are:\n",
|
||||
"\n",
|
||||
"- 3 * 12 = 36\n",
|
||||
"- 11 + 49 = 60\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"response = llm_with_tools.invoke(messages)\n",
|
||||
"print(response.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Defining tool schemas: Pydantic class\n",
|
||||
"\n",
|
||||
"Above we have shown how to define schema using `tool` decorator, however we can equivalently define the schema using Pydantic. Pydantic is useful when your tool inputs are more complex:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 29,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.output_parsers.openai_tools import PydanticToolsParser\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class add(BaseModel):\n",
|
||||
" \"\"\"Add two integers together.\"\"\"\n",
|
||||
"\n",
|
||||
" a: int = Field(..., description=\"First integer\")\n",
|
||||
" b: int = Field(..., description=\"Second integer\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class multiply(BaseModel):\n",
|
||||
" \"\"\"Multiply two integers together.\"\"\"\n",
|
||||
"\n",
|
||||
" a: int = Field(..., description=\"First integer\")\n",
|
||||
" b: int = Field(..., description=\"Second integer\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"tools = [add, multiply]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now, we can bind them to chat models and directly get the result:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 30,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[multiply(a=3, b=12), add(a=11, b=49)]"
|
||||
]
|
||||
},
|
||||
"execution_count": 30,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain = llm_with_tools | PydanticToolsParser(tools=[multiply, add])\n",
|
||||
"chain.invoke(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now, as done above, we parse this and run this functions and call the LLM once again to get the result."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
@@ -383,7 +685,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.7"
|
||||
"version": "3.9.19"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
484
docs/docs/integrations/document_loaders/dedoc.ipynb
Normal file
484
docs/docs/integrations/document_loaders/dedoc.ipynb
Normal file
@@ -0,0 +1,484 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6b74f73d-1763-42d0-9c24-8f65f445bb72",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Dedoc\n",
|
||||
"\n",
|
||||
"This sample demonstrates the use of `Dedoc` in combination with `LangChain` as a `DocumentLoader`.\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"\n",
|
||||
"[Dedoc](https://dedoc.readthedocs.io) is an [open-source](https://github.com/ispras/dedoc)\n",
|
||||
"library/service that extracts texts, tables, attached files and document structure\n",
|
||||
"(e.g., titles, list items, etc.) from files of various formats.\n",
|
||||
"\n",
|
||||
"`Dedoc` supports `DOCX`, `XLSX`, `PPTX`, `EML`, `HTML`, `PDF`, images and more.\n",
|
||||
"Full list of supported formats can be found [here](https://dedoc.readthedocs.io/en/latest/#id1).\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | JS support |\n",
|
||||
"|:-----------------------------------------------------------------------------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------------------|:-----:|:------------:|:----------:|\n",
|
||||
"| [DedocFileLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.dedoc.DedocFileLoader.html) | [langchain_community](https://api.python.langchain.com/en/latest/community_api_reference.html) | ❌ | beta | ❌ |\n",
|
||||
"| [DedocPDFLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.pdf.DedocPDFLoader.html) | [langchain_community](https://api.python.langchain.com/en/latest/community_api_reference.html) | ❌ | beta | ❌ | \n",
|
||||
"| [DedocAPIFileLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.dedoc.DedocAPIFileLoader.html) | [langchain_community](https://api.python.langchain.com/en/latest/community_api_reference.html) | ❌ | beta | ❌ | \n",
|
||||
"\n",
|
||||
"\n",
|
||||
"### Loader features\n",
|
||||
"\n",
|
||||
"Methods for lazy loading and async loading are available, but in fact, document loading is executed synchronously.\n",
|
||||
"\n",
|
||||
"| Source | Document Lazy Loading | Async Support |\n",
|
||||
"|:------------------:|:---------------------:|:-------------:| \n",
|
||||
"| DedocFileLoader | ❌ | ❌ |\n",
|
||||
"| DedocPDFLoader | ❌ | ❌ | \n",
|
||||
"| DedocAPIFileLoader | ❌ | ❌ | \n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"* To access `DedocFileLoader` and `DedocPDFLoader` document loaders, you'll need to install the `dedoc` integration package.\n",
|
||||
"* To access `DedocAPIFileLoader`, you'll need to run the `Dedoc` service, e.g. `Docker` container (please see [the documentation](https://dedoc.readthedocs.io/en/latest/getting_started/installation.html#install-and-run-dedoc-using-docker) \n",
|
||||
"for more details):\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"docker pull dedocproject/dedoc\n",
|
||||
"docker run -p 1231:1231\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"`Dedoc` installation instruction is given [here](https://dedoc.readthedocs.io/en/latest/getting_started/installation.html)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "511c109d-a5c3-42ba-914e-5d1b385bc40f",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Install package\n",
|
||||
"%pip install --quiet \"dedoc[torch]\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6820c0e9-d56d-4899-b8c8-374760360e2b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "c1f98cae-71ec-4d60-87fb-96c1a76851d8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import DedocFileLoader\n",
|
||||
"\n",
|
||||
"loader = DedocFileLoader(\"./example_data/state_of_the_union.txt\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5d7bc2b3-73a0-4cd6-8014-cc7184aa9d4a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Load"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "b9097c14-6168-4726-819e-24abb9a63b13",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'\\nMadam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and t'"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"docs = loader.load()\n",
|
||||
"docs[0].page_content[:100]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9ed8bd46-0047-4ccc-b2d6-beb7761f7312",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Lazy Load"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "6ae12d7e-8105-4bbe-9031-0e968475f6bf",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and t\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"docs = loader.lazy_load()\n",
|
||||
"\n",
|
||||
"for doc in docs:\n",
|
||||
" print(doc.page_content[:100])\n",
|
||||
" break"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8772ae40-6239-4751-bb2d-b4a9415c1ad1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed information on configuring and calling `Dedoc` loaders, please see the API references: \n",
|
||||
"\n",
|
||||
"* https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.dedoc.DedocFileLoader.html\n",
|
||||
"* https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.pdf.DedocPDFLoader.html\n",
|
||||
"* https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.dedoc.DedocAPIFileLoader.html"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c4d5e702-0e21-4cad-a4c3-b9b3bff77203",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Loading any file\n",
|
||||
"\n",
|
||||
"For automatic handling of any file in a [supported format](https://dedoc.readthedocs.io/en/latest/#id1),\n",
|
||||
"`DedocFileLoader` can be useful.\n",
|
||||
"The file loader automatically detects the file type with a correct extension.\n",
|
||||
"\n",
|
||||
"File parsing process can be configured through `dedoc_kwargs` during the `DedocFileLoader` class initialization.\n",
|
||||
"Here the basic examples of some options usage are given, \n",
|
||||
"please see the documentation of `DedocFileLoader` and \n",
|
||||
"[dedoc documentation](https://dedoc.readthedocs.io/en/latest/parameters/parameters.html) \n",
|
||||
"to get more details about configuration parameters."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "de97d0ed-d6b1-44e0-b392-1f3d89c762f9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Basic example"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "50ffeeee-db12-4801-b208-7e32ea3d72ad",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'\\nMadam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \\n\\n\\n\\nLast year COVID-19 kept us apart. This year we are finally together again. \\n\\n\\n\\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \\n\\n\\n\\nWith a duty to one another to the American people to '"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import DedocFileLoader\n",
|
||||
"\n",
|
||||
"loader = DedocFileLoader(\"./example_data/state_of_the_union.txt\")\n",
|
||||
"\n",
|
||||
"docs = loader.load()\n",
|
||||
"\n",
|
||||
"docs[0].page_content[:400]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "457e5d4c-a4ee-4f31-ae74-3f75a1bbd0af",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Modes of split\n",
|
||||
"\n",
|
||||
"`DedocFileLoader` supports different types of document splitting into parts (each part is returned separately).\n",
|
||||
"For this purpose, `split` parameter is used with the following options:\n",
|
||||
"* `document` (default value): document text is returned as a single langchain `Document` object (don't split);\n",
|
||||
"* `page`: split document text into pages (works for `PDF`, `DJVU`, `PPTX`, `PPT`, `ODP`);\n",
|
||||
"* `node`: split document text into `Dedoc` tree nodes (title nodes, list item nodes, raw text nodes);\n",
|
||||
"* `line`: split document text into textual lines."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "eec54d31-ae7a-4a3c-aa10-4ae276b1e4c4",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"2"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"loader = DedocFileLoader(\n",
|
||||
" \"./example_data/layout-parser-paper.pdf\",\n",
|
||||
" split=\"page\",\n",
|
||||
" pages=\":2\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"docs = loader.load()\n",
|
||||
"\n",
|
||||
"len(docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "61e11769-4780-4f77-b10e-27db6936f226",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Handling tables\n",
|
||||
"\n",
|
||||
"`DedocFileLoader` supports tables handling when `with_tables` parameter is \n",
|
||||
"set to `True` during loader initialization (`with_tables=True` by default). \n",
|
||||
"\n",
|
||||
"Tables are not split - each table corresponds to one langchain `Document` object.\n",
|
||||
"For tables, `Document` object has additional `metadata` fields `type=\"table\"` \n",
|
||||
"and `text_as_html` with table `HTML` representation."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "bbeb2f8a-ac5e-4b59-8026-7ea3fc14c928",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"('table',\n",
|
||||
" '<table border=\"1\" style=\"border-collapse: collapse; width: 100%;\">\\n<tbody>\\n<tr>\\n<td colspan=\"1\" rowspan=\"1\">Team</td>\\n<td colspan=\"1\" rowspan=\"1\"> "Payroll (millions)"</td>\\n<td colspan=\"1\" r')"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"loader = DedocFileLoader(\"./example_data/mlb_teams_2012.csv\")\n",
|
||||
"\n",
|
||||
"docs = loader.load()\n",
|
||||
"\n",
|
||||
"docs[1].metadata[\"type\"], docs[1].metadata[\"text_as_html\"][:200]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b4a2b872-2aba-4e4c-8b2f-83a5a81ee1da",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Handling attached files\n",
|
||||
"\n",
|
||||
"`DedocFileLoader` supports attached files handling when `with_attachments` is set \n",
|
||||
"to `True` during loader initialization (`with_attachments=False` by default). \n",
|
||||
"\n",
|
||||
"Attachments are split according to the `split` parameter.\n",
|
||||
"For attachments, langchain `Document` object has an additional metadata \n",
|
||||
"field `type=\"attachment\"`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "bb9d6c1c-e24c-4979-88a0-38d54abd6332",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"('attachment',\n",
|
||||
" '\\nContent-Type\\nmultipart/mixed; boundary=\"0000000000005d654405f082adb7\"\\nDate\\nFri, 23 Dec 2022 12:08:48 -0600\\nFrom\\nMallori Harrell <mallori@unstructured.io>\\nMIME-Version\\n1.0\\nMessage-ID\\n<CAPgNNXSzLVJ-d1OCX_TjFgJU7ugtQrjFybPtAMmmYZzphxNFYg@mail.gmail.com>\\nSubject\\nFake email with attachment\\nTo\\nMallori Harrell <mallori@unstructured.io>')"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"loader = DedocFileLoader(\n",
|
||||
" \"./example_data/fake-email-attachment.eml\",\n",
|
||||
" with_attachments=True,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"docs = loader.load()\n",
|
||||
"\n",
|
||||
"docs[1].metadata[\"type\"], docs[1].page_content"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d435c3f6-703a-4064-8307-ace140de967a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Loading PDF file\n",
|
||||
"\n",
|
||||
"If you want to handle only `PDF` documents, you can use `DedocPDFLoader` with only `PDF` support.\n",
|
||||
"The loader supports the same parameters for document split, tables and attachments extraction.\n",
|
||||
"\n",
|
||||
"`Dedoc` can extract `PDF` with or without a textual layer, \n",
|
||||
"as well as automatically detect its presence and correctness.\n",
|
||||
"Several `PDF` handlers are available, you can use `pdf_with_text_layer` \n",
|
||||
"parameter to choose one of them.\n",
|
||||
"Please see [parameters description](https://dedoc.readthedocs.io/en/latest/parameters/pdf_handling.html) \n",
|
||||
"to get more details.\n",
|
||||
"\n",
|
||||
"For `PDF` without a textual layer, `Tesseract OCR` and its language packages should be installed.\n",
|
||||
"In this case, [the instruction](https://dedoc.readthedocs.io/en/latest/tutorials/add_new_language.html) can be useful."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "0103a7f3-6b5e-4444-8f4d-83dd3724a9af",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'\\n2\\n\\nZ. Shen et al.\\n\\n37], layout detection [38, 22], table detection [26], and scene text detection [4].\\n\\nA generalized learning-based framework dramatically reduces the need for the\\n\\nmanual specification of complicated rules, which is the status quo with traditional\\n\\nmethods. DL has the potential to transform DIA pipelines and benefit a broad\\n\\nspectrum of large-scale document digitization projects.\\n'"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import DedocPDFLoader\n",
|
||||
"\n",
|
||||
"loader = DedocPDFLoader(\n",
|
||||
" \"./example_data/layout-parser-paper.pdf\", pdf_with_text_layer=\"true\", pages=\"2:2\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"docs = loader.load()\n",
|
||||
"\n",
|
||||
"docs[0].page_content[:400]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "13061995-1805-40c2-a77a-a6cd80999e20",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Dedoc API\n",
|
||||
"\n",
|
||||
"If you want to get up and running with less set up, you can use `Dedoc` as a service.\n",
|
||||
"**`DedocAPIFileLoader` can be used without installation of `dedoc` library.**\n",
|
||||
"The loader supports the same parameters as `DedocFileLoader` and\n",
|
||||
"also automatically detects input file types.\n",
|
||||
"\n",
|
||||
"To use `DedocAPIFileLoader`, you should run the `Dedoc` service, e.g. `Docker` container (please see [the documentation](https://dedoc.readthedocs.io/en/latest/getting_started/installation.html#install-and-run-dedoc-using-docker) \n",
|
||||
"for more details):\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"docker pull dedocproject/dedoc\n",
|
||||
"docker run -p 1231:1231\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Please do not use our demo URL `https://dedoc-readme.hf.space` in your code."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "211fc0b5-6080-4974-a6c1-f982bafd87d6",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'\\nMadam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \\n\\n\\n\\nLast year COVID-19 kept us apart. This year we are finally together again. \\n\\n\\n\\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \\n\\n\\n\\nWith a duty to one another to the American people to '"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import DedocAPIFileLoader\n",
|
||||
"\n",
|
||||
"loader = DedocAPIFileLoader(\n",
|
||||
" \"./example_data/state_of_the_union.txt\",\n",
|
||||
" url=\"https://dedoc-readme.hf.space\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"docs = loader.load()\n",
|
||||
"\n",
|
||||
"docs[0].page_content[:400]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "faaff475-5209-436f-bcde-97d58daed05c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.19"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -162,7 +162,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!poetry run pip install --upgrade langchain-openai tiktoken chromadb hnswlib"
|
||||
"!poetry run pip install --upgrade langchain-openai tiktoken langchain-chroma hnswlib"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -211,7 +211,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import RetrievalQA\n",
|
||||
"from langchain_community.vectorstores.chroma import Chroma\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_openai import OpenAI, OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"embedding = OpenAIEmbeddings()\n",
|
||||
@@ -365,7 +365,7 @@
|
||||
"source": [
|
||||
"from langchain.chains.query_constructor.schema import AttributeInfo\n",
|
||||
"from langchain.retrievers.self_query.base import SelfQueryRetriever\n",
|
||||
"from langchain_community.vectorstores.chroma import Chroma\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"\n",
|
||||
"EXCLUDE_KEYS = [\"id\", \"xpath\", \"structure\"]\n",
|
||||
"metadata_field_info = [\n",
|
||||
@@ -540,7 +540,7 @@
|
||||
"source": [
|
||||
"from langchain.retrievers.multi_vector import MultiVectorRetriever, SearchType\n",
|
||||
"from langchain.storage import InMemoryStore\n",
|
||||
"from langchain_community.vectorstores.chroma import Chroma\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"# The vectorstore to use to index the child chunks\n",
|
||||
|
||||
@@ -316,7 +316,7 @@
|
||||
"id": "eb00a625-a6c9-4766-b3f0-eaed024851c9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Return SQARQL query\n",
|
||||
"## Return SPARQL query\n",
|
||||
"You can return the SPARQL query step from the Sparql QA Chain using the `return_sparql_query` parameter"
|
||||
]
|
||||
},
|
||||
@@ -358,7 +358,7 @@
|
||||
"\u001b[32;1m\u001b[1;3m[]\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"SQARQL query: PREFIX foaf: <http://xmlns.com/foaf/0.1/>\n",
|
||||
"SPARQL query: PREFIX foaf: <http://xmlns.com/foaf/0.1/>\n",
|
||||
"SELECT ?workHomepage\n",
|
||||
"WHERE {\n",
|
||||
" ?person foaf:name \"Tim Berners-Lee\" .\n",
|
||||
@@ -370,7 +370,7 @@
|
||||
],
|
||||
"source": [
|
||||
"result = chain(\"What is Tim Berners-Lee's work homepage?\")\n",
|
||||
"print(f\"SQARQL query: {result['sparql_query']}\")\n",
|
||||
"print(f\"SPARQL query: {result['sparql_query']}\")\n",
|
||||
"print(f\"Final answer: {result['result']}\")"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -33,7 +33,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet transformers --quiet"
|
||||
"%pip install --upgrade --quiet transformers"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -194,12 +194,37 @@
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e4a1e0f1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For certain requirements, there is an option to pass the IBM's [`APIClient`](https://ibm.github.io/watsonx-ai-python-sdk/base.html#apiclient) object into the `WatsonxLLM` class."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "4b28afc1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from ibm_watsonx_ai import APIClient\n",
|
||||
"\n",
|
||||
"api_client = APIClient(...)\n",
|
||||
"\n",
|
||||
"watsonx_llm = WatsonxLLM(\n",
|
||||
" model_id=\"ibm/granite-13b-instruct-v2\",\n",
|
||||
" watsonx_client=api_client,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7c4a632b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can also pass the IBM's [`ModelInference`](https://ibm.github.io/watsonx-ai-python-sdk/fm_model_inference.html) object into `WatsonxLLM` class."
|
||||
"You can also pass the IBM's [`ModelInference`](https://ibm.github.io/watsonx-ai-python-sdk/fm_model_inference.html) object into the `WatsonxLLM` class."
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -1,10 +1,21 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"cell_type": "raw",
|
||||
"id": "67db2992",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Ollama\n",
|
||||
"---\n",
|
||||
"sidebar_label: Ollama\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9597802c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# OllamaLLM\n",
|
||||
"\n",
|
||||
":::caution\n",
|
||||
"You are currently on a page documenting the use of Ollama models as [text completion models](/docs/concepts/#llms). Many popular Ollama models are [chat completion models](/docs/concepts/#chat-models).\n",
|
||||
@@ -12,21 +23,35 @@
|
||||
"You may be looking for [this page instead](/docs/integrations/chat/ollama/).\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"[Ollama](https://ollama.ai/) allows you to run open-source large language models, such as Llama 2, locally.\n",
|
||||
"\n",
|
||||
"Ollama bundles model weights, configuration, and data into a single package, defined by a Modelfile. \n",
|
||||
"\n",
|
||||
"It optimizes setup and configuration details, including GPU usage.\n",
|
||||
"\n",
|
||||
"For a complete list of supported models and model variants, see the [Ollama model library](https://github.com/ollama/ollama#model-library).\n",
|
||||
"This page goes over how to use LangChain to interact with `Ollama` models.\n",
|
||||
"\n",
|
||||
"## Installation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "59c710c4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# install package\n",
|
||||
"%pip install -U langchain-ollama"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0ee90032",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"First, follow [these instructions](https://github.com/ollama/ollama) to set up and run a local Ollama instance:\n",
|
||||
"First, follow [these instructions](https://github.com/jmorganca/ollama) to set up and run a local Ollama instance:\n",
|
||||
"\n",
|
||||
"* [Download](https://ollama.ai/download) and install Ollama onto the available supported platforms (including Windows Subsystem for Linux)\n",
|
||||
"* Fetch available LLM model via `ollama pull <name-of-model>`\n",
|
||||
" * View a list of available models via the [model library](https://ollama.ai/library) and pull to use locally with the command `ollama pull llama3`\n",
|
||||
" * View a list of available models via the [model library](https://ollama.ai/library)\n",
|
||||
" * e.g., `ollama pull llama3`\n",
|
||||
"* This will download the default tagged version of the model. Typically, the default points to the latest, smallest sized-parameter model.\n",
|
||||
"\n",
|
||||
"> On Mac, the models will be download to `~/.ollama/models`\n",
|
||||
@@ -34,194 +59,67 @@
|
||||
"> On Linux (or WSL), the models will be stored at `/usr/share/ollama/.ollama/models`\n",
|
||||
"\n",
|
||||
"* Specify the exact version of the model of interest as such `ollama pull vicuna:13b-v1.5-16k-q4_0` (View the [various tags for the `Vicuna`](https://ollama.ai/library/vicuna/tags) model in this instance)\n",
|
||||
"* To view all pulled models on your local instance, use `ollama list`\n",
|
||||
"* To view all pulled models, use `ollama list`\n",
|
||||
"* To chat directly with a model from the command line, use `ollama run <name-of-model>`\n",
|
||||
"* View the [Ollama documentation](https://github.com/ollama/ollama) for more commands. \n",
|
||||
"* Run `ollama help` in the terminal to see available commands too.\n",
|
||||
"* View the [Ollama documentation](https://github.com/jmorganca/ollama) for more commands. Run `ollama help` in the terminal to see available commands too.\n",
|
||||
"\n",
|
||||
"## Usage\n",
|
||||
"\n",
|
||||
"You can see a full list of supported parameters on the [API reference page](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.ollama.Ollama.html).\n",
|
||||
"\n",
|
||||
"If you are using a LLaMA `chat` model (e.g., `ollama pull llama3`) then you can use the `ChatOllama` [interface](https://python.langchain.com/v0.2/docs/integrations/chat/ollama/).\n",
|
||||
"\n",
|
||||
"This includes [special tokens](https://ollama.com/library/llama3) for system message and user input.\n",
|
||||
"\n",
|
||||
"## Interacting with Models \n",
|
||||
"\n",
|
||||
"Here are a few ways to interact with pulled local models\n",
|
||||
"\n",
|
||||
"#### In the terminal:\n",
|
||||
"\n",
|
||||
"* All of your local models are automatically served on `localhost:11434`\n",
|
||||
"* Run `ollama run <name-of-model>` to start interacting via the command line directly\n",
|
||||
"\n",
|
||||
"#### Via the API\n",
|
||||
"\n",
|
||||
"Send an `application/json` request to the API endpoint of Ollama to interact.\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"curl http://localhost:11434/api/generate -d '{\n",
|
||||
" \"model\": \"llama3\",\n",
|
||||
" \"prompt\":\"Why is the sky blue?\"\n",
|
||||
"}'\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"See the Ollama [API documentation](https://github.com/ollama/ollama/blob/main/docs/api.md) for all endpoints.\n",
|
||||
"\n",
|
||||
"#### via LangChain\n",
|
||||
"\n",
|
||||
"See a typical basic example of using [Ollama chat model](https://python.langchain.com/v0.2/docs/integrations/chat/ollama/) in your LangChain application."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install langchain-community"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"Here's one:\\n\\nWhy don't scientists trust atoms?\\n\\nBecause they make up everything!\\n\\nHope that made you smile! Do you want to hear another one?\""
|
||||
]
|
||||
},
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.llms import Ollama\n",
|
||||
"\n",
|
||||
"llm = Ollama(\n",
|
||||
" model=\"llama3\"\n",
|
||||
") # assuming you have Ollama installed and have llama3 model pulled with `ollama pull llama3 `\n",
|
||||
"\n",
|
||||
"llm.invoke(\"Tell me a joke\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"To stream tokens, use the `.stream(...)` method:"
|
||||
"## Usage"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"id": "035dea0f",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"S\n",
|
||||
"ure\n",
|
||||
",\n",
|
||||
" here\n",
|
||||
"'\n",
|
||||
"s\n",
|
||||
" one\n",
|
||||
":\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Why\n",
|
||||
" don\n",
|
||||
"'\n",
|
||||
"t\n",
|
||||
" scient\n",
|
||||
"ists\n",
|
||||
" trust\n",
|
||||
" atoms\n",
|
||||
"?\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"B\n",
|
||||
"ecause\n",
|
||||
" they\n",
|
||||
" make\n",
|
||||
" up\n",
|
||||
" everything\n",
|
||||
"!\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"I\n",
|
||||
" hope\n",
|
||||
" you\n",
|
||||
" found\n",
|
||||
" that\n",
|
||||
" am\n",
|
||||
"using\n",
|
||||
"!\n",
|
||||
" Do\n",
|
||||
" you\n",
|
||||
" want\n",
|
||||
" to\n",
|
||||
" hear\n",
|
||||
" another\n",
|
||||
" one\n",
|
||||
"?\n",
|
||||
"\n"
|
||||
]
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'A great start!\\n\\nLangChain is a type of AI model that uses language processing techniques to generate human-like text based on input prompts or chains of reasoning. In other words, it can have a conversation with humans, understanding the context and responding accordingly.\\n\\nHere\\'s a possible breakdown:\\n\\n* \"Lang\" likely refers to its focus on natural language processing (NLP) and linguistic analysis.\\n* \"Chain\" suggests that LangChain is designed to generate text in response to a series of connected ideas or prompts, rather than simply generating random text.\\n\\nSo, what do you think LangChain\\'s capabilities might be?'"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"query = \"Tell me a joke\"\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_ollama.llms import OllamaLLM\n",
|
||||
"\n",
|
||||
"for chunks in llm.stream(query):\n",
|
||||
" print(chunks)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"To learn more about the LangChain Expressive Language and the available methods on an LLM, see the [LCEL Interface](/docs/concepts#interface)"
|
||||
"template = \"\"\"Question: {question}\n",
|
||||
"\n",
|
||||
"Answer: Let's think step by step.\"\"\"\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_template(template)\n",
|
||||
"\n",
|
||||
"model = OllamaLLM(model=\"llama3\")\n",
|
||||
"\n",
|
||||
"chain = prompt | model\n",
|
||||
"\n",
|
||||
"chain.invoke({\"question\": \"What is LangChain?\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e2d85456",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Multi-modal\n",
|
||||
"\n",
|
||||
"Ollama has support for multi-modal LLMs, such as [bakllava](https://ollama.ai/library/bakllava) and [llava](https://ollama.ai/library/llava).\n",
|
||||
"Ollama has support for multi-modal LLMs, such as [bakllava](https://ollama.com/library/bakllava) and [llava](https://ollama.com/library/llava).\n",
|
||||
"\n",
|
||||
"`ollama pull bakllava`\n",
|
||||
" ollama pull bakllava\n",
|
||||
"\n",
|
||||
"Be sure to update Ollama so that you have the most recent version to support multi-modal."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.llms import Ollama\n",
|
||||
"\n",
|
||||
"bakllava = Ollama(model=\"bakllava\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "4043e202",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -279,7 +177,8 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": 4,
|
||||
"id": "79aaf863",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -288,38 +187,24 @@
|
||||
"'90%'"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_with_image_context = bakllava.bind(images=[image_b64])\n",
|
||||
"from langchain_ollama import OllamaLLM\n",
|
||||
"\n",
|
||||
"llm = OllamaLLM(model=\"bakllava\")\n",
|
||||
"\n",
|
||||
"llm_with_image_context = llm.bind(images=[image_b64])\n",
|
||||
"llm_with_image_context.invoke(\"What is the dollar based gross retention rate:\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Concurrency Features\n",
|
||||
"\n",
|
||||
"Ollama supports concurrency inference for a single model, and or loading multiple models simulatenously (at least [version 0.1.33](https://github.com/ollama/ollama/releases)).\n",
|
||||
"\n",
|
||||
"Start the Ollama server with:\n",
|
||||
"\n",
|
||||
"* `OLLAMA_NUM_PARALLEL`: Handle multiple requests simultaneously for a single model\n",
|
||||
"* `OLLAMA_MAX_LOADED_MODELS`: Load multiple models simultaneously\n",
|
||||
"\n",
|
||||
"Example: `OLLAMA_NUM_PARALLEL=4 OLLAMA_MAX_LOADED_MODELS=4 ollama serve`\n",
|
||||
"\n",
|
||||
"Learn more about configuring Ollama server in [the official guide](https://github.com/ollama/ollama/blob/main/docs/faq.md#how-do-i-configure-ollama-server)."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"display_name": "Python 3.11.1 64-bit",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
@@ -333,9 +218,14 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.8"
|
||||
"version": "3.12.3"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "e971737741ff4ec9aff7dc6155a1060a59a8a6d52c757dbbe66bf8ee389494b1"
|
||||
}
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
|
||||
@@ -88,6 +88,7 @@
|
||||
" \"max_tokens_to_generate\": 1000,\n",
|
||||
" \"temperature\": 0.01,\n",
|
||||
" \"select_expert\": \"llama-2-7b-chat-hf\",\n",
|
||||
" \"process_prompt\": False,\n",
|
||||
" # \"stop_sequences\": '\\\"sequence1\\\",\\\"sequence2\\\"',\n",
|
||||
" # \"repetition_penalty\": 1.0,\n",
|
||||
" # \"top_k\": 50,\n",
|
||||
@@ -116,6 +117,7 @@
|
||||
" \"max_tokens_to_generate\": 1000,\n",
|
||||
" \"temperature\": 0.01,\n",
|
||||
" \"select_expert\": \"llama-2-7b-chat-hf\",\n",
|
||||
" \"process_prompt\": False,\n",
|
||||
" # \"stop_sequences\": '\\\"sequence1\\\",\\\"sequence2\\\"',\n",
|
||||
" # \"repetition_penalty\": 1.0,\n",
|
||||
" # \"top_k\": 50,\n",
|
||||
@@ -175,9 +177,7 @@
|
||||
"import os\n",
|
||||
"\n",
|
||||
"sambastudio_base_url = \"<Your SambaStudio environment URL>\"\n",
|
||||
"sambastudio_base_uri = (\n",
|
||||
" \"<Your SambaStudio endpoint base URI>\" # optional, \"api/predict/nlp\" set as default\n",
|
||||
")\n",
|
||||
"sambastudio_base_uri = \"<Your SambaStudio endpoint base URI>\" # optional, \"api/predict/generic\" set as default\n",
|
||||
"sambastudio_project_id = \"<Your SambaStudio project id>\"\n",
|
||||
"sambastudio_endpoint_id = \"<Your SambaStudio endpoint id>\"\n",
|
||||
"sambastudio_api_key = \"<Your SambaStudio endpoint API key>\"\n",
|
||||
@@ -271,6 +271,7 @@
|
||||
" \"do_sample\": True,\n",
|
||||
" \"max_tokens_to_generate\": 1000,\n",
|
||||
" \"temperature\": 0.01,\n",
|
||||
" \"process_prompt\": False,\n",
|
||||
" \"select_expert\": \"Meta-Llama-3-8B-Instruct\",\n",
|
||||
" # \"repetition_penalty\": 1.0,\n",
|
||||
" # \"top_k\": 50,\n",
|
||||
|
||||
@@ -0,0 +1,325 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a283d2fd-e26e-4811-a486-d3cf0ecf6749",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Couchbase\n",
|
||||
"> Couchbase is an award-winning distributed NoSQL cloud database that delivers unmatched versatility, performance, scalability, and financial value for all of your cloud, mobile, AI, and edge computing applications. Couchbase embraces AI with coding assistance for developers and vector search for their applications.\n",
|
||||
"\n",
|
||||
"This notebook goes over how to use the `CouchbaseChatMessageHistory` class to store the chat message history in a Couchbase cluster\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ff868a6c-3e17-4c3d-8d32-67b01f4d7bcc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Set Up Couchbase Cluster\n",
|
||||
"To run this demo, you need a Couchbase Cluster. \n",
|
||||
"\n",
|
||||
"You can work with both [Couchbase Capella](https://www.couchbase.com/products/capella/) and your self-managed Couchbase Server."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "41fa85e7-6968-45e4-a445-de305d80f332",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Install Dependencies\n",
|
||||
"`CouchbaseChatMessageHistory` lives inside the `langchain-couchbase` package. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "b744ca05-b8c6-458c-91df-f50ca2c20b3c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet langchain-couchbase"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "41f29205-6452-493b-ba18-8a3b006bcca4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create Couchbase Connection Object\n",
|
||||
"We create a connection to the Couchbase cluster initially and then pass the cluster object to the Vector Store. \n",
|
||||
"\n",
|
||||
"Here, we are connecting using the username and password. You can also connect using any other supported way to your cluster. \n",
|
||||
"\n",
|
||||
"For more information on connecting to the Couchbase cluster, please check the [Python SDK documentation](https://docs.couchbase.com/python-sdk/current/hello-world/start-using-sdk.html#connect)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "f394908e-f5fe-408a-84d7-b97fdebcfa26",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"COUCHBASE_CONNECTION_STRING = (\n",
|
||||
" \"couchbase://localhost\" # or \"couchbases://localhost\" if using TLS\n",
|
||||
")\n",
|
||||
"DB_USERNAME = \"Administrator\"\n",
|
||||
"DB_PASSWORD = \"Password\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "ad4dce21-d80c-465a-b709-fd366ba5ce35",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from datetime import timedelta\n",
|
||||
"\n",
|
||||
"from couchbase.auth import PasswordAuthenticator\n",
|
||||
"from couchbase.cluster import Cluster\n",
|
||||
"from couchbase.options import ClusterOptions\n",
|
||||
"\n",
|
||||
"auth = PasswordAuthenticator(DB_USERNAME, DB_PASSWORD)\n",
|
||||
"options = ClusterOptions(auth)\n",
|
||||
"cluster = Cluster(COUCHBASE_CONNECTION_STRING, options)\n",
|
||||
"\n",
|
||||
"# Wait until the cluster is ready for use.\n",
|
||||
"cluster.wait_until_ready(timedelta(seconds=5))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e3d0210c-e2e6-437a-86f3-7397a1899fef",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We will now set the bucket, scope, and collection names in the Couchbase cluster that we want to use for storing the message history.\n",
|
||||
"\n",
|
||||
"Note that the bucket, scope, and collection need to exist before using them to store the message history."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "e8c7f846-a5c4-4465-a40e-4a9a23ac71bd",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"BUCKET_NAME = \"langchain-testing\"\n",
|
||||
"SCOPE_NAME = \"_default\"\n",
|
||||
"COLLECTION_NAME = \"conversational_cache\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "283959e1-6af7-4768-9211-5b0facc6ef65",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Usage\n",
|
||||
"In order to store the messages, you need the following:\n",
|
||||
"- Couchbase Cluster object: Valid connection to the Couchbase cluster\n",
|
||||
"- bucket_name: Bucket in cluster to store the chat message history\n",
|
||||
"- scope_name: Scope in bucket to store the message history\n",
|
||||
"- collection_name: Collection in scope to store the message history\n",
|
||||
"- session_id: Unique identifier for the session\n",
|
||||
"\n",
|
||||
"Optionally you can configure the following:\n",
|
||||
"- session_id_key: Field in the chat message documents to store the `session_id`\n",
|
||||
"- message_key: Field in the chat message documents to store the message content\n",
|
||||
"- create_index: Used to specify if the index needs to be created on the collection. By default, an index is created on the `message_key` and the `session_id_key` of the documents"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "43c3b2d5-aae2-44a9-9e9f-f10adf054cfa",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_couchbase.chat_message_histories import CouchbaseChatMessageHistory\n",
|
||||
"\n",
|
||||
"message_history = CouchbaseChatMessageHistory(\n",
|
||||
" cluster=cluster,\n",
|
||||
" bucket_name=BUCKET_NAME,\n",
|
||||
" scope_name=SCOPE_NAME,\n",
|
||||
" collection_name=COLLECTION_NAME,\n",
|
||||
" session_id=\"test-session\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"message_history.add_user_message(\"hi!\")\n",
|
||||
"\n",
|
||||
"message_history.add_ai_message(\"how are you doing?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "e7e348ef-79e9-481c-aeef-969ae03dea6a",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[HumanMessage(content='hi!'), AIMessage(content='how are you doing?')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"message_history.messages"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c8b942a7-93fa-4cd9-8414-d047135c2733",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"The chat message history class can be used with [LCEL Runnables](https://python.langchain.com/v0.2/docs/how_to/message_history/)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "8a9f0d91-d1d6-481d-8137-ea11229f485a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
|
||||
"from langchain_core.runnables.history import RunnableWithMessageHistory\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "946d45aa-5a61-49ae-816b-1c3949c56d9a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\"system\", \"You are a helpful assistant.\"),\n",
|
||||
" MessagesPlaceholder(variable_name=\"history\"),\n",
|
||||
" (\"human\", \"{question}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Create the LCEL runnable\n",
|
||||
"chain = prompt | ChatOpenAI()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "20dfd838-b549-42ed-b3ba-ac005f7e024c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain_with_history = RunnableWithMessageHistory(\n",
|
||||
" chain,\n",
|
||||
" lambda session_id: CouchbaseChatMessageHistory(\n",
|
||||
" cluster=cluster,\n",
|
||||
" bucket_name=BUCKET_NAME,\n",
|
||||
" scope_name=SCOPE_NAME,\n",
|
||||
" collection_name=COLLECTION_NAME,\n",
|
||||
" session_id=session_id,\n",
|
||||
" ),\n",
|
||||
" input_messages_key=\"question\",\n",
|
||||
" history_messages_key=\"history\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "17bd09f4-896d-433d-bb9a-369a06e7aa8a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# This is where we configure the session id\n",
|
||||
"config = {\"configurable\": {\"session_id\": \"testing\"}}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "4bda1096-2fc2-40d7-a046-0d5d8e3a8f75",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Hello Bob! How can I assist you today?', response_metadata={'token_usage': {'completion_tokens': 10, 'prompt_tokens': 22, 'total_tokens': 32}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-a0f8a29e-ddf4-4e06-a1fe-cf8c325a2b72-0', usage_metadata={'input_tokens': 22, 'output_tokens': 10, 'total_tokens': 32})"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain_with_history.invoke({\"question\": \"Hi! I'm bob\"}, config=config)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "1cfb31da-51bb-4c5f-909a-b7118b0ae08d",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Your name is Bob.', response_metadata={'token_usage': {'completion_tokens': 5, 'prompt_tokens': 43, 'total_tokens': 48}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-f764a9eb-999e-4042-96b6-fe47b7ae4779-0', usage_metadata={'input_tokens': 43, 'output_tokens': 5, 'total_tokens': 48})"
|
||||
]
|
||||
},
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain_with_history.invoke({\"question\": \"Whats my name\"}, config=config)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.13"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -89,3 +89,23 @@ set_llm_cache(
|
||||
)
|
||||
)
|
||||
```
|
||||
|
||||
## Chat Message History
|
||||
Use Couchbase as the storage for your chat messages.
|
||||
|
||||
See a [usage example](/docs/integrations/memory/couchbase_chat_message_history).
|
||||
|
||||
To use the chat message history in your applications:
|
||||
```python
|
||||
from langchain_couchbase.chat_message_histories import CouchbaseChatMessageHistory
|
||||
|
||||
message_history = CouchbaseChatMessageHistory(
|
||||
cluster=cluster,
|
||||
bucket_name=BUCKET_NAME,
|
||||
scope_name=SCOPE_NAME,
|
||||
collection_name=COLLECTION_NAME,
|
||||
session_id="test-session",
|
||||
)
|
||||
|
||||
message_history.add_user_message("hi!")
|
||||
```
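To read the stored messages back, or to reset a session, a minimal sketch (assuming the `message_history` object created above; `clear()` is the standard `BaseChatMessageHistory` reset method):

```python
# Inspect everything stored for this session
print(message_history.messages)

# Remove all messages for this session once they are no longer needed
message_history.clear()
```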
|
||||
56
docs/docs/integrations/providers/dedoc.mdx
Normal file
@@ -0,0 +1,56 @@
|
||||
# Dedoc
|
||||
|
||||
>[Dedoc](https://dedoc.readthedocs.io) is an [open-source](https://github.com/ispras/dedoc)
|
||||
library/service that extracts texts, tables, attached files and document structure
|
||||
(e.g., titles, list items, etc.) from files of various formats.
|
||||
|
||||
`Dedoc` supports `DOCX`, `XLSX`, `PPTX`, `EML`, `HTML`, `PDF`, images and more.
|
||||
A full list of supported formats can be found [here](https://dedoc.readthedocs.io/en/latest/#id1).
|
||||
|
||||
## Installation and Setup
|
||||
|
||||
### Dedoc library
|
||||
|
||||
You can install `Dedoc` using `pip`.
In this case, you will also need to install its dependencies;
please see the [installation guide](https://dedoc.readthedocs.io/en/latest/getting_started/installation.html)
for more information.
|
||||
|
||||
```bash
|
||||
pip install dedoc
|
||||
```
|
||||
|
||||
### Dedoc API
|
||||
|
||||
If you are going to use the `Dedoc` API, you don't need to install the `dedoc` library.
In this case, you should run the `Dedoc` service, e.g. as a `Docker` container (please see
[the documentation](https://dedoc.readthedocs.io/en/latest/getting_started/installation.html#install-and-run-dedoc-using-docker)
for more details):
|
||||
|
||||
```bash
|
||||
docker pull dedocproject/dedoc
|
||||
docker run -p 1231:1231 dedocproject/dedoc
|
||||
```
|
||||
|
||||
## Document Loader
|
||||
|
||||
* For handling files of any format (supported by `Dedoc`), you can use `DedocFileLoader`:
|
||||
|
||||
```python
|
||||
from langchain_community.document_loaders import DedocFileLoader
|
||||
```
|
||||
|
||||
* For handling PDF files (with or without a textual layer), you can use `DedocPDFLoader`:
|
||||
|
||||
```python
|
||||
from langchain_community.document_loaders import DedocPDFLoader
|
||||
```
|
||||
|
||||
* For handling files of any format without installing the library,
you can use the `Dedoc API` with `DedocAPIFileLoader`:
|
||||
|
||||
```python
|
||||
from langchain_community.document_loaders import DedocAPIFileLoader
|
||||
```
|
||||
|
||||
Please see a [usage example](/docs/integrations/document_loaders/dedoc) for more details.
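As a quick illustration, a minimal sketch of loading a single file with `DedocFileLoader` (the file name `example.docx` is a placeholder; any format supported by `Dedoc` should work):

```python
from langchain_community.document_loaders import DedocFileLoader

# Parse the file with Dedoc and wrap the result in LangChain Document objects
loader = DedocFileLoader("example.docx")
docs = loader.load()

print(docs[0].page_content[:200])
```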
|
||||
@@ -38,7 +38,7 @@ import getpass
|
||||
if "PREMAI_API_KEY" not in os.environ:
|
||||
os.environ["PREMAI_API_KEY"] = getpass.getpass("PremAI API Key:")
|
||||
|
||||
chat = ChatPremAI(project_id=8)
|
||||
chat = ChatPremAI(project_id=1234, model_name="gpt-4o")
|
||||
```
|
||||
|
||||
### Chat Completions
|
||||
@@ -50,7 +50,8 @@ The first one will give us a static result. Whereas the second one will stream t
|
||||
```python
|
||||
human_message = HumanMessage(content="Who are you?")
|
||||
|
||||
chat.invoke([human_message])
|
||||
response = chat.invoke([human_message])
|
||||
print(response.content)
|
||||
```
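The streaming variant mentioned above can be sketched as follows (assuming the same `chat` object; this uses the generic LangChain `stream` interface rather than anything PremAI-specific):

```python
# Stream the answer chunk by chunk instead of waiting for the full response
for chunk in chat.stream("Who are you?"):
    print(chunk.content, end="", flush=True)
```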
|
||||
|
||||
You can provide a system prompt like this:
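A minimal sketch, assuming the `chat` object created above (the prompt text is only illustrative):

```python
from langchain_core.messages import HumanMessage, SystemMessage

system_message = SystemMessage(content="You are a friendly assistant.")
human_message = HumanMessage(content="Who are you?")

response = chat.invoke([system_message, human_message])
print(response.content)
```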
|
||||
@@ -84,8 +85,8 @@ Repositories are also supported in langchain premai. Here is how you can do it.
|
||||
|
||||
```python
|
||||
|
||||
query = "what is the diameter of individual Galaxy"
|
||||
repository_ids = [1991, ]
|
||||
query = "Which models are used for dense retrieval"
|
||||
repository_ids = [1985,]
|
||||
repositories = dict(
|
||||
ids=repository_ids,
|
||||
similarity_threshold=0.3,
|
||||
@@ -100,6 +101,8 @@ First we start by defining our repository with some repository ids. Make sure th
|
||||
Now, we connect the repository with our chat object to invoke RAG-based generations.
|
||||
|
||||
```python
|
||||
import json
|
||||
|
||||
response = chat.invoke(query, max_tokens=100, repositories=repositories)
|
||||
|
||||
print(response.content)
|
||||
@@ -109,25 +112,22 @@ print(json.dumps(response.response_metadata, indent=4))
|
||||
This is what the output looks like:
|
||||
|
||||
```bash
|
||||
The diameters of individual galaxies range from 80,000-150,000 light-years.
|
||||
Dense retrieval models typically include:
|
||||
|
||||
1. **BERT-based Models**: Such as DPR (Dense Passage Retrieval) which uses BERT for encoding queries and passages.
|
||||
2. **ColBERT**: A model that combines BERT with late interaction mechanisms.
|
||||
3. **ANCE (Approximate Nearest Neighbor Negative Contrastive Estimation)**: Uses BERT and focuses on efficient retrieval.
|
||||
4. **TCT-ColBERT**: A variant of ColBERT that uses a two-tower
|
||||
{
|
||||
"document_chunks": [
|
||||
{
|
||||
"repository_id": 19xx,
|
||||
"document_id": 13xx,
|
||||
"chunk_id": 173xxx,
|
||||
"document_name": "Kegy 202 Chapter 2",
|
||||
"similarity_score": 0.586126983165741,
|
||||
"content": "n thousands\n of light-years. The diameters of individual\n galaxies range from 80,000-150,000 light\n "
|
||||
},
|
||||
{
|
||||
"repository_id": 19xx,
|
||||
"document_id": 13xx,
|
||||
"chunk_id": 173xxx,
|
||||
"document_name": "Kegy 202 Chapter 2",
|
||||
"similarity_score": 0.4815782308578491,
|
||||
"content": " for development of galaxies. A galaxy contains\n a large number of stars. Galaxies spread over\n vast distances that are measured in thousands\n "
|
||||
},
|
||||
"repository_id": 1985,
|
||||
"document_id": 1306,
|
||||
"chunk_id": 173899,
|
||||
"document_name": "[D] Difference between sparse and dense informati\u2026",
|
||||
"similarity_score": 0.3209080100059509,
|
||||
"content": "with the difference or anywhere\nwhere I can read about it?\n\n\n 17 9\n\n\n u/ScotiabankCanada \u2022 Promoted\n\n\n Accelerate your study permit process\n with Scotiabank's Student GIC\n Program. We're here to help you tur\u2026\n\n\n startright.scotiabank.com Learn More\n\n\n Add a Comment\n\n\nSort by: Best\n\n\n DinosParkour \u2022 1y ago\n\n\n Dense Retrieval (DR) m"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
@@ -264,4 +264,164 @@ doc_result[:5]
|
||||
0.0008162345038726926,
|
||||
-0.004556538071483374,
|
||||
0.02918623760342598,
|
||||
-0.02547479420900345]
|
||||
-0.02547479420900345]
|
||||
|
||||
## Tool/Function Calling
|
||||
|
||||
LangChain PremAI supports tool/function calling. Tool/function calling allows a model to respond to a given prompt by generating output that matches a user-defined schema.
|
||||
|
||||
- You can learn all about tool calling in detail [in our documentation here](https://docs.premai.io/get-started/function-calling).
- You can learn more about LangChain tool calling in [this part of the docs](https://python.langchain.com/v0.1/docs/modules/model_io/chat/function_calling).
|
||||
|
||||
**NOTE:**
|
||||
|
||||
> The current version of LangChain ChatPremAI does not support function/tool calling with streaming. Streaming support for function calling is coming soon.
|
||||
|
||||
### Passing tools to model
|
||||
|
||||
In order to pass tools and let the LLM choose the tool it needs to call, we need to pass a tool schema. A tool schema is the function definition along with a proper docstring describing what the function does, what each argument is, etc. Below are some simple arithmetic functions with their schemas.
|
||||
|
||||
**NOTE:**
|
||||
> When defining a function/tool schema, do not forget to add information about the function arguments, otherwise an error will be thrown.
|
||||
|
||||
```python
|
||||
from langchain_core.tools import tool
|
||||
from langchain_core.pydantic_v1 import BaseModel, Field
|
||||
|
||||
# Define the schema for function arguments
|
||||
class OperationInput(BaseModel):
|
||||
a: int = Field(description="First number")
|
||||
b: int = Field(description="Second number")
|
||||
|
||||
|
||||
# Now define the function where schema for argument will be OperationInput
|
||||
@tool("add", args_schema=OperationInput, return_direct=True)
|
||||
def add(a: int, b: int) -> int:
|
||||
"""Adds a and b.
|
||||
|
||||
Args:
|
||||
a: first int
|
||||
b: second int
|
||||
"""
|
||||
return a + b
|
||||
|
||||
|
||||
@tool("multiply", args_schema=OperationInput, return_direct=True)
|
||||
def multiply(a: int, b: int) -> int:
|
||||
"""Multiplies a and b.
|
||||
|
||||
Args:
|
||||
a: first int
|
||||
b: second int
|
||||
"""
|
||||
return a * b
|
||||
```
|
||||
|
||||
### Binding tool schemas with our LLM
|
||||
|
||||
We will now use the `bind_tools` method to convert the functions above into "tools" and bind them to the model. This means this tool information will be passed to the model every time we invoke it.
|
||||
|
||||
```python
|
||||
tools = [add, multiply]
|
||||
llm_with_tools = chat.bind_tools(tools)
|
||||
```
|
||||
|
||||
After this, we get a response from the model, which is now bound to the tools.
|
||||
|
||||
```python
|
||||
query = "What is 3 * 12? Also, what is 11 + 49?"
|
||||
|
||||
messages = [HumanMessage(query)]
|
||||
ai_msg = llm_with_tools.invoke(messages)
|
||||
```
|
||||
|
||||
As we can see, when our chat model is bound to tools, it calls the correct set of tools sequentially, based on the given prompt.
|
||||
|
||||
```python
|
||||
ai_msg.tool_calls
|
||||
```
|
||||
**Output**
|
||||
|
||||
```python
|
||||
[{'name': 'multiply',
|
||||
'args': {'a': 3, 'b': 12},
|
||||
'id': 'call_A9FL20u12lz6TpOLaiS6rFa8'},
|
||||
{'name': 'add',
|
||||
'args': {'a': 11, 'b': 49},
|
||||
'id': 'call_MPKYGLHbf39csJIyb5BZ9xIk'}]
|
||||
```
|
||||
|
||||
We append the message shown above to the conversation, where it acts as context and makes the LLM aware of the functions it has called.
|
||||
|
||||
```python
|
||||
messages.append(ai_msg)
|
||||
```
|
||||
|
||||
Tool calling happens in two phases:

1. In the first call, we gather all the tools that the LLM decided to call, so that their results can be added as context for a more accurate, hallucination-free answer.

2. In the second call, we parse the set of tools chosen by the LLM and run them (in our case, the functions we defined, with the LLM's extracted arguments), then pass the results back to the LLM:
|
||||
|
||||
```python
|
||||
from langchain_core.messages import ToolMessage
|
||||
|
||||
for tool_call in ai_msg.tool_calls:
|
||||
selected_tool = {"add": add, "multiply": multiply}[tool_call["name"].lower()]
|
||||
tool_output = selected_tool.invoke(tool_call["args"])
|
||||
messages.append(ToolMessage(tool_output, tool_call_id=tool_call["id"]))
|
||||
```
|
||||
|
||||
Finally, we call the LLM (bound to the tools) with the function responses added to its context.
|
||||
|
||||
```python
|
||||
response = llm_with_tools.invoke(messages)
|
||||
print(response.content)
|
||||
```
|
||||
**Output**
|
||||
|
||||
```txt
|
||||
The final answers are:
|
||||
|
||||
- 3 * 12 = 36
|
||||
- 11 + 49 = 60
|
||||
```
|
||||
|
||||
### Defining tool schemas: Pydantic class (optional)
|
||||
|
||||
Above we showed how to define a schema using the `tool` decorator; however, we can equivalently define the schema using Pydantic. Pydantic is useful when your tool inputs are more complex:
|
||||
|
||||
```python
|
||||
from langchain_core.output_parsers.openai_tools import PydanticToolsParser
|
||||
|
||||
class add(BaseModel):
|
||||
"""Add two integers together."""
|
||||
|
||||
a: int = Field(..., description="First integer")
|
||||
b: int = Field(..., description="Second integer")
|
||||
|
||||
|
||||
class multiply(BaseModel):
|
||||
"""Multiply two integers together."""
|
||||
|
||||
a: int = Field(..., description="First integer")
|
||||
b: int = Field(..., description="Second integer")
|
||||
|
||||
|
||||
tools = [add, multiply]
|
||||
```
|
||||
|
||||
Now, we can bind them to chat models and directly get the result:
|
||||
|
||||
```python
|
||||
chain = llm_with_tools | PydanticToolsParser(tools=[multiply, add])
|
||||
chain.invoke(query)
|
||||
```
|
||||
|
||||
**Output**
|
||||
|
||||
```txt
|
||||
[multiply(a=3, b=12), add(a=11, b=49)]
|
||||
```
|
||||
|
||||
Now, as done above, we parse and run these functions, then call the LLM once again to get the final result; a sketch of this second phase follows.
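This sketch assumes the `chain`, `query`, `add`, and `multiply` Pydantic classes defined above; running the parsed calls locally stands in for the tool-execution step:

```python
# Parse the tool calls chosen by the LLM into Pydantic objects
parsed_calls = chain.invoke(query)  # e.g. [multiply(a=3, b=12), add(a=11, b=49)]

# Run each parsed call locally and collect the results
results = []
for call in parsed_calls:
    if isinstance(call, multiply):
        results.append(call.a * call.b)
    elif isinstance(call, add):
        results.append(call.a + call.b)

print(results)  # expected: [36, 60]
```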
|
||||
135
docs/docs/integrations/retrievers/nanopq.ipynb
Normal file
@@ -0,0 +1,135 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "661d5123-8ed2-4504-a846-7df0984e79f9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# NanoPQ (Product Quantization)\n",
|
||||
"\n",
|
||||
[Product Quantization">
">[Product Quantization algorithm (k-NN)](https://towardsdatascience.com/similarity-search-product-quantization-b2a1a6397701) is, in brief, a quantization algorithm that compresses database vectors, which helps with semantic search over large datasets. In a nutshell, each embedding is split into M subspaces, and each subspace is clustered; after clustering, every sub-vector is represented by the centroid of its cluster in that subspace. \n",
|
||||
"\n",
|
||||
"This notebook goes over how to use a retriever that under the hood uses a Product Quantization which has been implemented by the [nanopq](https://github.com/matsui528/nanopq) package."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "68794637-c13b-4145-944f-3b0c2f1258f9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-community langchain-openai nanopq"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "39ecbf50-4623-4ee6-9c8e-fea5da21767e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.embeddings.spacy_embeddings import SpacyEmbeddings\n",
|
||||
"from langchain_community.retrievers import NanoPQRetriever"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c1ce742a-5085-408a-a2c2-4bae0f605880",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create New Retriever with Texts"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "6c80020e-bc9e-49e8-8f93-5f75fd823738",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"retriever = NanoPQRetriever.from_texts(\n",
|
||||
" [\"Great world\", \"great words\", \"world\", \"planets of the world\"],\n",
|
||||
" SpacyEmbeddings(model_name=\"en_core_web_sm\"),\n",
|
||||
" clusters=2,\n",
|
||||
" subspace=2,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "743c26c1-0072-4e46-b41b-c28b3f1737c8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Use Retriever\n",
|
||||
"\n",
|
||||
"We can now use the retriever!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "f496de2d-9b8f-4f8b-a30f-279ef199259a",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"M: 2, Ks: 2, metric : <class 'numpy.uint8'>, code_dtype: l2\n",
|
||||
"iter: 20, seed: 123\n",
|
||||
"Training the subspace: 0 / 2\n",
|
||||
"Training the subspace: 1 / 2\n",
|
||||
"Encoding the subspace: 0 / 2\n",
|
||||
"Encoding the subspace: 1 / 2\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='world'),\n",
|
||||
" Document(page_content='Great world'),\n",
|
||||
" Document(page_content='great words'),\n",
|
||||
" Document(page_content='planets of the world')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"retriever.invoke(\"earth\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "617202a7-e3a6-49a8-b807-4b4d771159d5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.11"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -156,6 +156,29 @@
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For certain requirements, there is an option to pass the IBM's [`APIClient`](https://ibm.github.io/watsonx-ai-python-sdk/base.html#apiclient) object into the `WatsonxEmbeddings` class."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from ibm_watsonx_ai import APIClient\n",
|
||||
"\n",
|
||||
"api_client = APIClient(...)\n",
|
||||
"\n",
|
||||
"watsonx_llm = WatsonxEmbeddings(\n",
|
||||
" model_id=\"ibm/slate-125m-english-rtrvr\",\n",
|
||||
" watsonx_client=api_client,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
|
||||
File diff suppressed because it is too large
155
docs/docs/integrations/text_embedding/pinecone.ipynb
Normal file
@@ -0,0 +1,155 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# Pinecone Embeddings\n",
|
||||
"\n",
|
||||
"Pinecone's inference API can be accessed via `PineconeEmbeddings`. Providing text embeddings via the Pinecone service. We start by installing prerequisite libraries:"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"id": "f4b5d823fee826c2"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install -qU \"langchain-pinecone>=0.2.0\" "
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"id": "3bc5d3a5ed7f5ce3",
|
||||
"execution_count": null
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Next, we [sign up / log in to Pinecone](https://app.pinecone.io) to get our API key:"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"id": "62a77d25c3fd8bd5"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"from getpass import getpass\n",
|
||||
"\n",
|
||||
"os.environ[\"PINECONE_API_KEY\"] = os.getenv(\"PINECONE_API_KEY\") or getpass(\n",
|
||||
" \"Enter your Pinecone API key: \"\n",
|
||||
")"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"id": "8162dbcbcf7d3d55",
|
||||
"execution_count": null
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Check the document for available [models](https://docs.pinecone.io/models/overview). Now we initialize our embedding model like so:"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"id": "98d860a0a2d8b907"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_pinecone import PineconeEmbeddings\n",
|
||||
"\n",
|
||||
"embeddings = PineconeEmbeddings(model=\"multilingual-e5-large\")"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"id": "2b3adb72786a5275",
|
||||
"execution_count": null
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"From here we can create embeddings either sync or async, let's start with sync! We embed a single text as a query embedding (ie what we search with in RAG) using `embed_query`:"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"id": "11e24da855517230"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"docs = [\n",
|
||||
" \"Apple is a popular fruit known for its sweetness and crisp texture.\",\n",
|
||||
" \"The tech company Apple is known for its innovative products like the iPhone.\",\n",
|
||||
" \"Many people enjoy eating apples as a healthy snack.\",\n",
|
||||
" \"Apple Inc. has revolutionized the tech industry with its sleek designs and user-friendly interfaces.\",\n",
|
||||
" \"An apple a day keeps the doctor away, as the saying goes.\",\n",
|
||||
"]"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"id": "2da515e2a61ef7e9",
|
||||
"execution_count": null
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"doc_embeds = embeddings.embed_documents(docs)\n",
|
||||
"doc_embeds"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"id": "2897e0d570c90b2f",
|
||||
"execution_count": null
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query = \"Tell me about the tech company known as Apple\"\n",
|
||||
"query_embed = embeddings.embed_query(query)\n",
|
||||
"query_embed"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"id": "510784963c0e17a",
|
||||
"execution_count": null
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 2
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython2",
|
||||
"version": "2.7.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -101,7 +101,7 @@
|
||||
" sambastudio_embeddings_project_id=sambastudio_project_id,\n",
|
||||
" sambastudio_embeddings_endpoint_id=sambastudio_endpoint_id,\n",
|
||||
" sambastudio_embeddings_api_key=sambastudio_api_key,\n",
|
||||
" batch_size=32,\n",
|
||||
" batch_size=32, # set depending on the deployed endpoint configuration\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -8,87 +8,68 @@
|
||||
"# Sentence Transformers on Hugging Face\n",
|
||||
"\n",
|
||||
">[Hugging Face sentence-transformers](https://huggingface.co/sentence-transformers) is a Python framework for state-of-the-art sentence, text and image embeddings.\n",
|
||||
">One of the embedding models is used in the `HuggingFaceEmbeddings` class.\n",
|
||||
">We have also added an alias for `SentenceTransformerEmbeddings` for users who are more familiar with directly using that package.\n",
|
||||
"\n",
|
||||
"`sentence_transformers` package models are originating from [Sentence-BERT](https://arxiv.org/abs/1908.10084)"
|
||||
">You can use these embedding models from the `HuggingFaceEmbeddings` class."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": null,
|
||||
"id": "06c9f47d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet langchain-huggingface"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "ff9be586",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.0.1\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m23.1.1\u001b[0m\n",
|
||||
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n"
|
||||
"[-0.0383385568857193, 0.12346469610929489, -0.028642987832427025, 0.05365273728966713, 0.00884537026...\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet sentence_transformers > /dev/null"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "861521a9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_huggingface import HuggingFaceEmbeddings"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "ff9be586",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_huggingface import HuggingFaceEmbeddings\n",
|
||||
"\n",
|
||||
"embeddings = HuggingFaceEmbeddings(model_name=\"all-MiniLM-L6-v2\")\n",
|
||||
"# Equivalent to SentenceTransformerEmbeddings(model_name=\"all-MiniLM-L6-v2\")"
|
||||
"\n",
|
||||
"text = \"This is a test document.\"\n",
|
||||
"query_result = embeddings.embed_query(text)\n",
|
||||
"\n",
|
||||
"# show only the first 100 characters of the stringified vector\n",
|
||||
"print(str(query_result)[:100] + \"...\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "d0a98ae9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"text = \"This is a test document.\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "5d6c682b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query_result = embeddings.embed_query(text)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": 9,
|
||||
"id": "bb5e74c0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[[-0.038338493555784225, 0.12346471846103668, -0.028642840683460236, 0.05365276336669922, 0.00884535...\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"doc_result = embeddings.embed_documents([text, \"This is not a test document.\"])"
|
||||
"doc_result = embeddings.embed_documents([text, \"This is not a test document.\"])\n",
|
||||
"print(str(doc_result)[:100] + \"...\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "aaad49f8",
|
||||
"id": "d18544f5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
@@ -110,7 +91,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.12"
|
||||
"version": "3.11.4"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
|
||||
174
docs/docs/integrations/text_embedding/textembed.ipynb
Normal file
@@ -0,0 +1,174 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# TextEmbed - Embedding Inference Server\n",
|
||||
"\n",
|
||||
"TextEmbed is a high-throughput, low-latency REST API designed for serving vector embeddings. It supports a wide range of sentence-transformer models and frameworks, making it suitable for various applications in natural language processing.\n",
|
||||
"\n",
|
||||
"## Features\n",
|
||||
"\n",
|
||||
"- **High Throughput & Low Latency:** Designed to handle a large number of requests efficiently.\n",
|
||||
"- **Flexible Model Support:** Works with various sentence-transformer models.\n",
|
||||
"- **Scalable:** Easily integrates into larger systems and scales with demand.\n",
|
||||
"- **Batch Processing:** Supports batch processing for better and faster inference.\n",
|
||||
"- **OpenAI Compatible REST API Endpoint:** Provides an OpenAI compatible REST API endpoint.\n",
|
||||
"- **Single Line Command Deployment:** Deploy multiple models via a single command for efficient deployment.\n",
|
||||
"- **Support for Embedding Formats:** Supports binary, float16, and float32 embeddings formats for faster retrieval.\n",
|
||||
"\n",
|
||||
"## Getting Started\n",
|
||||
"\n",
|
||||
"### Prerequisites\n",
|
||||
"\n",
|
||||
"Ensure you have Python 3.10 or higher installed. You will also need to install the required dependencies."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Installation via PyPI\n",
|
||||
"\n",
|
||||
"1. **Install the required dependencies:**\n",
|
||||
"\n",
|
||||
" ```bash\n",
|
||||
" pip install -U textembed\n",
|
||||
" ```\n",
|
||||
"\n",
|
||||
"2. **Start the TextEmbed server with your desired models:**\n",
|
||||
"\n",
|
||||
" ```bash\n",
|
||||
" python -m textembed.server --models sentence-transformers/all-MiniLM-L12-v2 --workers 4 --api-key TextEmbed \n",
|
||||
" ```\n",
|
||||
"\n",
|
||||
"For more information, please read the [documentation](https://github.com/kevaldekivadiya2415/textembed/blob/main/docs/setup.md)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Import"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.embeddings import TextEmbedEmbeddings"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"embeddings = TextEmbedEmbeddings(\n",
|
||||
" model=\"sentence-transformers/all-MiniLM-L12-v2\",\n",
|
||||
" api_url=\"http://0.0.0.0:8000/v1\",\n",
|
||||
" api_key=\"TextEmbed\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Embed your documents"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 23,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Define a list of documents\n",
|
||||
"documents = [\n",
|
||||
" \"Data science involves extracting insights from data.\",\n",
|
||||
" \"Artificial intelligence is transforming various industries.\",\n",
|
||||
" \"Cloud computing provides scalable computing resources over the internet.\",\n",
|
||||
" \"Big data analytics helps in understanding large datasets.\",\n",
|
||||
" \"India has a diverse cultural heritage.\",\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"# Define a query\n",
|
||||
"query = \"What is the cultural heritage of India?\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 24,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Embed all documents\n",
|
||||
"document_embeddings = embeddings.embed_documents(documents)\n",
|
||||
"\n",
|
||||
"# Embed the query\n",
|
||||
"query_embedding = embeddings.embed_query(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 25,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'Data science involves extracting insights from data.': 0.05121298956322118,\n",
|
||||
" 'Artificial intelligence is transforming various industries.': -0.0060612142358469345,\n",
|
||||
" 'Cloud computing provides scalable computing resources over the internet.': -0.04877402795301714,\n",
|
||||
" 'Big data analytics helps in understanding large datasets.': 0.016582168576929422,\n",
|
||||
" 'India has a diverse cultural heritage.': 0.7408992963028144}"
|
||||
]
|
||||
},
|
||||
"execution_count": 25,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Compute Similarity\n",
|
||||
"import numpy as np\n",
|
||||
"\n",
|
||||
"scores = np.array(document_embeddings) @ np.array(query_embedding).T\n",
|
||||
"dict(zip(documents, scores))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "check10",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.14"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
File diff suppressed because one or more lines are too long
183
docs/docs/integrations/tools/riza.ipynb
Normal file
@@ -0,0 +1,183 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7d143c73",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Riza Code Interpreter\n",
|
||||
"\n",
|
||||
"> The Riza Code Interpreter is a WASM-based isolated environment for running Python or JavaScript generated by AI agents.\n",
|
||||
"\n",
|
||||
"In this notebook we'll create an example of an agent that uses Python to solve a problem that an LLM can't solve on its own:\n",
|
||||
"counting the number of 'r's in the word \"strawberry.\"\n",
|
||||
"\n",
|
||||
"Before you get started grab an API key from the [Riza dashboard](https://dashboard.riza.io). For more guides and a full API reference\n",
|
||||
"head over to the [Riza Code Interpreter API documentation](https://docs.riza.io)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "894aa87a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Make sure you have the necessary dependencies installed."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "8265cf7f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet langchain-community rizaio"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e085eb51",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Set up your API keys as an environment variable."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "45ba8936",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%env ANTHROPIC_API_KEY=<your_anthropic_api_key_here>\n",
|
||||
"%env RIZA_API_KEY=<your_riza_api_key_here>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "efe26fd9-6e33-4f5f-b49b-ea74fa6c4915",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.tools.riza.command import ExecPython"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "cd5b952e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents import AgentExecutor, create_tool_calling_agent\n",
|
||||
"from langchain_anthropic import ChatAnthropic\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7bd0b610",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Initialize the `ExecPython` tool."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "32f1543f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tools = [ExecPython()]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "24f952d5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Initialize an agent using Anthropic's Claude Haiku model."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "71831ea8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = ChatAnthropic(model=\"claude-3-haiku-20240307\", temperature=0)\n",
|
||||
"\n",
|
||||
"prompt_template = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant. Make sure to use a tool if you need to solve a problem.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" (\"placeholder\", \"{agent_scratchpad}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"agent = create_tool_calling_agent(llm, tools, prompt_template)\n",
|
||||
"agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 34,
|
||||
"id": "36b24036",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m\n",
|
||||
"Invoking: `riza_exec_python` with `{'code': 'word = \"strawberry\"\\nprint(word.count(\"r\"))'}`\n",
|
||||
"responded: [{'id': 'toolu_01JwPLAAqqCNCjVuEnK8Fgut', 'input': {}, 'name': 'riza_exec_python', 'type': 'tool_use', 'index': 0, 'partial_json': '{\"code\": \"word = \\\\\"strawberry\\\\\"\\\\nprint(word.count(\\\\\"r\\\\\"))\"}'}]\n",
|
||||
"\n",
|
||||
"\u001b[0m\u001b[36;1m\u001b[1;3m3\n",
|
||||
"\u001b[0m\u001b[32;1m\u001b[1;3m[{'text': '\\n\\nThe word \"strawberry\" contains 3 \"r\" characters.', 'type': 'text', 'index': 0}]\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"The word \"strawberry\" contains 3 \"r\" characters.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Ask a tough question\n",
|
||||
"result = agent_executor.invoke({\"input\": \"how many rs are in strawberry?\"})\n",
|
||||
"print(result[\"output\"][0][\"text\"])"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.3"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -11,7 +11,7 @@
|
||||
"source": [
|
||||
"# SQL Database\n",
|
||||
"\n",
|
||||
"::: {.callout-note}\n",
|
||||
":::note\n",
|
||||
"The `SQLDatabase` adapter utility is a wrapper around a database connection.\n",
|
||||
"\n",
|
||||
"For talking to SQL databases, it uses the [SQLAlchemy] Core API .\n",
|
||||
@@ -405,7 +405,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.12"
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -102,8 +102,8 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent_chain.run(\n",
|
||||
" \"What happens today with Microsoft stocks?\",\n",
|
||||
"agent_chain.invoke(\n",
|
||||
" \"What happened today with Microsoft stocks?\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@@ -147,7 +147,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent_chain.run(\n",
|
||||
"agent_chain.invoke(\n",
|
||||
" \"How does Microsoft feels today comparing with Nvidia?\",\n",
|
||||
")"
|
||||
]
|
||||
@@ -188,7 +188,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"tool.run(\"NVDA\")"
|
||||
"tool.invoke(\"NVDA\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -210,7 +210,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"res = tool.run(\"AAPL\")\n",
|
||||
"res = tool.invoke(\"AAPL\")\n",
|
||||
"print(res)"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -72,7 +72,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"tool.run(\"lex friedman\")"
|
||||
"tool.run(\"lex fridman\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -88,9 +88,10 @@ CHAT_MODEL_FEAT_TABLE = {
|
||||
"link": "/docs/integrations/chat/huggingface/",
|
||||
},
|
||||
"ChatOllama": {
|
||||
"tool_calling": True,
|
||||
"local": True,
|
||||
"json_mode": True,
|
||||
"package": "langchain-community",
|
||||
"package": "langchain-ollama",
|
||||
"link": "/docs/integrations/chat/ollama/",
|
||||
},
|
||||
"vLLM Chat (via ChatOpenAI)": {
|
||||
@@ -150,7 +151,13 @@ hide_table_of_contents: true
|
||||
|
||||
## Advanced features
|
||||
|
||||
The following table shows all the chat models that support one or more advanced features.
|
||||
The following table shows all the chat model classes that support one or more advanced features.
|
||||
|
||||
:::info
|
||||
While all these LangChain classes support the indicated advanced feature, you may have
|
||||
to open the provider-specific documentation to learn which hosted models or backends support
|
||||
the feature.
|
||||
:::
|
||||
|
||||
{table}
|
||||
|
||||
|
||||
181
docs/scripts/tool_feat_table.py
Normal file
@@ -0,0 +1,181 @@
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
SEARCH_TOOL_FEAT_TABLE = {
|
||||
"Exa Search": {
|
||||
"pricing": "1000 free searches/month",
|
||||
"available_data": "URL, Author, Title, Published Date",
|
||||
"link": "/docs/integrations/tools/exa_search",
|
||||
},
|
||||
"Bing Search": {
|
||||
"pricing": "Paid",
|
||||
"available_data": "URL, Snippet, Title",
|
||||
"link": "/docs/integrations/tools/bing_search",
|
||||
},
|
||||
"DuckDuckgoSearch": {
|
||||
"pricing": "Free",
|
||||
"available_data": "URL, Snippet, Title",
|
||||
"link": "/docs/integrations/tools/ddg",
|
||||
},
|
||||
"Brave Search": {
|
||||
"pricing": "Free",
|
||||
"available_data": "URL, Snippet, Title",
|
||||
"link": "/docs/integrations/tools/brave_search",
|
||||
},
|
||||
"Google Search": {
|
||||
"pricing": "Paid",
|
||||
"available_data": "URL, Snippet, Title",
|
||||
"link": "/docs/integrations/tools/google_search",
|
||||
},
|
||||
"Google Serper": {
|
||||
"pricing": "Free",
|
||||
"available_data": "URL, Snippet, Title, Search Rank, Site Links",
|
||||
"link": "/docs/integrations/tools/google_serper",
|
||||
},
|
||||
"Mojeek Search": {
|
||||
"pricing": "Paid",
|
||||
"available_data": "URL, Snippet, Title",
|
||||
"link": "/docs/integrations/tools/mojeek_search",
|
||||
},
|
||||
"SearxNG Search": {
|
||||
"pricing": "Free",
|
||||
"available_data": "URL, Snippet, Title, Category",
|
||||
"link": "/docs/integrations/tools/searx_search",
|
||||
},
|
||||
"You.com Search": {
|
||||
"pricing": "Free for 60 days",
|
||||
"available_data": "URL, Title, Page Content",
|
||||
"link": "/docs/integrations/tools/you",
|
||||
},
|
||||
"SearchApi": {
|
||||
"pricing": "100 Free Searches on Sign Up",
|
||||
"available_data": "URL, Snippet, Title, Search Rank, Site Links, Authors",
|
||||
"link": "/docs/integrations/tools/searchapi",
|
||||
},
|
||||
"SerpAPI": {
|
||||
"pricing": "100 Free Searches/Month",
|
||||
"available_data": "Answer",
|
||||
"link": "/docs/integrations/tools/serpapi",
|
||||
},
|
||||
}
|
||||
|
||||
CODE_INTERPRETER_TOOL_FEAT_TABLE = {
|
||||
"Bearly Code Interpreter": {
|
||||
"langauges": "Python",
|
||||
"sandbox_lifetime": "Resets on Execution",
|
||||
"upload": True,
|
||||
"return_results": "Text",
|
||||
"link": "/docs/integrations/tools/bearly",
|
||||
},
|
||||
"Riza Code Interpreter": {
|
||||
"langauges": "Python, JavaScript, PHP, Ruby",
|
||||
"sandbox_lifetime": "Resets on Execution",
|
||||
"upload": False,
|
||||
"return_results": "Text",
|
||||
"link": "/docs/integrations/tools/riza",
|
||||
},
|
||||
"E2B Data Analysis": {
|
||||
"langauges": "Python. In beta: JavaScript, R, Java",
|
||||
"sandbox_lifetime": "24 Hours",
|
||||
"upload": True,
|
||||
"return_results": "Text, Images, Videos",
|
||||
"link": "/docs/integrations/tools/e2b_data_analysis",
|
||||
},
|
||||
"Azure Container Apps dynamic sessions": {
|
||||
"langauges": "Python",
|
||||
"sandbox_lifetime": "1 Hour",
|
||||
"upload": True,
|
||||
"return_results": "Text, Images",
|
||||
"link": "/docs/integrations/tools/azure_dynamic_sessions",
|
||||
},
|
||||
}
|
||||
|
||||
TOOLS_TEMPLATE = """\
|
||||
---
|
||||
sidebar_position: 0
|
||||
sidebar_class_name: hidden
|
||||
keywords: [compatibility]
|
||||
custom_edit_url:
|
||||
hide_table_of_contents: true
|
||||
---
|
||||
|
||||
# Tools
|
||||
|
||||
## Search Tools
|
||||
|
||||
The following table shows tools that execute online searches in some shape or form:
|
||||
|
||||
{search_table}
|
||||
|
||||
## Code Interpreter Tools
|
||||
|
||||
The following table shows tools that can be used as code interpreters:
|
||||
|
||||
{code_interpreter_table}
|
||||
|
||||
"""
|
||||
|
||||
|
||||
def get_search_tools_table() -> str:
|
||||
"""Get the table of search tools."""
|
||||
header = ["tool", "pricing", "available_data"]
|
||||
title = ["Tool", "Free/Paid", "Return Data"]
|
||||
rows = [title, [":-"] + [":-:"] * (len(title) - 1)]
|
||||
for search_tool, feats in sorted(SEARCH_TOOL_FEAT_TABLE.items()):
|
||||
# Fields are in the order of the header
|
||||
row = [
|
||||
f"[{search_tool}]({feats['link']})",
|
||||
]
|
||||
for h in header[1:]:
|
||||
row.append(feats.get(h))
|
||||
rows.append(row)
|
||||
return "\n".join(["|".join(row) for row in rows])
|
||||
|
||||
|
||||
def get_code_interpreter_table() -> str:
|
||||
"""Get the table of search tools."""
|
||||
header = [
|
||||
"tool",
|
||||
"langauges",
|
||||
"sandbox_lifetime",
|
||||
"upload",
|
||||
"return_results",
|
||||
]
|
||||
title = [
|
||||
"Tool",
|
||||
"Supported Languages",
|
||||
"Sandbox Lifetime",
|
||||
"Supports File Uploads",
|
||||
"Return Types",
|
||||
]
|
||||
rows = [title, [":-"] + [":-:"] * (len(title) - 1)]
|
||||
for search_tool, feats in sorted(CODE_INTERPRETER_TOOL_FEAT_TABLE.items()):
|
||||
# Fields are in the order of the header
|
||||
row = [
|
||||
f"[{search_tool}]({feats['link']})",
|
||||
]
|
||||
for h in header[1:]:
|
||||
value = feats.get(h)
|
||||
if h == "upload":
|
||||
if value is True:
|
||||
row.append("✅")
|
||||
else:
|
||||
row.append("❌")
|
||||
else:
|
||||
row.append(value)
|
||||
rows.append(row)
|
||||
return "\n".join(["|".join(row) for row in rows])
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
output_dir = Path(sys.argv[1])
|
||||
output_integrations_dir = output_dir / "integrations"
|
||||
output_integrations_dir_tools = output_integrations_dir / "tools"
|
||||
output_integrations_dir_tools.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
tools_page = TOOLS_TEMPLATE.format(
|
||||
search_table=get_search_tools_table(),
|
||||
code_interpreter_table=get_code_interpreter_table(),
|
||||
)
|
||||
with open(output_integrations_dir / "tools" / "index.mdx", "w") as f:
|
||||
f.write(tools_page)
|
||||
@@ -243,8 +243,8 @@ module.exports = {
|
||||
},
|
||||
],
|
||||
link: {
|
||||
type: "generated-index",
|
||||
slug: "integrations/tools",
|
||||
type: "doc",
|
||||
id: "integrations/tools/index",
|
||||
},
|
||||
},
|
||||
{
|
||||
|
||||
@@ -52,17 +52,17 @@ export default function ChatModelTabs(props) {
|
||||
customVarName,
|
||||
} = props;
|
||||
|
||||
const openAIParamsOrDefault = openaiParams ?? `model="gpt-3.5-turbo-0125"`;
|
||||
const openAIParamsOrDefault = openaiParams ?? `model="gpt-4o-mini"`;
|
||||
const anthropicParamsOrDefault =
|
||||
anthropicParams ?? `model="claude-3-sonnet-20240229"`;
|
||||
const cohereParamsOrDefault = cohereParams ?? `model="command-r"`;
|
||||
anthropicParams ?? `model="claude-3-5-sonnet-20240620"`;
|
||||
const cohereParamsOrDefault = cohereParams ?? `model="command-r-plus"`;
|
||||
const fireworksParamsOrDefault =
|
||||
fireworksParams ??
|
||||
`model="accounts/fireworks/models/mixtral-8x7b-instruct"`;
|
||||
`model="accounts/fireworks/models/llama-v3p1-70b-instruct"`;
|
||||
const groqParamsOrDefault = groqParams ?? `model="llama3-8b-8192"`;
|
||||
const mistralParamsOrDefault =
|
||||
mistralParams ?? `model="mistral-large-latest"`;
|
||||
const googleParamsOrDefault = googleParams ?? `model="gemini-pro"`;
|
||||
const googleParamsOrDefault = googleParams ?? `model="gemini-1.5-flash"`;
|
||||
const togetherParamsOrDefault =
|
||||
togetherParams ??
|
||||
`\n base_url="https://api.together.xyz/v1",\n api_key=os.environ["TOGETHER_API_KEY"],\n model="mistralai/Mixtral-8x7B-Instruct-v0.1",\n`;
|
||||
|
||||
@@ -61,6 +61,10 @@
|
||||
{
|
||||
"source": "/cookbook(/?)",
|
||||
"destination": "/v0.1/docs/cookbook/"
|
||||
},
|
||||
{
|
||||
"source": "/docs/integrations/toolkits/document_comparison_toolkit(/?)",
|
||||
"destination": "/docs/tutorials/rag/"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@ license = "MIT"
|
||||
|
||||
[tool.poetry.urls]
|
||||
"Source Code" = "https://github.com/langchain-ai/langchain/tree/master/libs/partners/__package_name_short__"
|
||||
"Release Notes" = "https://github.com/langchain-ai/langchain/releases?q=tag%3A%22__package_name_short__%3D%3D0%22&expanded=true"
|
||||
|
||||
[tool.poetry.dependencies]
|
||||
python = ">=3.8.1,<4.0"
|
||||
@@ -77,8 +78,7 @@ build-backend = "poetry.core.masonry.api"
|
||||
# section of the configuration file raise errors.
|
||||
#
|
||||
# https://github.com/tophat/syrupy
|
||||
# --snapshot-warn-unused Prints a warning on unused snapshots rather than fail the test suite.
|
||||
addopts = "--snapshot-warn-unused --strict-markers --strict-config --durations=5"
|
||||
addopts = "--strict-markers --strict-config --durations=5"
|
||||
# Registering custom markers.
|
||||
# https://docs.pytest.org/en/7.1.x/example/markers.html#registering-markers
|
||||
markers = [
|
||||
|
||||
1520
libs/cli/poetry.lock
generated
File diff suppressed because it is too large
@@ -1,6 +1,6 @@
[tool.poetry]
name = "langchain-cli"
version = "0.0.25"
version = "0.0.26"
description = "CLI for interacting with LangChain"
authors = ["Erick Friis <erick@langchain.dev>"]
readme = "README.md"
@@ -9,6 +9,7 @@ license = "MIT"

[tool.poetry.urls]
"Source Code" = "https://github.com/langchain-ai/langchain/tree/master/libs/cli"
"Release Notes" = "https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain-cli%3D%3D0%22&expanded=true"

[tool.poetry.dependencies]
python = ">=3.8.1,<4.0"

@@ -16,6 +16,7 @@ cloudpickle>=2.0.0
cohere>=4,<6
databricks-vectorsearch>=0.21,<0.22
datasets>=2.15.0,<3
dedoc>=2.2.6,<3
dgml-utils>=0.3.0,<0.4
elasticsearch>=8.12.0,<9
esprima>=4.0.1,<5
@@ -25,6 +26,7 @@ fireworks-ai>=0.9.0,<0.10
friendli-client>=1.2.4,<2
geopandas>=0.13.1
gitpython>=3.1.32,<4
gliner>=0.2.7
google-cloud-documentai>=2.20.1,<3
gql>=3.4.1,<4
gradientai>=1.4.0,<2
@@ -37,6 +39,7 @@ javelin-sdk>=0.1.8,<0.2
jinja2>=3,<4
jq>=1.4.1,<2
jsonschema>1
keybert>=0.8.5
lxml>=4.9.3,<6.0
markdownify>=0.11.6,<0.12
motor>=3.3.1,<4
@@ -60,7 +63,7 @@ psychicapi>=0.8.0,<0.9
py-trello>=0.19.0,<0.20
pyjwt>=2.8.0,<3
pymupdf>=1.22.3,<2
pypdf>=3.4.0,<4
pypdf>=3.4.0,<5
pypdfium2>=4.10.0,<5
pyspark>=3.4.0,<4
rank-bm25>=0.2.2,<0.3
@@ -86,3 +89,4 @@ upstash-ratelimit>=1.1.0,<2
vdms==0.0.20
xata>=1.0.0a7,<2
xmltodict>=0.13.0,<0.14
nanopq==0.2.1

@@ -292,17 +292,21 @@ def _create_api_controller_agent(
        )
    if "DELETE" in allowed_operations:
        delete_llm_chain = LLMChain(llm=llm, prompt=PARSING_DELETE_PROMPT)
        RequestsDeleteToolWithParsing( # type: ignore[call-arg]
            requests_wrapper=requests_wrapper,
            llm_chain=delete_llm_chain,
            allow_dangerous_requests=allow_dangerous_requests,
        tools.append(
            RequestsDeleteToolWithParsing( # type: ignore[call-arg]
                requests_wrapper=requests_wrapper,
                llm_chain=delete_llm_chain,
                allow_dangerous_requests=allow_dangerous_requests,
            )
        )
    if "PATCH" in allowed_operations:
        patch_llm_chain = LLMChain(llm=llm, prompt=PARSING_PATCH_PROMPT)
        RequestsPatchToolWithParsing( # type: ignore[call-arg]
            requests_wrapper=requests_wrapper,
            llm_chain=patch_llm_chain,
            allow_dangerous_requests=allow_dangerous_requests,
        tools.append(
            RequestsPatchToolWithParsing( # type: ignore[call-arg]
                requests_wrapper=requests_wrapper,
                llm_chain=patch_llm_chain,
                allow_dangerous_requests=allow_dangerous_requests,
            )
        )
    if not tools:
        raise ValueError("Tools not found")

@@ -25,6 +25,7 @@ from langchain_core.prompts.chat import (
from langchain_community.agent_toolkits.sql.prompt import (
    SQL_FUNCTIONS_SUFFIX,
    SQL_PREFIX,
    SQL_SUFFIX,
)
from langchain_community.agent_toolkits.sql.toolkit import SQLDatabaseToolkit
from langchain_community.tools.sql_database.tool import (
@@ -140,8 +141,9 @@ def create_sql_agent(
    toolkit = toolkit or SQLDatabaseToolkit(llm=llm, db=db) # type: ignore[arg-type]
    agent_type = agent_type or AgentType.ZERO_SHOT_REACT_DESCRIPTION
    tools = toolkit.get_tools() + list(extra_tools)
    if prefix is None:
        prefix = SQL_PREFIX
    if prompt is None:
        prefix = prefix or SQL_PREFIX
        prefix = prefix.format(dialect=toolkit.dialect, top_k=top_k)
    else:
        if "top_k" in prompt.input_variables:
@@ -170,10 +172,10 @@ def create_sql_agent(
        )
        template = "\n\n".join(
            [
                react_prompt.PREFIX,
                prefix,
                "{tools}",
                format_instructions,
                react_prompt.SUFFIX,
                suffix or SQL_SUFFIX,
            ]
        )
        prompt = PromptTemplate.from_template(template)

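The prefix handling above now formats a caller-supplied prefix (filling in {dialect} and {top_k}) only when no full prompt object is given. A hedged usage sketch; the SQLite URI, model name, and agent_type value are placeholders rather than part of the diff:

```python
# Sketch: pass a custom prefix and let create_sql_agent fill in dialect/top_k.
from langchain_community.agent_toolkits.sql.base import create_sql_agent
from langchain_community.utilities import SQLDatabase
from langchain_openai import ChatOpenAI

db = SQLDatabase.from_uri("sqlite:///chinook.db")
llm = ChatOpenAI(model="gpt-4o-mini")

agent = create_sql_agent(
    llm=llm,
    db=db,
    prefix="You are a careful {dialect} analyst. Never return more than {top_k} rows.",
    agent_type="tool-calling",
    verbose=True,
)
```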
@@ -8,6 +8,12 @@ from langchain_core.messages import AIMessage
from langchain_core.outputs import ChatGeneration, LLMResult

MODEL_COST_PER_1K_TOKENS = {
    # GPT-4o-mini input
    "gpt-4o-mini": 0.00015,
    "gpt-4o-mini-2024-07-18": 0.00015,
    # GPT-4o-mini output
    "gpt-4o-mini-completion": 0.0006,
    "gpt-4o-mini-2024-07-18-completion": 0.0006,
    # GPT-4o input
    "gpt-4o": 0.005,
    "gpt-4o-2024-05-13": 0.005,
@@ -94,12 +100,14 @@ MODEL_COST_PER_1K_TOKENS = {
    "gpt-3.5-turbo-0613-finetuned": 0.003,
    "gpt-3.5-turbo-1106-finetuned": 0.003,
    "gpt-3.5-turbo-0125-finetuned": 0.003,
    "gpt-4o-mini-2024-07-18-finetuned": 0.0003,
    # Fine Tuned output
    "babbage-002-finetuned-completion": 0.0016,
    "davinci-002-finetuned-completion": 0.012,
    "gpt-3.5-turbo-0613-finetuned-completion": 0.006,
    "gpt-3.5-turbo-1106-finetuned-completion": 0.006,
    "gpt-3.5-turbo-0125-finetuned-completion": 0.006,
    "gpt-4o-mini-2024-07-18-finetuned-completion": 0.0012,
    # Azure Fine Tuned input
    "babbage-002-azure-finetuned": 0.0004,
    "davinci-002-azure-finetuned": 0.002,

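These entries are USD prices per 1K tokens, with the "-completion" keys holding output prices. A quick sanity check using the gpt-4o-mini rows above:

```python
# Back-of-the-envelope cost estimate from the table above (USD per 1K tokens).
input_cost_per_1k = 0.00015   # "gpt-4o-mini"
output_cost_per_1k = 0.0006   # "gpt-4o-mini-completion"

prompt_tokens, completion_tokens = 12_000, 3_000
input_cost = prompt_tokens / 1000 * input_cost_per_1k
output_cost = completion_tokens / 1000 * output_cost_per_1k
print(f"${input_cost + output_cost:.4f}")  # $0.0036
```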
@@ -24,6 +24,7 @@ from langchain_core.output_parsers import (
from langchain_core.prompts import BasePromptTemplate
from langchain_core.pydantic_v1 import BaseModel
from langchain_core.runnables import Runnable
from langchain_core.utils.pydantic import is_basemodel_subclass

from langchain_community.output_parsers.ernie_functions import (
    JsonOutputFunctionsParser,
@@ -94,7 +95,7 @@ def _get_python_function_arguments(function: Callable, arg_descriptions: dict) -
    for arg, arg_type in annotations.items():
        if arg == "return":
            continue
        if isinstance(arg_type, type) and issubclass(arg_type, BaseModel):
        if isinstance(arg_type, type) and is_basemodel_subclass(arg_type):
            # Mypy error:
            # "type" has no attribute "schema"
            properties[arg] = arg_type.schema() # type: ignore[attr-defined]
@@ -156,7 +157,7 @@ def convert_to_ernie_function(
    """
    if isinstance(function, dict):
        return function
    elif isinstance(function, type) and issubclass(function, BaseModel):
    elif isinstance(function, type) and is_basemodel_subclass(function):
        return cast(Dict, convert_pydantic_to_ernie_function(function))
    elif callable(function):
        return convert_python_function_to_ernie_function(function)
@@ -185,7 +186,7 @@ def get_ernie_output_parser(
    only the function arguments and not the function name.
    """
    function_names = [convert_to_ernie_function(f)["name"] for f in functions]
    if isinstance(functions[0], type) and issubclass(functions[0], BaseModel):
    if isinstance(functions[0], type) and is_basemodel_subclass(functions[0]):
        if len(functions) > 1:
            pydantic_schema: Union[Dict, Type[BaseModel]] = {
                name: fn for name, fn in zip(function_names, functions)

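The swap from issubclass(..., BaseModel) to is_basemodel_subclass makes these checks accept models from both the pydantic v1 and v2 namespaces. A minimal sketch of the difference (the Person class is illustrative):

```python
from langchain_core.utils.pydantic import is_basemodel_subclass
from pydantic import BaseModel  # resolves to the v2 namespace on pydantic>=2 installs


class Person(BaseModel):
    name: str


# True whether Person is a pydantic v1 or v2 model, whereas
# issubclass(Person, langchain_core.pydantic_v1.BaseModel) is False for a v2 model.
print(is_basemodel_subclass(Person))  # True
print(is_basemodel_subclass(dict))    # False
```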
@@ -311,12 +311,15 @@ class GraphCypherQAChain(Chain):
        _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
        callbacks = _run_manager.get_child()
        question = inputs[self.input_key]
        args = {
            "question": question,
            "schema": self.graph_schema,
        }
        args.update(inputs)

        intermediate_steps: List = []

        generated_cypher = self.cypher_generation_chain.run(
            {"question": question, "schema": self.graph_schema}, callbacks=callbacks
        )
        generated_cypher = self.cypher_generation_chain.run(args, callbacks=callbacks)

        # Extract Cypher code if it is wrapped in backticks
        generated_cypher = extract_cypher(generated_cypher)

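With args.update(inputs), any extra chain inputs now reach the Cypher-generation prompt alongside question and schema. A hedged sketch of how a caller might use that; the Neo4j credentials, the user_hint variable, and the allow_dangerous_requests flag are illustrative assumptions, not shown in the hunk:

```python
# Sketch only: a custom cypher prompt that consumes an extra input variable.
from langchain_community.chains.graph_qa.cypher import GraphCypherQAChain
from langchain_community.graphs import Neo4jGraph
from langchain_core.prompts import PromptTemplate
from langchain_openai import ChatOpenAI

graph = Neo4jGraph(url="bolt://localhost:7687", username="neo4j", password="password")
llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)

cypher_prompt = PromptTemplate.from_template(
    "Schema:\n{schema}\n\nHint: {user_hint}\n\nWrite a Cypher query answering: {question}"
)
chain = GraphCypherQAChain.from_llm(
    llm,
    graph=graph,
    cypher_prompt=cypher_prompt,
    allow_dangerous_requests=True,  # required by recent versions; check yours
)
# The extra "user_hint" key is forwarded to the cypher-generation prompt.
result = chain.invoke({"query": "Who directed Inception?", "user_hint": "movie graph"})
```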
@@ -124,6 +124,11 @@ class PebbloRetrievalQA(Chain):
                ),
                "doc": doc.page_content,
                "vector_db": self.retriever.vectorstore.__class__.__name__,
                **(
                    {"pb_checksum": doc.metadata.get("pb_checksum")}
                    if doc.metadata.get("pb_checksum")
                    else {}
                ),
            }
            for doc in docs
            if isinstance(doc, Document)
@@ -457,25 +462,24 @@ class PebbloRetrievalQA(Chain):
        if self.api_key:
            if self.classifier_location == "local":
                if pebblo_resp:
                    payload["response"] = (
                        json.loads(pebblo_resp.text)
                        .get("retrieval_data", {})
                        .get("response", {})
                    )
                    payload["context"] = (
                        json.loads(pebblo_resp.text)
                        .get("retrieval_data", {})
                        .get("context", [])
                    )
                    payload["prompt"] = (
                        json.loads(pebblo_resp.text)
                        .get("retrieval_data", {})
                        .get("prompt", {})
                    )
                    resp = json.loads(pebblo_resp.text)
                    if resp:
                        payload["response"].update(
                            resp.get("retrieval_data", {}).get("response", {})
                        )
                        payload["response"].pop("data")
                        payload["prompt"].update(
                            resp.get("retrieval_data", {}).get("prompt", {})
                        )
                        payload["prompt"].pop("data")
                        context = payload["context"]
                        for context_data in context:
                            context_data.pop("doc")
                        payload["context"] = context
                else:
                    payload["response"] = None
                    payload["context"] = None
                    payload["prompt"] = None
                    payload["response"] = {}
                    payload["prompt"] = {}
                    payload["context"] = []
            headers.update({"x-api-key": self.api_key})
            pebblo_cloud_url = f"{PEBBLO_CLOUD_URL}{PROMPT_URL}"
            try:

@@ -129,6 +129,7 @@ class Context(BaseModel):
    retrieved_from: Optional[str]
    doc: Optional[str]
    vector_db: str
    pb_checksum: Optional[str]


class Prompt(BaseModel):

@@ -28,7 +28,7 @@ class ElasticsearchChatMessageHistory(BaseChatMessageHistory):
        es_password: Password to use when connecting to Elasticsearch.
        es_api_key: API key to use when connecting to Elasticsearch.
        es_connection: Optional pre-existing Elasticsearch connection.
        esnsure_ascii: Used to escape ASCII symbols in json.dumps. Defaults to True.
        ensure_ascii: Used to escape ASCII symbols in json.dumps. Defaults to True.
        index: Name of the index to use.
        session_id: Arbitrary key that is used to store the messages
            of a single chat session.
@@ -45,11 +45,11 @@ class ElasticsearchChatMessageHistory(BaseChatMessageHistory):
        es_user: Optional[str] = None,
        es_api_key: Optional[str] = None,
        es_password: Optional[str] = None,
        esnsure_ascii: Optional[bool] = True,
        ensure_ascii: Optional[bool] = True,
    ):
        self.index: str = index
        self.session_id: str = session_id
        self.ensure_ascii = esnsure_ascii
        self.ensure_ascii = ensure_ascii

        # Initialize Elasticsearch client from passed client arg or connection info
        if es_connection is not None:

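This hunk renames the misspelled esnsure_ascii parameter to ensure_ascii. A hedged usage sketch; es_url and the other connection details are placeholders and assume a reachable Elasticsearch instance:

```python
from langchain_community.chat_message_histories import ElasticsearchChatMessageHistory

history = ElasticsearchChatMessageHistory(
    index="chat-history",
    session_id="session-1",
    es_url="http://localhost:9200",
    ensure_ascii=False,  # keep non-ASCII characters unescaped in the stored JSON
)
history.add_user_message("Bonjour, ça va ?")
```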
@@ -1,6 +1,6 @@
import json
from pathlib import Path
from typing import List
from typing import List, Optional

from langchain_core.chat_history import (
    BaseChatMessageHistory,
@@ -11,21 +11,33 @@ from langchain_core.messages import BaseMessage, messages_from_dict, messages_to
class FileChatMessageHistory(BaseChatMessageHistory):
    """Chat message history that stores history in a local file."""

    def __init__(self, file_path: str) -> None:
    def __init__(
        self,
        file_path: str,
        *,
        encoding: Optional[str] = None,
        ensure_ascii: bool = True,
    ) -> None:
        """Initialize the file path for the chat history.

        Args:
            file_path: The path to the local file to store the chat history.
            encoding: The encoding to use for file operations. Defaults to None.
            ensure_ascii: If True, escape non-ASCII in JSON. Defaults to True.
        """
        self.file_path = Path(file_path)
        self.encoding = encoding
        self.ensure_ascii = ensure_ascii

        if not self.file_path.exists():
            self.file_path.touch()
            self.file_path.write_text(json.dumps([]))
            self.file_path.write_text(
                json.dumps([], ensure_ascii=self.ensure_ascii), encoding=self.encoding
            )

    @property
    def messages(self) -> List[BaseMessage]: # type: ignore
        """Retrieve the messages from the local file"""
        items = json.loads(self.file_path.read_text())
        items = json.loads(self.file_path.read_text(encoding=self.encoding))
        messages = messages_from_dict(items)
        return messages

@@ -33,8 +45,12 @@ class FileChatMessageHistory(BaseChatMessageHistory):
        """Append the message to the record in the local file"""
        messages = messages_to_dict(self.messages)
        messages.append(messages_to_dict([message])[0])
        self.file_path.write_text(json.dumps(messages))
        self.file_path.write_text(
            json.dumps(messages, ensure_ascii=self.ensure_ascii), encoding=self.encoding
        )

    def clear(self) -> None:
        """Clear session memory from the local file"""
        self.file_path.write_text(json.dumps([]))
        self.file_path.write_text(
            json.dumps([], ensure_ascii=self.ensure_ascii), encoding=self.encoding
        )

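A short usage sketch for the new keyword-only parameters added above (file name and messages are placeholders):

```python
from langchain_community.chat_message_histories import FileChatMessageHistory

history = FileChatMessageHistory(
    "chat_history.json",
    encoding="utf-8",
    ensure_ascii=False,  # store non-ASCII text unescaped on disk
)
history.add_user_message("こんにちは")
print(history.messages)
```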
@@ -15,7 +15,43 @@ logger = logging.getLogger(__name__)


class RedisChatMessageHistory(BaseChatMessageHistory):
    """Chat message history stored in a Redis database."""
    """Chat message history stored in a Redis database.

    Setup:
        Install ``redis`` python package.

        .. code-block:: bash

            pip install redis

    Instantiate:
        .. code-block:: python

            from langchain_community.chat_message_histories import RedisChatMessageHistory

            history = RedisChatMessageHistory(
                session_id = "your-session-id",
                url="redis://your-host:your-port:your-database", # redis://localhost:6379/0
            )

    Add and retrieve messages:
        .. code-block:: python

            # Add single message
            history.add_message(message)

            # Add batch messages
            history.add_messages([message1, message2, message3, ...])

            # Add human message
            history.add_user_message(human_message)

            # Add ai message
            history.add_ai_message(ai_message)

            # Retrieve messages
            messages = history.messages
    """ # noqa: E501

    def __init__(
        self,
@@ -24,6 +60,18 @@ class RedisChatMessageHistory(BaseChatMessageHistory):
        key_prefix: str = "message_store:",
        ttl: Optional[int] = None,
    ):
        """Initialize with a RedisChatMessageHistory instance.

        Args:
            session_id: str
                The ID for single chat session. Used to form keys with `key_prefix`.
            url: Optional[str]
                String parameter configuration for connecting to the redis.
            key_prefix: Optional[str]
                The prefix of the key, combined with `session id` to form the key.
            ttl: Optional[int]
                Set the expiration time of `key`, the unit is seconds.
        """
        try:
            import redis
        except ImportError:

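Beyond the docstring examples above, the ttl parameter documented in this hunk can bound how long a session's key lives. A hedged sketch assuming a local Redis server:

```python
from langchain_community.chat_message_histories import RedisChatMessageHistory

history = RedisChatMessageHistory(
    session_id="session-1",
    url="redis://localhost:6379/0",
    key_prefix="message_store:",
    ttl=3600,  # Redis expires this session's key after one hour
)
history.add_user_message("Hello!")
print(history.messages)
```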
@@ -32,7 +32,6 @@ from sqlalchemy.engine import Engine
from sqlalchemy.ext.asyncio import (
    AsyncEngine,
    AsyncSession,
    async_sessionmaker,
    create_async_engine,
)
from sqlalchemy.orm import (
@@ -44,6 +43,12 @@ from sqlalchemy.orm import (
    sessionmaker,
)

try:
    from sqlalchemy.ext.asyncio import async_sessionmaker
except ImportError:
    # dummy for sqlalchemy < 2
    async_sessionmaker = type("async_sessionmaker", (type,), {}) # type: ignore

logger = logging.getLogger(__name__)


@@ -182,7 +187,7 @@ class SQLChatMessageHistory(BaseChatMessageHistory):
            since="0.2.2",
            removal="0.3.0",
            name="connection_string",
            alternative="Use connection instead",
            alternative="connection",
        )
        _warned_once_already = True
        connection = connection_string

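Since connection_string is deprecated in favor of connection (per the deprecation notice above), new code would look roughly like this; the SQLite URI is a placeholder:

```python
from langchain_community.chat_message_histories import SQLChatMessageHistory

history = SQLChatMessageHistory(
    session_id="session-1",
    connection="sqlite:///chat_history.db",  # was: connection_string=...
)
history.add_ai_message("How can I help?")
```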
@@ -1,3 +1,4 @@
import json
import logging
import uuid
from operator import itemgetter
@@ -39,11 +40,17 @@ from langchain_core.output_parsers.openai_tools import (
    PydanticToolsParser,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.pydantic_v1 import BaseModel, Field, SecretStr, root_validator
from langchain_core.pydantic_v1 import (
    BaseModel,
    Field,
    SecretStr,
    root_validator,
)
from langchain_core.runnables import Runnable, RunnableMap, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
from langchain_core.utils.function_calling import convert_to_openai_tool
from langchain_core.utils.pydantic import is_basemodel_subclass

logger = logging.getLogger(__name__)

@@ -65,7 +72,7 @@ def convert_message_to_dict(message: BaseMessage) -> dict:
    elif isinstance(message, (FunctionMessage, ToolMessage)):
        message_dict = {
            "role": "function",
            "content": message.content,
            "content": _create_tool_content(message.content),
            "name": message.name or message.additional_kwargs.get("name"),
        }
    else:
@@ -74,6 +81,20 @@ def convert_message_to_dict(message: BaseMessage) -> dict:
    return message_dict


def _create_tool_content(content: Union[str, List[Union[str, Dict[Any, Any]]]]) -> str:
    """Convert tool content to dict scheme."""
    if isinstance(content, str):
        try:
            if isinstance(json.loads(content), dict):
                return content
            else:
                return json.dumps({"tool_result": content})
        except json.JSONDecodeError:
            return json.dumps({"tool_result": content})
    else:
        return json.dumps({"tool_result": content})

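The helper above normalizes tool output into a JSON-object string before it is sent on. A self-contained mirror of that behavior, for illustration only (the function name here is a stand-in for the private helper):

```python
import json


def create_tool_content(content) -> str:
    """Mirror of the _create_tool_content helper shown above (illustrative)."""
    if isinstance(content, str):
        try:
            if isinstance(json.loads(content), dict):
                return content  # already a JSON object string: pass through
            return json.dumps({"tool_result": content})
        except json.JSONDecodeError:
            return json.dumps({"tool_result": content})
    return json.dumps({"tool_result": content})


print(create_tool_content('{"temperature": 21}'))  # passed through unchanged
print(create_tool_content("sunny"))                # {"tool_result": "sunny"}
print(create_tool_content(["a", "b"]))             # {"tool_result": ["a", "b"]}
```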
def _convert_dict_to_message(_dict: Mapping[str, Any]) -> AIMessage:
    content = _dict.get("result", "") or ""
    additional_kwargs: Mapping[str, Any] = {}
@@ -109,9 +130,9 @@ def _convert_dict_to_message(_dict: Mapping[str, Any]) -> AIMessage:
        content=content,
        additional_kwargs=msg_additional_kwargs,
        usage_metadata=UsageMetadata(
            input_tokens=usage.prompt_tokens,
            output_tokens=usage.completion_tokens,
            total_tokens=usage.total_tokens,
            input_tokens=usage.get("prompt_tokens", 0),
            output_tokens=usage.get("completion_tokens", 0),
            total_tokens=usage.get("total_tokens", 0),
        ),
    )

@@ -340,13 +361,13 @@ class QianfanChatEndpoint(BaseChatModel):
    In the case of other model, passing these params will not affect the result.
    """

    model: str = "ERNIE-Bot-turbo"
    model: str = "ERNIE-Lite-8K"
    """Model name.
    you could get from https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu

    preset models are mapping to an endpoint.
    `model` will be ignored if `endpoint` is set.
    Default is ERNIE-Bot-turbo.
    Default is ERNIE-Lite-8K.
    """

    endpoint: Optional[str] = None
@@ -754,7 +775,7 @@ class QianfanChatEndpoint(BaseChatModel):
        """ # noqa: E501
        if kwargs:
            raise ValueError(f"Received unsupported arguments {kwargs}")
        is_pydantic_schema = isinstance(schema, type) and issubclass(schema, BaseModel)
        is_pydantic_schema = isinstance(schema, type) and is_basemodel_subclass(schema)
        llm = self.bind_tools([schema])
        if is_pydantic_schema:
            output_parser: OutputParserLike = PydanticToolsParser(

@@ -57,6 +57,7 @@ from langchain_core.runnables import Runnable, RunnableMap, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from langchain_core.utils.function_calling import convert_to_openai_tool
from langchain_core.utils.pydantic import is_basemodel_subclass

from langchain_community.utilities.requests import Requests

@@ -443,7 +444,7 @@ class ChatEdenAI(BaseChatModel):
        if kwargs:
            raise ValueError(f"Received unsupported arguments {kwargs}")
        llm = self.bind_tools([schema], tool_choice="required")
        if isinstance(schema, type) and issubclass(schema, BaseModel):
        if isinstance(schema, type) and is_basemodel_subclass(schema):
            output_parser: OutputParserLike = PydanticToolsParser(
                tools=[schema], first_tool_only=True
            )

@@ -8,7 +8,7 @@ from typing import TYPE_CHECKING, Dict, Optional, Set

from langchain_core.messages import BaseMessage
from langchain_core.pydantic_v1 import Field, root_validator
from langchain_core.utils import get_from_dict_or_env
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env

from langchain_community.adapters.openai import convert_message_to_dict
from langchain_community.chat_models.openai import (
@@ -79,10 +79,12 @@ class ChatEverlyAI(ChatOpenAI):
    @root_validator(pre=True)
    def validate_environment_override(cls, values: dict) -> dict:
        """Validate that api key and python package exists in environment."""
        values["openai_api_key"] = get_from_dict_or_env(
            values,
            "everlyai_api_key",
            "EVERLYAI_API_KEY",
        values["openai_api_key"] = convert_to_secret_str(
            get_from_dict_or_env(
                values,
                "everlyai_api_key",
                "EVERLYAI_API_KEY",
            )
        )
        values["openai_api_base"] = DEFAULT_API_BASE


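Wrapping the key with convert_to_secret_str stores it as a SecretStr, so it no longer leaks through casual printing or serialization. A small sketch (the key value is fake):

```python
from langchain_core.utils import convert_to_secret_str

key = convert_to_secret_str("everlyai-key-123")
print(key)                     # **********
print(key.get_secret_value())  # everlyai-key-123
```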
@@ -46,10 +46,15 @@ from langchain_core.output_parsers.openai_tools import (
    parse_tool_call,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.pydantic_v1 import BaseModel, Field, root_validator
from langchain_core.pydantic_v1 import (
    BaseModel,
    Field,
    root_validator,
)
from langchain_core.runnables import Runnable, RunnableMap, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.utils.function_calling import convert_to_openai_tool
from langchain_core.utils.pydantic import is_basemodel_subclass


class ChatLlamaCpp(BaseChatModel):
@@ -525,7 +530,7 @@ class ChatLlamaCpp(BaseChatModel):

        if kwargs:
            raise ValueError(f"Received unsupported arguments {kwargs}")
        is_pydantic_schema = isinstance(schema, type) and issubclass(schema, BaseModel)
        is_pydantic_schema = isinstance(schema, type) and is_basemodel_subclass(schema)
        if schema is None:
            raise ValueError(
                "schema must be specified when method is 'function_calling'. "

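The with_structured_output paths touched in these hunks detect Pydantic classes via is_basemodel_subclass and parse tool calls back into them. A hedged end-to-end sketch for ChatLlamaCpp; the GGUF path and sampling settings are placeholders and the snippet assumes llama-cpp-python plus a local tool-calling-capable model:

```python
from langchain_community.chat_models import ChatLlamaCpp
from langchain_core.pydantic_v1 import BaseModel


class Joke(BaseModel):
    setup: str
    punchline: str


llm = ChatLlamaCpp(model_path="/path/to/model.gguf", temperature=0)
structured_llm = llm.with_structured_output(Joke)  # Joke passes is_basemodel_subclass
joke = structured_llm.invoke("Tell me a joke about parrots")
print(joke.setup, "-", joke.punchline)
```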
Some files were not shown because too many files have changed in this diff.