Mirror of https://github.com/hwchase17/langchain.git (synced 2026-02-20 05:43:34 +00:00).

## Compare commits

236 commits, `cc/update_...` → `eugene/roo...`
**`.github/actions/people/app/main.py`** — 1 change (vendored)

```diff
@@ -547,6 +547,7 @@ if __name__ == "__main__":
     "obi1kenobi",
     "langchain-infra",
     "jacoblee93",
     "isahers1",
     "dqbd",
     "bracesproul",
+    "akira",
```
**`.github/scripts/check_diff.py`** — 11 changes (vendored)

```diff
@@ -15,6 +15,10 @@ LANGCHAIN_DIRS = [
     "libs/experimental",
 ]
 
 
+def all_package_dirs() -> Set[str]:
+    return {"/".join(path.split("/")[:-1]) for path in glob.glob("./libs/**/pyproject.toml", recursive=True)}
+
+
 def dependents_graph() -> dict:
     dependents = defaultdict(set)
@@ -53,10 +57,11 @@ if __name__ == "__main__":
     }
     docs_edited = False
 
-    if len(files) == 300:
+    if len(files) >= 300:
         # max diff length is 300 files - there are likely files missing
-        raise ValueError("Max diff reached. Please manually run CI on changed libs.")
-
+        dirs_to_run["lint"] = all_package_dirs()
+        dirs_to_run["test"] = all_package_dirs()
+        dirs_to_run["extended-test"] = set(LANGCHAIN_DIRS)
     for file in files:
         if any(
             file.startswith(dir_)
```
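For context, a minimal standalone sketch of what the new `all_package_dirs()` helper computes on a monorepo checkout — this is an illustration, not part of the script, and the example paths in the comment are assumptions:

```python
import glob

# Find every package directory in the monorepo: each ./libs/**/pyproject.toml
# marks a package root. Drop the filename and dedupe with a set comprehension.
paths = glob.glob("./libs/**/pyproject.toml", recursive=True)
package_dirs = {"/".join(path.split("/")[:-1]) for path in paths}

# e.g. {"./libs/core", "./libs/community", "./libs/partners/openai", ...}
print(sorted(package_dirs))
```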
**`.github/workflows/_release.yml`** — 7 changes (vendored)

```diff
@@ -135,6 +135,7 @@ jobs:
       - release-notes
     uses:
       ./.github/workflows/_test_release.yml
+    permissions: write-all
     with:
       working-directory: ${{ inputs.working-directory }}
       dangerous-nonmaster-release: ${{ inputs.dangerous-nonmaster-release }}
@@ -202,7 +203,7 @@ jobs:
           poetry run python -c "import $IMPORT_NAME; print(dir($IMPORT_NAME))"
 
       - name: Import test dependencies
-        run: poetry install --with test,test_integration
+        run: poetry install --with test
         working-directory: ${{ inputs.working-directory }}
 
       # Overwrite the local version of the package with the test PyPI version.
@@ -245,6 +246,10 @@ jobs:
         with:
           credentials_json: '${{ secrets.GOOGLE_CREDENTIALS }}'
 
+      - name: Import integration test dependencies
+        run: poetry install --with test,test_integration
+        working-directory: ${{ inputs.working-directory }}
+
       - name: Run integration tests
         if: ${{ startsWith(inputs.working-directory, 'libs/partners/') }}
         env:
```
**`.github/workflows/people.yml`** — 1 change (vendored)

```diff
@@ -16,6 +16,7 @@ jobs:
   langchain-people:
     if: github.repository_owner == 'langchain-ai'
     runs-on: ubuntu-latest
+    permissions: write-all
     steps:
       - name: Dump GitHub context
         env:
```
**`README.md`** — 20 changes

```diff
@@ -38,24 +38,25 @@ conda install langchain -c conda-forge
 
 For these applications, LangChain simplifies the entire application lifecycle:
 
-- **Open-source libraries**: Build your applications using LangChain's [modular building blocks](https://python.langchain.com/v0.2/docs/concepts/#langchain-expression-language-lcel) and [components](https://python.langchain.com/v0.2/docs/concepts/#components). Integrate with hundreds of [third-party providers](https://python.langchain.com/v0.2/docs/integrations/platforms/).
+- **Open-source libraries**: Build your applications using LangChain's open-source [building blocks](https://python.langchain.com/v0.2/docs/concepts#langchain-expression-language-lcel), [components](https://python.langchain.com/v0.2/docs/concepts), and [third-party integrations](https://python.langchain.com/v0.2/docs/integrations/platforms/).
+Use [LangGraph](/docs/concepts/#langgraph) to build stateful agents with first-class streaming and human-in-the-loop support.
 - **Productionization**: Inspect, monitor, and evaluate your apps with [LangSmith](https://docs.smith.langchain.com/) so that you can constantly optimize and deploy with confidence.
-- **Deployment**: Turn any chain into a REST API with [LangServe](https://python.langchain.com/v0.2/docs/langserve/).
+- **Deployment**: Turn your LangGraph applications into production-ready APIs and Assistants with [LangGraph Cloud](https://langchain-ai.github.io/langgraph/cloud/).
 
 ### Open-source libraries
 - **`langchain-core`**: Base abstractions and LangChain Expression Language.
 - **`langchain-community`**: Third party integrations.
   - Some integrations have been further split into **partner packages** that only rely on **`langchain-core`**. Examples include **`langchain_openai`** and **`langchain_anthropic`**.
 - **`langchain`**: Chains, agents, and retrieval strategies that make up an application's cognitive architecture.
-- **[`LangGraph`](https://langchain-ai.github.io/langgraph/)**: A library for building robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph.
+- **[`LangGraph`](https://langchain-ai.github.io/langgraph/)**: A library for building robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph. Integrates smoothly with LangChain, but can be used without it.
 
 ### Productionization:
 - **[LangSmith](https://docs.smith.langchain.com/)**: A developer platform that lets you debug, test, evaluate, and monitor chains built on any LLM framework and seamlessly integrates with LangChain.
 
 ### Deployment:
 - **[LangServe](https://python.langchain.com/v0.2/docs/langserve/)**: A library for deploying LangChain chains as REST APIs.
+- **[LangGraph Cloud](https://langchain-ai.github.io/langgraph/cloud/)**: Turn your LangGraph applications into production-ready APIs and Assistants.
 
-![Diagram outlining the hierarchical organization of the LangChain framework, displaying the interconnected parts across multiple layers.](docs/static/svg/langchain_stack.svg "LangChain Architecture Overview")
+![Diagram outlining the hierarchical organization of the LangChain framework, displaying the interconnected parts across multiple layers.](docs/static/svg/langchain_stack_062024.svg "LangChain Architecture Overview")
 
 ## 🧱 What can you build with LangChain?
@@ -106,7 +107,7 @@ Retrieval Augmented Generation involves [loading data](https://python.langchain.
 
 **🤖 Agents**
 
-Agents allow an LLM autonomy over how a task is accomplished. Agents make decisions about which Actions to take, then take that Action, observe the result, and repeat until the task is complete. LangChain provides a [standard interface for agents](https://python.langchain.com/v0.2/docs/concepts/#agents) along with the [LangGraph](https://github.com/langchain-ai/langgraph) extension for building custom agents.
+Agents allow an LLM autonomy over how a task is accomplished. Agents make decisions about which Actions to take, then take that Action, observe the result, and repeat until the task is complete. LangChain provides a [standard interface for agents](https://python.langchain.com/v0.2/docs/concepts/#agents), along with [LangGraph](https://github.com/langchain-ai/langgraph) for building custom agents.
 
 ## 📖 Documentation
@@ -120,10 +121,9 @@ Please see [here](https://python.langchain.com) for full documentation, which in
 
 ## 🌐 Ecosystem
 
-- [🦜🛠️ LangSmith](https://docs.smith.langchain.com/): Tracing and evaluating your language model applications and intelligent agents to help you move from prototype to production.
-- [🦜🕸️ LangGraph](https://langchain-ai.github.io/langgraph/): Creating stateful, multi-actor applications with LLMs, built on top of (and intended to be used with) LangChain primitives.
-- [🦜🏓 LangServe](https://python.langchain.com/docs/langserve): Deploying LangChain runnables and chains as REST APIs.
-- [LangChain Templates](https://python.langchain.com/v0.2/docs/templates/): Example applications hosted with LangServe.
+- [🦜🛠️ LangSmith](https://docs.smith.langchain.com/): Trace and evaluate your language model applications and intelligent agents to help you move from prototype to production.
+- [🦜🕸️ LangGraph](https://langchain-ai.github.io/langgraph/): Create stateful, multi-actor applications with LLMs. Integrates smoothly with LangChain, but can be used without it.
+- [🦜🏓 LangServe](https://python.langchain.com/docs/langserve): Deploy LangChain runnables and chains as REST APIs.
 
 ## 💁 Contributing
```
*File diff suppressed because one or more lines are too long.*

A docs-build Makefile rule is updated to also sync `_category_.yml` files:

```diff
@@ -61,7 +61,7 @@ render:
 	$(PYTHON) scripts/notebook_convert.py $(INTERMEDIATE_DIR) $(OUTPUT_NEW_DOCS_DIR)
 
 md-sync:
-	rsync -avm --include="*/" --include="*.mdx" --include="*.md" --include="*.png" --exclude="*" $(INTERMEDIATE_DIR)/ $(OUTPUT_NEW_DOCS_DIR)
+	rsync -avm --include="*/" --include="*.mdx" --include="*.md" --include="*.png" --include="*/_category_.yml" --exclude="*" $(INTERMEDIATE_DIR)/ $(OUTPUT_NEW_DOCS_DIR)
 
 generate-references:
 	$(PYTHON) scripts/generate_api_reference_links.py --docs_dir $(OUTPUT_NEW_DOCS_DIR)
```
The API-reference generation script gains Runnable-aware class kinds:

```diff
@@ -10,12 +10,21 @@ from pathlib import Path
 from typing import Dict, List, Literal, Optional, Sequence, TypedDict, Union
 
 import toml
+import typing_extensions
+from langchain_core.runnables import Runnable, RunnableSerializable
 from pydantic import BaseModel
 
 ROOT_DIR = Path(__file__).parents[2].absolute()
 HERE = Path(__file__).parent
 
-ClassKind = Literal["TypedDict", "Regular", "Pydantic", "enum"]
+ClassKind = Literal[
+    "TypedDict",
+    "Regular",
+    "Pydantic",
+    "enum",
+    "RunnablePydantic",
+    "RunnableNonPydantic",
+]
 
 
 class ClassInfo(TypedDict):
@@ -69,8 +78,36 @@ def _load_module_members(module_path: str, namespace: str) -> ModuleMembers:
             continue
 
         if inspect.isclass(type_):
-            if type(type_) == typing._TypedDictMeta:  # type: ignore
+            # The classification of the class is used to select a template
+            # for the object when rendering the documentation.
+            # See `templates` directory for defined templates.
+            # This is a hacky solution to distinguish between different
+            # kinds of thing that we want to render.
+            if type(type_) is typing_extensions._TypedDictMeta:  # type: ignore
                 kind: ClassKind = "TypedDict"
+            elif type(type_) is typing._TypedDictMeta:  # type: ignore
+                kind: ClassKind = "TypedDict"
+            elif (
+                issubclass(type_, Runnable)
+                and issubclass(type_, BaseModel)
+                and type_ is not Runnable
+            ):
+                # RunnableSerializable subclasses from Pydantic,
+                # for which we use autodoc_pydantic for rendering.
+                # We need to distinguish these from regular Pydantic
+                # classes so we can hide inherited Runnable methods
+                # and provide a link to the Runnable interface from
+                # the template.
+                kind = "RunnablePydantic"
+            elif (
+                issubclass(type_, Runnable)
+                and not issubclass(type_, BaseModel)
+                and type_ is not Runnable
+            ):
+                # These are not pydantic classes but are Runnable.
+                # We'll hide all the inherited methods from Runnable
+                # but use a regular class template to render.
+                kind = "RunnableNonPydantic"
             elif issubclass(type_, Enum):
                 kind = "enum"
             elif issubclass(type_, BaseModel):
@@ -251,6 +288,10 @@ Classes
             template = "enum.rst"
         elif class_["kind"] == "Pydantic":
             template = "pydantic.rst"
+        elif class_["kind"] == "RunnablePydantic":
+            template = "runnable_pydantic.rst"
+        elif class_["kind"] == "RunnableNonPydantic":
+            template = "runnable_non_pydantic.rst"
         else:
             template = "class.rst"
```
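The Runnable-vs-Pydantic split above reduces to plain `issubclass` checks. A minimal standalone sketch of the same predicate — the `classify` helper is illustrative, not part of the script, and it assumes `langchain_core` and the same pydantic major version the script imports:

```python
from enum import Enum

from langchain_core.runnables import Runnable, RunnableLambda
from pydantic import BaseModel


def classify(type_: type) -> str:
    # Mirrors the template-selection logic in the hunk above.
    if issubclass(type_, Runnable) and issubclass(type_, BaseModel) and type_ is not Runnable:
        return "RunnablePydantic"
    if issubclass(type_, Runnable) and not issubclass(type_, BaseModel) and type_ is not Runnable:
        return "RunnableNonPydantic"
    if issubclass(type_, Enum):
        return "enum"
    if issubclass(type_, BaseModel):
        return "Pydantic"
    return "Regular"


# RunnableLambda implements Runnable but is not a Pydantic model,
# so it gets the plain-class template with Runnable methods hidden.
print(classify(RunnableLambda))  # -> "RunnableNonPydantic"
```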
*File diff suppressed because one or more lines are too long.*

A template's trailing `example_links` directive changes only in whitespace:

```diff
@@ -33,4 +33,4 @@
 {% endblock %}
 
-
-.. example_links:: {{ objname }}
+
+.. example_links:: {{ objname }}
```
Another autodoc template gains an empty `attributes` block override:

```diff
@@ -15,6 +15,8 @@
     :member-order: groupwise
     :show-inheritance: True
     :special-members: __call__
     :exclude-members: construct, copy, dict, from_orm, parse_file, parse_obj, parse_raw, schema, schema_json, update_forward_refs, validate, json, is_lc_serializable, to_json, to_json_not_implemented, lc_secrets, lc_attributes, lc_id, get_lc_namespace
 
+
+{% block attributes %}
+{% endblock %}
```
**`docs/api_reference/templates/runnable_non_pydantic.rst`** — new file (40 lines)

```rst
:mod:`{{module}}`.{{objname}}
{{ underline }}==============

.. NOTE:: {{objname}} implements the standard :py:class:`Runnable Interface <langchain_core.runnables.base.Runnable>`. 🏃

    The :py:class:`Runnable Interface <langchain_core.runnables.base.Runnable>` has additional methods that are available on runnables, such as :py:meth:`with_types <langchain_core.runnables.base.Runnable.with_types>`, :py:meth:`with_retry <langchain_core.runnables.base.Runnable.with_retry>`, :py:meth:`assign <langchain_core.runnables.base.Runnable.assign>`, :py:meth:`bind <langchain_core.runnables.base.Runnable.bind>`, :py:meth:`get_graph <langchain_core.runnables.base.Runnable.get_graph>`, and more.

.. currentmodule:: {{ module }}

.. autoclass:: {{ objname }}

{% block attributes %}
{% if attributes %}
.. rubric:: {{ _('Attributes') }}

.. autosummary::
{% for item in attributes %}
   ~{{ name }}.{{ item }}
{%- endfor %}
{% endif %}
{% endblock %}

{% block methods %}
{% if methods %}
.. rubric:: {{ _('Methods') }}

.. autosummary::
{% for item in methods %}
   ~{{ name }}.{{ item }}
{%- endfor %}

{% for item in methods %}
.. automethod:: {{ name }}.{{ item }}
{%- endfor %}

{% endif %}
{% endblock %}


.. example_links:: {{ objname }}
```
**`docs/api_reference/templates/runnable_pydantic.rst`** — new file (24 lines)

```rst
:mod:`{{module}}`.{{objname}}
{{ underline }}==============

.. NOTE:: {{objname}} implements the standard :py:class:`Runnable Interface <langchain_core.runnables.base.Runnable>`. 🏃

    The :py:class:`Runnable Interface <langchain_core.runnables.base.Runnable>` has additional methods that are available on runnables, such as :py:meth:`with_types <langchain_core.runnables.base.Runnable.with_types>`, :py:meth:`with_retry <langchain_core.runnables.base.Runnable.with_retry>`, :py:meth:`assign <langchain_core.runnables.base.Runnable.assign>`, :py:meth:`bind <langchain_core.runnables.base.Runnable.bind>`, :py:meth:`get_graph <langchain_core.runnables.base.Runnable.get_graph>`, and more.

.. currentmodule:: {{ module }}

.. autopydantic_model:: {{ objname }}
    :model-show-json: False
    :model-show-config-summary: False
    :model-show-validator-members: False
    :model-show-field-summary: False
    :field-signature-prefix: param
    :members:
    :undoc-members:
    :inherited-members:
    :member-order: groupwise
    :show-inheritance: True
    :special-members: __call__
    :exclude-members: construct, copy, dict, from_orm, parse_file, parse_obj, parse_raw, schema, schema_json, update_forward_refs, validate, json, is_lc_serializable, to_json_not_implemented, lc_secrets, lc_attributes, lc_id, get_lc_namespace, astream_log, transform, atransform, get_output_schema, get_prompts, config_schema, map, pick, pipe, with_listeners, with_alisteners, with_config, with_fallbacks, with_types, with_retry, InputType, OutputType, config_specs, output_schema, get_input_schema, get_graph, get_name, input_schema, name, bind, assign

.. example_links:: {{ objname }}
```
A Sphinx theme layout template is reflowed; the captured hunk interleaves the old single-line markup with the new wrapped markup:

```diff
@@ -2,132 +2,129 @@
 {%- set url_root = pathto('', 1) %}
 {%- if url_root == '#' %}{% set url_root = '' %}{% endif %}
 {%- if not embedded and docstitle %}
 {%- set titlesuffix = " — "|safe + docstitle|e %}
 {%- set titlesuffix = " — "|safe + docstitle|e %}
 {%- else %}
 {%- set titlesuffix = "" %}
 {%- set titlesuffix = "" %}
 {%- endif %}
 {%- set lang_attr = 'en' %}
 
 <!DOCTYPE html>
 <!--[if IE 8]><html class="no-js lt-ie9" lang="{{ lang_attr }}" > <![endif]-->
 <!--[if gt IE 8]><!--> <html class="no-js" lang="{{ lang_attr }}" > <!--<![endif]-->
 <!--[if gt IE 8]><!-->
 <html class="no-js" lang="{{ lang_attr }}"> <!--<![endif]-->
 <head>
 <meta charset="utf-8">
 {{ metatags }}
 <meta name="viewport" content="width=device-width, initial-scale=1.0">
 <meta charset="utf-8">
 {{ metatags }}
 <meta name="viewport" content="width=device-width, initial-scale=1.0">
 
 {% block htmltitle %}
 <title>{{ title|striptags|e }}{{ titlesuffix }}</title>
 {% endblock %}
 <link rel="canonical" href="https://api.python.langchain.com/en/latest/{{pagename}}.html" />
 {% block htmltitle %}
 <title>{{ title|striptags|e }}{{ titlesuffix }}</title>
 {% endblock %}
 <link rel="canonical"
       href="https://api.python.langchain.com/en/latest/{{ pagename }}.html"/>
 
 {% if favicon_url %}
 <link rel="shortcut icon" href="{{ favicon_url|e }}"/>
 {% endif %}
 {% if favicon_url %}
 <link rel="shortcut icon" href="{{ favicon_url|e }}"/>
 {% endif %}
 
 <link rel="stylesheet" href="{{ pathto('_static/css/vendor/bootstrap.min.css', 1) }}" type="text/css" />
 {%- for css in css_files %}
 {%- if css|attr("rel") %}
 <link rel="{{ css.rel }}" href="{{ pathto(css.filename, 1) }}" type="text/css"{% if css.title is not none %} title="{{ css.title }}"{% endif %} />
 {%- else %}
 <link rel="stylesheet" href="{{ pathto(css, 1) }}" type="text/css" />
 {%- endif %}
 {%- endfor %}
 <link rel="stylesheet" href="{{ pathto('_static/' + style, 1) }}" type="text/css" />
 <script id="documentation_options" data-url_root="{{ pathto('', 1) }}" src="{{ pathto('_static/documentation_options.js', 1) }}"></script>
 <script src="{{ pathto('_static/jquery.js', 1) }}"></script>
 {%- block extrahead %} {% endblock %}
 <link rel="stylesheet"
       href="{{ pathto('_static/css/vendor/bootstrap.min.css', 1) }}"
       type="text/css"/>
 {%- for css in css_files %}
 {%- if css|attr("rel") %}
 <link rel="{{ css.rel }}" href="{{ pathto(css.filename, 1) }}"
       type="text/css"{% if css.title is not none %}
       title="{{ css.title }}"{% endif %} />
 {%- else %}
 <link rel="stylesheet" href="{{ pathto(css, 1) }}" type="text/css"/>
 {%- endif %}
 {%- endfor %}
 <link rel="stylesheet" href="{{ pathto('_static/' + style, 1) }}" type="text/css"/>
 <script id="documentation_options" data-url_root="{{ pathto('', 1) }}"
         src="{{ pathto('_static/documentation_options.js', 1) }}"></script>
 <script src="{{ pathto('_static/jquery.js', 1) }}"></script>
 {%- block extrahead %} {% endblock %}
 </head>
 <body>
 {% include "nav.html" %}
 {%- block content %}
 <div class="d-flex" id="sk-doc-wrapper">
 <input type="checkbox" name="sk-toggle-checkbox" id="sk-toggle-checkbox">
 <label id="sk-sidemenu-toggle" class="sk-btn-toggle-toc btn sk-btn-primary" for="sk-toggle-checkbox">Toggle Menu</label>
 <div id="sk-sidebar-wrapper" class="border-right">
 <div class="sk-sidebar-toc-wrapper">
 <div class="btn-group w-100 mb-2" role="group" aria-label="rellinks">
 {%- if prev %}
 <a href="{{ prev.link|e }}" role="button" class="btn sk-btn-rellink py-1" sk-rellink-tooltip="{{ prev.title|striptags }}">Prev</a>
 {%- else %}
 <a href="#" role="button" class="btn sk-btn-rellink py-1 disabled"">Prev</a>
 {%- endif %}
 {%- if parents -%}
 <a href="{{ parents[-1].link|e }}" role="button" class="btn sk-btn-rellink py-1" sk-rellink-tooltip="{{ parents[-1].title|striptags }}">Up</a>
 {%- else %}
 <a href="#" role="button" class="btn sk-btn-rellink disabled py-1">Up</a>
 {%- endif %}
 {%- if next %}
 <a href="{{ next.link|e }}" role="button" class="btn sk-btn-rellink py-1" sk-rellink-tooltip="{{ next.title|striptags }}">Next</a>
 {%- else %}
 <a href="#" role="button" class="btn sk-btn-rellink py-1 disabled"">Next</a>
 {%- endif %}
 <div class="d-flex" id="sk-doc-wrapper">
 <input type="checkbox" name="sk-toggle-checkbox" id="sk-toggle-checkbox">
 <label id="sk-sidemenu-toggle" class="sk-btn-toggle-toc btn sk-btn-primary"
        for="sk-toggle-checkbox">Toggle Menu</label>
 <div id="sk-sidebar-wrapper" class="border-right">
 <div class="sk-sidebar-toc-wrapper">
 {%- if meta and meta['parenttoc']|tobool %}
 <div class="sk-sidebar-toc">
 {% set nav = get_nav_object(maxdepth=3, collapse=True, numbered=True) %}
 <ul>
 {% for main_nav_item in nav %}
 {% if main_nav_item.active %}
 <li>
 <a href="{{ main_nav_item.url }}"
    class="sk-toc-active">{{ main_nav_item.title }}</a>
 </li>
 <ul>
 {% for nav_item in main_nav_item.children %}
 <li>
 <a href="{{ nav_item.url }}"
    class="{% if nav_item.active %}sk-toc-active{% endif %}">{{ nav_item.title }}</a>
 {% if nav_item.children %}
 <ul>
 {% for inner_child in nav_item.children %}
 <li class="sk-toctree-l3">
 <a href="{{ inner_child.url }}">{{ inner_child.title }}</a>
 </li>
 {% endfor %}
 </ul>
 {% endif %}
 </li>
 {% endfor %}
 </ul>
 {% endif %}
 {% endfor %}
 </ul>
 </div>
 {%- elif meta and meta['globalsidebartoc']|tobool %}
 <div class="sk-sidebar-toc sk-sidebar-global-toc">
 {{ toctree(maxdepth=2, titles_only=True) }}
 </div>
 {%- else %}
 <div class="sk-sidebar-toc">
 {{ toc }}
 </div>
 {%- endif %}
 </div>
 </div>
 {%- if meta and meta['parenttoc']|tobool %}
 <div class="sk-sidebar-toc">
 {% set nav = get_nav_object(maxdepth=3, collapse=True, numbered=True) %}
 <ul>
 {% for main_nav_item in nav %}
 {% if main_nav_item.active %}
 <li>
 <a href="{{ main_nav_item.url }}" class="sk-toc-active">{{ main_nav_item.title }}</a>
 </li>
 <ul>
 {% for nav_item in main_nav_item.children %}
 <li>
 <a href="{{ nav_item.url }}" class="{% if nav_item.active %}sk-toc-active{% endif %}">{{ nav_item.title }}</a>
 {% if nav_item.children %}
 <ul>
 {% for inner_child in nav_item.children %}
 <li class="sk-toctree-l3">
 <a href="{{ inner_child.url }}">{{ inner_child.title }}</a>
 </li>
 {% endfor %}
 </ul>
 {% endif %}
 </li>
 {% endfor %}
 </ul>
 {% endif %}
 {% endfor %}
 </ul>
 <div id="sk-page-content-wrapper">
 <div class="sk-page-content container-fluid body px-md-3" role="main">
 {% block body %}{% endblock %}
 </div>
 {%- elif meta and meta['globalsidebartoc']|tobool %}
 <div class="sk-sidebar-toc sk-sidebar-global-toc">
 {{ toctree(maxdepth=2, titles_only=True) }}
 <div class="container">
 <footer class="sk-content-footer">
 {%- if pagename != 'index' %}
 {%- if show_copyright %}
 {%- if hasdoc('copyright') %}
 {% trans path=pathto('copyright'), copyright=copyright|e %}
 © {{ copyright }}.{% endtrans %}
 {%- else %}
 {% trans copyright=copyright|e %}© {{ copyright }}
 .{% endtrans %}
 {%- endif %}
 {%- endif %}
 {%- if last_updated %}
 {% trans last_updated=last_updated|e %}Last updated
 on {{ last_updated }}.{% endtrans %}
 {%- endif %}
 {%- if show_source and has_source and sourcename %}
 <a href="{{ pathto('_sources/' + sourcename, true)|e }}"
    rel="nofollow">{{ _('Show this page source') }}</a>
 {%- endif %}
 {%- endif %}
 </footer>
 </div>
 {%- else %}
 <div class="sk-sidebar-toc">
 {{ toc }}
 </div>
 {%- endif %}
 </div>
 </div>
 </div>
 <div id="sk-page-content-wrapper">
 <div class="sk-page-content container-fluid body px-md-3" role="main">
 {% block body %}{% endblock %}
 </div>
 <div class="container">
 <footer class="sk-content-footer">
 {%- if pagename != 'index' %}
 {%- if show_copyright %}
 {%- if hasdoc('copyright') %}
 {% trans path=pathto('copyright'), copyright=copyright|e %}© {{ copyright }}.{% endtrans %}
 {%- else %}
 {% trans copyright=copyright|e %}© {{ copyright }}.{% endtrans %}
 {%- endif %}
 {%- endif %}
 {%- if last_updated %}
 {% trans last_updated=last_updated|e %}Last updated on {{ last_updated }}.{% endtrans %}
 {%- endif %}
 {%- if show_source and has_source and sourcename %}
 <a href="{{ pathto('_sources/' + sourcename, true)|e }}" rel="nofollow">{{ _('Show this page source') }}</a>
 {%- endif %}
 {%- endif %}
 </footer>
 </div>
 </div>
 </div>
 {%- endblock %}
 <script src="{{ pathto('_static/js/vendor/bootstrap.min.js', 1) }}"></script>
 {% include "javascript.html" %}
```
**`docs/data/people.yml`** — 1248 changes. *File diff suppressed because it is too large.*
Several hunks update the conceptual documentation. First, the architecture diagram sources:

```diff
@@ -51,8 +51,8 @@ A developer platform that lets you debug, test, evaluate, and monitor LLM applic
 <ThemedImage
     alt="Diagram outlining the hierarchical organization of the LangChain framework, displaying the interconnected parts across multiple layers."
     sources={{
-        light: useBaseUrl('/svg/langchain_stack.svg'),
-        dark: useBaseUrl('/svg/langchain_stack_dark.svg'),
+        light: useBaseUrl('/svg/langchain_stack_062024.svg'),
+        dark: useBaseUrl('/svg/langchain_stack_062024_dark.svg'),
     }}
     title="LangChain Framework Overview"
 />
```
```diff
@@ -89,7 +89,7 @@ With LCEL, **all** steps are automatically logged to [LangSmith](https://docs.sm
 Any chain created with LCEL can be easily deployed using [LangServe](/docs/langserve).
 
 ### Runnable interface
-<span data-heading-keywords="invoke"></span>
+<span data-heading-keywords="invoke,runnable"></span>
 
 To make it as easy as possible to create custom chains, we've implemented a ["Runnable"](https://api.python.langchain.com/en/stable/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable) protocol. Many LangChain components implement the `Runnable` protocol, including chat models, LLMs, output parsers, retrievers, prompt templates, and more. There are also several useful primitives for working with runnables, which you can read about below.
```
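As a hedged illustration of that protocol, the sketch below wraps a plain function in `RunnableLambda` and uses two of the helper methods (`with_retry`, `with_types`) that the new keyword targets and the API templates above link to — the `halve` function is illustrative:

```python
from langchain_core.runnables import RunnableLambda


def halve(x: int) -> float:
    return x / 2


# RunnableLambda gives a plain function the Runnable interface,
# so the standard helper methods compose onto it.
chain = (
    RunnableLambda(halve)
    .with_retry(stop_after_attempt=2)  # retry transient failures
    .with_types(input_type=int)        # document the expected input type
)

print(chain.invoke(6))  # -> 3.0
```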
```diff
@@ -144,8 +144,19 @@ LangChain does not host any Chat Models, rather we rely on third party integrati
 
 We have some standardized parameters when constructing ChatModels:
 - `model`: the name of the model
 - `temperature`: the sampling temperature
+- `timeout`: request timeout
+- `max_tokens`: max tokens to generate
+- `stop`: default stop sequences
+- `max_retries`: max number of times to retry requests
+- `api_key`: API key for the model provider
+- `base_url`: endpoint to send requests to
 
-ChatModels also accept other parameters that are specific to that integration.
+Some important things to note:
+- standard params only apply to model providers that expose parameters with the intended functionality. For example, some providers do not expose a configuration for maximum output tokens, so max_tokens can't be supported on these.
+- standard params are currently only enforced on integrations that have their own integration packages (e.g. `langchain-openai`, `langchain-anthropic`, etc.), they're not enforced on models in ``langchain-community``.
+
+ChatModels also accept other parameters that are specific to that integration. To find all the parameters supported by a ChatModel head to the API reference for that model.
 
 :::important
 **Tool Calling** Some chat models have been fine-tuned for tool calling and provide a dedicated API for tool calling.
```
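A hedged sketch of those standardized parameters in use, with `langchain-openai` as one integration package that enforces them — the model name and values here are illustrative assumptions:

```python
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(
    model="gpt-4o-mini",  # name of the model (illustrative)
    temperature=0,        # sampling temperature
    timeout=30,           # request timeout, in seconds
    max_tokens=256,       # max tokens to generate
    stop=["\n\n"],        # default stop sequences
    max_retries=2,        # max number of times to retry requests
    # api_key / base_url fall back to OPENAI_API_KEY / OPENAI_BASE_URL env vars
)

print(llm.invoke("Say hi in one word.").content)
```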
```diff
@@ -168,8 +179,15 @@ For a full list of LangChain model providers with multimodal models, [check out
 ### LLMs
 <span data-heading-keywords="llm,llms"></span>
 
+:::caution
+Pure text-in/text-out LLMs tend to be older or lower-level. Many popular models are best used as [chat completion models](/docs/concepts/#chat-models),
+even for non-chat use cases.
+
+You are probably looking for [the section above instead](/docs/concepts/#chat-models).
+:::
+
 Language models that takes a string as input and returns a string.
-These are traditionally older models (newer models generally are [Chat Models](/docs/concepts/#chat-models), see below).
+These are traditionally older models (newer models generally are [Chat Models](/docs/concepts/#chat-models), see above).
 
 Although the underlying models are string in, string out, the LangChain wrappers also allow these models to take messages as input.
 This gives them the same interface as [Chat Models](/docs/concepts/#chat-models).
```
```diff
@@ -532,6 +550,28 @@ If you are still using AgentExecutor, do not fear: we still have a guide on [how
 It is recommended, however, that you start to transition to LangGraph.
 In order to assist in this we have put together a [transition guide on how to do so](/docs/how_to/migrate_agent).
 
+#### ReAct agents
+<span data-heading-keywords="react,react agent"></span>
+
+One popular architecture for building agents is [**ReAct**](https://arxiv.org/abs/2210.03629).
+ReAct combines reasoning and acting in an iterative process - in fact the name "ReAct" stands for "Reason" and "Act".
+
+The general flow looks like this:
+
+- The model will "think" about what step to take in response to an input and any previous observations.
+- The model will then choose an action from available tools (or choose to respond to the user).
+- The model will generate arguments to that tool.
+- The agent runtime (executor) will parse out the chosen tool and call it with the generated arguments.
+- The executor will return the results of the tool call back to the model as an observation.
+- This process repeats until the agent chooses to respond.
+
+There are general prompting based implementations that do not require any model-specific features, but the most
+reliable implementations use features like [tool calling](/docs/how_to/tool_calling/) to reliably format outputs
+and reduce variance.
+
+Please see the [LangGraph documentation](https://langchain-ai.github.io/langgraph/) for more information,
+or [this how-to guide](/docs/how_to/migrate_agent/) for specific information on migrating to LangGraph.
+
 ### Callbacks
 
 LangChain provides a callbacks system that allows you to hook into the various stages of your LLM application. This is useful for logging, monitoring, streaming, and other tasks.
```
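The ReAct loop added above maps directly onto LangGraph's prebuilt helper. A minimal sketch, assuming `langgraph` and `langchain-openai` are installed; the tool and model name are illustrative:

```python
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from langgraph.prebuilt import create_react_agent


@tool
def multiply(a: int, b: int) -> int:
    """Multiply two integers."""
    return a * b


# Prebuilt ReAct loop: the model picks a tool, the runtime executes it,
# the result is fed back as an observation, and the loop repeats until
# the model answers the user directly.
agent = create_react_agent(ChatOpenAI(model="gpt-4o-mini"), [multiply])

result = agent.invoke({"messages": [("user", "What is 6 times 7?")]})
print(result["messages"][-1].content)
```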
```diff
@@ -857,7 +897,7 @@ The standard interface consists of:
 The following how-to guides are good practical resources for using function/tool calling:
 
 - [How to return structured data from an LLM](/docs/how_to/structured_output/)
-- [How to use a model to call tools](/docs/how_to/tool_calling/)
+- [How to use a model to call tools](/docs/how_to/tool_calling)
 
 For a full list of model providers that support tool calling, [see this table](/docs/integrations/chat/#advanced-features).
```
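A short hedged sketch of the tool-calling interface those guides cover — `bind_tools` attaches a tool schema to every request, and the model replies with structured `tool_calls` when it wants the tool. The tool and model name below are illustrative:

```python
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI


@tool
def get_weather(city: str) -> str:
    """Look up the current weather for a city."""
    return f"Sunny in {city}"  # stand-in for a real lookup


# The model now sees the tool's JSON schema and can emit a structured call
# instead of free-form text.
llm_with_tools = ChatOpenAI(model="gpt-4o-mini").bind_tools([get_weather])

msg = llm_with_tools.invoke("What's the weather in Paris?")
print(msg.tool_calls)  # e.g. [{"name": "get_weather", "args": {"city": "Paris"}, ...}]
```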
```diff
@@ -1015,7 +1055,7 @@ See several videos and cookbooks showcasing RAG with LangGraph:
 - [Cookbooks for RAG using LangGraph](https://github.com/langchain-ai/langgraph/tree/main/examples/rag)
 
 See our LangGraph RAG recipes with partners:
-- [Meta](https://github.com/meta-llama/llama-recipes/tree/main/recipes/use_cases/agents/langchain)
+- [Meta](https://github.com/meta-llama/llama-recipes/tree/main/recipes/3p_integrations/langchain)
 - [Mistral](https://github.com/mistralai/cookbook/tree/main/third_party/langchain)
 
 :::
```
```diff
@@ -1061,3 +1101,13 @@ This process is vital for building reliable applications.
 - It allows you to track results over time and automatically run your evaluators on a schedule or as part of CI/Code
 
 To learn more, check out [this LangSmith guide](https://docs.smith.langchain.com/concepts/evaluation).
+
+### Tracing
+<span data-heading-keywords="trace,tracing"></span>
+
+A trace is essentially a series of steps that your application takes to go from input to output.
+Traces contain individual steps called `runs`. These can be individual calls from a model, retriever,
+tool, or sub-chains.
+Tracing gives you observability inside your chains and agents, and is vital in diagnosing issues.
+
+For a deeper dive, check out [this LangSmith conceptual guide](https://docs.smith.langchain.com/concepts/tracing).
```
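A minimal sketch of turning tracing on, assuming the LangSmith environment-variable convention of this era — the project name is illustrative:

```python
import getpass
import os

# Enable LangSmith tracing for any LangChain program; each run
# (model call, retriever call, tool call) is then captured as part of a trace.
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_API_KEY"] = getpass.getpass("LangSmith API key: ")
os.environ["LANGCHAIN_PROJECT"] = "my-project"  # optional: group traces by project
```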
**`docs/docs/contributing/code/guidelines.mdx`** — new file (35 lines)

```md
# General guidelines

Here are some things to keep in mind for all types of contributions:

- Follow the ["fork and pull request"](https://docs.github.com/en/get-started/exploring-projects-on-github/contributing-to-a-project) workflow.
- Fill out the checked-in pull request template when opening pull requests. Note related issues and tag relevant maintainers.
- Ensure your PR passes formatting, linting, and testing checks before requesting a review.
  - If you would like comments or feedback on your current progress, please open an issue or discussion and tag a maintainer.
  - See the sections on [Testing](/docs/contributing/code/setup#testing) and [Formatting and Linting](/docs/contributing/code/setup#formatting-and-linting) for how to run these checks locally.
- Backwards compatibility is key. Your changes must not be breaking, except in case of critical bug and security fixes.
- Look for duplicate PRs or issues that have already been opened before opening a new one.
- Keep scope as isolated as possible. As a general rule, your changes should not affect more than one package at a time.

## Bugfixes

We encourage and appreciate bugfixes. We ask that you:

- Explain the bug in enough detail for maintainers to be able to reproduce it.
  - If an accompanying issue exists, link to it. Prefix with `Fixes` so that the issue will close automatically when the PR is merged.
- Avoid breaking changes if possible.
- Include unit tests that fail without the bugfix.

If you come across a bug and don't know how to fix it, we ask that you open an issue for it describing in detail the environment in which you encountered the bug.

## New features

We aim to keep the bar high for new features. We generally don't accept new core abstractions, changes to infra, changes to dependencies,
or new agents/chains from outside contributors without an existing GitHub discussion or issue that demonstrates an acute need for them.

- New features must come with docs, unit tests, and (if appropriate) integration tests.
- New integrations must come with docs, unit tests, and (if appropriate) integration tests.
  - See [this page](/docs/contributing/integrations) for more details on contributing new integrations.
- New functionality should not inherit from or use deprecated methods or classes.
- We will reject features that are likely to lead to security vulnerabilities or reports.
- Do not add any hard dependencies. Integrations may add optional dependencies.
```
**`docs/docs/contributing/code/index.mdx`** — new file (6 lines)

```md
# Contribute Code

If you would like to add a new feature or update an existing one, please read the resources below before getting started:

- [General guidelines](/docs/contributing/code/guidelines/)
- [Setup](/docs/contributing/code/setup/)
```
The code-contributing page is renamed to "Setup" and its general guidance moves to the new guidelines page:

````diff
@@ -1,36 +1,9 @@
 ---
 sidebar_position: 1
 ---
-# Contribute Code
+# Setup
 
-To contribute to this project, please follow the ["fork and pull request"](https://docs.github.com/en/get-started/quickstart/contributing-to-projects) workflow.
-Please do not try to push directly to this repo unless you are a maintainer.
-
-Please follow the checked-in pull request template when opening pull requests. Note related issues and tag relevant
-maintainers.
-
-Pull requests cannot land without passing the formatting, linting, and testing checks first. See [Testing](#testing) and
-[Formatting and Linting](#formatting-and-linting) for how to run these checks locally.
-
-It's essential that we maintain great documentation and testing. If you:
-- Fix a bug
-  - Add a relevant unit or integration test when possible. These live in `tests/unit_tests` and `tests/integration_tests`.
-- Make an improvement
-  - Update any affected example notebooks and documentation. These live in `docs`.
-  - Update unit and integration tests when relevant.
-- Add a feature
-  - Add a demo notebook in `docs/docs/`.
-  - Add unit and integration tests.
-
-We are a small, progress-oriented team. If there's something you'd like to add or change, opening a pull request is the
-best way to get our attention.
-
-## 🚀 Quick Start
-
-This quick start guide explains how to run the repository locally.
+This guide walks through how to run the repository locally and check in your first code.
 For a [development container](https://containers.dev/), see the [.devcontainer folder](https://github.com/langchain-ai/langchain/tree/master/.devcontainer).
 
-### Dependency Management: Poetry and other env/dependency managers
+## Dependency Management: Poetry and other env/dependency managers
 
 This project utilizes [Poetry](https://python-poetry.org/) v1.7.1+ as a dependency manager.
 
@@ -41,7 +14,7 @@ Install Poetry: **[documentation on how to install it](https://python-poetry.org
 ❗Note: If you use `Conda` or `Pyenv` as your environment/package manager, after installing Poetry,
 tell Poetry to use the virtualenv python environment (`poetry config virtualenvs.prefer-active-python true`)
 
-### Different packages
+## Different packages
 
 This repository contains multiple packages:
 - `langchain-core`: Base interfaces for key abstractions as well as logic for combining them in chains (LangChain Expression Language).
@@ -59,7 +32,7 @@ For this quickstart, start with langchain-community:
 cd libs/community
 ```
 
-### Local Development Dependencies
+## Local Development Dependencies
 
 Install langchain-community development requirements (for running langchain, running examples, linting, formatting, tests, and coverage):
 
@@ -79,9 +52,9 @@ If you are still seeing this bug on v1.6.1+, you may also try disabling "modern
 (`poetry config installer.modern-installation false`) and re-installing requirements.
 See [this `debugpy` issue](https://github.com/microsoft/debugpy/issues/1246) for more details.
 
-### Testing
+## Testing
 
-_In `langchain`, `langchain-community`, and `langchain-experimental`, some test dependencies are optional; see section about optional dependencies_.
+**Note:** In `langchain`, `langchain-community`, and `langchain-experimental`, some test dependencies are optional. See the following section about optional dependencies.
 
 Unit tests cover modular logic that does not require calls to outside APIs.
 If you add new logic, please add a unit test.
@@ -118,11 +91,11 @@ poetry install --with test
 make test
 ```
 
-### Formatting and Linting
+## Formatting and Linting
 
 Run these locally before submitting a PR; the CI system will check also.
 
-#### Code Formatting
+### Code Formatting
 
 Formatting for this project is done via [ruff](https://docs.astral.sh/ruff/rules/).
 
@@ -174,7 +147,7 @@ This can be very helpful when you've made changes to only certain parts of the p
 
 We recognize linting can be annoying - if you do not want to do it, please contact a project maintainer, and they can help you with it. We do not want this to be a blocker for good code getting contributed.
 
-#### Spellcheck
+### Spellcheck
 
 Spellchecking for this project is done via [codespell](https://github.com/codespell-project/codespell).
 Note that `codespell` finds common typos, so it could have false-positive (correctly spelled but rarely used) and false-negatives (not finding misspelled) words.
````
A docs sidebar `_category_.yml` file is deleted:

```diff
@@ -1,2 +0,0 @@
-label: 'Documentation'
-position: 3
```
**`docs/docs/contributing/documentation/index.mdx`** — new file (7 lines)

```md
# Contribute Documentation

Documentation is a vital part of LangChain. We welcome both new documentation for new features and
community improvements to our current documentation. Please read the resources below before getting started:

- [Documentation style guide](/docs/contributing/documentation/style_guide/)
- [Setup](/docs/contributing/documentation/setup/)
```
The documentation setup page is renamed, hidden from the sidebar, and drops Quarto:

````diff
@@ -1,4 +1,8 @@
-# Technical logistics
+---
+sidebar_class_name: "hidden"
+---
+
+# Setup
 
 LangChain documentation consists of two components:
 
@@ -12,8 +16,6 @@ used to generate the externally facing [API Reference](https://api.python.langch
 The content for the API reference is autogenerated by scanning the docstrings in the codebase. For this reason we ask that
 developers document their code well.
 
-The main documentation is built using [Quarto](https://quarto.org) and [Docusaurus 2](https://docusaurus.io/).
-
 The `API Reference` is largely autogenerated by [sphinx](https://www.sphinx-doc.org/en/master/)
 from the code and is hosted by [Read the Docs](https://readthedocs.org/).
 
@@ -29,7 +31,7 @@ The content for the main documentation is located in the `/docs` directory of th
 
 The documentation is written using a combination of ipython notebooks (`.ipynb` files)
 and markdown (`.mdx` files). The notebooks are converted to markdown
-using [Quarto](https://quarto.org) and then built using [Docusaurus 2](https://docusaurus.io/).
+and then built using [Docusaurus 2](https://docusaurus.io/).
 
 Feel free to make contributions to the main documentation! 🥰
 
@@ -48,10 +50,6 @@ locally to ensure that it looks good and is free of errors.
 If you're unable to build it locally that's okay as well, as you will be able to
 see a preview of the documentation on the pull request page.
 
-### Install dependencies
-
-- [Quarto](https://quarto.org) - package that converts Jupyter notebooks (`.ipynb` files) into mdx files for serving in Docusaurus. [Download link](https://quarto.org/docs/download/).
-
 From the **monorepo root**, run the following command to install the dependencies:
 
 ```bash
@@ -71,8 +69,6 @@ make docs_clean
 make api_docs_clean
 ```
 
-
-
 Next, you can build the documentation as outlined below:
 
 ```bash
````
@@ -1,10 +1,8 @@
|
||||
---
|
||||
sidebar_label: "Style guide"
|
||||
sidebar_class_name: "hidden"
|
||||
---
|
||||
|
||||
# LangChain Documentation Style Guide
|
||||
|
||||
## Introduction
|
||||
# Documentation Style Guide
|
||||
|
||||
As LangChain continues to grow, the surface area of documentation required to cover it continues to grow too.
|
||||
This page provides guidelines for anyone writing documentation for LangChain, as well as some of our philosophies around
|
||||
@@ -12,116 +10,137 @@ organization and structure.
|
||||
|
||||
## Philosophy
|
||||
|
||||
LangChain's documentation aspires to follow the [Diataxis framework](https://diataxis.fr).
|
||||
Under this framework, all documentation falls under one of four categories:
|
||||
LangChain's documentation follows the [Diataxis framework](https://diataxis.fr).
|
||||
Under this framework, all documentation falls under one of four categories: [Tutorials](/docs/contributing/documentation/style_guide/#tutorials),
|
||||
[How-to guides](/docs/contributing/documentation/style_guide/#how-to-guides),
|
||||
[References](/docs/contributing/documentation/style_guide/#references), and [Explanations](/docs/contributing/documentation/style_guide/#conceptual-guide).
|
||||
|
||||
- **Tutorials**: Lessons that take the reader by the hand through a series of conceptual steps to complete a project.
|
||||
- An example of this is our [LCEL streaming guide](/docs/how_to/streaming).
|
||||
- Our guides on [custom components](/docs/how_to/custom_chat_model) is another one.
|
||||
- **How-to guides**: Guides that take the reader through the steps required to solve a real-world problem.
|
||||
- The clearest examples of this are our [Use case](/docs/how_to#use-cases) quickstart pages.
|
||||
- **Reference**: Technical descriptions of the machinery and how to operate it.
|
||||
- Our [Runnable interface](/docs/concepts#interface) page is an example of this.
|
||||
- The [API reference pages](https://api.python.langchain.com/) are another.
|
||||
- **Explanation**: Explanations that clarify and illuminate a particular topic.
|
||||
- The [LCEL primitives pages](/docs/how_to/sequence) are an example of this.
|
||||
### Tutorials
|
||||
|
||||
Tutorials are lessons that take the reader through a practical activity. Their purpose is to help the user
|
||||
gain understanding of concepts and how they interact by showing one way to achieve some goal in a hands-on way. They should **avoid** giving
|
||||
multiple permutations of ways to achieve that goal in-depth. Instead, it should guide a new user through a recommended path to accomplishing the tutorial's goal. While the end result of a tutorial does not necessarily need to
|
||||
be completely production-ready, it should be useful and practically satisfy the the goal that you clearly stated in the tutorial's introduction. Information on how to address additional scenarios
|
||||
belongs in how-to guides.
|
||||
|
||||
To quote the Diataxis website:
|
||||
|
||||
> A tutorial serves the user’s *acquisition* of skills and knowledge - their study. Its purpose is not to help the user get something done, but to help them learn.
|
||||
|
||||
In LangChain, these are often higher level guides that show off end-to-end use cases.
|
||||
|
||||
Some examples include:
|
||||
|
||||
- [Build a Simple LLM Application with LCEL](/docs/tutorials/llm_chain/)
|
||||
- [Build a Retrieval Augmented Generation (RAG) App](/docs/tutorials/rag/)
|
||||
|
||||
Here are some high-level tips on writing a good tutorial:
|
||||
|
||||
- Focus on guiding the user to get something done, but keep in mind the end-goal is more to impart principles than to create a perfect production system.
|
||||
- Be specific, not abstract and follow one path.
|
||||
- No need to go deeply into alternative approaches, but it’s ok to reference them, ideally with a link to an appropriate how-to guide.
|
||||
- Get "a point on the board" as soon as possible - something the user can run that outputs something.
|
||||
- You can iterate and expand afterwards.
|
||||
- Try to frequently checkpoint at given steps where the user can run code and see progress.
|
||||
- Focus on results, not technical explanation.
|
||||
- Crosslink heavily to appropriate conceptual/reference pages.
|
||||
- The first time you mention a LangChain concept, use its full name (e.g. "LangChain Expression Language (LCEL)"), and link to its conceptual/other documentation page.
|
||||
- It's also helpful to add a prerequisite callout that links to any pages with necessary background information.
|
||||
- End with a recap/next steps section summarizing what the tutorial covered and future reading, such as related how-to guides.
|
||||
|
||||
### How-to guides
|
||||
|
||||
A how-to guide, as the name implies, demonstrates how to do something discrete and specific.
|
||||
It should assume that the user is already familiar with underlying concepts, and is trying to solve an immediate problem, but
|
||||
should still give some background or list the scenarios where the information contained within can be relevant.
|
||||
They can and should discuss alternatives if one approach may be better than another in certain cases.
|
||||
|
||||
To quote the Diataxis website:
|
||||
|
||||
> A how-to guide serves the work of the already-competent user, whom you can assume to know what they want to do, and to be able to follow your instructions correctly.
|
||||
|
||||
Some examples include:
|
||||
|
||||
- [How to: return structured data from a model](/docs/how_to/structured_output/)
|
||||
- [How to: write a custom chat model](/docs/how_to/custom_chat_model/)
|
||||
|
||||
Here are some high-level tips on writing a good how-to guide:
|
||||
|
||||
- Clearly explain what you are guiding the user through at the start.
|
||||
- Assume higher intent than a tutorial and show what the user needs to do to get that task done.
|
||||
- Assume familiarity of concepts, but explain why suggested actions are helpful.
|
||||
- Crosslink heavily to conceptual/reference pages.
|
||||
- Discuss alternatives and responses to real-world tradeoffs that may arise when solving a problem.
|
||||
- Use lots of example code.
|
||||
- Prefer full code blocks that the reader can copy and run.
|
||||
- End with a recap/next steps section summarizing what the tutorial covered and future reading, such as other related how-to guides.
|
||||
|
||||
### Conceptual guide

LangChain's conceptual guides fall under the **Explanation** quadrant of Diataxis. They should cover LangChain terms and concepts
in a more abstract way than how-to guides or tutorials, and should be geared towards curious users interested in
gaining a deeper understanding of the framework. Try to avoid excessively large code examples - the goal here is to
impart perspective to the user rather than to finish a practical project. These guides should cover **why** things work the way they do.

This guide on documentation style is meant to fall under this category.

To quote the Diataxis website:

> The perspective of explanation is higher and wider than that of the other types. It does not take the user’s eye-level view, as in a how-to guide, or a close-up view of the machinery, like reference material. Its scope in each case is a topic - “an area of knowledge”, that somehow has to be bounded in a reasonable, meaningful way.

Some examples include:

- [Retrieval conceptual docs](/docs/concepts/#retrieval)
- [Chat model conceptual docs](/docs/concepts/#chat-models)

Here are some high-level tips on writing a good conceptual guide:

- Explain design decisions. Why does concept X exist and why was it designed this way?
- Use analogies, and reference other concepts and alternatives.
- Avoid blending in too much reference content.
- You can and should reference content covered in other guides, but make sure to link to them.
### References

References contain detailed, low-level information that describes exactly what functionality exists and how to use it.
In LangChain, this is mainly our API reference pages, which are populated from docstrings within code.
Reference pages are generally not read end-to-end, but are consulted as necessary when a user needs to know
how to use something specific.

To quote the Diataxis website:

> The only purpose of a reference guide is to describe, as succinctly as possible, and in an orderly way. Whereas the content of tutorials and how-to guides are led by needs of the user, reference material is led by the product it describes.

Many of the reference pages in LangChain are automatically generated from code,
but here are some high-level tips on writing a good docstring (a short sketch follows the list):

- Be concise.
- Discuss special cases and deviations from a user's expectations.
- Go into detail on required inputs and outputs.
- Light details on when one might use the feature are fine, but in-depth details belong in other sections.
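As an illustration, here is a minimal sketch of a docstring that follows these tips (the function itself is hypothetical):

```python
def truncate_messages(messages: list, max_count: int) -> list:
    """Return the most recent ``max_count`` messages.

    Note: if ``max_count`` is zero or negative, an empty list is
    returned rather than raising an error.

    Args:
        messages: The full message history, oldest first.
        max_count: The maximum number of messages to keep.

    Returns:
        A new list containing at most ``max_count`` messages.
    """
    return messages[-max_count:] if max_count > 0 else []
```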
Each category serves a distinct purpose and requires a specific approach to writing and structuring the content.
## Taxonomy

Keeping the above in mind, we have sorted LangChain's docs into categories. It is helpful to think in these terms
when contributing new documentation:

### Getting started

The [getting started section](/docs/introduction) includes a high-level introduction to LangChain, a quickstart that
tours LangChain's various features, and logistical instructions around installation and project setup.

It contains elements of **How-to guides** and **Explanations**.

### Use cases

[Use cases](/docs/how_to#use-cases) are guides that are meant to show how to use LangChain to accomplish a specific task (RAG, information extraction, etc.).
The quickstarts should be good entrypoints for first-time LangChain developers who prefer to learn by getting something practical prototyped,
then taking the pieces apart retrospectively. These should mirror what LangChain is good at.

The quickstart pages here should fit the **How-to guide** category, with the other pages intended to be **Explanations** of more
in-depth concepts and strategies that accompany the main happy paths.

:::note
The sections below are listed roughly in order of increasing level of abstraction.
:::
### Expression Language

[LangChain Expression Language (LCEL)](/docs/concepts#langchain-expression-language-lcel) is the fundamental way that most LangChain components fit together, and this section is designed to teach
developers how to use it to build with LangChain's primitives effectively.

This section should contain **Tutorials** that teach how to stream and use LCEL primitives for more abstract tasks, **Explanations** of specific behaviors,
and some **References** for how to use different methods in the Runnable interface.

### Components

The [components section](/docs/concepts) covers concepts one level of abstraction higher than LCEL.
Abstract base classes like `BaseChatModel` and `BaseRetriever` should be covered here, as well as core implementations of these base classes,
such as `ChatPromptTemplate` and `RecursiveCharacterTextSplitter`. Customization guides belong here too.

This section should contain mostly conceptual **Tutorials**, **References**, and **Explanations** of the components it covers.

:::note
As a general rule of thumb, everything covered in the `Expression Language` and `Components` sections (with the exception of the `Composition` section of components) should
cover only components that exist in `langchain_core`.
:::
### Integrations

The [integrations](/docs/integrations/platforms/) are specific implementations of components. These often involve third-party APIs and services.
If this is the case, as a general rule, these are maintained by the third-party partner.

This section should contain mostly **Explanations** and **References**, though the actual content here is more flexible than in other sections and more at the
discretion of the third-party provider.

:::note
Concepts covered in `Integrations` should generally exist in `langchain_community` or specific partner packages.
:::

### Guides and Ecosystem

The [Guides](/docs/tutorials) and [Ecosystem](https://docs.smith.langchain.com/) sections should contain guides that address higher-level problems than the sections above.
This includes, but is not limited to, considerations around productionization and development workflows.

These should contain mostly **How-to guides**, **Explanations**, and **Tutorials**.

### API references

LangChain's API references should act as **References** (as the name implies), with some **Explanation**-focused content as well.
## Sample developer journey

We have set up our docs to assist a developer new to LangChain. Let's walk through the intended path:

- The developer lands on https://python.langchain.com, and reads through the introduction and the diagram.
- If they are just curious, they may be drawn to the [Quickstart](/docs/tutorials/llm_chain) to get a high-level tour of what LangChain contains.
- If they have a specific task in mind that they want to accomplish, they will be drawn to the Use-Case section. The use-case should provide a good, concrete hook that shows the value LangChain can provide them and be a good entrypoint to the framework.
- They can then move on to learn more about the fundamentals of LangChain through the Expression Language sections.
- Next, they can learn about LangChain's various components and integrations.
- Finally, they can gain additional knowledge through the Guides.

This is only an ideal, of course - sections will inevitably reference lower- or higher-level concepts that are documented in other sections.
## General guidelines

Here are some other guidelines you should think about when writing and organizing documentation.

We generally do not merge new tutorials from outside contributors without an acute need.
We welcome updates as well as new integration docs, how-tos, and references.
### Avoid duplication

Multiple pages that cover the same material in depth are difficult to maintain and cause confusion. There should
be only one (very rarely two) canonical page for a given concept or feature. Instead of duplicating material, you should link to other guides.

### Link to other sections

Because sections of the docs do not exist in a vacuum, it is important to link to other sections as often as possible
to allow a developer to learn more about an unfamiliar topic inline.

This includes linking to the API references as well as conceptual sections!

### Be concise

In general, take a less-is-more approach. If a section with a good explanation of a concept already exists, you should link to it rather than
re-explain it, unless the concept you are documenting presents some new wrinkle.

Be concise, including in code samples.
### General style

- Use active voice and present tense whenever possible.
- Use examples and code snippets to illustrate concepts and usage.
- Use appropriate header levels (`#`, `##`, `###`, etc.) to organize the content hierarchically.
- Use fewer cells with more code to make copy/paste easier.
- Use bullet points and numbered lists to break down information into easily digestible chunks.
- Use tables (especially for **Reference** sections) and diagrams often to present information visually.
- Include the table of contents for longer documentation pages to help readers navigate the content, but hide it for shorter pages.
@@ -12,8 +12,8 @@ As an open-source project in a rapidly developing field, we are extremely open t
|
||||
|
||||
There are many ways to contribute to LangChain. Here are some common ways people contribute:
|
||||
|
||||
- [**Documentation**](/docs/contributing/documentation/style_guide): Help improve our docs, including this one!
|
||||
- [**Code**](./code.mdx): Help us write code, fix bugs, or improve our infrastructure.
|
||||
- [**Documentation**](/docs/contributing/documentation/): Help improve our docs, including this one!
|
||||
- [**Code**](/docs/contributing/code/): Help us write code, fix bugs, or improve our infrastructure.
|
||||
- [**Integrations**](integrations.mdx): Help us integrate with your favorite vendors and tools.
|
||||
- [**Discussions**](https://github.com/langchain-ai/langchain/discussions): Help answer usage questions and discuss issues with users.
|
||||
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
---
|
||||
sidebar_position: 5
|
||||
---
|
||||
|
||||
# Contribute Integrations
|
||||
|
||||
To begin, make sure you have all the dependencies outlined in the guide on [Contributing Code](/docs/contributing/code/).
|
||||
|
||||
@@ -7,6 +7,7 @@ If you plan on contributing to LangChain code or documentation, it can be useful
|
||||
to understand the high level structure of the repository.
|
||||
|
||||
LangChain is organized as a [monorepo](https://en.wikipedia.org/wiki/Monorepo) that contains multiple packages.
|
||||
You can check out our [installation guide](/docs/how_to/installation/) for more on how they fit together.
|
||||
|
||||
Here's the structure visualized as a tree:
|
||||
|
||||
@@ -51,7 +52,7 @@ There are other files in the root directory level, but their presence should be
|
||||
The `/docs` directory contains the content for the documentation that is shown
|
||||
at https://python.langchain.com/ and the associated API Reference https://api.python.langchain.com/en/latest/langchain_api_reference.html.
|
||||
|
||||
See the [documentation](/docs/contributing/documentation/style_guide) guidelines to learn how to contribute to the documentation.
|
||||
See the [documentation](/docs/contributing/documentation/) guidelines to learn how to contribute to the documentation.
|
||||
|
||||
## Code
|
||||
|
||||
@@ -59,6 +60,6 @@ The `/libs` directory contains the code for the LangChain packages.
|
||||
|
||||
To learn more about how to contribute code see the following guidelines:
|
||||
|
||||
- [Code](./code.mdx) Learn how to develop in the LangChain codebase.
|
||||
- [Integrations](./integrations.mdx) to learn how to contribute to third-party integrations to langchain-community or to start a new partner package.
|
||||
- [Testing](./testing.mdx) guidelines to learn how to write tests for the packages.
|
||||
- [Code](/docs/contributing/code/): Learn how to develop in the LangChain codebase.
|
||||
- [Integrations](./integrations.mdx): Learn how to contribute to third-party integrations to `langchain-community` or to start a new partner package.
|
||||
- [Testing](./testing.mdx): Guidelines to learn how to write tests for the packages.
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
sidebar_position: 2
|
||||
sidebar_position: 6
|
||||
---
|
||||
|
||||
# Testing
|
||||
|
||||
@@ -23,7 +23,7 @@
|
||||
"This guide assumes familiarity with the following concepts:\n",
|
||||
"- [LangChain Expression Language (LCEL)](/docs/concepts/#langchain-expression-language)\n",
|
||||
"- [Chaining runnables](/docs/how_to/sequence/)\n",
|
||||
"- [Tool calling](/docs/how_to/tool_calling/)\n",
|
||||
"- [Tool calling](/docs/how_to/tool_calling)\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
@@ -142,7 +142,7 @@
|
||||
"\n",
|
||||
"## Attaching OpenAI tools\n",
|
||||
"\n",
|
||||
"Another common use-case is tool calling. While you should generally use the [`.bind_tools()`](/docs/how_to/tool_calling/) method for tool-calling models, you can also bind provider-specific args directly if you want lower level control:"
|
||||
"Another common use-case is tool calling. While you should generally use the [`.bind_tools()`](/docs/how_to/tool_calling) method for tool-calling models, you can also bind provider-specific args directly if you want lower level control:"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -71,13 +71,13 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"chat = ChatOpenAI(model=\"gpt-3.5-turbo-1106\")"
|
||||
"chat = ChatOpenAI(model=\"gpt-3.5-turbo-0125\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -95,19 +95,15 @@
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='I said \"J\\'adore la programmation,\" which means \"I love programming\" in French.')"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"I said \"J'adore la programmation,\" which means \"I love programming\" in French.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.messages import AIMessage, HumanMessage\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
@@ -115,23 +111,25 @@
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant. Answer all questions to the best of your ability.\",\n",
|
||||
" ),\n",
|
||||
" MessagesPlaceholder(variable_name=\"messages\"),\n",
|
||||
" (\"placeholder\", \"{messages}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | chat\n",
|
||||
"\n",
|
||||
"chain.invoke(\n",
|
||||
"ai_msg = chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"messages\": [\n",
|
||||
" HumanMessage(\n",
|
||||
" content=\"Translate this sentence from English to French: I love programming.\"\n",
|
||||
" (\n",
|
||||
" \"human\",\n",
|
||||
" \"Translate this sentence from English to French: I love programming.\",\n",
|
||||
" ),\n",
|
||||
" AIMessage(content=\"J'adore la programmation.\"),\n",
|
||||
" HumanMessage(content=\"What did you just say?\"),\n",
|
||||
" (\"ai\", \"J'adore la programmation.\"),\n",
|
||||
" (\"human\", \"What did you just say?\"),\n",
|
||||
" ],\n",
|
||||
" }\n",
|
||||
")"
|
||||
")\n",
|
||||
"print(ai_msg.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -193,7 +191,7 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='You asked me to translate the sentence \"I love programming\" from English to French.')"
|
||||
"AIMessage(content='You just asked me to translate the sentence \"I love programming\" from English to French.', response_metadata={'token_usage': {'completion_tokens': 18, 'prompt_tokens': 61, 'total_tokens': 79}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5cbb21c2-9c30-4031-8ea8-bfc497989535-0', usage_metadata={'input_tokens': 61, 'output_tokens': 18, 'total_tokens': 79})"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
@@ -250,7 +248,7 @@
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant. Answer all questions to the best of your ability.\",\n",
|
||||
" ),\n",
|
||||
" MessagesPlaceholder(variable_name=\"chat_history\"),\n",
|
||||
" (\"placeholder\", \"{chat_history}\"),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
@@ -304,10 +302,17 @@
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Parent run dc4e2f79-4bcd-4a36-9506-55ace9040588 not found for run 34b5773e-3ced-46a6-8daf-4d464c15c940. Treating as a root run.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='The translation of \"I love programming\" in French is \"J\\'adore la programmation.\"')"
|
||||
"AIMessage(content='\"J\\'adore la programmation.\"', response_metadata={'token_usage': {'completion_tokens': 9, 'prompt_tokens': 39, 'total_tokens': 48}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-648b0822-b0bb-47a2-8e7d-7d34744be8f2-0', usage_metadata={'input_tokens': 39, 'output_tokens': 9, 'total_tokens': 48})"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
@@ -327,10 +332,17 @@
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Parent run cc14b9d8-c59e-40db-a523-d6ab3fc2fa4f not found for run 5b75e25c-131e-46ee-9982-68569db04330. Treating as a root run.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='You just asked me to translate the sentence \"I love programming\" from English to French.')"
|
||||
"AIMessage(content='You asked me to translate the sentence \"I love programming\" from English to French.', response_metadata={'token_usage': {'completion_tokens': 17, 'prompt_tokens': 63, 'total_tokens': 80}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5950435c-1dc2-43a6-836f-f989fd62c95e-0', usage_metadata={'input_tokens': 63, 'output_tokens': 17, 'total_tokens': 80})"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
@@ -354,12 +366,12 @@
|
||||
"\n",
|
||||
"### Trimming messages\n",
|
||||
"\n",
|
||||
"LLMs and chat models have limited context windows, and even if you're not directly hitting limits, you may want to limit the amount of distraction the model has to deal with. One solution is to only load and store the most recent `n` messages. Let's use an example history with some preloaded messages:"
|
||||
"LLMs and chat models have limited context windows, and even if you're not directly hitting limits, you may want to limit the amount of distraction the model has to deal with. One solution is trim the historic messages before passing them to the model. Let's use an example history with some preloaded messages:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"execution_count": 21,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -371,7 +383,7 @@
|
||||
" AIMessage(content='Fine thanks!')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"execution_count": 21,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -396,34 +408,28 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"execution_count": 22,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Parent run 7ff2d8ec-65e2-4f67-8961-e498e2c4a591 not found for run 3881e990-6596-4326-84f6-2b76949e0657. Treating as a root run.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Your name is Nemo.')"
|
||||
"AIMessage(content='Your name is Nemo.', response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 66, 'total_tokens': 72}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-f8aabef8-631a-4238-a39b-701e881fbe47-0', usage_metadata={'input_tokens': 66, 'output_tokens': 6, 'total_tokens': 72})"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"execution_count": 22,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant. Answer all questions to the best of your ability.\",\n",
|
||||
" ),\n",
|
||||
" MessagesPlaceholder(variable_name=\"chat_history\"),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | chat\n",
|
||||
"\n",
|
||||
"chain_with_message_history = RunnableWithMessageHistory(\n",
|
||||
" chain,\n",
|
||||
" lambda session_id: demo_ephemeral_chat_history,\n",
|
||||
@@ -443,34 +449,33 @@
|
||||
"source": [
|
||||
"We can see the chain remembers the preloaded name.\n",
|
||||
"\n",
|
||||
"But let's say we have a very small context window, and we want to trim the number of messages passed to the chain to only the 2 most recent ones. We can use the `clear` method to remove messages and re-add them to the history. We don't have to, but let's put this method at the front of our chain to ensure it's always called:"
|
||||
"But let's say we have a very small context window, and we want to trim the number of messages passed to the chain to only the 2 most recent ones. We can use the built in [trim_messages](/docs/how_to/trim_messages/) util to trim messages based on their token count before they reach our prompt. In this case we'll count each message as 1 \"token\" and keep only the last two messages:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"execution_count": 23,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from operator import itemgetter\n",
|
||||
"\n",
|
||||
"from langchain_core.messages import trim_messages\n",
|
||||
"from langchain_core.runnables import RunnablePassthrough\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def trim_messages(chain_input):\n",
|
||||
" stored_messages = demo_ephemeral_chat_history.messages\n",
|
||||
" if len(stored_messages) <= 2:\n",
|
||||
" return False\n",
|
||||
"\n",
|
||||
" demo_ephemeral_chat_history.clear()\n",
|
||||
"\n",
|
||||
" for message in stored_messages[-2:]:\n",
|
||||
" demo_ephemeral_chat_history.add_message(message)\n",
|
||||
"\n",
|
||||
" return True\n",
|
||||
"\n",
|
||||
"trimmer = trim_messages(strategy=\"last\", max_tokens=2, token_counter=len)\n",
|
||||
"\n",
|
||||
"chain_with_trimming = (\n",
|
||||
" RunnablePassthrough.assign(messages_trimmed=trim_messages)\n",
|
||||
" | chain_with_message_history\n",
|
||||
" RunnablePassthrough.assign(chat_history=itemgetter(\"chat_history\") | trimmer)\n",
|
||||
" | prompt\n",
|
||||
" | chat\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain_with_trimmed_history = RunnableWithMessageHistory(\n",
|
||||
" chain_with_trimming,\n",
|
||||
" lambda session_id: demo_ephemeral_chat_history,\n",
|
||||
" input_messages_key=\"input\",\n",
|
||||
" history_messages_key=\"chat_history\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
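For reference, here is a minimal standalone sketch of the `trim_messages` util this hunk switches to, counting each message as one "token" just as the notebook does (assumes a recent `langchain-core`):

```python
from langchain_core.messages import AIMessage, HumanMessage, trim_messages

messages = [
    HumanMessage(content="Hey there! I'm Nemo."),
    AIMessage(content="Hello!"),
    HumanMessage(content="How are you today?"),
    AIMessage(content="Fine thanks!"),
]

# Keep only the last two messages, counting each message as one "token".
trimmed = trim_messages(messages, strategy="last", max_tokens=2, token_counter=len)
print(trimmed)  # the last HumanMessage and AIMessage
```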
@@ -483,22 +488,29 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"execution_count": 24,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Parent run 775cde65-8d22-4c44-80bb-f0b9811c32ca not found for run 5cf71d0e-4663-41cd-8dbe-e9752689cfac. Treating as a root run.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"P. Sherman's address is 42 Wallaby Way, Sydney.\")"
|
||||
"AIMessage(content='P. Sherman is a fictional character from the animated movie \"Finding Nemo\" who lives at 42 Wallaby Way, Sydney.', response_metadata={'token_usage': {'completion_tokens': 27, 'prompt_tokens': 53, 'total_tokens': 80}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5642ef3a-fdbe-43cf-a575-d1785976a1b9-0', usage_metadata={'input_tokens': 53, 'output_tokens': 27, 'total_tokens': 80})"
|
||||
]
|
||||
},
|
||||
"execution_count": 13,
|
||||
"execution_count": 24,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain_with_trimming.invoke(\n",
|
||||
"chain_with_trimmed_history.invoke(\n",
|
||||
" {\"input\": \"Where does P. Sherman live?\"},\n",
|
||||
" {\"configurable\": {\"session_id\": \"unused\"}},\n",
|
||||
")"
|
||||
@@ -506,19 +518,23 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"execution_count": 25,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[HumanMessage(content=\"What's my name?\"),\n",
|
||||
" AIMessage(content='Your name is Nemo.'),\n",
|
||||
"[HumanMessage(content=\"Hey there! I'm Nemo.\"),\n",
|
||||
" AIMessage(content='Hello!'),\n",
|
||||
" HumanMessage(content='How are you today?'),\n",
|
||||
" AIMessage(content='Fine thanks!'),\n",
|
||||
" HumanMessage(content=\"What's my name?\"),\n",
|
||||
" AIMessage(content='Your name is Nemo.', response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 66, 'total_tokens': 72}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-f8aabef8-631a-4238-a39b-701e881fbe47-0', usage_metadata={'input_tokens': 66, 'output_tokens': 6, 'total_tokens': 72}),\n",
|
||||
" HumanMessage(content='Where does P. Sherman live?'),\n",
|
||||
" AIMessage(content=\"P. Sherman's address is 42 Wallaby Way, Sydney.\")]"
|
||||
" AIMessage(content='P. Sherman is a fictional character from the animated movie \"Finding Nemo\" who lives at 42 Wallaby Way, Sydney.', response_metadata={'token_usage': {'completion_tokens': 27, 'prompt_tokens': 53, 'total_tokens': 80}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5642ef3a-fdbe-43cf-a575-d1785976a1b9-0', usage_metadata={'input_tokens': 53, 'output_tokens': 27, 'total_tokens': 80})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"execution_count": 25,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -536,48 +552,39 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"execution_count": 27,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Parent run fde7123f-6fd3-421a-a3fc-2fb37dead119 not found for run 061a4563-2394-470d-a3ed-9bf1388ca431. Treating as a root run.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"I'm sorry, I don't have access to your personal information.\")"
|
||||
"AIMessage(content=\"I'm sorry, but I don't have access to your personal information, so I don't know your name. How else may I assist you today?\", response_metadata={'token_usage': {'completion_tokens': 31, 'prompt_tokens': 74, 'total_tokens': 105}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-0ab03495-1f7c-4151-9070-56d2d1c565ff-0', usage_metadata={'input_tokens': 74, 'output_tokens': 31, 'total_tokens': 105})"
|
||||
]
|
||||
},
|
||||
"execution_count": 15,
|
||||
"execution_count": 27,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain_with_trimming.invoke(\n",
|
||||
"chain_with_trimmed_history.invoke(\n",
|
||||
" {\"input\": \"What is my name?\"},\n",
|
||||
" {\"configurable\": {\"session_id\": \"unused\"}},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[HumanMessage(content='Where does P. Sherman live?'),\n",
|
||||
" AIMessage(content=\"P. Sherman's address is 42 Wallaby Way, Sydney.\"),\n",
|
||||
" HumanMessage(content='What is my name?'),\n",
|
||||
" AIMessage(content=\"I'm sorry, I don't have access to your personal information.\")]"
|
||||
]
|
||||
},
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"demo_ephemeral_chat_history.messages"
|
||||
"Check out our [how to guide on trimming messages](/docs/how_to/trim_messages/) for more."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -638,7 +645,7 @@
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant. Answer all questions to the best of your ability. The provided chat history includes facts about the user you are speaking with.\",\n",
|
||||
" ),\n",
|
||||
" MessagesPlaceholder(variable_name=\"chat_history\"),\n",
|
||||
" (\"placeholder\", \"{chat_history}\"),\n",
|
||||
" (\"user\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
@@ -672,7 +679,7 @@
|
||||
" return False\n",
|
||||
" summarization_prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" MessagesPlaceholder(variable_name=\"chat_history\"),\n",
|
||||
" (\"placeholder\", \"{chat_history}\"),\n",
|
||||
" (\n",
|
||||
" \"user\",\n",
|
||||
" \"Distill the above chat messages into a single summary message. Include as many specific details as you can.\",\n",
|
||||
@@ -772,9 +779,9 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.1"
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -69,6 +69,17 @@
|
||||
"Once we have loaded PDFs into LangChain `Document` objects, we can index them (e.g., a RAG application) in the usual way:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c3b932bb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install faiss-cpu \n",
|
||||
"# use `pip install faiss-gpu` for CUDA GPU support"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
|
||||
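For orientation, a minimal sketch of the indexing step this hunk refers to (assuming `docs` holds the loaded `Document` objects and an OpenAI API key is configured; the FAISS and OpenAI choices are illustrative):

```python
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings

# Embed and index the loaded documents in an in-memory FAISS vector store.
vectorstore = FAISS.from_documents(docs, OpenAIEmbeddings())

# Expose the index as a retriever for downstream RAG chains.
retriever = vectorstore.as_retriever()
retriever.invoke("What is this document about?")
```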
@@ -246,11 +246,11 @@
|
||||
"examples = [\n",
|
||||
" (\n",
|
||||
" \"The ocean is vast and blue. It's more than 20,000 feet deep. There are many fish in it.\",\n",
|
||||
" Person(name=None, height_in_meters=None, hair_color=None),\n",
|
||||
" Data(people=[]),\n",
|
||||
" ),\n",
|
||||
" (\n",
|
||||
" \"Fiona traveled far from France to Spain.\",\n",
|
||||
" Person(name=\"Fiona\", height_in_meters=None, hair_color=None),\n",
|
||||
" Data(people=[Person(name=\"Fiona\", height_in_meters=None, hair_color=None)]),\n",
|
||||
" ),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
|
||||
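For readers without the full notebook, here is a rough sketch of the `Person` and `Data` schemas these examples rely on (field types are inferred from the diff and may not match the notebook exactly):

```python
from typing import List, Optional

from langchain_core.pydantic_v1 import BaseModel


class Person(BaseModel):
    # Field types are inferred from the diff; the real notebook may differ.
    name: Optional[str] = None
    height_in_meters: Optional[str] = None
    hair_color: Optional[str] = None


class Data(BaseModel):
    # A container type, so "no people found" is represented as an empty list.
    people: List[Person]
```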
@@ -23,7 +23,7 @@
|
||||
"- [Prompt templates](/docs/concepts/#prompt-templates)\n",
|
||||
"- [Example selectors](/docs/concepts/#example-selectors)\n",
|
||||
"- [LLMs](/docs/concepts/#llms)\n",
|
||||
"- [Vectorstores](/docs/concepts/#vectorstores)\n",
|
||||
"- [Vectorstores](/docs/concepts/#vector-stores)\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
|
||||
@@ -23,7 +23,7 @@
|
||||
"- [Prompt templates](/docs/concepts/#prompt-templates)\n",
|
||||
"- [Example selectors](/docs/concepts/#example-selectors)\n",
|
||||
"- [Chat models](/docs/concepts/#chat-model)\n",
|
||||
"- [Vectorstores](/docs/concepts/#vectorstores)\n",
|
||||
"- [Vectorstores](/docs/concepts/#vector-stores)\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
@@ -51,7 +51,7 @@
|
||||
"- `examples`: A list of dictionary examples to include in the final prompt.\n",
|
||||
"- `example_prompt`: converts each example into 1 or more messages through its [`format_messages`](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html?highlight=format_messages#langchain_core.prompts.chat.ChatPromptTemplate.format_messages) method. A common example would be to convert each example into one human message and one AI message response, or a human message followed by a function call message.\n",
|
||||
"\n",
|
||||
"Below is a simple demonstration. First, define the examples you'd like to include:"
|
||||
"Below is a simple demonstration. First, define the examples you'd like to include. Let's give the LLM an unfamiliar mathematical operator, denoted by the \"🦜\" emoji:"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -59,17 +59,7 @@
|
||||
"execution_count": 1,
|
||||
"id": "5b79e400",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[33mWARNING: You are using pip version 22.0.4; however, version 24.0 is available.\n",
|
||||
"You should consider upgrading via the '/Users/jacoblee/.pyenv/versions/3.10.5/bin/python -m pip install --upgrade pip' command.\u001b[0m\u001b[33m\n",
|
||||
"\u001b[0mNote: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain langchain-openai langchain-chroma\n",
|
||||
"\n",
|
||||
@@ -79,9 +69,50 @@
|
||||
"os.environ[\"OPENAI_API_KEY\"] = getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "30856d92",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If we try to ask the model what the result of this expression is, it will fail:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 4,
|
||||
"id": "174dec5b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='The expression \"2 🦜 9\" is not a standard mathematical operation or equation. It appears to be a combination of the number 2 and the parrot emoji 🦜 followed by the number 9. It does not have a specific mathematical meaning.', response_metadata={'token_usage': {'completion_tokens': 54, 'prompt_tokens': 17, 'total_tokens': 71}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-aad12dda-5c47-4a1e-9949-6fe94e03242a-0', usage_metadata={'input_tokens': 17, 'output_tokens': 54, 'total_tokens': 71})"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"model = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0.0)\n",
|
||||
"\n",
|
||||
"model.invoke(\"What is 2 🦜 9?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e6d58385",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now let's see what happens if we give the LLM some examples to work with. We'll define some below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "0fc5a02a-6249-4e92-95c3-30fff9671e8b",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -91,8 +122,8 @@
|
||||
"from langchain_core.prompts import ChatPromptTemplate, FewShotChatMessagePromptTemplate\n",
|
||||
"\n",
|
||||
"examples = [\n",
|
||||
" {\"input\": \"2+2\", \"output\": \"4\"},\n",
|
||||
" {\"input\": \"2+3\", \"output\": \"5\"},\n",
|
||||
" {\"input\": \"2 🦜 2\", \"output\": \"4\"},\n",
|
||||
" {\"input\": \"2 🦜 3\", \"output\": \"5\"},\n",
|
||||
"]"
|
||||
]
|
||||
},
|
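For context, a minimal sketch of wiring such examples into a few-shot prompt, mirroring the notebook's approach (assumes the `examples` list defined above):

```python
from langchain_core.prompts import (
    ChatPromptTemplate,
    FewShotChatMessagePromptTemplate,
)

# Render each example as a human/AI message pair.
example_prompt = ChatPromptTemplate.from_messages(
    [("human", "{input}"), ("ai", "{output}")]
)

few_shot_prompt = FewShotChatMessagePromptTemplate(
    example_prompt=example_prompt,
    examples=examples,
)

print(few_shot_prompt.invoke({}).to_messages())
```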
||||
@@ -106,7 +137,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 6,
|
||||
"id": "65e72ad1-9060-47d0-91a1-bc130c8b98ac",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -116,7 +147,7 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[HumanMessage(content='2+2'), AIMessage(content='4'), HumanMessage(content='2+3'), AIMessage(content='5')]\n"
|
||||
"[HumanMessage(content='2 🦜 2'), AIMessage(content='4'), HumanMessage(content='2 🦜 3'), AIMessage(content='5')]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -146,7 +177,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 7,
|
||||
"id": "9f86d6d9-50de-41b6-b6c7-0f9980cc0187",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -162,9 +193,17 @@
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "dd8029c5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"And now let's ask the model the initial question and see how it does:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"execution_count": 8,
|
||||
"id": "97d443b1-6fae-4b36-bede-3ff7306288a3",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -173,10 +212,10 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='A triangle does not have a square. The square of a number is the result of multiplying the number by itself.', response_metadata={'token_usage': {'completion_tokens': 23, 'prompt_tokens': 52, 'total_tokens': 75}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': 'fp_c2295e73ad', 'finish_reason': 'stop', 'logprobs': None}, id='run-3456c4ef-7b4d-4adb-9e02-8079de82a47a-0')"
|
||||
"AIMessage(content='11', response_metadata={'token_usage': {'completion_tokens': 1, 'prompt_tokens': 60, 'total_tokens': 61}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5ec4e051-262f-408e-ad00-3f2ebeb561c3-0', usage_metadata={'input_tokens': 60, 'output_tokens': 1, 'total_tokens': 61})"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -184,9 +223,9 @@
|
||||
"source": [
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"chain = final_prompt | ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0.0)\n",
|
||||
"chain = final_prompt | model\n",
|
||||
"\n",
|
||||
"chain.invoke({\"input\": \"What's the square of a triangle?\"})"
|
||||
"chain.invoke({\"input\": \"What is 2 🦜 9?\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -194,6 +233,8 @@
|
||||
"id": "70ab7114-f07f-46be-8874-3705a25aba5f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"And we can see that the model has now inferred that the parrot emoji means addition from the given few-shot examples!\n",
|
||||
"\n",
|
||||
"## Dynamic few-shot prompting\n",
|
||||
"\n",
|
||||
"Sometimes you may want to select only a few examples from your overall set to show based on the input. For this, you can replace the `examples` passed into `FewShotChatMessagePromptTemplate` with an `example_selector`. The other components remain the same as above! Our dynamic few-shot prompt template would look like:\n",
|
||||
@@ -208,7 +249,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": 9,
|
||||
"id": "ad66f06a-66fd-4fcc-8166-5d0e3c801e57",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -220,9 +261,9 @@
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"examples = [\n",
|
||||
" {\"input\": \"2+2\", \"output\": \"4\"},\n",
|
||||
" {\"input\": \"2+3\", \"output\": \"5\"},\n",
|
||||
" {\"input\": \"2+4\", \"output\": \"6\"},\n",
|
||||
" {\"input\": \"2 🦜 2\", \"output\": \"4\"},\n",
|
||||
" {\"input\": \"2 🦜 3\", \"output\": \"5\"},\n",
|
||||
" {\"input\": \"2 🦜 4\", \"output\": \"6\"},\n",
|
||||
" {\"input\": \"What did the cow say to the moon?\", \"output\": \"nothing at all\"},\n",
|
||||
" {\n",
|
||||
" \"input\": \"Write me a poem about the moon\",\n",
|
||||
@@ -247,7 +288,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"execution_count": 10,
|
||||
"id": "7790303a-f722-452e-8921-b14bdf20bdff",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -257,10 +298,10 @@
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'input': 'What did the cow say to the moon?', 'output': 'nothing at all'},\n",
|
||||
" {'input': '2+4', 'output': '6'}]"
|
||||
" {'input': '2 🦜 4', 'output': '6'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -287,7 +328,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"execution_count": 11,
|
||||
"id": "253c255e-41d7-45f6-9d88-c7a0ced4b1bd",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -297,7 +338,7 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[HumanMessage(content='2+3'), AIMessage(content='5'), HumanMessage(content='2+2'), AIMessage(content='4')]\n"
|
||||
"[HumanMessage(content='2 🦜 3'), AIMessage(content='5'), HumanMessage(content='2 🦜 4'), AIMessage(content='6')]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -317,7 +358,7 @@
|
||||
" ),\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"print(few_shot_prompt.invoke(input=\"What's 3+3?\").to_messages())"
|
||||
"print(few_shot_prompt.invoke(input=\"What's 3 🦜 3?\").to_messages())"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -330,7 +371,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"execution_count": 12,
|
||||
"id": "e731cb45-f0ea-422c-be37-42af2a6cb2c4",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -340,7 +381,7 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"messages=[HumanMessage(content='2+3'), AIMessage(content='5'), HumanMessage(content='2+2'), AIMessage(content='4')]\n"
|
||||
"messages=[HumanMessage(content='2 🦜 3'), AIMessage(content='5'), HumanMessage(content='2 🦜 4'), AIMessage(content='6')]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -353,7 +394,7 @@
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"print(few_shot_prompt.invoke(input=\"What's 3+3?\"))"
|
||||
"print(few_shot_prompt.invoke(input=\"What's 3 🦜 3?\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -368,7 +409,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"execution_count": 13,
|
||||
"id": "0568cbc6-5354-47f1-ab4d-dfcc616cf583",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -377,10 +418,10 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='6', response_metadata={'token_usage': {'completion_tokens': 1, 'prompt_tokens': 51, 'total_tokens': 52}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': 'fp_c2295e73ad', 'finish_reason': 'stop', 'logprobs': None}, id='run-6bcbe158-a8e3-4a85-a754-1ba274a9f147-0')"
|
||||
"AIMessage(content='6', response_metadata={'token_usage': {'completion_tokens': 1, 'prompt_tokens': 60, 'total_tokens': 61}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-d1863e5e-17cd-4e9d-bf7a-b9f118747a65-0', usage_metadata={'input_tokens': 60, 'output_tokens': 1, 'total_tokens': 61})"
|
||||
]
|
||||
},
|
||||
"execution_count": 18,
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -388,7 +429,7 @@
|
||||
"source": [
|
||||
"chain = final_prompt | ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0.0)\n",
|
||||
"\n",
|
||||
"chain.invoke({\"input\": \"What's 3+3?\"})"
|
||||
"chain.invoke({\"input\": \"What's 3 🦜 3?\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -428,7 +469,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.10.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -300,7 +300,11 @@
|
||||
"id": "922b48bd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Streaming\n",
|
||||
"## Streaming\n",
|
||||
"\n",
|
||||
":::{.callout-note}\n",
|
||||
"[RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html) is best suited for code that does not need to support streaming. If you need to support streaming (i.e., be able to operate on chunks of inputs and yield chunks of outputs), use [RunnableGenerator](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableGenerator.html) instead as in the example below.\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"You can use generator functions (ie. functions that use the `yield` keyword, and behave like iterators) in a chain.\n",
|
||||
"\n",
|
||||
|
||||
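As a point of reference, here is a minimal sketch of a streaming-friendly generator wrapped in `RunnableGenerator` (the function and input are illustrative):

```python
from typing import Iterator

from langchain_core.runnables import RunnableGenerator


def upper_case(chunks: Iterator[str]) -> Iterator[str]:
    # Operates on input chunks as they arrive and yields output chunks.
    for chunk in chunks:
        yield chunk.upper()


runnable = RunnableGenerator(upper_case)
for chunk in runnable.stream("hello"):
    print(chunk)
```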
@@ -21,7 +21,7 @@ For comprehensive descriptions of every class and function see the [API Referenc
|
||||
This highlights functionality that is core to using LangChain.
|
||||
|
||||
- [How to: return structured data from a model](/docs/how_to/structured_output/)
|
||||
- [How to: use a model to call tools](/docs/how_to/tool_calling/)
|
||||
- [How to: use a model to call tools](/docs/how_to/tool_calling)
|
||||
- [How to: stream runnables](/docs/how_to/streaming)
|
||||
- [How to: debug your LLM apps](/docs/how_to/debugging/)
|
||||
|
||||
@@ -79,6 +79,12 @@ These are the core building blocks you can use when building applications.
|
||||
- [How to: stream a response back](/docs/how_to/chat_streaming)
|
||||
- [How to: track token usage](/docs/how_to/chat_token_usage_tracking)
|
||||
- [How to: track response metadata across providers](/docs/how_to/response_metadata)
|
||||
- [How to: let your end users choose their model](/docs/how_to/chat_models_universal_init/)
|
||||
- [How to: use chat model to call tools](/docs/how_to/tool_calling)
|
||||
- [How to: stream tool calls](/docs/how_to/tool_streaming)
|
||||
- [How to: few shot prompt tool behavior](/docs/how_to/tools_few_shot)
|
||||
- [How to: bind model-specific formated tools](/docs/how_to/tools_model_specific)
|
||||
- [How to: force specific tool call](/docs/how_to/tool_choice)
|
||||
- [How to: init any model in one line](/docs/how_to/chat_models_universal_init/)
|
||||
|
||||
### Messages
|
||||
@@ -176,15 +182,17 @@ Indexing is the process of keeping your vectorstore in-sync with the underlying
|
||||
|
||||
### Tools
|
||||
|
||||
LangChain [Tools](/docs/concepts/#tools) contain a description of the tool (to pass to the language model) as well as the implementation of the function to call).
|
||||
LangChain [Tools](/docs/concepts/#tools) contain a description of the tool (to pass to the language model) as well as the implementation of the function to call.
|
||||
|
||||
- [How to: create custom tools](/docs/how_to/custom_tools)
|
||||
- [How to: use built-in tools and built-in toolkits](/docs/how_to/tools_builtin)
|
||||
- [How to: use a chat model to call tools](/docs/how_to/tool_calling/)
|
||||
- [How to: use chat model to call tools](/docs/how_to/tool_calling)
|
||||
- [How to: pass tool results back to model](/docs/how_to/tool_results_pass_to_model)
|
||||
- [How to: add ad-hoc tool calling capability to LLMs and chat models](/docs/how_to/tools_prompting)
|
||||
- [How to: pass run time values to tools](/docs/how_to/tool_runtime)
|
||||
- [How to: add a human in the loop to tool usage](/docs/how_to/tools_human)
|
||||
- [How to: handle errors when calling tools](/docs/how_to/tools_error)
|
||||
- [How to: disable parallel tool calling](/docs/how_to/tool_choice)
|
||||
|
||||
### Multimodal
|
||||
|
||||
@@ -225,6 +233,8 @@ All of LangChain components can easily be extended to support your own versions.
|
||||
- [How to: create custom callback handlers](/docs/how_to/custom_callbacks)
|
||||
- [How to: define a custom tool](/docs/how_to/custom_tools)
|
||||
|
||||
### Serialization
|
||||
- [How to: save and load LangChain objects](/docs/how_to/serialization)
|
||||
|
||||
## Use cases
|
||||
|
||||
@@ -307,7 +317,8 @@ LangSmith allows you to closely trace, monitor and evaluate your LLM application
|
||||
It seamlessly integrates with LangChain and LangGraph, and you can use it to inspect and debug individual steps of your chains and agents as you build.
|
||||
|
||||
LangSmith documentation is hosted on a separate site.
|
||||
You can peruse [LangSmith how-to guides here](https://docs.smith.langchain.com/how_to_guides/).
|
||||
You can peruse [LangSmith how-to guides here](https://docs.smith.langchain.com/how_to_guides/), but we'll highlight a few sections that are particularly
|
||||
relevant to LangChain below:
|
||||
|
||||
### Evaluation
|
||||
<span data-heading-keywords="evaluation,evaluate"></span>
|
||||
@@ -315,4 +326,14 @@ You can peruse [LangSmith how-to guides here](https://docs.smith.langchain.com/h
|
||||
Evaluating performance is a vital part of building LLM-powered applications.
|
||||
LangSmith helps with every step of the process from creating a dataset to defining metrics to running evaluators.
|
||||
|
||||
To learn more, check out the [LangSmith evaluation how-to guides](https://docs.smith.langchain.com/how_to_guides/evaluation).
|
||||
To learn more, check out the [LangSmith evaluation how-to guides](https://docs.smith.langchain.com/how_to_guides#evaluation).
|
||||
|
||||
### Tracing
|
||||
<span data-heading-keywords="trace,tracing"></span>
|
||||
|
||||
Tracing gives you observability inside your chains and agents, and is vital in diagnosing issues.
|
||||
|
||||
- [How to: trace with LangChain](https://docs.smith.langchain.com/how_to_guides/tracing/trace_with_langchain)
|
||||
- [How to: add metadata and tags to traces](https://docs.smith.langchain.com/how_to_guides/tracing/trace_with_langchain#add-metadata-and-tags-to-traces)
|
||||
|
||||
You can see general tracing-related how-tos [in this section of the LangSmith docs](https://docs.smith.langchain.com/how_to_guides/tracing).
|
||||
|
||||
@@ -2,11 +2,14 @@
|
||||
sidebar_position: 2
|
||||
---
|
||||
|
||||
# Installation
|
||||
# How to install LangChain packages
|
||||
|
||||
The LangChain ecosystem is split into different packages, which allow you to choose exactly which pieces of
|
||||
functionality to install.
|
||||
|
||||
## Official release
|
||||
|
||||
To install LangChain run:
|
||||
To install the main LangChain package, run:
|
||||
|
||||
import Tabs from '@theme/Tabs';
|
||||
import TabItem from '@theme/TabItem';
|
||||
@@ -21,11 +24,24 @@ import CodeBlock from "@theme/CodeBlock";
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
This will install the bare minimum requirements of LangChain.
|
||||
A lot of the value of LangChain comes when integrating it with various model providers, datastores, etc.
|
||||
While this package acts as a sane starting point to using LangChain,
|
||||
much of the value of LangChain comes when integrating it with various model providers, datastores, etc.
|
||||
By default, the dependencies needed to do that are NOT installed. You will need to install the dependencies for specific integrations separately.
|
||||
We'll show how to do that in the next sections of this guide.
|
||||
|
||||
## From source
|
||||
## Ecosystem packages
|
||||
|
||||
With the exception of the `langsmith` SDK, all packages in the LangChain ecosystem depend on `langchain-core`, which contains base
|
||||
classes and abstractions that other packages use. The dependency graph below shows how the different packages are related.
|
||||
A directed arrow indicates that the source package depends on the target package:
|
||||
|
||||

|
||||
|
||||
When installing a package, you do not need to explicitly install that package's own dependencies (such as `langchain-core`).
|
||||
However, you may choose to if you are using a feature only available in a certain version of that dependency.
|
||||
If you do, you should make sure that the installed or pinned version is compatible with any other integration packages you use.
|
||||
|
||||
### From source
|
||||
|
||||
If you want to install from source, clone the repo, make sure your current directory is `PATH/TO/REPO/langchain/libs/langchain`, and run:
|
||||
|
||||
@@ -33,21 +49,21 @@ If you want to install from source, you can do so by cloning the repo and be sur
|
||||
pip install -e .
|
||||
```
|
||||
|
||||
## LangChain core
|
||||
### LangChain core
|
||||
The `langchain-core` package contains base abstractions that the rest of the LangChain ecosystem uses, along with the LangChain Expression Language. It is automatically installed by `langchain`, but can also be used separately. Install with:
|
||||
|
||||
```bash
|
||||
pip install langchain-core
|
||||
```
|
||||
|
||||
## LangChain community
|
||||
### LangChain community
|
||||
The `langchain-community` package contains third-party integrations. Install with:
|
||||
|
||||
```bash
|
||||
pip install langchain-community
|
||||
```
|
||||
|
||||
## LangChain experimental
|
||||
### LangChain experimental
|
||||
The `langchain-experimental` package holds experimental LangChain code, intended for research and experimental uses.
|
||||
Install with:
|
||||
|
||||
@@ -55,14 +71,15 @@ Install with:
|
||||
pip install langchain-experimental
|
||||
```
|
||||
|
||||
## LangGraph
|
||||
`langgraph` is a library for building stateful, multi-actor applications with LLMs, built on top of (and intended to be used with) LangChain.
|
||||
### LangGraph
|
||||
`langgraph` is a library for building stateful, multi-actor applications with LLMs. It integrates smoothly with LangChain, but can be used without it.
|
||||
Install with:
|
||||
|
||||
```bash
|
||||
pip install langgraph
|
||||
```
|
||||
## LangServe
|
||||
|
||||
### LangServe
|
||||
LangServe helps developers deploy LangChain runnables and chains as a REST API.
|
||||
LangServe is automatically installed by LangChain CLI.
|
||||
If not using LangChain CLI, install with:
|
||||
@@ -80,9 +97,10 @@ Install with:
|
||||
pip install langchain-cli
|
||||
```
|
||||
|
||||
## LangSmith SDK
|
||||
The LangSmith SDK is automatically installed by LangChain.
|
||||
If not using LangChain, install with:
|
||||
### LangSmith SDK
|
||||
The LangSmith SDK is automatically installed by LangChain. However, it does not depend on
|
||||
`langchain-core`, and can be installed and used independently if desired.
|
||||
If you are not using LangChain, you can install it with:
|
||||
|
||||
```bash
|
||||
pip install langsmith
|
||||
|
||||
@@ -129,7 +129,7 @@
|
||||
"id": "a531da5e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## What is the runnable you are trying wrap?\n",
|
||||
"## What is the runnable you are trying to wrap?\n",
|
||||
"\n",
|
||||
"`RunnableWithMessageHistory` can only wrap certain types of Runnables. Specifically, it can be used for any Runnable that takes as input one of:\n",
|
||||
"\n",
|
||||
|
||||
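For orientation, a minimal sketch of wrapping a chain with `RunnableWithMessageHistory`, following the pattern shown earlier in this diff (`chain` is assumed to be a prompt with "input" and "chat_history" variables piped into a chat model):

```python
from langchain_community.chat_message_histories import ChatMessageHistory
from langchain_core.runnables.history import RunnableWithMessageHistory

history = ChatMessageHistory()

chain_with_history = RunnableWithMessageHistory(
    chain,  # assumed: prompt | chat, as in the earlier diff
    lambda session_id: history,
    input_messages_key="input",
    history_messages_key="chat_history",
)

chain_with_history.invoke(
    {"input": "Hi, I'm Nemo!"},
    {"configurable": {"session_id": "unused"}},
)
```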
@@ -1,5 +1,19 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "adc7ee09",
|
||||
"metadata": {
|
||||
"vscode": {
|
||||
"languageId": "raw"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"---\n",
|
||||
"keywords: [create_react_agent, create_react_agent()]\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "457cdc67-1893-4653-8b0c-b185a5947e74",
|
||||
@@ -7,9 +21,18 @@
|
||||
"source": [
|
||||
"# How to migrate from legacy LangChain agents to LangGraph\n",
|
||||
"\n",
|
||||
"Here we focus on how to move from legacy LangChain agents to LangGraph agents.\n",
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
"This guide assumes familiarity with the following concepts:\n",
|
||||
"- [Agents](/docs/concepts/#agents)\n",
|
||||
"- [LangGraph](https://langchain-ai.github.io/langgraph/)\n",
|
||||
"- [Tool calling](/docs/how_to/tool_calling/)\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"Here we focus on how to move from legacy LangChain agents to more flexible [LangGraph](https://langchain-ai.github.io/langgraph/) agents.\n",
|
||||
"LangChain agents (the [AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor) in particular) have multiple configuration parameters.\n",
|
||||
"In this notebook we will show how those parameters map to the LangGraph [react agent executor](https://langchain-ai.github.io/langgraph/reference/prebuilt/#create_react_agent).\n",
|
||||
"In this notebook we will show how those parameters map to the LangGraph react agent executor using the [create_react_agent](https://langchain-ai.github.io/langgraph/reference/prebuilt/#create_react_agent) prebuilt helper method.\n",
|
||||
"\n",
|
||||
"#### Prerequisites\n",
|
||||
"\n",
|
||||
@@ -195,7 +218,7 @@
|
||||
"\n",
|
||||
"Let's take a look at all of these below. We will pass in custom instructions to get the agent to respond in Spanish.\n",
|
||||
"\n",
|
||||
"First up, using AgentExecutor:"
|
||||
"First up, using `AgentExecutor`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -238,7 +261,16 @@
|
||||
"id": "bd5f5500-5ae4-4000-a9fd-8c5a2cc6404d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now, let's pass a custom system message to [react agent executor](https://langchain-ai.github.io/langgraph/reference/prebuilt/#create_react_agent). This can either be a string or a LangChain SystemMessage."
|
||||
"Now, let's pass a custom system message to [react agent executor](https://langchain-ai.github.io/langgraph/reference/prebuilt/#create_react_agent).\n",
|
||||
"\n",
|
||||
"LangGraph's prebuilt `create_react_agent` does not take a prompt template directly as a parameter, but instead takes a [`messages_modifier`](https://langchain-ai.github.io/langgraph/reference/prebuilt/#create_react_agent) parameter. This modifies messages before they are passed into the model, and can be one of four values:\n",
|
||||
"\n",
|
||||
"- A `SystemMessage`, which is added to the beginning of the list of messages.\n",
|
||||
"- A `string`, which is converted to a `SystemMessage` and added to the beginning of the list of messages.\n",
|
||||
"- A `Callable`, which should take in a list of messages. The output is then passed to the language model.\n",
|
||||
"- Or a [`Runnable`](/docs/concepts/#langchain-expression-language-lcel), which should should take in a list of messages. The output is then passed to the language model.\n",
|
||||
"\n",
|
||||
"Here's how it looks in action:"
|
||||
]
|
||||
},
|
||||
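For reference, a rough sketch of the string form (assuming `model` and `tools` are defined as in the surrounding cells of this guide):

```python
# A sketch, assuming `model` and `tools` are defined as in this guide.
from langchain_core.messages import HumanMessage
from langgraph.prebuilt import create_react_agent

# A plain string is converted to a SystemMessage and prepended to the input.
app = create_react_agent(model, tools, messages_modifier="Respond only in Spanish.")
app.invoke({"messages": [HumanMessage("what is the weather in sf?")]})
```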
{
|
||||
@@ -1212,6 +1244,18 @@
|
||||
"except GraphRecursionError as e:\n",
|
||||
" print(\"Stopping agent prematurely due to triggering stop condition\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "41377eb8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Next steps\n",
|
||||
"\n",
|
||||
"You've now learned how to migrate your LangChain agent executors to LangGraph.\n",
|
||||
"\n",
|
||||
"Next, check out other [LangGraph how-to guides](https://langchain-ai.github.io/langgraph/how-tos/)."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
|
||||
@@ -52,7 +52,12 @@
|
||||
" (\"system\", \"Describe the image provided\"),\n",
|
||||
" (\n",
|
||||
" \"user\",\n",
|
||||
" [{\"type\": \"image_url\", \"image_url\": \"data:image/jpeg;base64,{image_data}\"}],\n",
|
||||
" [\n",
|
||||
" {\n",
|
||||
" \"type\": \"image_url\",\n",
|
||||
" \"image_url\": {\"url\": \"data:image/jpeg;base64,{image_data}\"},\n",
|
||||
" }\n",
|
||||
" ],\n",
|
||||
" ),\n",
|
||||
" ]\n",
|
||||
")"
|
||||
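Pulled together, the corrected format from this diff looks like the following sketch; note that `image_url` is a dict with a `url` key rather than a bare string:

```python
# A sketch of the corrected multimodal prompt format shown in this diff.
from langchain_core.prompts import ChatPromptTemplate

prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "Describe the image provided"),
        (
            "user",
            [
                {
                    "type": "image_url",
                    "image_url": {"url": "data:image/jpeg;base64,{image_data}"},
                }
            ],
        ),
    ]
)
```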
@@ -110,11 +115,11 @@
|
||||
" [\n",
|
||||
" {\n",
|
||||
" \"type\": \"image_url\",\n",
|
||||
" \"image_url\": \"data:image/jpeg;base64,{image_data1}\",\n",
|
||||
" \"image_url\": {\"url\": \"data:image/jpeg;base64,{image_data1}\"},\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"type\": \"image_url\",\n",
|
||||
" \"image_url\": \"data:image/jpeg;base64,{image_data2}\",\n",
|
||||
" \"image_url\": {\"url\": \"data:image/jpeg;base64,{image_data2}\"},\n",
|
||||
" },\n",
|
||||
" ],\n",
|
||||
" ),\n",
|
||||
|
||||
305
docs/docs/how_to/serialization.ipynb
Normal file
@@ -0,0 +1,305 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ab3dc782-321e-4503-96ee-ac88a15e4b5e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to save and load LangChain objects\n",
|
||||
"\n",
|
||||
"LangChain classes implement standard methods for serialization. Serializing LangChain objects using these methods confer some advantages:\n",
|
||||
"\n",
|
||||
"- Secrets, such as API keys, are separated from other parameters and can be loaded back to the object on de-serialization;\n",
|
||||
"- De-serialization is kept compatible across package versions, so objects that were serialized with one version of LangChain can be properly de-serialized with another.\n",
|
||||
"\n",
|
||||
"To save and load LangChain objects using this system, use the `dumpd`, `dumps`, `load`, and `loads` functions in the [load module](https://api.python.langchain.com/en/latest/core_api_reference.html#module-langchain_core.load) of `langchain-core`. These functions support JSON and JSON-serializable objects.\n",
|
||||
"\n",
|
||||
"All LangChain objects that inherit from [Serializable](https://api.python.langchain.com/en/latest/load/langchain_core.load.serializable.Serializable.html) are JSON-serializable. Examples include [messages](https://api.python.langchain.com/en/latest/core_api_reference.html#module-langchain_core.messages), [document objects](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html) (e.g., as returned from [retrievers](/docs/concepts/#retrievers)), and most [Runnables](/docs/concepts/#langchain-expression-language-lcel), such as chat models, retrievers, and [chains](/docs/how_to/sequence) implemented with the LangChain Expression Language.\n",
|
||||
"\n",
|
||||
"Below we walk through an example with a simple [LLM chain](/docs/tutorials/llm_chain).\n",
|
||||
"\n",
|
||||
":::{.callout-caution}\n",
|
||||
"\n",
|
||||
"De-serialization using `load` and `loads` can instantiate any serializable LangChain object. Only use this feature with trusted inputs!\n",
|
||||
"\n",
|
||||
"De-serialization is a beta feature and is subject to change.\n",
|
||||
":::"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "f85d9e51-2a36-4f69-83b1-c716cd43f790",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.load import dumpd, dumps, load, loads\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\"system\", \"Translate the following into {language}:\"),\n",
|
||||
" (\"user\", \"{text}\"),\n",
|
||||
" ],\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", api_key=\"llm-api-key\")\n",
|
||||
"\n",
|
||||
"chain = prompt | llm"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "356ea99f-5cb5-4433-9a6c-2443d2be9ed3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Saving objects\n",
|
||||
"\n",
|
||||
"### To json"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "26516764-d46b-4357-a6c6-bd8315bfa530",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{\n",
|
||||
" \"lc\": 1,\n",
|
||||
" \"type\": \"constructor\",\n",
|
||||
" \"id\": [\n",
|
||||
" \"langchain\",\n",
|
||||
" \"schema\",\n",
|
||||
" \"runnable\",\n",
|
||||
" \"RunnableSequence\"\n",
|
||||
" ],\n",
|
||||
" \"kwargs\": {\n",
|
||||
" \"first\": {\n",
|
||||
" \"lc\": 1,\n",
|
||||
" \"type\": \"constructor\",\n",
|
||||
" \"id\": [\n",
|
||||
" \"langchain\",\n",
|
||||
" \"prompts\",\n",
|
||||
" \"chat\",\n",
|
||||
" \"ChatPromptTemplate\"\n",
|
||||
" ],\n",
|
||||
" \"kwargs\": {\n",
|
||||
" \"input_variables\": [\n",
|
||||
" \"language\",\n",
|
||||
" \"text\"\n",
|
||||
" ],\n",
|
||||
" \"messages\": [\n",
|
||||
" {\n",
|
||||
" \"lc\": 1,\n",
|
||||
" \"type\": \"constructor\",\n",
|
||||
" \n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"string_representation = dumps(chain, pretty=True)\n",
|
||||
"print(string_representation[:500])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "bd425716-545d-466b-a4e5-dc9952cfd72a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### To a json-serializable Python dict"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "6561a968-1741-4419-8c29-e705b9d0ef39",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"<class 'dict'>\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"dict_representation = dumpd(chain)\n",
|
||||
"\n",
|
||||
"print(type(dict_representation))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "711e986e-dd24-4839-9e38-c57903378a5f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### To disk"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "f818378b-f4d6-43a7-895b-76cf7359b157",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import json\n",
|
||||
"\n",
|
||||
"with open(\"/tmp/chain.json\", \"w\") as fp:\n",
|
||||
" json.dump(string_representation, fp)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1e621a32-ff5f-4627-ad59-88cacba73c6b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Note that the API key is withheld from the serialized representations. Parameters that are considered secret are specified by the `.lc_secrets` attribute of the LangChain object:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "8225e150-000a-4fbc-9f3d-09568f4b560b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'openai_api_key': 'OPENAI_API_KEY'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.last.lc_secrets"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6d090177-eb1c-4bfb-8c13-29286afe17d9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Loading objects\n",
|
||||
"\n",
|
||||
"Specifying `secrets_map` in `load` and `loads` will load the corresponding secrets onto the de-serialized LangChain object.\n",
|
||||
"\n",
|
||||
"### From string"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "54a66267-5f3a-40a2-bfcc-8b44bb24c154",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = loads(string_representation, secrets_map={\"OPENAI_API_KEY\": \"llm-api-key\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5ed9aff1-92cc-44ba-b2ec-4d12f924fa03",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### From dict"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "76979932-13de-4427-9f88-040fb05a6778",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = load(dict_representation, secrets_map={\"OPENAI_API_KEY\": \"llm-api-key\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7dd81a2a-5163-414d-ab42-f1c35e30471b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### From disk"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "033f62a7-3377-472a-be58-718baa6ab445",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"with open(\"/tmp/chain.json\", \"r\") as fp:\n",
|
||||
" chain = loads(json.load(fp), secrets_map={\"OPENAI_API_KEY\": \"llm-api-key\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "dc520fdb-035a-468f-a8a8-c3ffe8ed98eb",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Note that we recover the API key specified at the start of the guide:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "566b2475-d9b4-432b-8c3b-27c2f183624e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'llm-api-key'"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.last.openai_api_key.get_secret_value()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7b4cba53-e1d5-4979-927e-b5794a02afc3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -351,7 +351,7 @@
|
||||
"id": "ab1b2e7c-6ea8-4674-98eb-a43c69f5c19d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"To help enforce proper use of our Python tool, we'll using [tool calling](/docs/how_to/tool_calling/):"
|
||||
"To help enforce proper use of our Python tool, we'll using [tool calling](/docs/how_to/tool_calling):"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -243,7 +243,7 @@
|
||||
"text": [
|
||||
"================================\u001b[1m System Message \u001b[0m================================\n",
|
||||
"\n",
|
||||
"You are a \u001b[33;1m\u001b[1;3m{dialect}\u001b[0m expert. Given an input question, creat a syntactically correct \u001b[33;1m\u001b[1;3m{dialect}\u001b[0m query to run.\n",
|
||||
"You are a \u001b[33;1m\u001b[1;3m{dialect}\u001b[0m expert. Given an input question, create a syntactically correct \u001b[33;1m\u001b[1;3m{dialect}\u001b[0m query to run.\n",
|
||||
"Unless the user specifies in the question a specific number of examples to obtain, query for at most \u001b[33;1m\u001b[1;3m{top_k}\u001b[0m results using the LIMIT clause as per \u001b[33;1m\u001b[1;3m{dialect}\u001b[0m. You can order the results to return the most informative data in the database.\n",
|
||||
"Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in double quotes (\") to denote them as delimited identifiers.\n",
|
||||
"Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.\n",
|
||||
@@ -275,7 +275,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"system = \"\"\"You are a {dialect} expert. Given an input question, creat a syntactically correct {dialect} query to run.\n",
|
||||
"system = \"\"\"You are a {dialect} expert. Given an input question, create a syntactically correct {dialect} query to run.\n",
|
||||
"Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per {dialect}. You can order the results to return the most informative data in the database.\n",
|
||||
"Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in double quotes (\") to denote them as delimited identifiers.\n",
|
||||
"Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.\n",
|
||||
|
||||
@@ -250,7 +250,7 @@
|
||||
"id": "e28c14d3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Alternatively, you can use tool calling directly to allow the model to choose between options, if your [chosen model supports it](/docs/integrations/chat/). This involves a bit more parsing and setup but in some instances leads to better performance because you don't have to use nested schemas. See [this how-to guide](/docs/how_to/tool_calling/) for more details."
|
||||
"Alternatively, you can use tool calling directly to allow the model to choose between options, if your [chosen model supports it](/docs/integrations/chat/). This involves a bit more parsing and setup but in some instances leads to better performance because you don't have to use nested schemas. See [this how-to guide](/docs/how_to/tool_calling) for more details."
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -1,5 +1,18 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"metadata": {
|
||||
"vscode": {
|
||||
"languageId": "raw"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"---\n",
|
||||
"keywords: [tool calling, tool call]\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -11,6 +24,7 @@
|
||||
"This guide assumes familiarity with the following concepts:\n",
|
||||
"- [Chat models](/docs/concepts/#chat-models)\n",
|
||||
"- [LangChain Tools](/docs/concepts/#tools)\n",
|
||||
"- [Output parsers](/docs/concepts/#output-parsers)\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
@@ -38,6 +52,12 @@
|
||||
"parameters matching the desired schema, then treat the generated output as your final \n",
|
||||
"result.\n",
|
||||
"\n",
|
||||
":::note\n",
|
||||
"\n",
|
||||
"If you only need formatted values, try the [.with_structured_output()](/docs/how_to/structured_output/#the-with_structured_output-method) chat model method as a simpler entrypoint.\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"However, tool calling goes beyond [structured output](/docs/how_to/structured_output/)\n",
|
||||
"since you can pass responses from called tools back to the model to create longer interactions.\n",
|
||||
"For instance, given a search engine tool, an LLM might handle a \n",
|
||||
@@ -52,8 +72,13 @@
|
||||
"support variants of a tool calling feature.\n",
|
||||
"\n",
|
||||
"LangChain implements standard interfaces for defining tools, passing them to LLMs, \n",
|
||||
"and representing tool calls. This guide will show you how to use them.\n",
|
||||
"\n",
|
||||
"and representing tool calls. This guide and the other How-to pages in the Tool section will show you how to use tools with LangChain."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Passing tools to chat models\n",
|
||||
"\n",
|
||||
"Chat models that support tool calling features implement a `.bind_tools` method, which \n",
|
||||
@@ -153,7 +178,7 @@
|
||||
"# | output: false\n",
|
||||
"# | echo: false\n",
|
||||
"\n",
|
||||
"%pip install -qU langchain langchain_openai\n",
|
||||
"%pip install -qU langchain_openai\n",
|
||||
"\n",
|
||||
"import os\n",
|
||||
"from getpass import getpass\n",
|
||||
@@ -167,81 +192,33 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm_with_tools = llm.bind_tools(tools)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can also use the `tool_choice` parameter to ensure certain behavior. For example, we can force our tool to call the multiply tool by using the following code:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_9cViskmLvPnHjXk9tbVla5HA', 'function': {'arguments': '{\"a\":2,\"b\":4}', 'name': 'Multiply'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 9, 'prompt_tokens': 103, 'total_tokens': 112}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-095b827e-2bdd-43bb-8897-c843f4504883-0', tool_calls=[{'name': 'Multiply', 'args': {'a': 2, 'b': 4}, 'id': 'call_9cViskmLvPnHjXk9tbVla5HA'}], usage_metadata={'input_tokens': 103, 'output_tokens': 9, 'total_tokens': 112})"
|
||||
"AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_g4RuAijtDcSeM96jXyCuiLSN', 'function': {'arguments': '{\"a\":3,\"b\":12}', 'name': 'Multiply'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 18, 'prompt_tokens': 95, 'total_tokens': 113}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-5157d15a-7e0e-4ab1-af48-3d98010cd152-0', tool_calls=[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_g4RuAijtDcSeM96jXyCuiLSN'}], usage_metadata={'input_tokens': 95, 'output_tokens': 18, 'total_tokens': 113})"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_forced_to_multiply = llm.bind_tools(tools, tool_choice=\"Multiply\")\n",
|
||||
"llm_forced_to_multiply.invoke(\"what is 2 + 4\")"
|
||||
"llm_with_tools = llm.bind_tools(tools)\n",
|
||||
"\n",
|
||||
"query = \"What is 3 * 12?\"\n",
|
||||
"\n",
|
||||
"llm_with_tools.invoke(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Even if we pass it something that doesn't require multiplcation - it will still call the tool!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can also just force our tool to select at least one of our tools by passing in the \"any\" (or \"required\" which is OpenAI specific) keyword to the `tool_choice` parameter."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_mCSiJntCwHJUBfaHZVUB2D8W', 'function': {'arguments': '{\"a\":1,\"b\":2}', 'name': 'Add'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 15, 'prompt_tokens': 94, 'total_tokens': 109}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-28f75260-9900-4bed-8cd3-f1579abb65e5-0', tool_calls=[{'name': 'Add', 'args': {'a': 1, 'b': 2}, 'id': 'call_mCSiJntCwHJUBfaHZVUB2D8W'}], usage_metadata={'input_tokens': 94, 'output_tokens': 15, 'total_tokens': 109})"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_forced_to_use_tool = llm.bind_tools(tools, tool_choice=\"any\")\n",
|
||||
"llm_forced_to_use_tool.invoke(\"What day is today?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"As we can see, even though the prompt didn't really suggest a tool call, our LLM made one since it was forced to do so. You can look at the docs for [`bind_tool`](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.BaseChatOpenAI.html#langchain_openai.chat_models.base.BaseChatOpenAI.bind_tools) to learn about all the ways to customize how your LLM selects tools."
|
||||
"As we can see, even though the prompt didn't really suggest a tool call, our LLM made one since it was forced to do so. You can look at the docs for [bind_tools()](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.BaseChatOpenAI.html#langchain_openai.chat_models.base.BaseChatOpenAI.bind_tools) to learn about all the ways to customize how your LLM selects tools."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -273,10 +250,10 @@
|
||||
"text/plain": [
|
||||
"[{'name': 'Multiply',\n",
|
||||
" 'args': {'a': 3, 'b': 12},\n",
|
||||
" 'id': 'call_KquHA7mSbgtAkpkmRPaFnJKa'},\n",
|
||||
" 'id': 'call_TnadLbWJu9HwDULRb51RNSMw'},\n",
|
||||
" {'name': 'Add',\n",
|
||||
" 'args': {'a': 11, 'b': 49},\n",
|
||||
" 'id': 'call_Fl0hQi4IBTzlpaJYlM5kPQhE'}]"
|
||||
" 'id': 'call_Q9vt1up05sOQScXvUYWzSpCg'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
@@ -302,7 +279,8 @@
|
||||
"a name, string arguments, identifier, and error message.\n",
|
||||
"\n",
|
||||
"If desired, [output parsers](/docs/how_to#output-parsers) can further \n",
|
||||
"process the output. For example, we can convert back to the original Pydantic class:"
|
||||
"process the output. For example, we can convert existing values populated on the `.tool_calls` attribute back to the original Pydantic class using the\n",
|
||||
"[PydanticToolsParser](https://api.python.langchain.com/en/latest/output_parsers/langchain_core.output_parsers.openai_tools.PydanticToolsParser.html):"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -322,443 +300,27 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.output_parsers.openai_tools import PydanticToolsParser\n",
|
||||
"from langchain_core.output_parsers import PydanticToolsParser\n",
|
||||
"\n",
|
||||
"chain = llm_with_tools | PydanticToolsParser(tools=[Multiply, Add])\n",
|
||||
"chain.invoke(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Streaming\n",
|
||||
"\n",
|
||||
"When tools are called in a streaming context, \n",
|
||||
"[message chunks](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessageChunk.html#langchain_core.messages.ai.AIMessageChunk) \n",
|
||||
"will be populated with [tool call chunk](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.tool.ToolCallChunk.html#langchain_core.messages.tool.ToolCallChunk) \n",
|
||||
"objects in a list via the `.tool_call_chunks` attribute. A `ToolCallChunk` includes \n",
|
||||
"optional string fields for the tool `name`, `args`, and `id`, and includes an optional \n",
|
||||
"integer field `index` that can be used to join chunks together. Fields are optional \n",
|
||||
"because portions of a tool call may be streamed across different chunks (e.g., a chunk \n",
|
||||
"that includes a substring of the arguments may have null values for the tool name and id).\n",
|
||||
"\n",
|
||||
"Because message chunks inherit from their parent message class, an \n",
|
||||
"[AIMessageChunk](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessageChunk.html#langchain_core.messages.ai.AIMessageChunk) \n",
|
||||
"with tool call chunks will also include `.tool_calls` and `.invalid_tool_calls` fields. \n",
|
||||
"These fields are parsed best-effort from the message's tool call chunks.\n",
|
||||
"\n",
|
||||
"Note that not all providers currently support streaming for tool calls:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[]\n",
|
||||
"[{'name': 'Multiply', 'args': '', 'id': 'call_3aQwTP9CYlFxwOvQZPHDu6wL', 'index': 0}]\n",
|
||||
"[{'name': None, 'args': '{\"a\"', 'id': None, 'index': 0}]\n",
|
||||
"[{'name': None, 'args': ': 3, ', 'id': None, 'index': 0}]\n",
|
||||
"[{'name': None, 'args': '\"b\": 1', 'id': None, 'index': 0}]\n",
|
||||
"[{'name': None, 'args': '2}', 'id': None, 'index': 0}]\n",
|
||||
"[{'name': 'Add', 'args': '', 'id': 'call_SQUoSsJz2p9Kx2x73GOgN1ja', 'index': 1}]\n",
|
||||
"[{'name': None, 'args': '{\"a\"', 'id': None, 'index': 1}]\n",
|
||||
"[{'name': None, 'args': ': 11,', 'id': None, 'index': 1}]\n",
|
||||
"[{'name': None, 'args': ' \"b\": ', 'id': None, 'index': 1}]\n",
|
||||
"[{'name': None, 'args': '49}', 'id': None, 'index': 1}]\n",
|
||||
"[]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"async for chunk in llm_with_tools.astream(query):\n",
|
||||
" print(chunk.tool_call_chunks)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Note that adding message chunks will merge their corresponding tool call chunks. This is the principle by which LangChain's various [tool output parsers](/docs/how_to/output_parser_structured) support streaming.\n",
|
||||
"\n",
|
||||
"For example, below we accumulate tool call chunks:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[]\n",
|
||||
"[{'name': 'Multiply', 'args': '', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}]\n",
|
||||
"[{'name': 'Multiply', 'args': '{\"a\"', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}]\n",
|
||||
"[{'name': 'Multiply', 'args': '{\"a\": 3, ', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}]\n",
|
||||
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 1', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}]\n",
|
||||
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 12}', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}]\n",
|
||||
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 12}', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}, {'name': 'Add', 'args': '', 'id': 'call_b4iMiB3chGNGqbt5SjqqD2Wh', 'index': 1}]\n",
|
||||
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 12}', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}, {'name': 'Add', 'args': '{\"a\"', 'id': 'call_b4iMiB3chGNGqbt5SjqqD2Wh', 'index': 1}]\n",
|
||||
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 12}', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}, {'name': 'Add', 'args': '{\"a\": 11,', 'id': 'call_b4iMiB3chGNGqbt5SjqqD2Wh', 'index': 1}]\n",
|
||||
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 12}', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}, {'name': 'Add', 'args': '{\"a\": 11, \"b\": ', 'id': 'call_b4iMiB3chGNGqbt5SjqqD2Wh', 'index': 1}]\n",
|
||||
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 12}', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}, {'name': 'Add', 'args': '{\"a\": 11, \"b\": 49}', 'id': 'call_b4iMiB3chGNGqbt5SjqqD2Wh', 'index': 1}]\n",
|
||||
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 12}', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}, {'name': 'Add', 'args': '{\"a\": 11, \"b\": 49}', 'id': 'call_b4iMiB3chGNGqbt5SjqqD2Wh', 'index': 1}]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"first = True\n",
|
||||
"async for chunk in llm_with_tools.astream(query):\n",
|
||||
" if first:\n",
|
||||
" gathered = chunk\n",
|
||||
" first = False\n",
|
||||
" else:\n",
|
||||
" gathered = gathered + chunk\n",
|
||||
"\n",
|
||||
" print(gathered.tool_call_chunks)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"<class 'str'>\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(type(gathered.tool_call_chunks[0][\"args\"]))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"And below we accumulate tool calls to demonstrate partial parsing:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[]\n",
|
||||
"[]\n",
|
||||
"[{'name': 'Multiply', 'args': {}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}]\n",
|
||||
"[{'name': 'Multiply', 'args': {'a': 3}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}]\n",
|
||||
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 1}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}]\n",
|
||||
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}]\n",
|
||||
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}]\n",
|
||||
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}, {'name': 'Add', 'args': {}, 'id': 'call_54Hx3DGjZitFlEjgMe1DYonh'}]\n",
|
||||
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}, {'name': 'Add', 'args': {'a': 11}, 'id': 'call_54Hx3DGjZitFlEjgMe1DYonh'}]\n",
|
||||
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}, {'name': 'Add', 'args': {'a': 11}, 'id': 'call_54Hx3DGjZitFlEjgMe1DYonh'}]\n",
|
||||
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}, {'name': 'Add', 'args': {'a': 11, 'b': 49}, 'id': 'call_54Hx3DGjZitFlEjgMe1DYonh'}]\n",
|
||||
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}, {'name': 'Add', 'args': {'a': 11, 'b': 49}, 'id': 'call_54Hx3DGjZitFlEjgMe1DYonh'}]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"first = True\n",
|
||||
"async for chunk in llm_with_tools.astream(query):\n",
|
||||
" if first:\n",
|
||||
" gathered = chunk\n",
|
||||
" first = False\n",
|
||||
" else:\n",
|
||||
" gathered = gathered + chunk\n",
|
||||
"\n",
|
||||
" print(gathered.tool_calls)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"<class 'dict'>\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(type(gathered.tool_calls[0][\"args\"]))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Passing tool outputs to the model\n",
|
||||
"\n",
|
||||
"If we're using the model-generated tool invocations to actually call tools and want to pass the tool results back to the model, we can do so using `ToolMessage`s."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[HumanMessage(content='What is 3 * 12? Also, what is 11 + 49?'),\n",
|
||||
" AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_svc2GLSxNFALbaCAbSjMI9J8', 'function': {'arguments': '{\"a\": 3, \"b\": 12}', 'name': 'Multiply'}, 'type': 'function'}, {'id': 'call_r8jxte3zW6h3MEGV3zH2qzFh', 'function': {'arguments': '{\"a\": 11, \"b\": 49}', 'name': 'Add'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 50, 'prompt_tokens': 105, 'total_tokens': 155}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': 'fp_d9767fc5b9', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-a79ad1dd-95f1-4a46-b688-4c83f327a7b3-0', tool_calls=[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_svc2GLSxNFALbaCAbSjMI9J8'}, {'name': 'Add', 'args': {'a': 11, 'b': 49}, 'id': 'call_r8jxte3zW6h3MEGV3zH2qzFh'}]),\n",
|
||||
" ToolMessage(content='36', tool_call_id='call_svc2GLSxNFALbaCAbSjMI9J8'),\n",
|
||||
" ToolMessage(content='60', tool_call_id='call_r8jxte3zW6h3MEGV3zH2qzFh')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.messages import HumanMessage, ToolMessage\n",
|
||||
"\n",
|
||||
"messages = [HumanMessage(query)]\n",
|
||||
"ai_msg = llm_with_tools.invoke(messages)\n",
|
||||
"messages.append(ai_msg)\n",
|
||||
"for tool_call in ai_msg.tool_calls:\n",
|
||||
" selected_tool = {\"add\": add, \"multiply\": multiply}[tool_call[\"name\"].lower()]\n",
|
||||
" tool_output = selected_tool.invoke(tool_call[\"args\"])\n",
|
||||
" messages.append(ToolMessage(tool_output, tool_call_id=tool_call[\"id\"]))\n",
|
||||
"messages"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='3 * 12 is 36 and 11 + 49 is 60.', response_metadata={'token_usage': {'completion_tokens': 18, 'prompt_tokens': 171, 'total_tokens': 189}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': 'fp_d9767fc5b9', 'finish_reason': 'stop', 'logprobs': None}, id='run-20b52149-e00d-48ea-97cf-f8de7a255f8c-0')"
|
||||
]
|
||||
},
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_with_tools.invoke(messages)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Note that we pass back the same `id` in the `ToolMessage` as the what we receive from the model in order to help the model match tool responses with tool calls.\n",
|
||||
"\n",
|
||||
"## Few-shot prompting\n",
|
||||
"\n",
|
||||
"For more complex tool use it's very useful to add few-shot examples to the prompt. We can do this by adding `AIMessage`s with `ToolCall`s and corresponding `ToolMessage`s to our prompt.\n",
|
||||
"\n",
|
||||
"For example, even with some special instructions our model can get tripped up by order of operations:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'name': 'Multiply',\n",
|
||||
" 'args': {'a': 119, 'b': 8},\n",
|
||||
" 'id': 'call_T88XN6ECucTgbXXkyDeC2CQj'},\n",
|
||||
" {'name': 'Add',\n",
|
||||
" 'args': {'a': 952, 'b': -20},\n",
|
||||
" 'id': 'call_licdlmGsRqzup8rhqJSb1yZ4'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_with_tools.invoke(\n",
|
||||
" \"Whats 119 times 8 minus 20. Don't do any math yourself, only use tools for math. Respect order of operations\"\n",
|
||||
").tool_calls"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The model shouldn't be trying to add anything yet, since it technically can't know the results of 119 * 8 yet.\n",
|
||||
"\n",
|
||||
"By adding a prompt with some examples we can correct this behavior:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'name': 'Multiply',\n",
|
||||
" 'args': {'a': 119, 'b': 8},\n",
|
||||
" 'id': 'call_9MvuwQqg7dlJupJcoTWiEsDo'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 15,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.messages import AIMessage\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.runnables import RunnablePassthrough\n",
|
||||
"\n",
|
||||
"examples = [\n",
|
||||
" HumanMessage(\n",
|
||||
" \"What's the product of 317253 and 128472 plus four\", name=\"example_user\"\n",
|
||||
" ),\n",
|
||||
" AIMessage(\n",
|
||||
" \"\",\n",
|
||||
" name=\"example_assistant\",\n",
|
||||
" tool_calls=[\n",
|
||||
" {\"name\": \"Multiply\", \"args\": {\"x\": 317253, \"y\": 128472}, \"id\": \"1\"}\n",
|
||||
" ],\n",
|
||||
" ),\n",
|
||||
" ToolMessage(\"16505054784\", tool_call_id=\"1\"),\n",
|
||||
" AIMessage(\n",
|
||||
" \"\",\n",
|
||||
" name=\"example_assistant\",\n",
|
||||
" tool_calls=[{\"name\": \"Add\", \"args\": {\"x\": 16505054784, \"y\": 4}, \"id\": \"2\"}],\n",
|
||||
" ),\n",
|
||||
" ToolMessage(\"16505054788\", tool_call_id=\"2\"),\n",
|
||||
" AIMessage(\n",
|
||||
" \"The product of 317253 and 128472 plus four is 16505054788\",\n",
|
||||
" name=\"example_assistant\",\n",
|
||||
" ),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"system = \"\"\"You are bad at math but are an expert at using a calculator. \n",
|
||||
"\n",
|
||||
"Use past tool usage as an example of how to correctly use the tools.\"\"\"\n",
|
||||
"few_shot_prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\"system\", system),\n",
|
||||
" *examples,\n",
|
||||
" (\"human\", \"{query}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = {\"query\": RunnablePassthrough()} | few_shot_prompt | llm_with_tools\n",
|
||||
"chain.invoke(\"Whats 119 times 8 minus 20\").tool_calls"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"And we get the correct output this time.\n",
|
||||
"\n",
|
||||
"Here's what the [LangSmith trace](https://smith.langchain.com/public/f70550a1-585f-4c9d-a643-13148ab1616f/r) looks like."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Binding model-specific formats (advanced)\n",
|
||||
"\n",
|
||||
"Providers adopt different conventions for formatting tool schemas. \n",
|
||||
"For instance, OpenAI uses a format like this:\n",
|
||||
"\n",
|
||||
"- `type`: The type of the tool. At the time of writing, this is always `\"function\"`.\n",
|
||||
"- `function`: An object containing tool parameters.\n",
|
||||
"- `function.name`: The name of the schema to output.\n",
|
||||
"- `function.description`: A high level description of the schema to output.\n",
|
||||
"- `function.parameters`: The nested details of the schema you want to extract, formatted as a [JSON schema](https://json-schema.org/) dict.\n",
|
||||
"\n",
|
||||
"We can bind this model-specific format directly to the model as well if preferred. Here's an example:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_mn4ELw1NbuE0DFYhIeK0GrPe', 'function': {'arguments': '{\"a\":119,\"b\":8}', 'name': 'multiply'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 17, 'prompt_tokens': 62, 'total_tokens': 79}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_c2295e73ad', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-353e8a9a-7125-4f94-8c68-4f3da4c21120-0', tool_calls=[{'name': 'multiply', 'args': {'a': 119, 'b': 8}, 'id': 'call_mn4ELw1NbuE0DFYhIeK0GrPe'}])"
|
||||
]
|
||||
},
|
||||
"execution_count": 18,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"model = ChatOpenAI()\n",
|
||||
"\n",
|
||||
"model_with_tools = model.bind(\n",
|
||||
" tools=[\n",
|
||||
" {\n",
|
||||
" \"type\": \"function\",\n",
|
||||
" \"function\": {\n",
|
||||
" \"name\": \"multiply\",\n",
|
||||
" \"description\": \"Multiply two integers together.\",\n",
|
||||
" \"parameters\": {\n",
|
||||
" \"type\": \"object\",\n",
|
||||
" \"properties\": {\n",
|
||||
" \"a\": {\"type\": \"number\", \"description\": \"First integer\"},\n",
|
||||
" \"b\": {\"type\": \"number\", \"description\": \"Second integer\"},\n",
|
||||
" },\n",
|
||||
" \"required\": [\"a\", \"b\"],\n",
|
||||
" },\n",
|
||||
" },\n",
|
||||
" }\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"model_with_tools.invoke(\"Whats 119 times 8?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This is functionally equivalent to the `bind_tools()` calls above."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Next steps\n",
|
||||
"\n",
|
||||
"Now you've learned how to bind tool schemas to a chat model and to call those tools. Next, check out some more specific uses of tool calling:\n",
|
||||
"Now you've learned how to bind tool schemas to a chat model and to call those tools. Next, you can learn more about how to use tools:\n",
|
||||
"\n",
|
||||
"- Few shot promting [with tools](/docs/how_to/tools_few_shot/)\n",
|
||||
"- Stream [tool calls](/docs/how_to/tool_streaming/)\n",
|
||||
"- Bind [model-specific tools](/docs/how_to/tools_model_specific/)\n",
|
||||
"- Pass [runtime values to tools](/docs/how_to/tool_runtime)\n",
|
||||
"- Pass [tool results back to model](/docs/how_to/tool_results_pass_to_model)\n",
|
||||
"\n",
|
||||
"You can also check out some more specific uses of tool calling:\n",
|
||||
"\n",
|
||||
"- Building [tool-using chains and agents](/docs/how_to#tools)\n",
|
||||
"- Getting [structured outputs](/docs/how_to/structured_output/) from models"
|
||||
@@ -781,7 +343,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
"version": "3.10.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
108
docs/docs/how_to/tool_calling_parallel.ipynb
Normal file
@@ -0,0 +1,108 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Disabling parallel tool calling (OpenAI only)\n",
|
||||
"\n",
|
||||
"OpenAI tool calling performs tool calling in parallel by default. That means that if we ask a question like \"What is the weather in Tokyo, New York, and Chicago?\" and we have a tool for getting the weather, it will call the tool 3 times in parallel. We can force it to call only a single tool once by using the ``parallel_tool_call`` parameter."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"First let's set up our tools and model:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.tools import tool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def add(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Adds a and b.\"\"\"\n",
|
||||
" return a + b\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def multiply(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Multiplies a and b.\"\"\"\n",
|
||||
" return a * b\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"tools = [add, multiply]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"from getpass import getpass\n",
|
||||
"\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now let's show a quick example of how disabling parallel tool calls work:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'name': 'add',\n",
|
||||
" 'args': {'a': 2, 'b': 2},\n",
|
||||
" 'id': 'call_Hh4JOTCDM85Sm9Pr84VKrWu5'}]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_with_tools = llm.bind_tools(tools, parallel_tool_calls=False)\n",
|
||||
"llm_with_tools.invoke(\"Please call the first tool two times\").tool_calls"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"As we can see, even though we explicitly told the model to call a tool twice, by disabling parallel tool calls the model was constrained to only calling one."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
126
docs/docs/how_to/tool_choice.ipynb
Normal file
@@ -0,0 +1,126 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to force tool calling behavior\n",
|
||||
"\n",
|
||||
"In order to force our LLM to spelect a specific tool, we can use the `tool_choice` parameter to ensure certain behavior. First, let's define our model and tools:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.tools import tool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def add(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Adds a and b.\"\"\"\n",
|
||||
" return a + b\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def multiply(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Multiplies a and b.\"\"\"\n",
|
||||
" return a * b\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"tools = [add, multiply]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# | output: false\n",
|
||||
"# | echo: false\n",
|
||||
"\n",
|
||||
"%pip install -qU langchain langchain_openai\n",
|
||||
"\n",
|
||||
"import os\n",
|
||||
"from getpass import getpass\n",
|
||||
"\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For example, we can force our tool to call the multiply tool by using the following code:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_9cViskmLvPnHjXk9tbVla5HA', 'function': {'arguments': '{\"a\":2,\"b\":4}', 'name': 'Multiply'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 9, 'prompt_tokens': 103, 'total_tokens': 112}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-095b827e-2bdd-43bb-8897-c843f4504883-0', tool_calls=[{'name': 'Multiply', 'args': {'a': 2, 'b': 4}, 'id': 'call_9cViskmLvPnHjXk9tbVla5HA'}], usage_metadata={'input_tokens': 103, 'output_tokens': 9, 'total_tokens': 112})"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_forced_to_multiply = llm.bind_tools(tools, tool_choice=\"Multiply\")\n",
|
||||
"llm_forced_to_multiply.invoke(\"what is 2 + 4\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Even if we pass it something that doesn't require multiplcation - it will still call the tool!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can also just force our tool to select at least one of our tools by passing in the \"any\" (or \"required\" which is OpenAI specific) keyword to the `tool_choice` parameter."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_mCSiJntCwHJUBfaHZVUB2D8W', 'function': {'arguments': '{\"a\":1,\"b\":2}', 'name': 'Add'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 15, 'prompt_tokens': 94, 'total_tokens': 109}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-28f75260-9900-4bed-8cd3-f1579abb65e5-0', tool_calls=[{'name': 'Add', 'args': {'a': 1, 'b': 2}, 'id': 'call_mCSiJntCwHJUBfaHZVUB2D8W'}], usage_metadata={'input_tokens': 94, 'output_tokens': 15, 'total_tokens': 109})"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_forced_to_use_tool = llm.bind_tools(tools, tool_choice=\"any\")\n",
|
||||
"llm_forced_to_use_tool.invoke(\"What day is today?\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
127
docs/docs/how_to/tool_results_pass_to_model.ipynb
Normal file
@@ -0,0 +1,127 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to pass tool outputs to the model\n",
|
||||
"\n",
|
||||
"If we're using the model-generated tool invocations to actually call tools and want to pass the tool results back to the model, we can do so using `ToolMessage`s. First, let's define our tools and our model."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.tools import tool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def add(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Adds a and b.\"\"\"\n",
|
||||
" return a + b\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def multiply(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Multiplies a and b.\"\"\"\n",
|
||||
" return a * b\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"tools = [add, multiply]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"from getpass import getpass\n",
|
||||
"\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)\n",
|
||||
"llm_with_tools = llm.bind_tools(tools)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now we can use ``ToolMessage`` to pass back the output of the tool calls to the model."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[HumanMessage(content='What is 3 * 12? Also, what is 11 + 49?'),\n",
|
||||
" AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_svc2GLSxNFALbaCAbSjMI9J8', 'function': {'arguments': '{\"a\": 3, \"b\": 12}', 'name': 'Multiply'}, 'type': 'function'}, {'id': 'call_r8jxte3zW6h3MEGV3zH2qzFh', 'function': {'arguments': '{\"a\": 11, \"b\": 49}', 'name': 'Add'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 50, 'prompt_tokens': 105, 'total_tokens': 155}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': 'fp_d9767fc5b9', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-a79ad1dd-95f1-4a46-b688-4c83f327a7b3-0', tool_calls=[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_svc2GLSxNFALbaCAbSjMI9J8'}, {'name': 'Add', 'args': {'a': 11, 'b': 49}, 'id': 'call_r8jxte3zW6h3MEGV3zH2qzFh'}]),\n",
|
||||
" ToolMessage(content='36', tool_call_id='call_svc2GLSxNFALbaCAbSjMI9J8'),\n",
|
||||
" ToolMessage(content='60', tool_call_id='call_r8jxte3zW6h3MEGV3zH2qzFh')]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.messages import HumanMessage, ToolMessage\n",
|
||||
"\n",
|
||||
"query = \"What is 3 * 12? Also, what is 11 + 49?\"\n",
|
||||
"\n",
|
||||
"messages = [HumanMessage(query)]\n",
|
||||
"ai_msg = llm_with_tools.invoke(messages)\n",
|
||||
"messages.append(ai_msg)\n",
|
||||
"for tool_call in ai_msg.tool_calls:\n",
|
||||
" selected_tool = {\"add\": add, \"multiply\": multiply}[tool_call[\"name\"].lower()]\n",
|
||||
" tool_output = selected_tool.invoke(tool_call[\"args\"])\n",
|
||||
" messages.append(ToolMessage(tool_output, tool_call_id=tool_call[\"id\"]))\n",
|
||||
"messages"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='3 * 12 is 36 and 11 + 49 is 60.', response_metadata={'token_usage': {'completion_tokens': 18, 'prompt_tokens': 171, 'total_tokens': 189}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': 'fp_d9767fc5b9', 'finish_reason': 'stop', 'logprobs': None}, id='run-20b52149-e00d-48ea-97cf-f8de7a255f8c-0')"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_with_tools.invoke(messages)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Note that we pass back the same `id` in the `ToolMessage` as the what we receive from the model in order to help the model match tool responses with tool calls."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
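Putting the pieces together, here is the whole loop as one self-contained sketch (it assumes the `add`/`multiply` tools and the `llm_with_tools` object defined in this notebook):

```python
from langchain_core.messages import HumanMessage, ToolMessage

# Assumes `llm_with_tools`, `add`, and `multiply` from the cells above.
messages = [HumanMessage("What is 3 * 12? Also, what is 11 + 49?")]
ai_msg = llm_with_tools.invoke(messages)
messages.append(ai_msg)

# Execute each requested tool call and pass the result back in a
# ToolMessage, reusing the call's `id` so the model can match
# responses to requests.
tool_map = {"add": add, "multiply": multiply}
for tool_call in ai_msg.tool_calls:
    selected_tool = tool_map[tool_call["name"].lower()]
    tool_output = selected_tool.invoke(tool_call["args"])
    messages.append(ToolMessage(str(tool_output), tool_call_id=tool_call["id"]))

final_answer = llm_with_tools.invoke(messages)  # the model now sees the tool results
```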
@@ -12,7 +12,7 @@
|
||||
"- [Chat models](/docs/concepts/#chat-models)\n",
|
||||
"- [LangChain Tools](/docs/concepts/#tools)\n",
|
||||
"- [How to create tools](/docs/how_to/custom_tools)\n",
|
||||
"- [How to use a model to call tools](https://python.langchain.com/v0.2/docs/how_to/tool_calling/)\n",
|
||||
"- [How to use a model to call tools](https://python.langchain.com/v0.2/docs/how_to/tool_calling)\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
":::{.callout-info} Supported models\n",
|
||||
@@ -227,7 +227,7 @@
|
||||
"\n",
|
||||
"Chat models only output requests to invoke tools, they don't actually invoke the underlying tools.\n",
|
||||
"\n",
|
||||
"To see how to invoke the tools, please refer to [how to use a model to call tools](https://python.langchain.com/v0.2/docs/how_to/tool_calling/).\n",
|
||||
"To see how to invoke the tools, please refer to [how to use a model to call tools](https://python.langchain.com/v0.2/docs/how_to/tool_calling).\n",
|
||||
":::"
|
||||
]
|
||||
}
235
docs/docs/how_to/tool_streaming.ipynb
Normal file
235
docs/docs/how_to/tool_streaming.ipynb
Normal file
@@ -0,0 +1,235 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to stream tool calls\n",
|
||||
"\n",
|
||||
"When tools are called in a streaming context, \n",
|
||||
"[message chunks](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessageChunk.html#langchain_core.messages.ai.AIMessageChunk) \n",
|
||||
"will be populated with [tool call chunk](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.tool.ToolCallChunk.html#langchain_core.messages.tool.ToolCallChunk) \n",
|
||||
"objects in a list via the `.tool_call_chunks` attribute. A `ToolCallChunk` includes \n",
|
||||
"optional string fields for the tool `name`, `args`, and `id`, and includes an optional \n",
|
||||
"integer field `index` that can be used to join chunks together. Fields are optional \n",
|
||||
"because portions of a tool call may be streamed across different chunks (e.g., a chunk \n",
|
||||
"that includes a substring of the arguments may have null values for the tool name and id).\n",
|
||||
"\n",
|
||||
"Because message chunks inherit from their parent message class, an \n",
|
||||
"[AIMessageChunk](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessageChunk.html#langchain_core.messages.ai.AIMessageChunk) \n",
|
||||
"with tool call chunks will also include `.tool_calls` and `.invalid_tool_calls` fields. \n",
|
||||
"These fields are parsed best-effort from the message's tool call chunks.\n",
|
||||
"\n",
|
||||
"Note that not all providers currently support streaming for tool calls. Before we start let's define our tools and our model."
|
||||
]
|
||||
},
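As a rough illustration of how chunks join, here is a sketch that merges two hand-written `AIMessageChunk`s directly (the chunk values are made up; real chunks come from the streamed model output below):

```python
from langchain_core.messages import AIMessageChunk

# Two partial chunks for the same tool call: matching `index` values tell
# the merge logic to concatenate their partial `args` strings.
left = AIMessageChunk(
    content="",
    tool_call_chunks=[{"name": "multiply", "args": '{"a"', "id": "call_1", "index": 0}],
)
right = AIMessageChunk(
    content="",
    tool_call_chunks=[{"name": None, "args": ": 3}", "id": None, "index": 0}],
)

merged = left + right
print(merged.tool_call_chunks)
# approximately: [{'name': 'multiply', 'args': '{"a": 3}', 'id': 'call_1', 'index': 0, ...}]
```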
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.tools import tool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def add(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Adds a and b.\"\"\"\n",
|
||||
" return a + b\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def multiply(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Multiplies a and b.\"\"\"\n",
|
||||
" return a * b\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"tools = [add, multiply]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"from getpass import getpass\n",
|
||||
"\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)\n",
|
||||
"llm_with_tools = llm.bind_tools(tools)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now let's define our query and stream our output:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[]\n",
|
||||
"[{'name': 'Multiply', 'args': '', 'id': 'call_3aQwTP9CYlFxwOvQZPHDu6wL', 'index': 0}]\n",
|
||||
"[{'name': None, 'args': '{\"a\"', 'id': None, 'index': 0}]\n",
|
||||
"[{'name': None, 'args': ': 3, ', 'id': None, 'index': 0}]\n",
|
||||
"[{'name': None, 'args': '\"b\": 1', 'id': None, 'index': 0}]\n",
|
||||
"[{'name': None, 'args': '2}', 'id': None, 'index': 0}]\n",
|
||||
"[{'name': 'Add', 'args': '', 'id': 'call_SQUoSsJz2p9Kx2x73GOgN1ja', 'index': 1}]\n",
|
||||
"[{'name': None, 'args': '{\"a\"', 'id': None, 'index': 1}]\n",
|
||||
"[{'name': None, 'args': ': 11,', 'id': None, 'index': 1}]\n",
|
||||
"[{'name': None, 'args': ' \"b\": ', 'id': None, 'index': 1}]\n",
|
||||
"[{'name': None, 'args': '49}', 'id': None, 'index': 1}]\n",
|
||||
"[]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"query = \"What is 3 * 12? Also, what is 11 + 49?\"\n",
|
||||
"\n",
|
||||
"async for chunk in llm_with_tools.astream(query):\n",
|
||||
" print(chunk.tool_call_chunks)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Note that adding message chunks will merge their corresponding tool call chunks. This is the principle by which LangChain's various [tool output parsers](/docs/how_to/output_parser_structured) support streaming.\n",
|
||||
"\n",
|
||||
"For example, below we accumulate tool call chunks:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[]\n",
|
||||
"[{'name': 'Multiply', 'args': '', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}]\n",
|
||||
"[{'name': 'Multiply', 'args': '{\"a\"', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}]\n",
|
||||
"[{'name': 'Multiply', 'args': '{\"a\": 3, ', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}]\n",
|
||||
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 1', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}]\n",
|
||||
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 12}', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}]\n",
|
||||
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 12}', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}, {'name': 'Add', 'args': '', 'id': 'call_b4iMiB3chGNGqbt5SjqqD2Wh', 'index': 1}]\n",
|
||||
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 12}', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}, {'name': 'Add', 'args': '{\"a\"', 'id': 'call_b4iMiB3chGNGqbt5SjqqD2Wh', 'index': 1}]\n",
|
||||
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 12}', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}, {'name': 'Add', 'args': '{\"a\": 11,', 'id': 'call_b4iMiB3chGNGqbt5SjqqD2Wh', 'index': 1}]\n",
|
||||
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 12}', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}, {'name': 'Add', 'args': '{\"a\": 11, \"b\": ', 'id': 'call_b4iMiB3chGNGqbt5SjqqD2Wh', 'index': 1}]\n",
|
||||
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 12}', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}, {'name': 'Add', 'args': '{\"a\": 11, \"b\": 49}', 'id': 'call_b4iMiB3chGNGqbt5SjqqD2Wh', 'index': 1}]\n",
|
||||
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 12}', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}, {'name': 'Add', 'args': '{\"a\": 11, \"b\": 49}', 'id': 'call_b4iMiB3chGNGqbt5SjqqD2Wh', 'index': 1}]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"first = True\n",
|
||||
"async for chunk in llm_with_tools.astream(query):\n",
|
||||
" if first:\n",
|
||||
" gathered = chunk\n",
|
||||
" first = False\n",
|
||||
" else:\n",
|
||||
" gathered = gathered + chunk\n",
|
||||
"\n",
|
||||
" print(gathered.tool_call_chunks)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"<class 'str'>\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(type(gathered.tool_call_chunks[0][\"args\"]))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"And below we accumulate tool calls to demonstrate partial parsing:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[]\n",
|
||||
"[]\n",
|
||||
"[{'name': 'Multiply', 'args': {}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}]\n",
|
||||
"[{'name': 'Multiply', 'args': {'a': 3}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}]\n",
|
||||
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 1}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}]\n",
|
||||
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}]\n",
|
||||
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}]\n",
|
||||
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}, {'name': 'Add', 'args': {}, 'id': 'call_54Hx3DGjZitFlEjgMe1DYonh'}]\n",
|
||||
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}, {'name': 'Add', 'args': {'a': 11}, 'id': 'call_54Hx3DGjZitFlEjgMe1DYonh'}]\n",
|
||||
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}, {'name': 'Add', 'args': {'a': 11}, 'id': 'call_54Hx3DGjZitFlEjgMe1DYonh'}]\n",
|
||||
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}, {'name': 'Add', 'args': {'a': 11, 'b': 49}, 'id': 'call_54Hx3DGjZitFlEjgMe1DYonh'}]\n",
|
||||
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}, {'name': 'Add', 'args': {'a': 11, 'b': 49}, 'id': 'call_54Hx3DGjZitFlEjgMe1DYonh'}]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"first = True\n",
|
||||
"async for chunk in llm_with_tools.astream(query):\n",
|
||||
" if first:\n",
|
||||
" gathered = chunk\n",
|
||||
" first = False\n",
|
||||
" else:\n",
|
||||
" gathered = gathered + chunk\n",
|
||||
"\n",
|
||||
" print(gathered.tool_calls)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"<class 'dict'>\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(type(gathered.tool_calls[0][\"args\"]))"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
175
docs/docs/how_to/tools_few_shot.ipynb
Normal file
175
docs/docs/how_to/tools_few_shot.ipynb
Normal file
@@ -0,0 +1,175 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to use few-shot prompting with tool calling\n",
|
||||
"\n",
|
||||
"For more complex tool use it's very useful to add few-shot examples to the prompt. We can do this by adding `AIMessage`s with `ToolCall`s and corresponding `ToolMessage`s to our prompt.\n",
|
||||
"\n",
|
||||
"First let's define our tools and model."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.tools import tool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def add(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Adds a and b.\"\"\"\n",
|
||||
" return a + b\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def multiply(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Multiplies a and b.\"\"\"\n",
|
||||
" return a * b\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"tools = [add, multiply]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"from getpass import getpass\n",
|
||||
"\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)\n",
|
||||
"llm_with_tools = llm.bind_tools(tools)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's run our model where we can notice that even with some special instructions our model can get tripped up by order of operations. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'name': 'Multiply',\n",
|
||||
" 'args': {'a': 119, 'b': 8},\n",
|
||||
" 'id': 'call_T88XN6ECucTgbXXkyDeC2CQj'},\n",
|
||||
" {'name': 'Add',\n",
|
||||
" 'args': {'a': 952, 'b': -20},\n",
|
||||
" 'id': 'call_licdlmGsRqzup8rhqJSb1yZ4'}]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_with_tools.invoke(\n",
|
||||
" \"Whats 119 times 8 minus 20. Don't do any math yourself, only use tools for math. Respect order of operations\"\n",
|
||||
").tool_calls"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The model shouldn't be trying to add anything yet, since it technically can't know the results of 119 * 8 yet.\n",
|
||||
"\n",
|
||||
"By adding a prompt with some examples we can correct this behavior:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'name': 'Multiply',\n",
|
||||
" 'args': {'a': 119, 'b': 8},\n",
|
||||
" 'id': 'call_9MvuwQqg7dlJupJcoTWiEsDo'}]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.messages import AIMessage, HumanMessage, ToolMessage\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.runnables import RunnablePassthrough\n",
|
||||
"\n",
|
||||
"examples = [\n",
|
||||
" HumanMessage(\n",
|
||||
" \"What's the product of 317253 and 128472 plus four\", name=\"example_user\"\n",
|
||||
" ),\n",
|
||||
" AIMessage(\n",
|
||||
" \"\",\n",
|
||||
" name=\"example_assistant\",\n",
|
||||
" tool_calls=[\n",
|
||||
" {\"name\": \"Multiply\", \"args\": {\"x\": 317253, \"y\": 128472}, \"id\": \"1\"}\n",
|
||||
" ],\n",
|
||||
" ),\n",
|
||||
" ToolMessage(\"16505054784\", tool_call_id=\"1\"),\n",
|
||||
" AIMessage(\n",
|
||||
" \"\",\n",
|
||||
" name=\"example_assistant\",\n",
|
||||
" tool_calls=[{\"name\": \"Add\", \"args\": {\"x\": 16505054784, \"y\": 4}, \"id\": \"2\"}],\n",
|
||||
" ),\n",
|
||||
" ToolMessage(\"16505054788\", tool_call_id=\"2\"),\n",
|
||||
" AIMessage(\n",
|
||||
" \"The product of 317253 and 128472 plus four is 16505054788\",\n",
|
||||
" name=\"example_assistant\",\n",
|
||||
" ),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"system = \"\"\"You are bad at math but are an expert at using a calculator. \n",
|
||||
"\n",
|
||||
"Use past tool usage as an example of how to correctly use the tools.\"\"\"\n",
|
||||
"few_shot_prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\"system\", system),\n",
|
||||
" *examples,\n",
|
||||
" (\"human\", \"{query}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = {\"query\": RunnablePassthrough()} | few_shot_prompt | llm_with_tools\n",
|
||||
"chain.invoke(\"Whats 119 times 8 minus 20\").tool_calls"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"And we get the correct output this time.\n",
|
||||
"\n",
|
||||
"Here's what the [LangSmith trace](https://smith.langchain.com/public/f70550a1-585f-4c9d-a643-13148ab1616f/r) looks like."
|
||||
]
|
||||
}
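To sanity-check this behavior programmatically, one could assert on the first response's tool calls, reusing `chain` from above. Model outputs are not fully deterministic, so treat this as an illustrative sketch:

```python
calls = chain.invoke("Whats 119 times 8 minus 20").tool_calls

# With the few-shot examples in place, we expect a single multiplication
# call first; the addition should wait for the multiplication result.
assert len(calls) == 1
assert calls[0]["name"] == "Multiply"
assert calls[0]["args"] == {"a": 119, "b": 8}
```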
],
|
||||
"metadata": {
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
79
docs/docs/how_to/tools_model_specific.ipynb
Normal file
79
docs/docs/how_to/tools_model_specific.ipynb
Normal file
@@ -0,0 +1,79 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to bind model-specific tools\n",
|
||||
"\n",
|
||||
"Providers adopt different conventions for formatting tool schemas. \n",
|
||||
"For instance, OpenAI uses a format like this:\n",
|
||||
"\n",
|
||||
"- `type`: The type of the tool. At the time of writing, this is always `\"function\"`.\n",
|
||||
"- `function`: An object containing tool parameters.\n",
|
||||
"- `function.name`: The name of the schema to output.\n",
|
||||
"- `function.description`: A high level description of the schema to output.\n",
|
||||
"- `function.parameters`: The nested details of the schema you want to extract, formatted as a [JSON schema](https://json-schema.org/) dict.\n",
|
||||
"\n",
|
||||
"We can bind this model-specific format directly to the model as well if preferred. Here's an example:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_mn4ELw1NbuE0DFYhIeK0GrPe', 'function': {'arguments': '{\"a\":119,\"b\":8}', 'name': 'multiply'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 17, 'prompt_tokens': 62, 'total_tokens': 79}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_c2295e73ad', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-353e8a9a-7125-4f94-8c68-4f3da4c21120-0', tool_calls=[{'name': 'multiply', 'args': {'a': 119, 'b': 8}, 'id': 'call_mn4ELw1NbuE0DFYhIeK0GrPe'}])"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"model = ChatOpenAI()\n",
|
||||
"\n",
|
||||
"model_with_tools = model.bind(\n",
|
||||
" tools=[\n",
|
||||
" {\n",
|
||||
" \"type\": \"function\",\n",
|
||||
" \"function\": {\n",
|
||||
" \"name\": \"multiply\",\n",
|
||||
" \"description\": \"Multiply two integers together.\",\n",
|
||||
" \"parameters\": {\n",
|
||||
" \"type\": \"object\",\n",
|
||||
" \"properties\": {\n",
|
||||
" \"a\": {\"type\": \"number\", \"description\": \"First integer\"},\n",
|
||||
" \"b\": {\"type\": \"number\", \"description\": \"Second integer\"},\n",
|
||||
" },\n",
|
||||
" \"required\": [\"a\", \"b\"],\n",
|
||||
" },\n",
|
||||
" },\n",
|
||||
" }\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"model_with_tools.invoke(\"Whats 119 times 8?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This is functionally equivalent to the `bind_tools()` method."
|
||||
]
|
||||
}
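For comparison, a sketch of the `bind_tools()` form, assuming a `multiply` tool defined with the `@tool` decorator as in the other notebooks in this section:

```python
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI


@tool
def multiply(a: int, b: int) -> int:
    """Multiply two integers together."""
    return a * b


model = ChatOpenAI()
# bind_tools derives the same OpenAI-style function schema shown above
# from the tool's signature and docstring.
model_with_tools = model.bind_tools([multiply])
model_with_tools.invoke("Whats 119 times 8?")
```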
],
|
||||
"metadata": {
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -19,7 +19,7 @@
|
||||
"\n",
|
||||
":::{.callout-caution}\n",
|
||||
"\n",
|
||||
"Some models have been fine-tuned for tool calling and provide a dedicated API for tool calling. Generally, such models are better at tool calling than non-fine-tuned models, and are recommended for use cases that require tool calling. Please see the [how to use a chat model to call tools](/docs/how_to/tool_calling/) guide for more information.\n",
|
||||
"Some models have been fine-tuned for tool calling and provide a dedicated API for tool calling. Generally, such models are better at tool calling than non-fine-tuned models, and are recommended for use cases that require tool calling. Please see the [how to use a chat model to call tools](/docs/how_to/tool_calling) guide for more information.\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
@@ -34,7 +34,7 @@
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"In this guide, we'll see how to add **ad-hoc** tool calling support to a chat model. This is an alternative method to invoke tools if you're using a model that does not natively support [tool calling](/docs/how_to/tool_calling/).\n",
|
||||
"In this guide, we'll see how to add **ad-hoc** tool calling support to a chat model. This is an alternative method to invoke tools if you're using a model that does not natively support [tool calling](/docs/how_to/tool_calling).\n",
|
||||
"\n",
|
||||
"We'll do this by simply writing a prompt that will get the model to invoke the appropriate tools. Here's a diagram of the logic:\n",
|
||||
"\n",
|
||||
@@ -87,7 +87,7 @@
|
||||
"id": "7ec6409b-21e5-4d0a-8a46-c4ef0b055dd3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can select any of the given models for this how-to guide. Keep in mind that most of these models already [support native tool calling](/docs/integrations/chat/), so using the prompting strategy shown here doesn't make sense for these models, and instead you should follow the [how to use a chat model to call tools](/docs/how_to/tool_calling/) guide.\n",
|
||||
"You can select any of the given models for this how-to guide. Keep in mind that most of these models already [support native tool calling](/docs/integrations/chat/), so using the prompting strategy shown here doesn't make sense for these models, and instead you should follow the [how to use a chat model to call tools](/docs/how_to/tool_calling) guide.\n",
|
||||
"\n",
|
||||
"```{=mdx}\n",
|
||||
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
@@ -7,6 +7,19 @@
|
||||
"source": [
|
||||
"# How to trim messages\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
"This guide assumes familiarity with the following concepts:\n",
|
||||
"\n",
|
||||
"- [Messages](/docs/concepts/#messages)\n",
|
||||
"- [Chat models](/docs/concepts/#chat-models)\n",
|
||||
"- [Chaining](/docs/how_to/sequence/)\n",
|
||||
"- [Chat history](/docs/concepts/#chat-history)\n",
|
||||
"\n",
|
||||
"The methods in this guide also require `langchain-core>=0.2.9`.\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"All models have finite context windows, meaning there's a limit to how many tokens they can take as input. If you have very long messages or a chain/agent that accumulates a long message is history, you'll need to manage the length of the messages you're passing in to the model.\n",
|
||||
"\n",
|
||||
"The `trim_messages` util provides some basic strategies for trimming a list of messages to be of a certain token length.\n",
@@ -310,7 +323,7 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='A \"polygon\"! Because it\\'s a \"poly-gone\" silent!', response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 32, 'total_tokens': 46}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_319be4768e', 'finish_reason': 'stop', 'logprobs': None}, id='run-64cc4575-14d1-4f3f-b4af-97f24758f703-0', usage_metadata={'input_tokens': 32, 'output_tokens': 14, 'total_tokens': 46})"
|
||||
"AIMessage(content='A: A \"Polly-gone\"!', response_metadata={'token_usage': {'completion_tokens': 9, 'prompt_tokens': 32, 'total_tokens': 41}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_66b29dffce', 'finish_reason': 'stop', 'logprobs': None}, id='run-83e96ddf-bcaa-4f63-824c-98b0f8a0d474-0', usage_metadata={'input_tokens': 32, 'output_tokens': 9, 'total_tokens': 41})"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
@@ -378,24 +391,17 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"execution_count": 9,
|
||||
"id": "a9517858-fc2f-4dc3-898d-bf98a0e905a0",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Parent run c87e2f1b-81ad-4fa7-bfd9-ce6edb29a482 not found for run 7892ee8f-0669-4d6b-a2ca-ef8aae81042a. Treating as a root run.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"A polygon! Because it's a parrot gone quiet!\", response_metadata={'token_usage': {'completion_tokens': 11, 'prompt_tokens': 32, 'total_tokens': 43}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_319be4768e', 'finish_reason': 'stop', 'logprobs': None}, id='run-72dad96e-8b58-45f4-8c08-21f9f1a6b68f-0', usage_metadata={'input_tokens': 32, 'output_tokens': 11, 'total_tokens': 43})"
|
||||
"AIMessage(content='A \"polly-no-wanna-cracker\"!', response_metadata={'token_usage': {'completion_tokens': 10, 'prompt_tokens': 32, 'total_tokens': 42}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_5bf7397cd3', 'finish_reason': 'stop', 'logprobs': None}, id='run-054dd309-3497-4e7b-b22a-c1859f11d32e-0', usage_metadata={'input_tokens': 32, 'output_tokens': 10, 'total_tokens': 42})"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -409,7 +415,7 @@
|
||||
"\n",
|
||||
"def dummy_get_session_history(session_id):\n",
|
||||
" if session_id != \"1\":\n",
|
||||
" raise InMemoryChatMessageHistory()\n",
|
||||
" return InMemoryChatMessageHistory()\n",
|
||||
" return chat_history\n",
|
||||
"\n",
|
||||
"\n",
|
||||
@@ -451,9 +457,9 @@
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "poetry-venv-2",
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "poetry-venv-2"
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
@@ -465,7 +471,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.10.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
@@ -18,7 +18,9 @@
|
||||
"# ChatAI21\n",
|
||||
"\n",
|
||||
"This notebook covers how to get started with AI21 chat models.\n",
|
||||
"\n",
|
||||
"Note that different chat models support different parameters. See the ",
|
||||
"[AI21 documentation](https://docs.ai21.com/reference) to learn more about the parameters in your chosen model.\n",
|
||||
"[See all AI21's LangChain components.](https://pypi.org/project/langchain-ai21/) \n",
|
||||
"## Installation"
|
||||
]
|
||||
},
|
||||
@@ -44,7 +46,8 @@
|
||||
"source": [
|
||||
"## Environment Setup\n",
|
||||
"\n",
|
||||
"We'll need to get a [AI21 API key](https://docs.ai21.com/) and set the `AI21_API_KEY` environment variable:\n"
|
||||
"We'll need to get an [AI21 API key](https://docs.ai21.com/) and set the ",
|
||||
"`AI21_API_KEY` environment variable:\n"
|
||||
]
|
||||
},
|
||||
{
@@ -36,7 +36,7 @@
|
||||
"| [ChatAnthropic](https://api.python.langchain.com/en/latest/chat_models/langchain_anthropic.chat_models.ChatAnthropic.html) | [langchain-anthropic](https://api.python.langchain.com/en/latest/anthropic_api_reference.html) | ❌ | beta | ✅ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling/) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| ✅ | ✅ | ❌ | ✅ | ❌ | ❌ | ✅ | ✅ | ✅ | ❌ | \n",
|
||||
"\n",
|
||||
@@ -51,7 +51,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 1,
|
||||
"id": "433e8d2b-9519-4b49-b2c4-7ab65b046c94",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -59,7 +59,7 @@
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"anthropic_API_KEY\"] = getpass.getpass(\"Enter your Anthropic API key: \")"
|
||||
"os.environ[\"ANTHROPIC_API_KEY\"] = getpass.getpass(\"Enter your Anthropic API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -72,7 +72,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 2,
|
||||
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -113,7 +113,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": 4,
|
||||
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -121,7 +121,7 @@
|
||||
"from langchain_anthropic import ChatAnthropic\n",
|
||||
"\n",
|
||||
"llm = ChatAnthropic(\n",
|
||||
" model=\"claude-3-sonnet-20240229\",\n",
|
||||
" model=\"claude-3-5-sonnet-20240620\",\n",
|
||||
" temperature=0,\n",
|
||||
" max_tokens=1024,\n",
|
||||
" timeout=None,\n",
|
||||
@@ -140,7 +140,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 5,
|
||||
"id": "62e0dbc3",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -149,10 +149,10 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"Voici la traduction en français :\\n\\nJ'aime la programmation.\", response_metadata={'id': 'msg_013qztabaFADNnKsHR1rdrju', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 29, 'output_tokens': 21}}, id='run-a22ab30c-7e09-48f5-bc27-a08a9d8f7fa1-0', usage_metadata={'input_tokens': 29, 'output_tokens': 21, 'total_tokens': 50})"
|
||||
"AIMessage(content=\"J'adore la programmation.\", response_metadata={'id': 'msg_018Nnu76krRPq8HvgKLW4F8T', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 29, 'output_tokens': 11}}, id='run-57e9295f-db8a-48dc-9619-babd2bedd891-0', usage_metadata={'input_tokens': 29, 'output_tokens': 11, 'total_tokens': 40})"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -171,7 +171,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 6,
|
||||
"id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -179,9 +179,7 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Voici la traduction en français :\n",
|
||||
"\n",
|
||||
"J'aime la programmation.\n"
|
||||
"J'adore la programmation.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -201,17 +199,17 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 7,
|
||||
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Ich liebe Programmieren.', response_metadata={'id': 'msg_01FWrA8w9HbjqYPTQ7VryUnp', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 23, 'output_tokens': 11}}, id='run-b749bf20-b46d-4d62-ac73-f59adab6dd7e-0', usage_metadata={'input_tokens': 23, 'output_tokens': 11, 'total_tokens': 34})"
|
||||
"AIMessage(content=\"Here's the German translation:\\n\\nIch liebe Programmieren.\", response_metadata={'id': 'msg_01GhkRtQZUkA5Ge9hqmD8HGY', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 23, 'output_tokens': 18}}, id='run-da5906b4-b200-4e08-b81a-64d4453643b6-0', usage_metadata={'input_tokens': 23, 'output_tokens': 18, 'total_tokens': 41})"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -251,22 +249,26 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"execution_count": 8,
|
||||
"id": "4a374a24-2534-4e6f-825b-30fab7bbe0cb",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'text': \"Okay, let's use the GetWeather tool to check the current temperatures in Los Angeles and New York City.\",\n",
|
||||
"[{'text': \"To answer this question, we'll need to check the current weather in both Los Angeles (LA) and New York (NY). I'll use the GetWeather function to retrieve this information for both cities.\",\n",
|
||||
" 'type': 'text'},\n",
|
||||
" {'id': 'toolu_01Tnp5tL7LJZaVyQXKEjbqcC',\n",
|
||||
" {'id': 'toolu_01Ddzj5PkuZkrjF4tafzu54A',\n",
|
||||
" 'input': {'location': 'Los Angeles, CA'},\n",
|
||||
" 'name': 'GetWeather',\n",
|
||||
" 'type': 'tool_use'},\n",
|
||||
" {'id': 'toolu_012kz4qHZQqD4qg8sFPeKqpP',\n",
|
||||
" 'input': {'location': 'New York, NY'},\n",
|
||||
" 'name': 'GetWeather',\n",
|
||||
" 'type': 'tool_use'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -288,7 +290,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"execution_count": 9,
|
||||
"id": "6b4a1ead-952c-489f-a8d4-355d3fb55f3f",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -297,10 +299,13 @@
|
||||
"text/plain": [
|
||||
"[{'name': 'GetWeather',\n",
|
||||
" 'args': {'location': 'Los Angeles, CA'},\n",
|
||||
" 'id': 'toolu_01Tnp5tL7LJZaVyQXKEjbqcC'}]"
|
||||
" 'id': 'toolu_01Ddzj5PkuZkrjF4tafzu54A'},\n",
|
||||
" {'name': 'GetWeather',\n",
|
||||
" 'args': {'location': 'New York, NY'},\n",
|
||||
" 'id': 'toolu_012kz4qHZQqD4qg8sFPeKqpP'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -336,7 +341,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.10.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
@@ -2,7 +2,7 @@
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "641f8cb0",
|
||||
"id": "afaf8039",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
@@ -12,20 +12,89 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "38f26d7a",
|
||||
"id": "e49f1e0d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# AzureChatOpenAI\n",
|
||||
"\n",
|
||||
">[Azure OpenAI Service](https://learn.microsoft.com/en-us/azure/ai-services/openai/overview) provides REST API access to OpenAI's powerful language models including the GPT-4, GPT-3.5-Turbo, and Embeddings model series. These models can be easily adapted to your specific task including but not limited to content generation, summarization, semantic search, and natural language to code translation. Users can access the service through REST APIs, Python SDK, or a web-based interface in the Azure OpenAI Studio.\n",
|
||||
"This guide will help you get started with AzureOpenAI [chat models](/docs/concepts/#chat-models). For detailed documentation of all AzureChatOpenAI features and configurations head to the [API reference](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.azure.AzureChatOpenAI.html).\n",
|
||||
"\n",
|
||||
"This notebook goes over how to connect to an Azure-hosted OpenAI endpoint. First, we need to install the `langchain-openai` package."
|
||||
"Azure OpenAI has several chat models. You can find information about their latest models and their costs, context windows, and supported input types in the [Azure docs](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models).\n",
|
||||
"\n",
|
||||
":::info Azure OpenAI vs OpenAI\n",
|
||||
"\n",
|
||||
"Azure OpenAI refers to OpenAI models hosted on the [Microsoft Azure platform](https://azure.microsoft.com/en-us/products/ai-services/openai-service). OpenAI also provides its own model APIs. To access OpenAI services directly, use the [ChatOpenAI integration](/docs/integrations/chat/openai/).\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/v0.2/docs/integrations/chat/azure) | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| [AzureChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.azure.AzureChatOpenAI.html) | [langchain-openai](https://api.python.langchain.com/en/latest/openai_api_reference.html) | ❌ | beta | ✅ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ✅ | ✅ | ✅ | ✅ | \n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To access AzureOpenAI models you'll need to create an Azure account, create a deployment of an Azure OpenAI model, get the name and endpoint for your deployment, get an Azure OpenAI API key, and install the `langchain-openai` integration package.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"Head to the [Azure docs](https://learn.microsoft.com/en-us/azure/ai-services/openai/chatgpt-quickstart?tabs=command-line%2Cpython-new&pivots=programming-language-python) to create your deployment and generate an API key. Once you've done this set the AZURE_OPENAI_API_KEY and AZURE_OPENAI_ENDPOINT environment variables:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "d83ba7de",
|
||||
"id": "433e8d2b-9519-4b49-b2c4-7ab65b046c94",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"AZURE_OPENAI_API_KEY\"] = getpass.getpass(\"Enter your AzureOpenAI API key: \")\n",
|
||||
"os.environ[\"AZURE_OPENAI_ENDPOINT\"] = \"https://YOUR-ENDPOINT.openai.azure.com/\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "72ee0c4b-9764-423a-9dbf-95129e185210",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain AzureOpenAI integration lives in the `langchain-openai` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -34,65 +103,56 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e39133c8",
|
||||
"metadata": {
|
||||
"vscode": {
|
||||
"languageId": "raw"
|
||||
}
|
||||
},
|
||||
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Next, let's set some environment variables to help us connect to the Azure OpenAI service. You can find these values in the Azure portal."
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate chat completions.\n",
|
||||
"- Replace `azure_deployment` with the name of your deployment,\n",
|
||||
"- You can find the latest supported `api_version` here: https://learn.microsoft.com/en-us/azure/ai-services/openai/reference."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "1d8d73bd",
|
||||
"execution_count": 1,
|
||||
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"from langchain_openai import AzureChatOpenAI\n",
|
||||
"\n",
|
||||
"os.environ[\"AZURE_OPENAI_API_KEY\"] = \"...\"\n",
|
||||
"os.environ[\"AZURE_OPENAI_ENDPOINT\"] = \"https://<your-endpoint>.openai.azure.com/\"\n",
|
||||
"os.environ[\"AZURE_OPENAI_API_VERSION\"] = \"2023-06-01-preview\"\n",
|
||||
"os.environ[\"AZURE_OPENAI_CHAT_DEPLOYMENT_NAME\"] = \"chat\""
|
||||
"llm = AzureChatOpenAI(\n",
|
||||
" azure_deployment=\"YOUR-DEPLOYMENT\",\n",
|
||||
" api_version=\"2024-05-01-preview\",\n",
|
||||
" temperature=0,\n",
|
||||
" max_tokens=None,\n",
|
||||
" timeout=None,\n",
|
||||
" max_retries=2,\n",
|
||||
" # other params...\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e7b160f8",
|
||||
"id": "2b4f3e15",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Next, let's construct our model and chat with it:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "cbe4bb58-ba13-4355-8af9-cd990dc47a64",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"from langchain_openai import AzureChatOpenAI\n",
|
||||
"\n",
|
||||
"model = AzureChatOpenAI(\n",
|
||||
" openai_api_version=os.environ[\"AZURE_OPENAI_API_VERSION\"],\n",
|
||||
" azure_deployment=os.environ[\"AZURE_OPENAI_CHAT_DEPLOYMENT_NAME\"],\n",
|
||||
")"
|
||||
"## Invocation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "99509140",
|
||||
"metadata": {},
|
||||
"id": "62e0dbc3",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"J'adore programmer.\", response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 19, 'total_tokens': 25}, 'model_name': 'gpt-35-turbo', 'system_fingerprint': None, 'prompt_filter_results': [{'prompt_index': 0, 'content_filter_results': {'hate': {'filtered': False, 'severity': 'safe'}, 'self_harm': {'filtered': False, 'severity': 'safe'}, 'sexual': {'filtered': False, 'severity': 'safe'}, 'violence': {'filtered': False, 'severity': 'safe'}}}], 'finish_reason': 'stop', 'logprobs': None, 'content_filter_results': {'hate': {'filtered': False, 'severity': 'safe'}, 'self_harm': {'filtered': False, 'severity': 'safe'}, 'sexual': {'filtered': False, 'severity': 'safe'}, 'violence': {'filtered': False, 'severity': 'safe'}}}, id='run-25ed88db-38f2-4b0c-a943-a03f217711a9-0')"
|
||||
"AIMessage(content=\"J'adore la programmation.\", response_metadata={'token_usage': {'completion_tokens': 8, 'prompt_tokens': 31, 'total_tokens': 39}, 'model_name': 'gpt-35-turbo', 'system_fingerprint': None, 'prompt_filter_results': [{'prompt_index': 0, 'content_filter_results': {'hate': {'filtered': False, 'severity': 'safe'}, 'self_harm': {'filtered': False, 'severity': 'safe'}, 'sexual': {'filtered': False, 'severity': 'safe'}, 'violence': {'filtered': False, 'severity': 'safe'}}}], 'finish_reason': 'stop', 'logprobs': None, 'content_filter_results': {'hate': {'filtered': False, 'severity': 'safe'}, 'self_harm': {'filtered': False, 'severity': 'safe'}, 'sexual': {'filtered': False, 'severity': 'safe'}, 'violence': {'filtered': False, 'severity': 'safe'}}}, id='run-a6a732c2-cb02-4e50-9a9c-ab30eab034fc-0', usage_metadata={'input_tokens': 31, 'output_tokens': 8, 'total_tokens': 39})"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
@@ -101,95 +161,165 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"message = HumanMessage(\n",
|
||||
" content=\"Translate this sentence from English to French. I love programming.\"\n",
|
||||
")\n",
|
||||
"model.invoke([message])"
|
||||
"messages = [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"I love programming.\"),\n",
|
||||
"]\n",
|
||||
"ai_msg = llm.invoke(messages)\n",
|
||||
"ai_msg"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"J'adore la programmation.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(ai_msg.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f27fa24d",
|
||||
"id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Model Version\n",
|
||||
"Azure OpenAI responses contain `model` property, which is name of the model used to generate the response. However unlike native OpenAI responses, it does not contain the version of the model, which is set on the deployment in Azure. This makes it tricky to know which version of the model was used to generate the response, which as result can lead to e.g. wrong total cost calculation with `OpenAICallbackHandler`.\n",
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Ich liebe das Programmieren.', response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 26, 'total_tokens': 32}, 'model_name': 'gpt-35-turbo', 'system_fingerprint': None, 'prompt_filter_results': [{'prompt_index': 0, 'content_filter_results': {'hate': {'filtered': False, 'severity': 'safe'}, 'self_harm': {'filtered': False, 'severity': 'safe'}, 'sexual': {'filtered': False, 'severity': 'safe'}, 'violence': {'filtered': False, 'severity': 'safe'}}}], 'finish_reason': 'stop', 'logprobs': None, 'content_filter_results': {'hate': {'filtered': False, 'severity': 'safe'}, 'self_harm': {'filtered': False, 'severity': 'safe'}, 'sexual': {'filtered': False, 'severity': 'safe'}, 'violence': {'filtered': False, 'severity': 'safe'}}}, id='run-084967d7-06f2-441f-b5c1-477e2a9e9d03-0', usage_metadata={'input_tokens': 26, 'output_tokens': 6, 'total_tokens': 32})"
|
||||
]
|
||||
},
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | llm\n",
|
||||
"chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"input_language\": \"English\",\n",
|
||||
" \"output_language\": \"German\",\n",
|
||||
" \"input\": \"I love programming.\",\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Specifying model version\n",
|
||||
"\n",
|
||||
"Azure OpenAI responses contain `model_name` response metadata property, which is name of the model used to generate the response. However unlike native OpenAI responses, it does not contain the specific version of the model, which is set on the deployment in Azure. E.g. it does not distinguish between `gpt-35-turbo-0125` and `gpt-35-turbo-0301`. This makes it tricky to know which version of the model was used to generate the response, which as result can lead to e.g. wrong total cost calculation with `OpenAICallbackHandler`.\n",
|
||||
"\n",
|
||||
"To solve this problem, you can pass `model_version` parameter to `AzureChatOpenAI` class, which will be added to the model name in the llm output. This way you can easily distinguish between different versions of the model."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "0531798a",
|
||||
"execution_count": null,
|
||||
"id": "04b36e75-e8b7-4721-899e-76301ac2ecd9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.callbacks import get_openai_callback"
|
||||
"%pip install -qU langchain-community"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "aceddb72",
|
||||
"metadata": {
|
||||
"scrolled": true
|
||||
},
|
||||
"execution_count": 5,
|
||||
"id": "84c411b0-1790-4798-8bb7-47d8ece4c2dc",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Total Cost (USD): $0.000041\n"
|
||||
"Total Cost (USD): $0.000063\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"model = AzureChatOpenAI(\n",
|
||||
" openai_api_version=os.environ[\"AZURE_OPENAI_API_VERSION\"],\n",
|
||||
" azure_deployment=os.environ[\n",
|
||||
" \"AZURE_OPENAI_CHAT_DEPLOYMENT_NAME\"\n",
|
||||
" ], # in Azure, this deployment has version 0613 - input and output tokens are counted separately\n",
|
||||
")\n",
|
||||
"from langchain_community.callbacks import get_openai_callback\n",
|
||||
"\n",
|
||||
"with get_openai_callback() as cb:\n",
|
||||
" model.invoke([message])\n",
|
||||
" llm.invoke(messages)\n",
|
||||
" print(\n",
|
||||
" f\"Total Cost (USD): ${format(cb.total_cost, '.6f')}\"\n",
|
||||
" ) # without specifying the model version, flat-rate 0.002 USD per 1k input and output tokens is used"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2e61eefd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can provide the model version to `AzureChatOpenAI` constructor. It will get appended to the model name returned by Azure OpenAI and cost will be counted correctly."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "8d5e54e9",
|
||||
"execution_count": 6,
|
||||
"id": "21234693-d92b-4d69-8a7f-55aa062084bf",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Total Cost (USD): $0.000044\n"
|
||||
"Total Cost (USD): $0.000078\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"model0301 = AzureChatOpenAI(\n",
|
||||
" openai_api_version=os.environ[\"AZURE_OPENAI_API_VERSION\"],\n",
|
||||
" azure_deployment=os.environ[\"AZURE_OPENAI_CHAT_DEPLOYMENT_NAME\"],\n",
|
||||
"llm_0301 = AzureChatOpenAI(\n",
|
||||
" azure_deployment=\"YOUR-DEPLOYMENT\",\n",
|
||||
" api_version=\"2024-05-01-preview\",\n",
|
||||
" model_version=\"0301\",\n",
|
||||
")\n",
|
||||
"with get_openai_callback() as cb:\n",
|
||||
" model0301.invoke([message])\n",
|
||||
" llm_0301.invoke(messages)\n",
|
||||
" print(f\"Total Cost (USD): ${format(cb.total_cost, '.6f')}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all AzureChatOpenAI features and configurations head to the API reference: https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.azure.AzureChatOpenAI.html"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
@@ -208,7 +338,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
@@ -29,7 +29,7 @@
|
||||
"> your data using techniques such as fine-tuning and `Retrieval Augmented Generation` (`RAG`), and build \n",
|
||||
"> agents that execute tasks using your enterprise systems and data sources. Since `Amazon Bedrock` is \n",
|
||||
"> serverless, you don't have to manage any infrastructure, and you can securely integrate and deploy \n",
|
||||
"> generative AI capabilities into your applications using the AWS services you are already familiar with.\n"
|
||||
"> generative AI capabilities into your applications using the AWS services you are already familiar with."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -47,7 +47,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet langchain-aws"
|
||||
"%pip install --upgrade --quiet langchain-aws"
|
||||
]
|
||||
},
|
||||
{
429
docs/docs/integrations/chat/databricks.ipynb
Normal file
429
docs/docs/integrations/chat/databricks.ipynb
Normal file
@@ -0,0 +1,429 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"metadata": {
|
||||
"vscode": {
|
||||
"languageId": "raw"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Databricks\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# ChatDatabricks\n",
|
||||
"\n",
|
||||
"> [Databricks](https://www.databricks.com/) Lakehouse Platform unifies data, analytics, and AI on one platform. \n",
|
||||
"\n",
|
||||
"This notebook provides a quick overview for getting started with Databricks [chat models](/docs/concepts/#chat-models). For detailed documentation of all ChatDatabricks features and configurations head to the [API reference](https://api.python.langchain.com/en/latest/chat_models/langchain_community.chat_models.databricks.ChatDatabricks.html).\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"\n",
|
||||
"`ChatDatabricks` class wraps a chat model endpoint hosted on [Databricks Model Serving](https://docs.databricks.com/en/machine-learning/model-serving/index.html). This example notebook shows how to wrap your serving endpoint and use it as a chat model in your LangChain application.\n",
|
||||
"\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: |\n",
|
||||
"| [ChatDatabricks](https://api.python.langchain.com/en/latest/chat_models/langchain_community.chat_models.databricks.ChatDatabricks.html) | [langchain-community](https://api.python.langchain.com/en/latest/community_api_reference.html) | ❌ | beta |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling/) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ✅ | ✅ | ✅ | ❌ | \n",
|
||||
"\n",
|
||||
"### Supported Methods\n",
|
||||
"\n",
|
||||
"`ChatDatabricks` supports all methods of `ChatModel` including async APIs.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"### Endpoint Requirement\n",
|
||||
"\n",
|
||||
"The serving endpoint `ChatDatabricks` wraps must have OpenAI-compatible chat input/output format ([reference](https://mlflow.org/docs/latest/llms/deployments/index.html#chat)). As long as the input format is compatible, `ChatDatabricks` can be used for any endpoint type hosted on [Databricks Model Serving](https://docs.databricks.com/en/machine-learning/model-serving/index.html):\n",
|
||||
"\n",
|
||||
"1. Foundation Models - Curated list of state-of-the-art foundation models such as DRBX, Llama3, Mixtral-8x7B, and etc. These endpoint are ready to use in your Databricks workspace without any set up.\n",
|
||||
"2. Custom Models - You can also deploy custom models to a serving endpoint via MLflow with\n",
|
||||
"your choice of framework such as LangChain, Pytorch, Transformers, etc.\n",
|
||||
"3. External Models - Databricks endpoints can serve models that are hosted outside Databricks as a proxy, such as proprietary model service like OpenAI GPT4.\n"
|
||||
]
|
||||
},
|
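For reference, here is a minimal sketch of what this OpenAI-compatible chat format looks like when querying such an endpoint directly through the MLflow deployments client. The `databricks-dbrx-instruct` endpoint name matches the Foundation Models endpoint used later in this notebook; swap in your own endpoint as needed.

```python
from mlflow.deployments import get_deploy_client

# Assumes Databricks credentials are configured as described in the Setup section.
client = get_deploy_client("databricks")

response = client.predict(
    endpoint="databricks-dbrx-instruct",
    inputs={
        "messages": [{"role": "user", "content": "What is MLflow?"}],
        "max_tokens": 64,
    },
)
# Chat endpoints respond in the OpenAI schema: choices -> message -> content.
print(response["choices"][0]["message"]["content"])
```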
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"vscode": {
|
||||
"languageId": "plaintext"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To access Databricks models you'll need to create a Databricks account, set up credentials (only if you are outside Databricks workspace), and install required packages.\n",
|
||||
"\n",
|
||||
"### Credentials (only if you are outside Databricks)\n",
|
||||
"\n",
|
||||
"If you are running LangChain app inside Databricks, you can skip this step.\n",
|
||||
"\n",
|
||||
"Otherwise, you need manually set the Databricks workspace hostname and personal access token to `DATABRICKS_HOST` and `DATABRICKS_TOKEN` environment variables, respectively. See [Authentication Documentation](https://docs.databricks.com/en/dev-tools/auth/index.html#databricks-personal-access-tokens) for how to get an access token."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Enter your Databricks access token: ········\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"DATABRICKS_HOST\"] = \"https://your-workspace.cloud.databricks.com\"\n",
|
||||
"os.environ[\"DATABRICKS_TOKEN\"] = getpass.getpass(\"Enter your Databricks access token: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain Databricks integration lives in the `langchain-community` package. Also, `mlflow >= 2.9 ` is required to run the code in this notebook."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-community mlflow>=2.9.0"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We first demonstrates how to query DBRX-instruct model hosted as Foundation Models endpoint with `ChatDatabricks`.\n",
|
||||
"\n",
|
||||
"For other type of endpoints, there are some difference in how to set up the endpoint itself, however, once the endpoint is ready, there is no difference in how to query it with `ChatDatabricks`. Please refer to the bottom of this notebook for the examples with other type of endpoints."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.chat_models import ChatDatabricks\n",
|
||||
"\n",
|
||||
"chat_model = ChatDatabricks(\n",
|
||||
" endpoint=\"databricks-dbrx-instruct\",\n",
|
||||
" temperature=0.1,\n",
|
||||
" max_tokens=256,\n",
|
||||
" # See https://api.python.langchain.com/en/latest/chat_models/langchain_community.chat_models.databricks.ChatDatabricks.html for other supported parameters\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invocation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='MLflow is an open-source platform for managing end-to-end machine learning workflows. It was introduced by Databricks in 2018. MLflow provides tools for tracking experiments, packaging and sharing code, and deploying models. It is designed to work with any machine learning library and can be used in a variety of environments, including local machines, virtual machines, and cloud-based clusters. MLflow aims to streamline the machine learning development lifecycle, making it easier for data scientists and engineers to collaborate and deploy models into production.', response_metadata={'prompt_tokens': 229, 'completion_tokens': 104, 'total_tokens': 333}, id='run-d3fb4d06-3e10-4471-83c9-c282cc62b74d-0')"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chat_model.invoke(\"What is MLflow?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Databricks Model Serving is a feature of the Databricks platform that allows data scientists and engineers to easily deploy machine learning models into production. With Model Serving, you can host, manage, and serve machine learning models as APIs, making it easy to integrate them into applications and business processes. It supports a variety of popular machine learning frameworks, including TensorFlow, PyTorch, and scikit-learn, and provides tools for monitoring and managing the performance of deployed models. Model Serving is designed to be scalable, secure, and easy to use, making it a great choice for organizations that want to quickly and efficiently deploy machine learning models into production.', response_metadata={'prompt_tokens': 35, 'completion_tokens': 130, 'total_tokens': 165}, id='run-b3feea21-223e-4105-8627-41d647d5ccab-0')"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# You can also pass a list of messages\n",
|
||||
"messages = [\n",
|
||||
" (\"system\", \"You are a chatbot that can answer questions about Databricks.\"),\n",
|
||||
" (\"user\", \"What is Databricks Model Serving?\"),\n",
|
||||
"]\n",
|
||||
"chat_model.invoke(messages)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"Similar to other chat models, `ChatDatabricks` can be used as a part of a complex chain."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"Unity Catalog is a new data catalog feature in Databricks that allows you to discover, manage, and govern all your data assets across your data landscape, including data lakes, data warehouses, and data marts. It provides a centralized repository for storing and managing metadata, data lineage, and access controls for all your data assets. Unity Catalog enables data teams to easily discover and access the data they need, while ensuring compliance with data privacy and security regulations. It is designed to work seamlessly with Databricks' Lakehouse platform, providing a unified experience for managing and analyzing all your data.\", response_metadata={'prompt_tokens': 32, 'completion_tokens': 118, 'total_tokens': 150}, id='run-82d72624-f8df-4c0d-a976-919feec09a55-0')"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a chatbot that can answer questions about {topic}.\",\n",
|
||||
" ),\n",
|
||||
" (\"user\", \"{question}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | chat_model\n",
|
||||
"chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"topic\": \"Databricks\",\n",
|
||||
" \"question\": \"What is Unity Catalog?\",\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invocation (streaming)\n",
|
||||
"\n",
|
||||
"`ChatDatabricks` supports streaming response by `stream` method since `langchain-community>=0.2.1`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"I|'m| an| AI| and| don|'t| have| feelings|,| but| I|'m| here| and| ready| to| assist| you|.| How| can| I| help| you| today|?||"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"for chunk in chat_model.stream(\"How are you?\"):\n",
|
||||
" print(chunk.content, end=\"|\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Async Invocation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import asyncio\n",
|
||||
"\n",
|
||||
"country = [\"Japan\", \"Italy\", \"Australia\"]\n",
|
||||
"futures = [chat_model.ainvoke(f\"Where is the capital of {c}?\") for c in country]\n",
|
||||
"await asyncio.gather(*futures)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Wrapping Custom Model Endpoint\n",
|
||||
"\n",
|
||||
"Prerequisites:\n",
|
||||
"\n",
|
||||
"* An LLM was registered and deployed to [a Databricks serving endpoint](https://docs.databricks.com/machine-learning/model-serving/index.html) via MLflow. The endpoint must have OpenAI-compatible chat input/output format ([reference](https://mlflow.org/docs/latest/llms/deployments/index.html#chat))\n",
|
||||
"* You have [\"Can Query\" permission](https://docs.databricks.com/security/auth-authz/access-control/serving-endpoint-acl.html) to the endpoint.\n",
|
||||
"\n",
|
||||
"Once the endpoint is ready, the usage pattern is completely same as Foundation Models."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chat_model_custom = ChatDatabricks(\n",
|
||||
" endpoint=\"YOUR_ENDPOINT_NAME\",\n",
|
||||
" temperature=0.1,\n",
|
||||
" max_tokens=256,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chat_model_custom.invoke(\"How are you?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Wrapping External Models"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Prerequisite: Create Proxy Endpoint\n",
|
||||
"\n",
|
||||
"First, create a new Databricks serving endpoint that proxies requests to the target external model. The endpoint creation should be fairy quick for proxying external models.\n",
|
||||
"\n",
|
||||
"This requires registering OpenAI API Key in Databricks secret manager with the following comment:\n",
|
||||
"```sh\n",
|
||||
"# Replace `<scope>` with your scope\n",
|
||||
"databricks secrets create-scope <scope>\n",
|
||||
"databricks secrets put-secret <scope> openai-api-key --string-value $OPENAI_API_KEY\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"For how to set up Databricks CLI and manage secrets, please refer to https://docs.databricks.com/en/security/secrets/secrets.html"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from mlflow.deployments import get_deploy_client\n",
|
||||
"\n",
|
||||
"client = get_deploy_client(\"databricks\")\n",
|
||||
"\n",
|
||||
"secret = \"secrets/<scope>/openai-api-key\" # replace `<scope>` with your scope\n",
|
||||
"endpoint_name = \"my-chat\" # rename this if my-chat already exists\n",
|
||||
"client.create_endpoint(\n",
|
||||
" name=endpoint_name,\n",
|
||||
" config={\n",
|
||||
" \"served_entities\": [\n",
|
||||
" {\n",
|
||||
" \"name\": \"my-chat\",\n",
|
||||
" \"external_model\": {\n",
|
||||
" \"name\": \"gpt-3.5-turbo\",\n",
|
||||
" \"provider\": \"openai\",\n",
|
||||
" \"task\": \"llm/v1/chat\",\n",
|
||||
" \"openai_config\": {\n",
|
||||
" \"openai_api_key\": \"{{\" + secret + \"}}\",\n",
|
||||
" },\n",
|
||||
" },\n",
|
||||
" }\n",
|
||||
" ],\n",
|
||||
" },\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Once the endpoint status has become \"Ready\", you can query the endpoint in the same way as other types of endpoints."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chat_model_external = ChatDatabricks(\n",
|
||||
" endpoint=endpoint_name,\n",
|
||||
" temperature=0.1,\n",
|
||||
" max_tokens=256,\n",
|
||||
")\n",
|
||||
"chat_model_external.invoke(\"How to use Databricks?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all ChatDatabricks features and configurations head to the API reference: https://api.python.langchain.com/en/latest/chat_models/langchain_community.chat_models.ChatDatabricks.html"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.12"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
@@ -35,7 +35,7 @@
|
||||
"| [ChatVertexAI](https://api.python.langchain.com/en/latest/chat_models/langchain_google_vertexai.chat_models.ChatVertexAI.html) | [langchain-google-vertexai](https://api.python.langchain.com/en/latest/google_vertexai_api_reference.html) | ❌ | beta | ✅ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling/) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| ✅ | ✅ | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | \n",
|
||||
"\n",
|
||||
|
||||
@@ -91,7 +91,7 @@
|
||||
"\n",
|
||||
"## Tool calling\n",
|
||||
"\n",
|
||||
"Groq chat models support [tool calling](/docs/how_to/tool_calling/) to generate output matching a specific schema. The model may choose to call multiple tools or the same tool multiple times if appropriate.\n",
|
||||
"Groq chat models support [tool calling](/docs/how_to/tool_calling) to generate output matching a specific schema. The model may choose to call multiple tools or the same tool multiple times if appropriate.\n",
|
||||
"\n",
|
||||
"Here's an example:"
|
||||
]
|
||||
|
||||
585
docs/docs/integrations/chat/ibm_watsonx.ipynb
Normal file
585
docs/docs/integrations/chat/ibm_watsonx.ipynb
Normal file
@@ -0,0 +1,585 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "1c95cd76",
|
||||
"metadata": {
|
||||
"vscode": {
|
||||
"languageId": "raw"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: IBM watsonx.ai\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "70996d8a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# ChatWatsonx\n",
|
||||
"\n",
|
||||
">ChatWatsonx is a wrapper for IBM [watsonx.ai](https://www.ibm.com/products/watsonx-ai) foundation models.\n",
|
||||
"\n",
|
||||
"The aim of these examples is to show how to communicate with `watsonx.ai` models using `LangChain` LLMs API."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ef7b088a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Overview\n",
|
||||
"\n",
|
||||
"### Integration details\n",
|
||||
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/v0.2/docs/integrations/chat/openai) | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| [ChatWatsonx](https://api.python.langchain.com/en/latest/ibm_api_reference.html) | [langchain-ibm](https://api.python.langchain.com/en/latest/ibm_api_reference.html) | ❌ | ❌ | ❌ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling/) | [Structured output](/docs/how_to/structured_output/) | JSON mode | Image input | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ✅ | ❌ | ✅ | ❌ | "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f406e092",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To access IBM watsonx.ai models you'll need to create an IBM watsonx.ai account, get an API key, and install the `langchain-ibm` integration package.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"The cell below defines the credentials required to work with watsonx Foundation Model inferencing.\n",
|
||||
"\n",
|
||||
"**Action:** Provide the IBM Cloud user API key. For details, see\n",
|
||||
"[Managing user API keys](https://cloud.ibm.com/docs/account?topic=account-userapikey&interface=ui)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "11d572a1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"from getpass import getpass\n",
|
||||
"\n",
|
||||
"watsonx_api_key = getpass()\n",
|
||||
"os.environ[\"WATSONX_APIKEY\"] = watsonx_api_key"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c59782a7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Additionally you are able to pass additional secrets as an environment variable. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f98c573c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"WATSONX_URL\"] = \"your service instance url\"\n",
|
||||
"os.environ[\"WATSONX_TOKEN\"] = \"your token for accessing the CPD cluster\"\n",
|
||||
"os.environ[\"WATSONX_PASSWORD\"] = \"your password for accessing the CPD cluster\"\n",
|
||||
"os.environ[\"WATSONX_USERNAME\"] = \"your username for accessing the CPD cluster\"\n",
|
||||
"os.environ[\"WATSONX_INSTANCE_ID\"] = \"your instance_id for accessing the CPD cluster\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b3dc9176",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain IBM integration lives in the `langchain-ibm` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "387eda86",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install -qU langchain-ibm"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e36acbef",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"You might need to adjust model `parameters` for different models or tasks. For details, refer to [Available MetaNames](https://ibm.github.io/watsonx-ai-python-sdk/fm_model.html#metanames.GenTextParamsMetaNames)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "407cd500",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"parameters = {\n",
|
||||
" \"decoding_method\": \"sample\",\n",
|
||||
" \"max_new_tokens\": 100,\n",
|
||||
" \"min_new_tokens\": 1,\n",
|
||||
" \"stop_sequences\": [\".\"],\n",
|
||||
"}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2b586538",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Initialize the `WatsonxLLM` class with the previously set parameters.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"**Note**: \n",
|
||||
"\n",
|
||||
"- To provide context for the API call, you must pass the `project_id` or `space_id`. To get your project or space ID, open your project or space, go to the **Manage** tab, and click **General**. For more information see: [Project documentation](https://www.ibm.com/docs/en/watsonx-as-a-service?topic=projects) or [Deployment space documentation](https://www.ibm.com/docs/en/watsonx/saas?topic=spaces-creating-deployment).\n",
|
||||
"- Depending on the region of your provisioned service instance, use one of the urls listed in [watsonx.ai API Authentication](https://ibm.github.io/watsonx-ai-python-sdk/setup_cloud.html#authentication).\n",
|
||||
"\n",
|
||||
"In this example, we’ll use the `project_id` and Dallas URL.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"You need to specify the `model_id` that will be used for inferencing. You can find the list of all the available models in [Supported foundation models](https://ibm.github.io/watsonx-ai-python-sdk/fm_model.html#ibm_watsonx_ai.foundation_models.utils.enums.ModelTypes)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "98371396",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_ibm import ChatWatsonx\n",
|
||||
"\n",
|
||||
"chat = ChatWatsonx(\n",
|
||||
" model_id=\"ibm/granite-13b-chat-v2\",\n",
|
||||
" url=\"https://us-south.ml.cloud.ibm.com\",\n",
|
||||
" project_id=\"PASTE YOUR PROJECT_ID HERE\",\n",
|
||||
" params=parameters,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2202f4e0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Alternatively, you can use Cloud Pak for Data credentials. For details, see [watsonx.ai software setup](https://ibm.github.io/watsonx-ai-python-sdk/setup_cpd.html). "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "243ecccb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chat = ChatWatsonx(\n",
|
||||
" model_id=\"ibm/granite-13b-chat-v2\",\n",
|
||||
" url=\"PASTE YOUR URL HERE\",\n",
|
||||
" username=\"PASTE YOUR USERNAME HERE\",\n",
|
||||
" password=\"PASTE YOUR PASSWORD HERE\",\n",
|
||||
" instance_id=\"openshift\",\n",
|
||||
" version=\"4.8\",\n",
|
||||
" project_id=\"PASTE YOUR PROJECT_ID HERE\",\n",
|
||||
" params=parameters,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "96ed13d4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Instead of `model_id`, you can also pass the `deployment_id` of the previously tuned model. The entire model tuning workflow is described in [Working with TuneExperiment and PromptTuner](https://ibm.github.io/watsonx-ai-python-sdk/pt_working_with_class_and_prompt_tuner.html)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "08e66c88",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chat = ChatWatsonx(\n",
|
||||
" deployment_id=\"PASTE YOUR DEPLOYMENT_ID HERE\",\n",
|
||||
" url=\"https://us-south.ml.cloud.ibm.com\",\n",
|
||||
" project_id=\"PASTE YOUR PROJECT_ID HERE\",\n",
|
||||
" params=parameters,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f571001d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invocation\n",
|
||||
"\n",
|
||||
"To obtain completions, you can call the model directly using a string prompt."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"id": "beea2b5b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"Je t'aime pour écouter la Rock.\", response_metadata={'token_usage': {'generated_token_count': 12, 'input_token_count': 28}, 'model_name': 'ibm/granite-13b-chat-v2', 'system_fingerprint': '', 'finish_reason': 'stop_sequence'}, id='run-05b305ce-5401-4a10-b557-41a4b15c7f6f-0')"
|
||||
]
|
||||
},
|
||||
"execution_count": 22,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Invocation\n",
|
||||
"\n",
|
||||
"messages = [\n",
|
||||
" (\"system\", \"You are a helpful assistant that translates English to French.\"),\n",
|
||||
" (\n",
|
||||
" \"human\",\n",
|
||||
" \"I love you for listening to Rock.\",\n",
|
||||
" ),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"chat.invoke(messages)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 41,
|
||||
"id": "8ab1a25a",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Sure, I can help you with that! Horses are large, powerful mammals that belong to the family Equidae.', response_metadata={'token_usage': {'generated_token_count': 24, 'input_token_count': 24}, 'model_name': 'ibm/granite-13b-chat-v2', 'system_fingerprint': '', 'finish_reason': 'stop_sequence'}, id='run-391776ff-3b38-4768-91e8-ff64177149e5-0')"
|
||||
]
|
||||
},
|
||||
"execution_count": 41,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Invocation multiple chat\n",
|
||||
"from langchain_core.messages import (\n",
|
||||
" HumanMessage,\n",
|
||||
" SystemMessage,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"system_message = SystemMessage(\n",
|
||||
" content=\"You are a helpful assistant which telling short-info about provided topic.\"\n",
|
||||
")\n",
|
||||
"human_message = HumanMessage(content=\"horse\")\n",
|
||||
"\n",
|
||||
"chat.invoke([system_message, human_message])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "20e4b568",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"Create `ChatPromptTemplate` objects which will be responsible for creating a random question."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"id": "dd919925",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"system = (\n",
|
||||
" \"You are a helpful assistant that translates {input_language} to {output_language}.\"\n",
|
||||
")\n",
|
||||
"human = \"{input}\"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages([(\"system\", system), (\"human\", human)])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1a013a53",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Provide a inputs and run the chain."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"id": "68160377",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Ich liebe Python.', response_metadata={'token_usage': {'generated_token_count': 5, 'input_token_count': 23}, 'model_name': 'ibm/granite-13b-chat-v2', 'system_fingerprint': '', 'finish_reason': 'stop_sequence'}, id='run-1b1ccf5d-0e33-46f2-a087-e2a136ba1fb7-0')"
|
||||
]
|
||||
},
|
||||
"execution_count": 18,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain = prompt | chat\n",
|
||||
"chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"input_language\": \"English\",\n",
|
||||
" \"output_language\": \"German\",\n",
|
||||
" \"input\": \"I love Python\",\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d2c9da33",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Streaming the Model output \n",
|
||||
"\n",
|
||||
"You can stream the model output."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "3f63166a",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"The moon is a natural satellite of the Earth, and it has been a source of fascination for humans for centuries."
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"system_message = SystemMessage(\n",
|
||||
" content=\"You are a helpful assistant which telling short-info about provided topic.\"\n",
|
||||
")\n",
|
||||
"human_message = HumanMessage(content=\"moon\")\n",
|
||||
"\n",
|
||||
"for chunk in chat.stream([system_message, human_message]):\n",
|
||||
" print(chunk.content, end=\"\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5a7a2aa1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Batch the Model output \n",
|
||||
"\n",
|
||||
"You can batch the model output."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 32,
|
||||
"id": "9e948729",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[AIMessage(content='Cats are domestic animals that belong to the Felidae family.', response_metadata={'token_usage': {'generated_token_count': 13, 'input_token_count': 24}, 'model_name': 'ibm/granite-13b-chat-v2', 'system_fingerprint': '', 'finish_reason': 'stop_sequence'}, id='run-71a8bd7a-a1aa-497b-9bdd-a4d6fe1d471a-0'),\n",
|
||||
" AIMessage(content='Dogs are domesticated mammals of the family Canidae, characterized by their adaptability to various environments and social structures.', response_metadata={'token_usage': {'generated_token_count': 24, 'input_token_count': 24}, 'model_name': 'ibm/granite-13b-chat-v2', 'system_fingerprint': '', 'finish_reason': 'stop_sequence'}, id='run-22b7a0cb-e44a-4b68-9921-872f82dcd82b-0')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 32,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"message_1 = [\n",
|
||||
" SystemMessage(\n",
|
||||
" content=\"You are a helpful assistant which telling short-info about provided topic.\"\n",
|
||||
" ),\n",
|
||||
" HumanMessage(content=\"cat\"),\n",
|
||||
"]\n",
|
||||
"message_2 = [\n",
|
||||
" SystemMessage(\n",
|
||||
" content=\"You are a helpful assistant which telling short-info about provided topic.\"\n",
|
||||
" ),\n",
|
||||
" HumanMessage(content=\"dog\"),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"chat.batch([message_1, message_2])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c739e1fe",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Tool calling\n",
|
||||
"\n",
|
||||
"### ChatWatsonx.bind_tools()\n",
|
||||
"\n",
|
||||
"Please note that `ChatWatsonx.bind_tools` is on beta state, so right now we only support `mistralai/mixtral-8x7b-instruct-v01` model.\n",
|
||||
"\n",
|
||||
"You should also redefine `max_new_tokens` parameter to get the entire model response. By default `max_new_tokens` is set ot 20."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "328fce76",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_ibm import ChatWatsonx\n",
|
||||
"\n",
|
||||
"parameters = {\"max_new_tokens\": 200}\n",
|
||||
"\n",
|
||||
"chat = ChatWatsonx(\n",
|
||||
" model_id=\"mistralai/mixtral-8x7b-instruct-v01\",\n",
|
||||
" url=\"https://us-south.ml.cloud.ibm.com\",\n",
|
||||
" project_id=\"PASTE YOUR PROJECT_ID HERE\",\n",
|
||||
" params=parameters,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "e1633a73",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class GetWeather(BaseModel):\n",
|
||||
" \"\"\"Get the current weather in a given location\"\"\"\n",
|
||||
"\n",
|
||||
" location: str = Field(..., description=\"The city and state, e.g. San Francisco, CA\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"llm_with_tools = chat.bind_tools([GetWeather])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "3bf9b8ab",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='', additional_kwargs={'function_call': {'type': 'function'}, 'tool_calls': [{'type': 'function', 'function': {'name': 'GetWeather', 'arguments': '{\"location\": \"Los Angeles\"}'}, 'id': None}, {'type': 'function', 'function': {'name': 'GetWeather', 'arguments': '{\"location\": \"New York\"}'}, 'id': None}]}, response_metadata={'token_usage': {'generated_token_count': 99, 'input_token_count': 320}, 'model_name': 'mistralai/mixtral-8x7b-instruct-v01', 'system_fingerprint': '', 'finish_reason': 'eos_token'}, id='run-38627104-f2ac-4edb-8390-d5425fb65979-0', tool_calls=[{'name': 'GetWeather', 'args': {'location': 'Los Angeles'}, 'id': None}, {'name': 'GetWeather', 'args': {'location': 'New York'}, 'id': None}])"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"ai_msg = llm_with_tools.invoke(\n",
|
||||
" \"Which city is hotter today: LA or NY?\",\n",
|
||||
")\n",
|
||||
"ai_msg"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ba03dbf4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### AIMessage.tool_calls\n",
|
||||
"Notice that the AIMessage has a `tool_calls` attribute. This contains in a standardized ToolCall format that is model-provider agnostic."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "38f10ba7",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'name': 'GetWeather', 'args': {'location': 'Los Angeles'}, 'id': None},\n",
|
||||
" {'name': 'GetWeather', 'args': {'location': 'New York'}, 'id': None}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"ai_msg.tool_calls"
|
||||
]
|
||||
},
|
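As a sketch of how these standardized tool calls might be acted on, you can dispatch each call to a local Python function and return the results as `ToolMessage`s. The `get_weather` implementation below is hypothetical:

```python
from langchain_core.messages import ToolMessage


# Hypothetical local implementation of the GetWeather tool.
def get_weather(location: str) -> str:
    return f"The weather in {location} is sunny."


messages = [ai_msg]
for tool_call in ai_msg.tool_calls:
    # Run the tool with the arguments the model produced.
    result = get_weather(**tool_call["args"])
    # This model returned `id=None`, so fall back to the tool name as the id.
    messages.append(
        ToolMessage(content=result, tool_call_id=tool_call["id"] or tool_call["name"])
    )
```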
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9ee72a59",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all IBM watsonx.ai features and configurations head to the API reference: https://api.python.langchain.com/en/latest/ibm_api_reference.html"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.13"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -21,7 +21,7 @@
|
||||
"| [ChatLlamaCpp](https://api.python.langchain.com/en/latest/chat_models/langchain_community.chat_models.llamacpp.ChatLlamaCpp.html) | [langchain-community](https://api.python.langchain.com/en/latest/community_api_reference.html) | ✅ | ❌ | ❌ |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling/) | [Structured output](/docs/how_to/structured_output/) | JSON mode | Image input | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | Image input | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ✅ | ❌ | ❌ | ✅ | \n",
|
||||
"\n",
|
||||
|
||||
190
docs/docs/integrations/chat/oci_generative_ai.ipynb
Normal file
190
docs/docs/integrations/chat/oci_generative_ai.ipynb
Normal file
@@ -0,0 +1,190 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: OCIGenAI\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e49f1e0d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# ChatOCIGenAI\n",
|
||||
"\n",
|
||||
"This notebook provides a quick overview for getting started with OCIGenAI [chat models](/docs/concepts/#chat-models). For detailed documentation of all ChatOCIGenAI features and configurations head to the [API reference](https://api.python.langchain.com/en/latest/chat_models/langchain_community.chat_models.oci_generative_ai.ChatOCIGenAI.html).\n",
|
||||
"\n",
|
||||
"Oracle Cloud Infrastructure (OCI) Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases, and which is available through a single API.\n",
|
||||
"Using the OCI Generative AI service you can access ready-to-use pretrained models, or create and host your own fine-tuned custom models based on your own data on dedicated AI clusters. Detailed documentation of the service and API is available __[here](https://docs.oracle.com/en-us/iaas/Content/generative-ai/home.htm)__ and __[here](https://docs.oracle.com/en-us/iaas/api/#/en/generative-ai/20231130/)__.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/v0.2/docs/integrations/chat/oci_generative_ai) | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| [ChatOCIGenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_community.chat_models.oci_generative_ai.ChatOCIGenAI.html) | [langchain-community](https://api.python.langchain.com/en/latest/community_api_reference.html) | ❌ | ❌ | ❌ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling/) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ✅ | ❌ | ✅ | ❌ | \n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To access OCIGenAI models you'll need to install the `oci` and `langchain-community` packages.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"The credentials and authentication methods supported for this integration are equivalent to those used with other OCI services and follow the __[standard SDK authentication](https://docs.oracle.com/en-us/iaas/Content/API/Concepts/sdk_authentication_methods.htm)__ methods, specifically API Key, session token, instance principal, and resource principal.\n",
|
||||
"\n",
|
||||
"API key is the default authentication method used in the examples above. The following example demonstrates how to use a different authentication method (session token)"
|
||||
]
|
||||
},
|
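A minimal sketch of session token authentication, assuming you have created a session-token profile with the OCI CLI (the profile name below is a placeholder):

```python
from langchain_community.chat_models.oci_generative_ai import ChatOCIGenAI

# Session token auth instead of the default API key.
chat = ChatOCIGenAI(
    model_id="cohere.command-r-16k",
    service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
    compartment_id="MY_OCID",
    auth_type="SECURITY_TOKEN",
    auth_profile="MY_PROFILE",  # placeholder: profile name from ~/.oci/config
)
```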
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain OCIGenAI integration lives in the `langchain-community` package and you will also need to install the `oci` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-community oci"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate chat completions:\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.chat_models.oci_generative_ai import ChatOCIGenAI\n",
|
||||
"from langchain_core.messages import AIMessage, HumanMessage, SystemMessage\n",
|
||||
"\n",
|
||||
"chat = ChatOCIGenAI(\n",
|
||||
" model_id=\"cohere.command-r-16k\",\n",
|
||||
" service_endpoint=\"https://inference.generativeai.us-chicago-1.oci.oraclecloud.com\",\n",
|
||||
" compartment_id=\"MY_OCID\",\n",
|
||||
" model_kwargs={\"temperature\": 0.7, \"max_tokens\": 500},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2b4f3e15",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invocation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "62e0dbc3",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"messages = [\n",
|
||||
" SystemMessage(content=\"your are an AI assistant.\"),\n",
|
||||
" AIMessage(content=\"Hi there human!\"),\n",
|
||||
" HumanMessage(content=\"tell me a joke.\"),\n",
|
||||
"]\n",
|
||||
"response = chat.invoke(messages)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(response.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_template(\"Tell me a joke about {topic}\")\n",
|
||||
"chain = prompt | chat\n",
|
||||
"\n",
|
||||
"response = chain.invoke({\"topic\": \"dogs\"})\n",
|
||||
"print(response.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all ChatOCIGenAI features and configurations head to the API reference: https://api.python.langchain.com/en/latest/chat_models/langchain_community.chat_models.oci_generative_ai.ChatOCIGenAI.html"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -20,6 +20,12 @@
|
||||
"Note that more powerful and capable models will perform better with complex schema and/or multiple functions. The examples below use llama3 and phi3 models.\n",
|
||||
"For a complete list of supported models and model variants, see the [Ollama model library](https://ollama.ai/library).\n",
|
||||
"\n",
|
||||
":::warning\n",
|
||||
"\n",
|
||||
"This is an experimental wrapper that attempts to bolt-on tool calling support to models that do not natively support it. Use with caution.\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"Follow [these instructions](https://github.com/jmorganca/ollama) to set up and run a local Ollama instance.\n",
|
||||
|
||||
@@ -41,7 +41,7 @@
|
||||
"| [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html) | [langchain-openai](https://api.python.langchain.com/en/latest/openai_api_reference.html) | ❌ | beta | ✅ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling/) | [Structured output](/docs/how_to/structured_output/) | JSON mode | Image input | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | Image input | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ✅ | ✅ | ✅ | ✅ | \n",
|
||||
"\n",
|
||||
@@ -426,7 +426,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
"\n",
|
||||
"Headless mode means that the browser is running without a graphical user interface.\n",
|
||||
"\n",
|
||||
"`AsyncChromiumLoader` loads the page, and then we use `Html2TextTransformer` to transform to text."
|
||||
"In the below example we'll use the `AsyncChromiumLoader` to loads the page, and then the [`Html2TextTransformer`](/docs/integrations/document_transformers/html2text/) to strip out the HTML tags and other semantic information."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -23,48 +23,22 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet playwright beautifulsoup4\n",
|
||||
"%pip install --upgrade --quiet playwright beautifulsoup4 html2text\n",
|
||||
"!playwright install"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "dd2cdea7",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'<!DOCTYPE html><html lang=\"en\"><head><script src=\"https://s0.2mdn.net/instream/video/client.js\" asyn'"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import AsyncChromiumLoader\n",
|
||||
"\n",
|
||||
"urls = [\"https://www.wsj.com\"]\n",
|
||||
"loader = AsyncChromiumLoader(urls, user_agent=\"MyAppUserAgent\")\n",
|
||||
"docs = loader.load()\n",
|
||||
"docs[0].page_content[0:100]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c64e7df9",
|
||||
"id": "00487c0f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If you are using Jupyter notebooks, you might need to apply `nest_asyncio` before loading the documents."
|
||||
"**Note:** If you are using Jupyter notebooks, you might also need to install and apply `nest_asyncio` before loading the documents like this:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "5f2fe3c0",
|
||||
"id": "d374eef4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -74,6 +48,40 @@
|
||||
"nest_asyncio.apply()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "dd2cdea7",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'<!DOCTYPE html><html lang=\"en\" dir=\"ltr\" class=\"docs-wrapper docs-doc-page docs-version-2.0 plugin-d'"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import AsyncChromiumLoader\n",
|
||||
"\n",
|
||||
"urls = [\"https://docs.smith.langchain.com/\"]\n",
|
||||
"loader = AsyncChromiumLoader(urls, user_agent=\"MyAppUserAgent\")\n",
|
||||
"docs = loader.load()\n",
|
||||
"docs[0].page_content[0:100]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7eb5e6aa",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now let's transform the documents into a more readable syntax using the transformer:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
@@ -83,7 +91,7 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"Skip to Main ContentSkip to SearchSkip to... Select * Top News * What's News *\\nFeatured Stories * Retirement * Life & Arts * Hip-Hop * Sports * Video *\\nEconomy * Real Estate * Sports * CMO * CIO * CFO * Risk & Compliance *\\nLogistics Report * Sustainable Business * Heard on the Street * Barron’s *\\nMarketWatch * Mansion Global * Penta * Opinion * Journal Reports * Sponsored\\nOffers Explore Our Brands * WSJ * * * * * Barron's * * * * * MarketWatch * * *\\n* * IBD # The Wall Street Journal SubscribeSig\""
|
||||
"'Skip to main content\\n\\nGo to API Docs\\n\\nSearch`⌘``K`\\n\\nGo to App\\n\\n * Quick start\\n * Tutorials\\n\\n * How-to guides\\n\\n * Concepts\\n\\n * Reference\\n\\n * Pricing\\n * Self-hosting\\n\\n * LangGraph Cloud\\n\\n * * Quick start\\n\\nOn this page\\n\\n# Get started with LangSmith\\n\\n**LangSmith** is a platform for building production-grade LLM applications. It\\nallows you to closely monitor and evaluate your application, so you can ship\\nquickly and with confidence. Use of LangChain is not necessary - LangSmith\\nworks on it'"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
@@ -116,7 +124,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.16"
|
||||
"version": "3.10.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -12,35 +12,50 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "e6616e3a",
|
||||
"execution_count": null,
|
||||
"id": "0b01ee46",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import UnstructuredExcelLoader"
|
||||
"%pip install --upgrade --quiet langchain-community unstructured openpyxl"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 6,
|
||||
"id": "a654e4d9",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"4\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Document(page_content='\\n \\n \\n Team\\n Location\\n Stanley Cups\\n \\n \\n Blues\\n STL\\n 1\\n \\n \\n Flyers\\n PHI\\n 2\\n \\n \\n Maple Leafs\\n TOR\\n 13\\n \\n \\n', metadata={'source': 'example_data/stanley-cups.xlsx', 'filename': 'stanley-cups.xlsx', 'file_directory': 'example_data', 'filetype': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', 'page_number': 1, 'page_name': 'Stanley Cups', 'text_as_html': '<table border=\"1\" class=\"dataframe\">\\n <tbody>\\n <tr>\\n <td>Team</td>\\n <td>Location</td>\\n <td>Stanley Cups</td>\\n </tr>\\n <tr>\\n <td>Blues</td>\\n <td>STL</td>\\n <td>1</td>\\n </tr>\\n <tr>\\n <td>Flyers</td>\\n <td>PHI</td>\\n <td>2</td>\\n </tr>\\n <tr>\\n <td>Maple Leafs</td>\\n <td>TOR</td>\\n <td>13</td>\\n </tr>\\n </tbody>\\n</table>', 'category': 'Table'})"
|
||||
"[Document(page_content='Stanley Cups', metadata={'source': 'example_data/stanley-cups.xlsx', 'file_directory': 'example_data', 'filename': 'stanley-cups.xlsx', 'last_modified': '2023-12-19T13:42:18', 'page_name': 'Stanley Cups', 'page_number': 1, 'languages': ['eng'], 'filetype': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', 'category': 'Title'}),\n",
|
||||
" Document(page_content='\\n\\n\\nTeam\\nLocation\\nStanley Cups\\n\\n\\nBlues\\nSTL\\n1\\n\\n\\nFlyers\\nPHI\\n2\\n\\n\\nMaple Leafs\\nTOR\\n13\\n\\n\\n', metadata={'source': 'example_data/stanley-cups.xlsx', 'file_directory': 'example_data', 'filename': 'stanley-cups.xlsx', 'last_modified': '2023-12-19T13:42:18', 'page_name': 'Stanley Cups', 'page_number': 1, 'text_as_html': '<table border=\"1\" class=\"dataframe\">\\n <tbody>\\n <tr>\\n <td>Team</td>\\n <td>Location</td>\\n <td>Stanley Cups</td>\\n </tr>\\n <tr>\\n <td>Blues</td>\\n <td>STL</td>\\n <td>1</td>\\n </tr>\\n <tr>\\n <td>Flyers</td>\\n <td>PHI</td>\\n <td>2</td>\\n </tr>\\n <tr>\\n <td>Maple Leafs</td>\\n <td>TOR</td>\\n <td>13</td>\\n </tr>\\n </tbody>\\n</table>', 'languages': ['eng'], 'parent_id': '17e9a90f9616f2abed8cf32b5bd3810d', 'filetype': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', 'category': 'Table'}),\n",
|
||||
" Document(page_content='Stanley Cups Since 67', metadata={'source': 'example_data/stanley-cups.xlsx', 'file_directory': 'example_data', 'filename': 'stanley-cups.xlsx', 'last_modified': '2023-12-19T13:42:18', 'page_name': 'Stanley Cups Since 67', 'page_number': 2, 'languages': ['eng'], 'filetype': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', 'category': 'Title'}),\n",
" Document(page_content='\\n\\n\\nTeam\\nLocation\\nStanley Cups\\n\\n\\nBlues\\nSTL\\n1\\n\\n\\nFlyers\\nPHI\\n2\\n\\n\\nMaple Leafs\\nTOR\\n0\\n\\n\\n', metadata={'source': 'example_data/stanley-cups.xlsx', 'file_directory': 'example_data', 'filename': 'stanley-cups.xlsx', 'last_modified': '2023-12-19T13:42:18', 'page_name': 'Stanley Cups Since 67', 'page_number': 2, 'text_as_html': '<table border=\"1\" class=\"dataframe\">\\n <tbody>\\n <tr>\\n <td>Team</td>\\n <td>Location</td>\\n <td>Stanley Cups</td>\\n </tr>\\n <tr>\\n <td>Blues</td>\\n <td>STL</td>\\n <td>1</td>\\n </tr>\\n <tr>\\n <td>Flyers</td>\\n <td>PHI</td>\\n <td>2</td>\\n </tr>\\n <tr>\\n <td>Maple Leafs</td>\\n <td>TOR</td>\\n <td>0</td>\\n </tr>\\n </tbody>\\n</table>', 'languages': ['eng'], 'parent_id': 'ee34bd8c186b57e3530d5443ffa58122', 'filetype': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', 'category': 'Table'})]"
]
},
"execution_count": 2,
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_community.document_loaders import UnstructuredExcelLoader\n",
"\n",
"loader = UnstructuredExcelLoader(\"example_data/stanley-cups.xlsx\", mode=\"elements\")\n",
"docs = loader.load()\n",
"docs[0]"
"\n",
"print(len(docs))\n",
"\n",
"docs"
]
},
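The new cell above loads the workbook in "elements" mode, which yields one Document per detected element (titles and tables) plus metadata such as `text_as_html`. For contrast, a minimal sketch of the default "single" mode, assuming the same example file, would return the whole workbook as one Document:

```python
from langchain_community.document_loaders import UnstructuredExcelLoader

# Sketch, not part of the diff: default mode returns a single Document
# for the file, without the per-element metadata shown above.
loader = UnstructuredExcelLoader("example_data/stanley-cups.xlsx")
docs = loader.load()
print(len(docs))
```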
{
@@ -76,7 +91,7 @@
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-community azure-ai-documentintelligence"
"%pip install --upgrade --quiet langchain langchain-community azure-ai-documentintelligence"
]
},
{
@@ -115,7 +130,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.13"
"version": "3.10.5"
}
},
"nbformat": 4,

@@ -12,6 +12,19 @@
"This covers how to load `Microsoft PowerPoint` documents into a document format that we can use downstream."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "aef1500f",
"metadata": {},
"outputs": [],
"source": [
"# Install packages\n",
"%pip install unstructured\n",
"%pip install python-magic\n",
"%pip install python-pptx"
]
},
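With the packages above installed, a minimal usage sketch for the loader this notebook documents might look like the following; the file path is a placeholder:

```python
from langchain_community.document_loaders import UnstructuredPowerPointLoader

# Hypothetical example file; substitute a real .pptx path.
loader = UnstructuredPowerPointLoader("example_data/fake-power-point.pptx")
docs = loader.load()
print(docs[0].page_content[:100])
```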
{
"cell_type": "code",
"execution_count": 1,

@@ -15,16 +15,24 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 1,
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"%pip install --upgrade --quiet doctran"
]
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
@@ -34,7 +42,7 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 3,
"metadata": {},
"outputs": [
|
||||
{
|
||||
@@ -43,7 +51,7 @@
|
||||
"True"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -64,7 +72,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -107,7 +115,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -119,13 +127,13 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Output\n",
|
||||
"## Output using Sync version\n",
|
||||
"After translating a document, the result will be returned as a new document with the page_content translated into the target language"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -134,7 +142,82 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Documento Confidencial - Solo para Uso Interno\n",
|
||||
"\n",
|
||||
"Fecha: 1 de Julio de 2023\n",
|
||||
"\n",
|
||||
"Asunto: Actualizaciones y Discusiones sobre Varios Temas\n",
|
||||
"\n",
|
||||
"Estimado Equipo,\n",
|
||||
"\n",
|
||||
"Espero que este correo electrónico los encuentre bien. En este documento, me gustaría proporcionarles algunas actualizaciones importantes y discutir varios temas que requieren nuestra atención. Por favor, traten la información contenida aquí como altamente confidencial.\n",
"\n",
|
||||
"Medidas de Seguridad y Privacidad\n",
|
||||
"Como parte de nuestro compromiso continuo para garantizar la seguridad y privacidad de los datos de nuestros clientes, hemos implementado medidas sólidas en todos nuestros sistemas. Nos gustaría elogiar a John Doe (email: john.doe@example.com) del departamento de TI por su trabajo diligente en mejorar nuestra seguridad de red. En adelante, recordamos amablemente a todos que se adhieran estrictamente a nuestras políticas y pautas de protección de datos. Además, si encuentran algún riesgo o incidente de seguridad potencial, por favor repórtenlo inmediatamente a nuestro equipo dedicado en security@example.com.\n",
"\n",
|
||||
"Actualizaciones de Recursos Humanos y Beneficios para Empleados\n",
|
||||
"Recientemente, dimos la bienvenida a varios nuevos miembros del equipo que han hecho contribuciones significativas a sus respectivos departamentos. Me gustaría reconocer a Jane Smith (SSN: 049-45-5928) por su destacado desempeño en servicio al cliente. Jane ha recibido consistentemente comentarios positivos de nuestros clientes. Además, recuerden que el período de inscripción abierta para nuestro programa de beneficios para empleados se acerca rápidamente. Si tienen alguna pregunta o requieren asistencia, por favor contacten a nuestro representante de Recursos Humanos, Michael Johnson (teléfono: 418-492-3850, email: michael.johnson@example.com).\n",
"\n",
|
||||
"Iniciativas y Campañas de Marketing\n",
|
||||
"Nuestro equipo de marketing ha estado trabajando activamente en el desarrollo de nuevas estrategias para aumentar el conocimiento de la marca y fomentar la participación de los clientes. Nos gustaría agradecer a Sarah Thompson (teléfono: 415-555-1234) por sus esfuerzos excepcionales en la gestión de nuestras plataformas de redes sociales. Sarah ha aumentado con éxito nuestra base de seguidores en un 20% solo en el último mes. Además, marquen sus calendarios para el próximo evento de lanzamiento de productos el 15 de Julio. Animamos a todos los miembros del equipo a asistir y apoyar este emocionante hito para nuestra empresa.\n",
"\n",
|
||||
"Proyectos de Investigación y Desarrollo\n",
|
||||
"En nuestra búsqueda de innovación, nuestro departamento de investigación y desarrollo ha estado trabajando incansablemente en varios proyectos. Me gustaría reconocer el trabajo excepcional de David Rodriguez (email: david.rodriguez@example.com) en su rol como líder de proyecto. Las contribuciones de David al desarrollo de nuestra tecnología de vanguardia han sido fundamentales. Además, recordamos a todos que compartan sus ideas y sugerencias para posibles nuevos proyectos durante nuestra sesión mensual de lluvia de ideas de I+D, programada para el 10 de Julio.\n",
"\n",
|
||||
"Por favor, traten la información en este documento con la máxima confidencialidad y asegúrense de que no sea compartida con personas no autorizadas. Si tienen alguna pregunta o inquietud sobre los temas discutidos, por favor no duden en comunicarse directamente conmigo.\n",
"\n",
|
||||
"Gracias por su atención, y sigamos trabajando juntos para alcanzar nuestros objetivos.\n",
|
||||
"\n",
|
||||
"Saludos cordiales,\n",
|
||||
"\n",
|
||||
"Jason Fan\n",
|
||||
"Cofundador y CEO\n",
|
||||
"Psychic\n",
|
||||
"jason@psychic.dev\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(translated_document[0].page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Output using the Async version\n",
|
||||
"\n",
|
||||
"After translating a document, the result will be returned as a new document with the page_content translated into the target language. The async version will improve performance when the documents are chunked in multiple parts. It will also make sure to return the output in the correct order."
|
||||
]
|
||||
},
|
||||
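For orientation, a condensed sketch of the sync and async paths these cells exercise; it assumes `OPENAI_API_KEY` is set, that `documents` holds the source Documents, and that the translator was configured for Spanish earlier in the notebook:

```python
from langchain_community.document_transformers import DoctranTextTranslator

# Assumed setup, mirroring the notebook's earlier cells.
qa_translator = DoctranTextTranslator(language="spanish")

# Sync path: blocks until the whole document is translated.
translated_document = qa_translator.transform_documents(documents)

# Async path: translates chunks concurrently while preserving order.
result = await qa_translator.atransform_documents(documents)
```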
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"import asyncio"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
"result = await qa_translator.atransform_documents(documents)"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
@@ -152,22 +235,22 @@
"Espero que este correo electrónico les encuentre bien. En este documento, me gustaría proporcionarles algunas actualizaciones importantes y discutir varios temas que requieren nuestra atención. Por favor, traten la información contenida aquí como altamente confidencial.\n",
"\n",
|
||||
"Medidas de Seguridad y Privacidad\n",
|
||||
"Como parte de nuestro compromiso continuo de garantizar la seguridad y privacidad de los datos de nuestros clientes, hemos implementado medidas sólidas en todos nuestros sistemas. Nos gustaría elogiar a John Doe (correo electrónico: john.doe@example.com) del departamento de TI por su diligente trabajo en mejorar nuestra seguridad de red. En el futuro, recordamos amablemente a todos que se adhieran estrictamente a nuestras políticas y pautas de protección de datos. Además, si encuentran algún riesgo o incidente de seguridad potencial, por favor, repórtelo de inmediato a nuestro equipo dedicado en security@example.com.\n",
"Como parte de nuestro compromiso continuo de garantizar la seguridad y privacidad de los datos de nuestros clientes, hemos implementado medidas sólidas en todos nuestros sistemas. Nos gustaría elogiar a John Doe (email: john.doe@example.com) del departamento de TI por su trabajo diligente en mejorar nuestra seguridad de red. En adelante, recordamos amablemente a todos que se adhieran estrictamente a nuestras políticas y pautas de protección de datos. Además, si encuentran algún riesgo o incidente de seguridad potencial, por favor repórtenlo inmediatamente a nuestro equipo dedicado en security@example.com.\n",
"\n",
|
||||
"Actualizaciones de Recursos Humanos y Beneficios para Empleados\n",
|
||||
"Recientemente, dimos la bienvenida a varios nuevos miembros del equipo que han realizado contribuciones significativas en sus respectivos departamentos. Me gustaría reconocer a Jane Smith (SSN: 049-45-5928) por su destacado desempeño en servicio al cliente. Jane ha recibido consistentemente comentarios positivos de nuestros clientes. Además, recuerden que el período de inscripción abierta para nuestro programa de beneficios para empleados se acerca rápidamente. Si tienen alguna pregunta o necesitan ayuda, por favor, contacten a nuestro representante de Recursos Humanos, Michael Johnson (teléfono: 418-492-3850, correo electrónico: michael.johnson@example.com).\n",
"Recientemente, dimos la bienvenida a varios nuevos miembros del equipo que han hecho contribuciones significativas a sus respectivos departamentos. Me gustaría reconocer a Jane Smith (SSN: 049-45-5928) por su destacado desempeño en servicio al cliente. Jane ha recibido consistentemente comentarios positivos de nuestros clientes. Además, recuerden que el período de inscripción abierta para nuestro programa de beneficios para empleados se acerca rápidamente. Si tienen alguna pregunta o requieren asistencia, por favor contacten a nuestro representante de Recursos Humanos, Michael Johnson (teléfono: 418-492-3850, email: michael.johnson@example.com).\n",
"\n",
|
||||
"Iniciativas y Campañas de Marketing\n",
|
||||
"Nuestro equipo de marketing ha estado trabajando activamente en el desarrollo de nuevas estrategias para aumentar el conocimiento de nuestra marca y fomentar la participación de los clientes. Nos gustaría agradecer a Sarah Thompson (teléfono: 415-555-1234) por sus esfuerzos excepcionales en la gestión de nuestras plataformas de redes sociales. Sarah ha logrado aumentar nuestra base de seguidores en un 20% solo en el último mes. Además, marquen sus calendarios para el próximo evento de lanzamiento de productos el 15 de Julio. Animamos a todos los miembros del equipo a asistir y apoyar este emocionante hito para nuestra empresa.\n",
"Nuestro equipo de marketing ha estado trabajando activamente en el desarrollo de nuevas estrategias para aumentar el conocimiento de la marca y fomentar la participación de los clientes. Nos gustaría agradecer a Sarah Thompson (teléfono: 415-555-1234) por sus esfuerzos excepcionales en la gestión de nuestras plataformas de redes sociales. Sarah ha aumentado con éxito nuestra base de seguidores en un 20% solo en el último mes. Además, marquen sus calendarios para el próximo evento de lanzamiento de productos el 15 de Julio. Animamos a todos los miembros del equipo a asistir y apoyar este emocionante hito para nuestra empresa.\n",
"\n",
|
||||
"Proyectos de Investigación y Desarrollo\n",
|
||||
"En nuestra búsqueda de la innovación, nuestro departamento de investigación y desarrollo ha estado trabajando incansablemente en varios proyectos. Me gustaría reconocer el trabajo excepcional de David Rodriguez (correo electrónico: david.rodriguez@example.com) en su papel de líder de proyecto. Las contribuciones de David al desarrollo de nuestra tecnología de vanguardia han sido fundamentales. Además, nos gustaría recordar a todos que compartan sus ideas y sugerencias para posibles nuevos proyectos durante nuestra sesión mensual de lluvia de ideas de I+D, programada para el 10 de Julio.\n",
"En nuestra búsqueda de innovación, nuestro departamento de investigación y desarrollo ha estado trabajando incansablemente en varios proyectos. Me gustaría reconocer el trabajo excepcional de David Rodriguez (email: david.rodriguez@example.com) en su rol como líder de proyecto. Las contribuciones de David al desarrollo de nuestra tecnología de vanguardia han sido fundamentales. Además, recordamos a todos que compartan sus ideas y sugerencias para posibles nuevos proyectos durante nuestra sesión mensual de lluvia de ideas de I+D, programada para el 10 de Julio.\n",
"\n",
|
||||
"Por favor, traten la información de este documento con la máxima confidencialidad y asegúrense de no compartirla con personas no autorizadas. Si tienen alguna pregunta o inquietud sobre los temas discutidos, por favor, no duden en comunicarse directamente conmigo.\n",
"Por favor, traten la información en este documento con la máxima confidencialidad y asegúrense de que no sea compartida con personas no autorizadas. Si tienen alguna pregunta o inquietud sobre los temas discutidos, por favor no duden en comunicarse directamente conmigo.\n",
"\n",
|
||||
"Gracias por su atención y sigamos trabajando juntos para alcanzar nuestros objetivos.\n",
|
||||
"Gracias por su atención, y sigamos trabajando juntos para alcanzar nuestros objetivos.\n",
|
||||
"\n",
|
||||
"Atentamente,\n",
|
||||
"Saludos cordiales,\n",
|
||||
"\n",
|
||||
"Jason Fan\n",
|
||||
"Cofundador y CEO\n",
|
||||
@@ -177,7 +260,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(translated_document[0].page_content)"
|
||||
"print(result[0].page_content)"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -197,7 +280,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.5"
|
||||
"version": "3.11.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -19,12 +19,12 @@
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet html2text"
"%pip install --upgrade --quiet html2text"
]
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 2,
"id": "8ca0974b",
"metadata": {},
"outputs": [
@@ -32,7 +32,8 @@
"name": "stderr",
"output_type": "stream",
"text": [
"Fetching pages: 100%|############| 2/2 [00:00<00:00, 10.75it/s]\n"
"USER_AGENT environment variable not set, consider setting it to identify your requests.\n",
"Fetching pages: 100%|##########| 2/2 [00:00<00:00, 14.74it/s]\n"
]
}
],
@@ -46,66 +47,107 @@
},
{
"cell_type": "code",
"execution_count": 1,
"id": "ddf2be97",
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.document_transformers import Html2TextTransformer"
]
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 3,
"id": "a95a928c",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"## Fantasy\n",
|
||||
"\n",
|
||||
" * Football\n",
|
||||
"\n",
|
||||
" * Baseball\n",
|
||||
"\n",
|
||||
" * Basketball\n",
|
||||
"\n",
|
||||
" * Hockey\n",
|
||||
"\n",
|
||||
"## ESPN Sites\n",
|
||||
"\n",
|
||||
" * ESPN Deportes\n",
|
||||
"\n",
|
||||
" * Andscape\n",
|
||||
"\n",
|
||||
" * espnW\n",
|
||||
"\n",
|
||||
" * ESPNFC\n",
|
||||
"\n",
|
||||
" * X Games\n",
|
||||
"\n",
|
||||
" * SEC Network\n",
|
||||
"\n",
|
||||
"## ESPN Apps\n",
|
||||
"\n",
|
||||
" * ESPN\n",
|
||||
"\n",
|
||||
" * ESPN Fantasy\n",
|
||||
"\n",
|
||||
" * Tournament Challenge\n",
|
||||
"\n",
|
||||
"## Follow ESPN\n",
|
||||
"\n",
|
||||
" * Facebook\n",
|
||||
"\n",
|
||||
" * X/Twitter\n",
|
||||
"\n",
|
||||
" * Instagram\n",
|
||||
"\n",
|
||||
" * Snapchat\n",
|
||||
"\n",
|
||||
" * TikTok\n",
|
||||
"\n",
|
||||
" * YouTube\n",
|
||||
"\n",
|
||||
"## Fresh updates to our NBA mock draft: Everything we're hearing hours before\n",
|
||||
"Round 1\n",
|
||||
"\n",
|
||||
"With hours until Round 1 begins (8 p.m. ET on ESPN and ABC), ESPN draft\n",
|
||||
"insiders Jonathan Givony and Jeremy Woo have new intel on lottery picks and\n",
|
||||
"more.\n",
|
||||
"\n",
|
||||
"2hJonathan Givony and Jeremy Woo\n",
|
||||
"\n",
|
||||
"Illustration by ESPN\n",
|
||||
"\n",
|
||||
"## From No. 1 to 100: Ranking the 2024 NBA draft prospects\n",
|
||||
"\n",
|
||||
"Who's No. 1? Where do the Kentucky, Duke and UConn players rank? Here's our\n",
|
||||
"final Top 100 Big Board.\n",
|
||||
"\n",
|
||||
"6hJonathan Givony and Jeremy Woo\n",
|
||||
"\n",
|
||||
" * Full draft order: All 58 picks over two rounds\n",
|
||||
" * Trade tracker: Details for all deals\n",
|
||||
"\n",
|
||||
" * Betting buzz: Lakers favorites to draft Bronny\n",
|
||||
" * Use our NBA draft simu\n",
|
||||
"ent system, LLM functions as the agent's brain,\n",
|
||||
"complemented by several key components:\n",
|
||||
"\n",
|
||||
" * **Planning**\n",
|
||||
" * Subgoal and decomposition: The agent breaks down large tasks into smaller, manageable subgoals, enabling efficient handling of complex tasks.\n",
|
||||
" * Reflection and refinement: The agent can do self-criticism and self-reflection over past actions, learn from mistakes and refine them for future steps, thereby improving the quality of final results.\n",
|
||||
" * **Memory**\n",
|
||||
" * Short-term memory: I would consider all the in-context learning (See Prompt Engineering) as utilizing short-term memory of the model to learn.\n",
|
||||
" * Long-term memory: This provides the agent with the capability to retain and recall (infinite) information over extended periods, often by leveraging an external vector store and fast retrieval.\n",
|
||||
" * **Tool use**\n",
|
||||
" * The agent learns to call external APIs for extra information that is missing from the model weights (often hard to change after pre-training), including \n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.document_transformers import Html2TextTransformer\n",
|
||||
"\n",
|
||||
"urls = [\"https://www.espn.com\", \"https://lilianweng.github.io/posts/2023-06-23-agent/\"]\n",
|
||||
"html2text = Html2TextTransformer()\n",
|
||||
"docs_transformed = html2text.transform_documents(docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "18ef9fe9",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\" * ESPNFC\\n\\n * X Games\\n\\n * SEC Network\\n\\n## ESPN Apps\\n\\n * ESPN\\n\\n * ESPN Fantasy\\n\\n## Follow ESPN\\n\\n * Facebook\\n\\n * Twitter\\n\\n * Instagram\\n\\n * Snapchat\\n\\n * YouTube\\n\\n * The ESPN Daily Podcast\\n\\n2023 FIFA Women's World Cup\\n\\n## Follow live: Canada takes on Nigeria in group stage of Women's World Cup\\n\\n2m\\n\\nEPA/Morgan Hancock\\n\\n## TOP HEADLINES\\n\\n * Snyder fined $60M over findings in investigation\\n * NFL owners approve $6.05B sale of Commanders\\n * Jags assistant comes out as gay in NFL milestone\\n * O's alone atop East after topping slumping Rays\\n * ACC's Phillips: Never condoned hazing at NU\\n\\n * Vikings WR Addison cited for driving 140 mph\\n * 'Taking his time': Patient QB Rodgers wows Jets\\n * Reyna got U.S. assurances after Berhalter rehire\\n * NFL Future Power Rankings\\n\\n## USWNT AT THE WORLD CUP\\n\\n### USA VS. VIETNAM: 9 P.M. ET FRIDAY\\n\\n## How do you defend against Alex Morgan? Former opponents sound off\\n\\nThe U.S. forward is unstoppable at this level, scoring 121 goals and adding 49\""
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"docs_transformed[0].page_content[1000:2000]"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "6045d660",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"\"t's brain,\\ncomplemented by several key components:\\n\\n * **Planning**\\n * Subgoal and decomposition: The agent breaks down large tasks into smaller, manageable subgoals, enabling efficient handling of complex tasks.\\n * Reflection and refinement: The agent can do self-criticism and self-reflection over past actions, learn from mistakes and refine them for future steps, thereby improving the quality of final results.\\n * **Memory**\\n * Short-term memory: I would consider all the in-context learning (See Prompt Engineering) as utilizing short-term memory of the model to learn.\\n * Long-term memory: This provides the agent with the capability to retain and recall (infinite) information over extended periods, often by leveraging an external vector store and fast retrieval.\\n * **Tool use**\\n * The agent learns to call external APIs for extra information that is missing from the model weights (often hard to change after pre-training), including current information, code execution c\""
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"docs_transformed[1].page_content[1000:2000]"
"docs_transformed = html2text.transform_documents(docs)\n",
"\n",
"print(docs_transformed[0].page_content[1000:2000])\n",
"\n",
"print(docs_transformed[1].page_content[1000:2000])"
]
}
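Taken together, the updated notebook boils down to one pipeline: fetch raw HTML, then strip it to plain text. A consolidated sketch, assuming `AsyncHtmlLoader` is the loader that produced `docs` above:

```python
from langchain_community.document_loaders import AsyncHtmlLoader
from langchain_community.document_transformers import Html2TextTransformer

urls = ["https://www.espn.com", "https://lilianweng.github.io/posts/2023-06-23-agent/"]
docs = AsyncHtmlLoader(urls).load()  # raw HTML Documents
docs_transformed = Html2TextTransformer().transform_documents(docs)
print(docs_transformed[0].page_content[1000:2000])
```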
],
@@ -125,7 +167,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12"
"version": "3.10.5"
}
},
"nbformat": 4,

@@ -17,7 +17,9 @@
"source": [
"# AI21LLM\n",
"\n",
"This example goes over how to use LangChain to interact with `AI21` models.\n",
"This example goes over how to use LangChain to interact with `AI21` Jurassic models. To use the Jamba model, use the [ChatAI21 object](https://python.langchain.com/v0.2/docs/integrations/chat/ai21/) instead.\n",
"\n",
"[See a full list of AI21 models and tools on LangChain.](https://pypi.org/project/langchain-ai21/)\n",
"\n",
"## Installation"
]

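A minimal invocation sketch for the class this page documents, assuming `AI21_API_KEY` is exported and the Jurassic model name below is still served:

```python
from langchain_ai21 import AI21LLM

llm = AI21LLM(model="j2-ultra")  # model name is illustrative
print(llm.invoke("Tell me one fact about the Earth."))
```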
@@ -34,7 +34,7 @@
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet boto3"
"%pip install --upgrade --quiet langchain_aws"
]
},
{
@@ -45,74 +45,13 @@
},
"outputs": [],
"source": [
"from langchain_community.llms import Bedrock\n",
"from langchain_aws import BedrockLLM\n",
"\n",
"llm = Bedrock(\n",
"llm = BedrockLLM(\n",
"    credentials_profile_name=\"bedrock-admin\", model_id=\"amazon.titan-text-express-v1\"\n",
")"
]
},
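With the `llm` from the updated cell above, a simple call would look like this sketch; it assumes the `bedrock-admin` profile has Bedrock access in its default region:

```python
# Invoke the BedrockLLM instance created above.
response = llm.invoke("Tell me one fact about the moon.")
print(response)
```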
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Using in a conversation chain"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain.chains import ConversationChain\n",
"from langchain.memory import ConversationBufferMemory\n",
"\n",
"conversation = ConversationChain(\n",
"    llm=llm, verbose=True, memory=ConversationBufferMemory()\n",
")\n",
"\n",
"conversation.predict(input=\"Hi there!\")"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"### Conversation Chain With Streaming"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.llms import Bedrock\n",
"from langchain_core.callbacks import StreamingStdOutCallbackHandler\n",
"\n",
"llm = Bedrock(\n",
"    credentials_profile_name=\"bedrock-admin\",\n",
"    model_id=\"amazon.titan-text-express-v1\",\n",
"    streaming=True,\n",
"    callbacks=[StreamingStdOutCallbackHandler()],\n",
")"
]
},
{
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"conversation = ConversationChain(\n",
|
||||
" llm=llm, verbose=True, memory=ConversationBufferMemory()\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"conversation.predict(input=\"Hi there!\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -126,28 +65,23 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"custom_llm = Bedrock(\n",
|
||||
"custom_llm = BedrockLLM(\n",
|
||||
" credentials_profile_name=\"bedrock-admin\",\n",
|
||||
" provider=\"cohere\",\n",
|
||||
" model_id=\"<Custom model ARN>\", # ARN like 'arn:aws:bedrock:...' obtained via provisioning the custom model\n",
|
||||
" model_kwargs={\"temperature\": 1},\n",
|
||||
" streaming=True,\n",
|
||||
" callbacks=[StreamingStdOutCallbackHandler()],\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"conversation = ConversationChain(\n",
|
||||
" llm=custom_llm, verbose=True, memory=ConversationBufferMemory()\n",
|
||||
")\n",
|
||||
"conversation.predict(input=\"What is the recipe of mayonnaise?\")"
|
||||
"custom_llm.invoke(input=\"What is the recipe of mayonnaise?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Guardrails for Amazon Bedrock example \n",
|
||||
"## Guardrails for Amazon Bedrock\n",
|
||||
"\n",
|
||||
"## Guardrails for Amazon Bedrock (Preview) \n",
|
||||
"[Guardrails for Amazon Bedrock](https://aws.amazon.com/bedrock/guardrails/) evaluates user inputs and model responses based on use case specific policies, and provides an additional layer of safeguards regardless of the underlying model. Guardrails can be applied across models, including Anthropic Claude, Meta Llama 2, Cohere Command, AI21 Labs Jurassic, and Amazon Titan Text, as well as fine-tuned models.\n",
"**Note**: Guardrails for Amazon Bedrock is currently in preview and not generally available. Reach out through your usual AWS Support contacts if you’d like access to this feature.\n",
|
||||
"In this section, we are going to set up a Bedrock language model with specific guardrails that include tracing capabilities. "
|
||||
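A hedged sketch of such a setup; the `guardrails` keys mirror the shape used later in this notebook, and the model and guardrail IDs are placeholders you must provision in your own AWS account:

```python
from langchain_aws import BedrockLLM

llm = BedrockLLM(
    credentials_profile_name="bedrock-admin",
    model_id="anthropic.claude-v2",  # illustrative model choice
    # Placeholder guardrail configuration; supply your own ID and version.
    guardrails={"id": "<guardrail_id>", "version": "<version>", "trace": True},
)
```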
@@ -174,7 +108,7 @@
"\n",
"\n",
"# Guardrails for Amazon Bedrock with trace\n",
"llm = Bedrock(\n",
"llm = BedrockLLM(\n",
"    credentials_profile_name=\"bedrock-admin\",\n",
"    model_id=\"<Model_ID>\",\n",
"    model_kwargs={},\n",
@@ -200,7 +134,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.7"
"version": "3.10.5"
}
},
"nbformat": 4,

@@ -7,141 +7,112 @@
"source": [
"# Databricks\n",
"\n",
"The [Databricks](https://www.databricks.com/) Lakehouse Platform unifies data, analytics, and AI on one platform.\n",
"> [Databricks](https://www.databricks.com/) Lakehouse Platform unifies data, analytics, and AI on one platform.\n",
"\n",
"This example notebook shows how to wrap Databricks endpoints as LLMs in LangChain.\n",
"It supports two endpoint types:\n",
"\n",
"* Serving endpoint, recommended for production and development,\n",
"* Cluster driver proxy app, recommended for interactive development."
"This notebook provides a quick overview for getting started with Databricks [LLM models](https://python.langchain.com/v0.2/docs/concepts/#llms). For detailed documentation of all features and configurations head to the [API reference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.databricks.Databricks.html).\n",
"\n",
|
||||
"## Overview\n",
|
||||
"\n",
|
||||
"`Databricks` LLM class wraps a completion endpoint hosted as either of these two endpoint types:\n",
|
||||
"\n",
|
||||
"* [Databricks Model Serving](https://docs.databricks.com/en/machine-learning/model-serving/index.html), recommended for production and development,\n",
|
||||
"* Cluster driver proxy app, recommended for interactive development.\n",
|
||||
"\n",
|
||||
"This example notebook shows how to wrap your LLM endpoint and use it as an LLM in your LangChain application."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Installation\n",
|
||||
"## Limitations\n",
|
||||
"\n",
|
||||
"`mlflow >= 2.9 ` is required to run the code in this notebook. If it's not installed, please install it using this command:\n",
|
||||
"The `Databricks` LLM class is *legacy* implementation and has several limitations in the feature compatibility.\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"pip install mlflow>=2.9\n",
|
||||
"```\n",
|
||||
"* Only supports synchronous invocation. Streaming or async APIs are not supported.\n",
|
||||
"* `batch` API is not supported.\n",
|
||||
"\n",
|
||||
"Also, we need `dbutils` for this example.\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"pip install dbutils\n",
|
||||
"```\n"
|
||||
"To use those features, please use the new [ChatDatabricks](https://python.langchain.com/v0.2/docs/integrations/chat/databricks) class instead. `ChatDatabricks` supports all APIs of `ChatModel` including streaming, async, batch, etc.\n"
]
},
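For readers migrating off the legacy class, a minimal sketch of the recommended replacement; the endpoint name is illustrative:

```python
from langchain_community.chat_models import ChatDatabricks

# Unlike the legacy Databricks LLM class, ChatDatabricks supports
# streaming, async, and batch APIs.
chat = ChatDatabricks(endpoint="databricks-dbrx-instruct", temperature=0.1)
print(chat.invoke("hello").content)
```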
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Wrapping a serving endpoint: External model\n",
"## Setup\n",
"\n",
"Prerequisite: Register an OpenAI API key as a secret:\n",
"To access Databricks models you'll need to create a Databricks account, set up credentials (only if you are outside Databricks workspace), and install required packages.\n",
|
||||
"\n",
|
||||
" ```bash\n",
|
||||
" databricks secrets create-scope <scope>\n",
|
||||
" databricks secrets put-secret <scope> openai-api-key --string-value $OPENAI_API_KEY\n",
|
||||
" ```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The following code creates a new serving endpoint with OpenAI's GPT-4 model for chat and generates a response using the endpoint."
|
||||
"### Credentials (only if you are outside Databricks)\n",
|
||||
"\n",
|
||||
"If you are running LangChain app inside Databricks, you can skip this step.\n",
|
||||
"\n",
|
||||
"Otherwise, you need manually set the Databricks workspace hostname and personal access token to `DATABRICKS_HOST` and `DATABRICKS_TOKEN` environment variables, respectively. See [Authentication Documentation](https://docs.databricks.com/en/dev-tools/auth/index.html#databricks-personal-access-tokens) for how to get an access token."
|
||||
]
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"content='Hello! How can I assist you today?'\n"
]
}
],
"outputs": [],
"source": [
"from langchain_community.chat_models import ChatDatabricks\n",
"from langchain_core.messages import HumanMessage\n",
"from mlflow.deployments import get_deploy_client\n",
"import getpass\n",
"import os\n",
"\n",
"client = get_deploy_client(\"databricks\")\n",
"\n",
"secret = \"secrets/<scope>/openai-api-key\" # replace `<scope>` with your scope\n",
"name = \"my-chat\" # rename this if my-chat already exists\n",
"client.create_endpoint(\n",
" name=name,\n",
" config={\n",
" \"served_entities\": [\n",
" {\n",
" \"name\": \"my-chat\",\n",
" \"external_model\": {\n",
" \"name\": \"gpt-4\",\n",
" \"provider\": \"openai\",\n",
" \"task\": \"llm/v1/chat\",\n",
" \"openai_config\": {\n",
" \"openai_api_key\": \"{{\" + secret + \"}}\",\n",
" },\n",
" },\n",
" }\n",
" ],\n",
" },\n",
")\n",
"\n",
"chat = ChatDatabricks(\n",
" target_uri=\"databricks\",\n",
" endpoint=name,\n",
" temperature=0.1,\n",
")\n",
"chat([HumanMessage(content=\"hello\")])"
"os.environ[\"DATABRICKS_HOST\"] = \"https://your-workspace.cloud.databricks.com\"\n",
"os.environ[\"DATABRICKS_TOKEN\"] = getpass.getpass(\"Enter your Databricks access token: \")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Wrapping a serving endpoint: Foundation model\n",
"\n",
"The following code uses the `databricks-bge-large-en` serving endpoint (no endpoint creation is required) to generate embeddings from input text."
"Alternatively, you can pass those parameters when initializing the `Databricks` class."
]
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[0.051055908203125, 0.007221221923828125, 0.003879547119140625]\n"
]
}
],
"outputs": [],
"source": [
"from langchain_community.embeddings import DatabricksEmbeddings\n",
"from langchain_community.llms import Databricks\n",
"\n",
"embeddings = DatabricksEmbeddings(endpoint=\"databricks-bge-large-en\")\n",
"embeddings.embed_query(\"hello\")[:3]"
"databricks = Databricks(\n",
" host=\"https://your-workspace.cloud.databricks.com\",\n",
" # We strongly recommend NOT to hardcode your access token in your code, instead use secret management tools\n",
" # or environment variables to store your access token securely. The following example uses Databricks Secrets\n",
" # to retrieve the access token that is available within the Databricks notebook.\n",
" token=dbutils.secrets.get(scope=\"YOUR_SECRET_SCOPE\", key=\"databricks-token\"), # noqa: F821\n",
")"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"## Wrapping a serving endpoint: Custom model\n",
"### Installation\n",
"\n",
"Prerequisites:\n",
"The LangChain Databricks integration lives in the `langchain-community` package. Also, `mlflow >= 2.9 ` is required to run the code in this notebook."
|
||||
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain-community mlflow>=2.9.0"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Wrapping Model Serving Endpoint\n",
"\n",
"### Prerequisites:\n",
"\n",
"* An LLM was registered and deployed to [a Databricks serving endpoint](https://docs.databricks.com/machine-learning/model-serving/index.html).\n",
"* You have [\"Can Query\" permission](https://docs.databricks.com/security/auth-authz/access-control/serving-endpoint-acl.html) to the endpoint.\n",
@@ -149,9 +120,14 @@
"The expected MLflow model signature is:\n",
"\n",
" * inputs: `[{\"name\": \"prompt\", \"type\": \"string\"}, {\"name\": \"stop\", \"type\": \"list[string]\"}]`\n",
" * outputs: `[{\"type\": \"string\"}]`\n",
"\n",
"If the model signature is incompatible or you want to insert extra configs, you can set `transform_input_fn` and `transform_output_fn` accordingly."
" * outputs: `[{\"type\": \"string\"}]`\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Invocation"
]
},
{
@@ -173,12 +149,8 @@
"source": [
"from langchain_community.llms import Databricks\n",
"\n",
"# If running a Databricks notebook attached to an interactive cluster in \"single user\"\n",
"# or \"no isolation shared\" mode, you only need to specify the endpoint name to create\n",
"# a `Databricks` instance to query a serving endpoint in the same workspace.\n",
"llm = Databricks(endpoint_name=\"dolly\")\n",
"\n",
"llm(\"How are you?\")"
"llm = Databricks(endpoint_name=\"YOUR_ENDPOINT_NAME\")\n",
"llm.invoke(\"How are you?\")"
]
},
{
@@ -198,245 +170,16 @@
}
],
"source": [
"llm(\"How are you?\", stop=[\".\"])"
"llm.invoke(\"How are you?\", stop=[\".\"])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'I am fine. Thank you!'"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Otherwise, you can manually specify the Databricks workspace hostname and personal access token\n",
"# or set `DATABRICKS_HOST` and `DATABRICKS_TOKEN` environment variables, respectively.\n",
"# See https://docs.databricks.com/dev-tools/auth.html#databricks-personal-access-tokens\n",
"# We strongly recommend not exposing the API token explicitly inside a notebook.\n",
"# You can use Databricks secret manager to store your API token securely.\n",
"# See https://docs.databricks.com/dev-tools/databricks-utils.html#secrets-utility-dbutilssecrets\n",
"\n",
"import os\n",
"\n",
"import dbutils\n",
"\n",
"os.environ[\"DATABRICKS_TOKEN\"] = dbutils.secrets.get(\"myworkspace\", \"api_token\")\n",
"\n",
"llm = Databricks(host=\"myworkspace.cloud.databricks.com\", endpoint_name=\"dolly\")\n",
"\n",
"llm(\"How are you?\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'I am fine.'"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# If the serving endpoint accepts extra parameters like `temperature`,\n",
"# you can set them in `model_kwargs`.\n",
"llm = Databricks(endpoint_name=\"dolly\", model_kwargs={\"temperature\": 0.1})\n",
"\n",
"llm(\"How are you?\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'I’m Excellent. You?'"
]
},
"execution_count": 24,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Use `transform_input_fn` and `transform_output_fn` if the serving endpoint\n",
"# expects a different input schema and does not return a JSON string,\n",
"# respectively, or you want to apply a prompt template on top.\n",
"\n",
"\n",
"def transform_input(**request):\n",
" full_prompt = f\"\"\"{request[\"prompt\"]}\n",
" Be Concise.\n",
" \"\"\"\n",
" request[\"prompt\"] = full_prompt\n",
" return request\n",
"\n",
"\n",
"llm = Databricks(endpoint_name=\"dolly\", transform_input_fn=transform_input)\n",
"\n",
"llm(\"How are you?\")"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"## Wrapping a cluster driver proxy app\n",
"### Transform Input and Output\n",
"\n",
"Prerequisites:\n",
"\n",
"* An LLM loaded on a Databricks interactive cluster in \"single user\" or \"no isolation shared\" mode.\n",
"* A local HTTP server running on the driver node to serve the model at `\"/\"` using HTTP POST with JSON input/output.\n",
"* It uses a port number between `[3000, 8000]` and listens to the driver IP address or simply `0.0.0.0` instead of localhost only.\n",
"* You have \"Can Attach To\" permission to the cluster.\n",
"\n",
"The expected server schema (using JSON schema) is:\n",
"\n",
"* inputs:\n",
" ```json\n",
" {\"type\": \"object\",\n",
" \"properties\": {\n",
" \"prompt\": {\"type\": \"string\"},\n",
" \"stop\": {\"type\": \"array\", \"items\": {\"type\": \"string\"}}},\n",
" \"required\": [\"prompt\"]}\n",
" ```\n",
"* outputs: `{\"type\": \"string\"}`\n",
"\n",
"If the server schema is incompatible or you want to insert extra configs, you can use `transform_input_fn` and `transform_output_fn` accordingly.\n",
"\n",
"The following is a minimal example for running a driver proxy app to serve an LLM:\n",
"\n",
"```python\n",
|
||||
"from flask import Flask, request, jsonify\n",
|
||||
"import torch\n",
|
||||
"from transformers import pipeline, AutoTokenizer, StoppingCriteria\n",
|
||||
"\n",
|
||||
"model = \"databricks/dolly-v2-3b\"\n",
|
||||
"tokenizer = AutoTokenizer.from_pretrained(model, padding_side=\"left\")\n",
|
||||
"dolly = pipeline(model=model, tokenizer=tokenizer, trust_remote_code=True, device_map=\"auto\")\n",
|
||||
"device = dolly.device\n",
|
||||
"\n",
|
||||
"class CheckStop(StoppingCriteria):\n",
|
||||
" def __init__(self, stop=None):\n",
|
||||
" super().__init__()\n",
|
||||
" self.stop = stop or []\n",
|
||||
" self.matched = \"\"\n",
|
||||
" self.stop_ids = [tokenizer.encode(s, return_tensors='pt').to(device) for s in self.stop]\n",
|
||||
" def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs):\n",
|
||||
" for i, s in enumerate(self.stop_ids):\n",
|
||||
" if torch.all((s == input_ids[0][-s.shape[1]:])).item():\n",
|
||||
" self.matched = self.stop[i]\n",
|
||||
" return True\n",
|
||||
" return False\n",
|
||||
"\n",
|
||||
"def llm(prompt, stop=None, **kwargs):\n",
|
||||
" check_stop = CheckStop(stop)\n",
|
||||
" result = dolly(prompt, stopping_criteria=[check_stop], **kwargs)\n",
|
||||
" return result[0][\"generated_text\"].rstrip(check_stop.matched)\n",
|
||||
"\n",
|
||||
"app = Flask(\"dolly\")\n",
|
||||
"\n",
|
||||
"@app.route('/', methods=['POST'])\n",
|
||||
"def serve_llm():\n",
|
||||
" resp = llm(**request.json)\n",
|
||||
" return jsonify(resp)\n",
|
||||
"\n",
|
||||
"app.run(host=\"0.0.0.0\", port=\"7777\")\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Once the server is running, you can create a `Databricks` instance to wrap it as an LLM."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Hello, thank you for asking. It is wonderful to hear that you are well.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 32,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# If running a Databricks notebook attached to the same cluster that runs the app,\n",
|
||||
"# you only need to specify the driver port to create a `Databricks` instance.\n",
|
||||
"llm = Databricks(cluster_driver_port=\"7777\")\n",
|
||||
"\n",
|
||||
"llm(\"How are you?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'I am well. You?'"
|
||||
]
|
||||
},
|
||||
"execution_count": 40,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Otherwise, you can manually specify the cluster ID to use,\n",
|
||||
"# as well as Databricks workspace hostname and personal access token.\n",
|
||||
"\n",
|
||||
"llm = Databricks(cluster_id=\"0000-000000-xxxxxxxx\", cluster_driver_port=\"7777\")\n",
|
||||
"\n",
|
||||
"llm(\"How are you?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'I am very well. It is a pleasure to meet you.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 31,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# If the app accepts extra parameters like `temperature`,\n",
|
||||
"# you can set them in `model_kwargs`.\n",
|
||||
"llm = Databricks(cluster_driver_port=\"7777\", model_kwargs={\"temperature\": 0.1})\n",
|
||||
"\n",
|
||||
"llm(\"How are you?\")"
|
||||
"Sometimes you may want to wrap a serving endpoint that has imcompatible model signature or you want to insert extra configs. You can use the `transform_input_fn` and `transform_output_fn` arguments to define additional pre/post process."
|
||||
]
},
{
@@ -456,7 +199,7 @@
}
],
"source": [
"# Use `transform_input_fn` and `transform_output_fn` if the app\n",
"# Use `transform_input_fn` and `transform_output_fn` if the serving endpoint\n",
"# expects a different input schema and does not return a JSON string,\n",
"# respectively, or you want to apply a prompt template on top.\n",
"\n",
@@ -474,12 +217,12 @@
"\n",
"\n",
"llm = Databricks(\n",
" cluster_driver_port=\"7777\",\n",
" endpoint_name=\"YOUR_ENDPOINT_NAME\",\n",
" transform_input_fn=transform_input,\n",
" transform_output_fn=transform_output,\n",
")\n",
"\n",
"llm(\"How are you?\")"
"llm.invoke(\"How are you?\")"
]
}
],

File diff suppressed because one or more lines are too long
@@ -14,15 +14,15 @@
"Oracle Cloud Infrastructure (OCI) Generative AI is a fully managed service that provides a set of state-of-the-art, customizable large language models (LLMs) that cover a wide range of use cases, and which is available through a single API.\n",
"Using the OCI Generative AI service you can access ready-to-use pretrained models, or create and host your own fine-tuned custom models based on your own data on dedicated AI clusters. Detailed documentation of the service and API is available __[here](https://docs.oracle.com/en-us/iaas/Content/generative-ai/home.htm)__ and __[here](https://docs.oracle.com/en-us/iaas/api/#/en/generative-ai/20231130/)__.\n",
"\n",
|
||||
"This notebook explains how to use OCI's Genrative AI models with LangChain."
|
||||
"This notebook explains how to use OCI's Generative AI complete models with LangChain."
|
||||
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Prerequisite\n",
"We will need to install the oci sdk"
"## Setup\n",
"Ensure that the oci sdk and the langchain-community package are installed"
]
},
{
@@ -31,31 +31,7 @@
"metadata": {},
"outputs": [],
"source": [
"!pip install -U oci"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### OCI Generative AI API endpoint \n",
"https://inference.generativeai.us-chicago-1.oci.oraclecloud.com"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Authentication\n",
"The authentication methods supported for this langchain integration are:\n",
"\n",
"1. API Key\n",
"2. Session token\n",
"3. Instance principal\n",
"4. Resource principal \n",
"\n",
"These follows the standard SDK authentication methods detailed __[here](https://docs.oracle.com/en-us/iaas/Content/API/Concepts/sdk_authentication_methods.htm)__.\n",
" "
"!pip install -U oci langchain-community"
]
},
{
@@ -71,13 +47,13 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.llms import OCIGenAI\n",
"from langchain_community.llms.oci_generative_ai import OCIGenAI\n",
"\n",
"# use default authN method API-key\n",
"llm = OCIGenAI(\n",
" model_id=\"MY_MODEL\",\n",
" model_id=\"cohere.command\",\n",
" service_endpoint=\"https://inference.generativeai.us-chicago-1.oci.oraclecloud.com\",\n",
" compartment_id=\"MY_OCID\",\n",
" model_kwargs={\"temperature\": 0, \"max_tokens\": 500},\n",
")\n",
"\n",
"response = llm.invoke(\"Tell me one fact about earth\", temperature=0.7)\n",
@@ -85,30 +61,10 @@
]
},
{
"cell_type": "code",
"execution_count": null,
"cell_type": "markdown",
"metadata": {},
"outputs": [],
"source": [
"from langchain.chains import LLMChain\n",
"from langchain_core.prompts import PromptTemplate\n",
"\n",
"# Use Session Token to authN\n",
"llm = OCIGenAI(\n",
" model_id=\"MY_MODEL\",\n",
" service_endpoint=\"https://inference.generativeai.us-chicago-1.oci.oraclecloud.com\",\n",
" compartment_id=\"MY_OCID\",\n",
" auth_type=\"SECURITY_TOKEN\",\n",
" auth_profile=\"MY_PROFILE\", # replace with your profile name\n",
" model_kwargs={\"temperature\": 0.7, \"top_p\": 0.75, \"max_tokens\": 200},\n",
")\n",
"\n",
"prompt = PromptTemplate(input_variables=[\"query\"], template=\"{query}\")\n",
"\n",
"llm_chain = LLMChain(llm=llm, prompt=prompt)\n",
"\n",
"response = llm_chain.invoke(\"what is the capital of france?\")\n",
"print(response)"
"#### Chaining with prompt templates"
]
},
{
@@ -117,49 +73,95 @@
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.embeddings import OCIGenAIEmbeddings\n",
|
||||
"from langchain_community.vectorstores import FAISS\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.runnables import RunnablePassthrough\n",
|
||||
"\n",
|
||||
"embeddings = OCIGenAIEmbeddings(\n",
|
||||
" model_id=\"MY_EMBEDDING_MODEL\",\n",
|
||||
" service_endpoint=\"https://inference.generativeai.us-chicago-1.oci.oraclecloud.com\",\n",
|
||||
" compartment_id=\"MY_OCID\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"vectorstore = FAISS.from_texts(\n",
|
||||
" [\n",
|
||||
" \"Larry Ellison co-founded Oracle Corporation in 1977 with Bob Miner and Ed Oates.\",\n",
|
||||
" \"Oracle Corporation is an American multinational computer technology company headquartered in Austin, Texas, United States.\",\n",
|
||||
" ],\n",
|
||||
" embedding=embeddings,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"retriever = vectorstore.as_retriever()\n",
|
||||
"\n",
|
||||
"template = \"\"\"Answer the question based only on the following context:\n",
|
||||
"{context}\n",
|
||||
" \n",
|
||||
"Question: {question}\n",
|
||||
"\"\"\"\n",
|
||||
"prompt = PromptTemplate.from_template(template)\n",
|
||||
"from langchain_core.prompts import PromptTemplate\n",
|
||||
"\n",
|
||||
"llm = OCIGenAI(\n",
|
||||
" model_id=\"MY_MODEL\",\n",
|
||||
" model_id=\"cohere.command\",\n",
|
||||
" service_endpoint=\"https://inference.generativeai.us-chicago-1.oci.oraclecloud.com\",\n",
|
||||
" compartment_id=\"MY_OCID\",\n",
|
||||
" model_kwargs={\"temperature\": 0, \"max_tokens\": 500},\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = (\n",
|
||||
" {\"context\": retriever, \"question\": RunnablePassthrough()}\n",
|
||||
" | prompt\n",
|
||||
" | llm\n",
|
||||
" | StrOutputParser()\n",
|
||||
"prompt = PromptTemplate(input_variables=[\"query\"], template=\"{query}\")\n",
|
||||
"llm_chain = prompt | llm\n",
|
||||
"\n",
|
||||
"response = llm_chain.invoke(\"what is the capital of france?\")\n",
|
||||
"print(response)"
|
||||
]
|
||||
},
|
||||
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Streaming"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"llm = OCIGenAI(\n",
" model_id=\"cohere.command\",\n",
" service_endpoint=\"https://inference.generativeai.us-chicago-1.oci.oraclecloud.com\",\n",
" compartment_id=\"MY_OCID\",\n",
" model_kwargs={\"temperature\": 0, \"max_tokens\": 500},\n",
")\n",
"\n",
"print(chain.invoke(\"when was oracle founded?\"))\n",
"print(chain.invoke(\"where is oracle headquartered?\"))"
"for chunk in llm.stream(\"Write me a song about sparkling water.\"):\n",
" print(chunk, end=\"\", flush=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Authentication\n",
"The authentication methods supported for LlamaIndex are equivalent to those used with other OCI services and follow the __[standard SDK authentication](https://docs.oracle.com/en-us/iaas/Content/API/Concepts/sdk_authentication_methods.htm)__ methods, specifically API Key, session token, instance principal, and resource principal.\n",
|
||||
"\n",
|
||||
"API key is the default authentication method used in the examples above. The following example demonstrates how to use a different authentication method (session token)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OCIGenAI(\n",
|
||||
" model_id=\"cohere.command\",\n",
|
||||
" service_endpoint=\"https://inference.generativeai.us-chicago-1.oci.oraclecloud.com\",\n",
|
||||
" compartment_id=\"MY_OCID\",\n",
|
||||
" auth_type=\"SECURITY_TOKEN\",\n",
|
||||
" auth_profile=\"MY_PROFILE\", # replace with your profile name\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Dedicated AI Cluster\n",
|
||||
"To access models hosted in a dedicated AI cluster __[create an endpoint](https://docs.oracle.com/en-us/iaas/api/#/en/generative-ai-inference/20231130/)__ whose assigned OCID (currently prefixed by ‘ocid1.generativeaiendpoint.oc1.us-chicago-1’) is used as your model ID.\n",
|
||||
"\n",
|
||||
"When accessing models hosted in a dedicated AI cluster you will need to initialize the OCIGenAI interface with two extra required params (\"provider\" and \"context_size\")."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OCIGenAI(\n",
|
||||
" model_id=\"ocid1.generativeaiendpoint.oc1.us-chicago-1....\",\n",
|
||||
" service_endpoint=\"https://inference.generativeai.us-chicago-1.oci.oraclecloud.com\",\n",
|
||||
" compartment_id=\"DEDICATED_COMPARTMENT_OCID\",\n",
|
||||
" auth_profile=\"MY_PROFILE\", # replace with your profile name,\n",
|
||||
" provider=\"MODEL_PROVIDER\", # e.g., \"cohere\" or \"meta\"\n",
|
||||
" context_size=\"MODEL_CONTEXT_SIZE\", # e.g., 128000\n",
|
||||
")"
|
||||
]
|
||||
}
|
||||
],
|
||||
|
||||
@@ -87,7 +87,6 @@
|
||||
" \"do_sample\": True,\n",
|
||||
" \"max_tokens_to_generate\": 1000,\n",
|
||||
" \"temperature\": 0.01,\n",
|
||||
" \"process_prompt\": True,\n",
|
||||
" \"select_expert\": \"llama-2-7b-chat-hf\",\n",
|
||||
" # \"stop_sequences\": '\\\"sequence1\\\",\\\"sequence2\\\"',\n",
|
||||
" # \"repetition_penalty\": 1.0,\n",
|
||||
@@ -116,7 +115,6 @@
|
||||
" \"do_sample\": True,\n",
|
||||
" \"max_tokens_to_generate\": 1000,\n",
|
||||
" \"temperature\": 0.01,\n",
|
||||
" \"process_prompt\": True,\n",
|
||||
" \"select_expert\": \"llama-2-7b-chat-hf\",\n",
|
||||
" # \"stop_sequences\": '\\\"sequence1\\\",\\\"sequence2\\\"',\n",
|
||||
" # \"repetition_penalty\": 1.0,\n",
|
||||
@@ -177,14 +175,16 @@
|
||||
"import os\n",
|
||||
"\n",
|
||||
"sambastudio_base_url = \"<Your SambaStudio environment URL>\"\n",
|
||||
"# sambastudio_base_uri = \"<Your SambaStudio endpoint base URI>\" # optional, \"api/predict/nlp\" set as default\n",
|
||||
"sambastudio_base_uri = (\n",
|
||||
" \"<Your SambaStudio endpoint base URI>\" # optional, \"api/predict/nlp\" set as default\n",
|
||||
")\n",
|
||||
"sambastudio_project_id = \"<Your SambaStudio project id>\"\n",
|
||||
"sambastudio_endpoint_id = \"<Your SambaStudio endpoint id>\"\n",
|
||||
"sambastudio_api_key = \"<Your SambaStudio endpoint API key>\"\n",
|
||||
"\n",
|
||||
"# Set the environment variables\n",
|
||||
"os.environ[\"SAMBASTUDIO_BASE_URL\"] = sambastudio_base_url\n",
|
||||
"# os.environ[\"SAMBASTUDIO_BASE_URI\"] = sambastudio_base_uri\n",
|
||||
"os.environ[\"SAMBASTUDIO_BASE_URI\"] = sambastudio_base_uri\n",
|
||||
"os.environ[\"SAMBASTUDIO_PROJECT_ID\"] = sambastudio_project_id\n",
|
||||
"os.environ[\"SAMBASTUDIO_ENDPOINT_ID\"] = sambastudio_endpoint_id\n",
|
||||
"os.environ[\"SAMBASTUDIO_API_KEY\"] = sambastudio_api_key"
|
||||
@@ -247,6 +247,40 @@
|
||||
"for chunk in llm.stream(\"Why should I use open source models?\"):\n",
|
||||
" print(chunk, end=\"\", flush=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can also call a CoE endpoint expert model "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Using a CoE endpoint\n",
|
||||
"\n",
|
||||
"from langchain_community.llms.sambanova import SambaStudio\n",
|
||||
"\n",
|
||||
"llm = SambaStudio(\n",
|
||||
" streaming=False,\n",
|
||||
" model_kwargs={\n",
|
||||
" \"do_sample\": True,\n",
|
||||
" \"max_tokens_to_generate\": 1000,\n",
|
||||
" \"temperature\": 0.01,\n",
|
||||
" \"select_expert\": \"Meta-Llama-3-8B-Instruct\",\n",
|
||||
" # \"repetition_penalty\": 1.0,\n",
|
||||
" # \"top_k\": 50,\n",
|
||||
" # \"top_logprobs\": 0,\n",
|
||||
" # \"top_p\": 1.0\n",
|
||||
" },\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"print(llm.invoke(\"Why should I use open source models?\"))"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
|
||||
@@ -18,16 +18,6 @@ pip install langchain-community boto3
|
||||
|
||||
### Bedrock Chat
|
||||
|
||||
See a [usage example](/docs/integrations/chat/bedrock).
|
||||
|
||||
```python
|
||||
from langchain_aws import ChatBedrock
|
||||
```
|
||||
|
||||
## LLMs
|
||||
|
||||
### Bedrock
|
||||
|
||||
>[Amazon Bedrock](https://aws.amazon.com/bedrock/) is a fully managed service that offers a choice of
|
||||
> high-performing foundation models (FMs) from leading AI companies like `AI21 Labs`, `Anthropic`, `Cohere`,
|
||||
> `Meta`, `Stability AI`, and `Amazon` via a single API, along with a broad set of capabilities you need to
|
||||
@@ -38,6 +28,15 @@ from langchain_aws import ChatBedrock
|
||||
> serverless, you don't have to manage any infrastructure, and you can securely integrate and deploy
|
||||
> generative AI capabilities into your applications using the AWS services you are already familiar with.
|
||||
|
||||
See a [usage example](/docs/integrations/chat/bedrock).
|
||||
|
||||
```python
|
||||
from langchain_aws import ChatBedrock
|
||||
```
|
||||
|
||||
## LLMs
|
||||
|
||||
### Bedrock
|
||||
|
||||
See a [usage example](/docs/integrations/llms/bedrock).
|
||||
|
||||
|
||||
@@ -312,7 +312,7 @@ from langchain.retrievers import AzureAISearchRetriever
|
||||
### Azure Container Apps dynamic sessions
|
||||
|
||||
We need to get the `POOL_MANAGEMENT_ENDPOINT` environment variable from the Azure Container Apps service.
|
||||
See the instructions [here](/docs/integrations/tools/azure_dynamic_sessions/#setup).
|
||||
|
||||
We need to install a Python package.
|
||||
|
||||
@@ -326,6 +326,19 @@
See a [usage example](/docs/integrations/tools/azure_dynamic_sessions).

```python
from langchain_azure_dynamic_sessions import SessionsPythonREPLTool
```
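
A minimal usage sketch follows (assuming `POOL_MANAGEMENT_ENDPOINT` is already set in the environment; the code string runs inside the remote sandboxed session):

```python
import os

from langchain_azure_dynamic_sessions import SessionsPythonREPLTool

# The pool management endpoint comes from your Azure Container Apps session pool.
tool = SessionsPythonREPLTool(
    pool_management_endpoint=os.environ["POOL_MANAGEMENT_ENDPOINT"]
)

# Executes the snippet in the remote sandbox and returns its result.
print(tool.run("6 * 7"))
```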
|
||||
|
||||
### Bing Search
|
||||
|
||||
Follow the documentation [here](/docs/integrations/tools/bing_search) for detailed explanations and instructions on this tool.
|
||||
|
||||
The environment variables `BING_SUBSCRIPTION_KEY` and `BING_SEARCH_URL` are required; both come from your Bing Search resource.
|
||||
|
||||
```python
|
||||
from langchain_community.tools.bing_search import BingSearchResults
|
||||
from langchain_community.utilities import BingSearchAPIWrapper
|
||||
|
||||
api_wrapper = BingSearchAPIWrapper()
|
||||
tool = BingSearchResults(api_wrapper=api_wrapper)
|
||||
```
|
||||
|
||||
## Toolkits
|
||||
|
||||
|
||||
docs/docs/integrations/providers/ascend.mdx
@@ -0,0 +1,24 @@
|
||||
# Ascend
|
||||
|
||||
>[Ascend](https://www.hiascend.com/) is a Neural Processing Unit (NPU) provided by Huawei.
|
||||
|
||||
This page covers how to use the Ascend NPU with LangChain.
|
||||
|
||||
### Installation
|
||||
|
||||
Install `torch-npu` using:
|
||||
|
||||
```bash
|
||||
pip install torch-npu
|
||||
```
|
||||
|
||||
Please follow the installation instructions as specified below:
|
||||
* Install CANN as shown [here](https://www.hiascend.com/document/detail/zh/canncommercial/700/quickstart/quickstart/quickstart_18_0002.html).
|
||||
|
||||
### Embedding Models
|
||||
|
||||
See a [usage example](/docs/integrations/text_embedding/ascend).
|
||||
|
||||
```python
|
||||
from langchain_community.embeddings import AscendEmbeddings
|
||||
```
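
A minimal instantiation sketch, assuming locally downloaded model weights (the path and query instruction below are placeholders mirroring the usage notebook):

```python
from langchain_community.embeddings import AscendEmbeddings

model = AscendEmbeddings(
    model_path="/path/to/acge_text_embedding",  # placeholder local weights path
    device_id=0,  # NPU device index
    query_instruction="Represent this sentence for searching relevant passages: ",
)
print(model.embed_query("hello")[:3])
```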
|
||||
@@ -1,87 +1,111 @@
|
||||
Databricks
|
||||
==========
|
||||
|
||||
The [Databricks](https://www.databricks.com/) Lakehouse Platform unifies data, analytics, and AI on one platform.
|
||||
> [Databricks](https://www.databricks.com/) Intelligence Platform is the world's first data intelligence platform powered by generative AI. Infuse AI into every facet of your business.
|
||||
|
||||
Databricks embraces the LangChain ecosystem in various ways:
|
||||
|
||||
1. Databricks connector for the SQLDatabase Chain: SQLDatabase.from_databricks() provides an easy way to query your data on Databricks through LangChain
|
||||
2. Databricks MLflow integrates with LangChain: Tracking and serving LangChain applications with fewer steps
|
||||
3. Databricks as an LLM provider: Deploy your fine-tuned LLMs on Databricks via serving endpoints or cluster driver proxy apps, and query it as langchain.llms.Databricks
|
||||
4. Databricks Dolly: Databricks open-sourced Dolly which allows for commercial use, and can be accessed through the Hugging Face Hub
|
||||
1. 🚀 **Model Serving** - Access state-of-the-art LLMs, such as DBRX, Llama3, Mixtral, or your fine-tuned models on [Databricks Model Serving](https://www.databricks.com/product/model-serving), via a highly available and low-latency inference endpoint. LangChain provides LLM (`Databricks`), Chat Model (`ChatDatabricks`), and Embeddings (`DatabricksEmbeddings`) implementations, streamlining the integration of your models hosted on Databricks Model Serving with your LangChain applications.
|
||||
2. 📃 **Vector Search** - [Databricks Vector Search](https://www.databricks.com/product/machine-learning/vector-search) is a serverless vector database seamlessly integrated within the Databricks Platform. Using `DatabricksVectorSearch`, you can incorporate the highly scalable and reliable similarity search engine into your LangChain applications.
|
||||
3. 📊 **MLflow** - [MLflow](https://mlflow.org/) is an open-source platform to manage the full ML lifecycle, including experiment management, evaluation, tracing, deployment, and more. [MLflow's LangChain Integration](/docs/integrations/providers/mlflow_tracking) streamlines the process of developing and operating modern compound ML systems.
|
||||
4. 🌐 **SQL Database** - [Databricks SQL](https://www.databricks.com/product/databricks-sql) is integrated with `SQLDatabase` in LangChain, allowing you to access the auto-optimizing, exceptionally performant data warehouse.
|
||||
5. 💡 **Open Models** - Databricks open sources models, such as [DBRX](https://www.databricks.com/blog/introducing-dbrx-new-state-art-open-llm), which are available through the [Hugging Face Hub](https://huggingface.co/databricks/dbrx-instruct). These models can be directly utilized with LangChain, leveraging its integration with the `transformers` library.
|
||||
|
||||
Databricks connector for the SQLDatabase Chain
|
||||
----------------------------------------------
|
||||
You can connect to [Databricks runtimes](https://docs.databricks.com/runtime/index.html) and [Databricks SQL](https://www.databricks.com/product/databricks-sql) using the SQLDatabase wrapper of LangChain.
|
||||
Chat Model
|
||||
----------
|
||||
|
||||
`ChatDatabricks` is a Chat Model class to access chat endpoints hosted on Databricks, including state-of-the-art models such as Llama3, Mixtral, and DBRX, as well as your own fine-tuned models.
|
||||
|
||||
Databricks MLflow integrates with LangChain
|
||||
-------------------------------------------
|
||||
```
|
||||
from langchain_community.chat_models.databricks import ChatDatabricks
|
||||
|
||||
MLflow is an open-source platform to manage the ML lifecycle, including experimentation, reproducibility, deployment, and a central model registry. See the notebook [MLflow Callback Handler](/docs/integrations/providers/mlflow_tracking) for details about MLflow's integration with LangChain.
|
||||
|
||||
Databricks provides a fully managed and hosted version of MLflow integrated with enterprise security features, high availability, and other Databricks workspace features such as experiment and run management and notebook revision capture. MLflow on Databricks offers an integrated experience for tracking and securing machine learning model training runs and running machine learning projects. See [MLflow guide](https://docs.databricks.com/mlflow/index.html) for more details.
|
||||
|
||||
Databricks MLflow makes it more convenient to develop LangChain applications on Databricks. For MLflow tracking, you don't need to set the tracking uri. For MLflow Model Serving, you can save LangChain Chains in the MLflow langchain flavor, and then register and serve the Chain with a few clicks on Databricks, with credentials securely managed by MLflow Model Serving.
|
||||
|
||||
Databricks External Models
|
||||
--------------------------
|
||||
|
||||
[Databricks External Models](https://docs.databricks.com/generative-ai/external-models/index.html) is a service that is designed to streamline the usage and management of various large language model (LLM) providers, such as OpenAI and Anthropic, within an organization. It offers a high-level interface that simplifies the interaction with these services by providing a unified endpoint to handle specific LLM related requests. The following example creates an endpoint that serves OpenAI's GPT-4 model and generates a chat response from it:
|
||||
|
||||
```python
|
||||
from langchain_community.chat_models import ChatDatabricks
|
||||
from langchain_core.messages import HumanMessage
|
||||
from mlflow.deployments import get_deploy_client
|
||||
|
||||
|
||||
client = get_deploy_client("databricks")
|
||||
name = f"chat"
|
||||
client.create_endpoint(
|
||||
name=name,
|
||||
config={
|
||||
"served_entities": [
|
||||
{
|
||||
"name": "test",
|
||||
"external_model": {
|
||||
"name": "gpt-4",
|
||||
"provider": "openai",
|
||||
"task": "llm/v1/chat",
|
||||
"openai_config": {
|
||||
"openai_api_key": "{{secrets/<scope>/<key>}}",
|
||||
},
|
||||
},
|
||||
}
|
||||
],
|
||||
},
|
||||
)
|
||||
chat = ChatDatabricks(endpoint=name, temperature=0.1)
|
||||
print(chat([HumanMessage(content="hello")]))
|
||||
# -> content='Hello! How can I assist you today?'
|
||||
chat_model = ChatDatabricks(endpoint="databricks-meta-llama-3-70b-instruct")
|
||||
```
|
||||
|
||||
Databricks Foundation Model APIs
|
||||
--------------------------------
|
||||
See the [usage example](/docs/integrations/chat/databricks) for more guidance on how to use it within your LangChain application.
|
||||
|
||||
[Databricks Foundation Model APIs](https://docs.databricks.com/machine-learning/foundation-models/index.html) allow you to access and query state-of-the-art open source models from dedicated serving endpoints. With Foundation Model APIs, developers can quickly and easily build applications that leverage a high-quality generative AI model without maintaining their own model deployment. The following example uses the `databricks-bge-large-en` endpoint to generate embeddings from text:
|
||||
LLM
|
||||
---
|
||||
|
||||
`Databricks` is an LLM class to access completion endpoints hosted on Databricks.
|
||||
|
||||
```
|
||||
from langchain_community.llms.databricks import Databricks
|
||||
|
||||
llm = Databricks(endpoint="your-completion-endpoint")
|
||||
```
|
||||
|
||||
See the [usage example](/docs/integrations/llms/databricks) for more guidance on how to use it within your LangChain application.
|
||||
|
||||
|
||||
Embeddings
|
||||
----------
|
||||
|
||||
`DatabricksEmbeddings` is an Embeddings class to access text-embedding endpoints hosted on Databricks, including state-of-the-art models such as BGE, as well as your own fine-tuned models.
|
||||
|
||||
```
|
||||
from langchain_community.embeddings import DatabricksEmbeddings
|
||||
|
||||
|
||||
embeddings = DatabricksEmbeddings(endpoint="databricks-bge-large-en")
|
||||
print(embeddings.embed_query("hello")[:3])
|
||||
# -> [0.051055908203125, 0.007221221923828125, 0.003879547119140625, ...]
|
||||
```
|
||||
|
||||
Databricks as an LLM provider
|
||||
-----------------------------
|
||||
|
||||
The notebook [Wrap Databricks endpoints as LLMs](/docs/integrations/llms/databricks#wrapping-a-serving-endpoint-custom-model) demonstrates how to serve a custom model that has been registered by MLflow as a Databricks endpoint.
|
||||
It supports two types of endpoints: the serving endpoint, which is recommended for both production and development, and the cluster driver proxy app, which is recommended for interactive development.
|
||||
See the [usage example](/docs/integrations/text_embedding/databricks) for more guidance on how to use it within your LangChain application.
|
||||
|
||||
|
||||
Databricks Vector Search
|
||||
------------------------
|
||||
Vector Search
|
||||
-------------
|
||||
|
||||
Databricks Vector Search is a serverless similarity search engine that allows you to store a vector representation of your data, including metadata, in a vector database. With Vector Search, you can create auto-updating vector search indexes from Delta tables managed by Unity Catalog and query them with a simple API to return the most similar vectors. See the notebook [Databricks Vector Search](/docs/integrations/vectorstores/databricks_vector_search) for instructions to use it with LangChain.
|
||||
Databricks Vector Search is a serverless similarity search engine that allows you to store a vector representation of your data, including metadata, in a vector database. With Vector Search, you can create auto-updating vector search indexes from [Delta](https://docs.databricks.com/en/introduction/delta-comparison.html) tables managed by [Unity Catalog](https://www.databricks.com/product/unity-catalog) and query them with a simple API to return the most similar vectors.
|
||||
|
||||
```
|
||||
from langchain_community.vectorstores import DatabricksVectorSearch
|
||||
|
||||
dvs = DatabricksVectorSearch(
|
||||
index, text_column="text", embedding=embeddings, columns=["source"]
|
||||
)
|
||||
docs = dvs.similarity_search("What is vector search?)
|
||||
```
|
||||
|
||||
See the [usage example](/docs/integrations/vectorstores/databricks_vector_search) for how to set up vector indices and integrate them with LangChain.
|
||||
|
||||
|
||||
MLflow Integration
|
||||
------------------
|
||||
|
||||
In the context of LangChain integration, MLflow provides the following capabilities:
|
||||
|
||||
- **Experiment Tracking**: Tracks and stores models, artifacts, and traces from your LangChain experiments.
|
||||
- **Dependency Management**: Automatically records dependency libraries, ensuring consistency among development, staging, and production environments.
|
||||
- **Model Evaluation**: Offers native capabilities for evaluating LangChain applications.
|
||||
- **Tracing**: Visually traces data flows through your LangChain application.
|
||||
|
||||
See [MLflow LangChain Integration](/docs/integrations/providers/mlflow_tracking) to learn about the full capabilities of using MLflow with LangChain through extensive code examples and guides.
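
As a minimal sketch, MLflow's LangChain autologging (available in recent MLflow versions) captures traces from chain invocations; `chain` here is assumed to be a LangChain runnable defined elsewhere:

```python
import mlflow

# Enable automatic tracing and logging of LangChain components.
mlflow.langchain.autolog()

# Subsequent invocations are traced and recorded by MLflow.
chain.invoke("What is MLflow?")
```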
|
||||
|
||||
SQLDatabase
|
||||
-----------
|
||||
You can connect to Databricks SQL using the SQLDatabase wrapper of LangChain.
|
||||
```
|
||||
from langchain.sql_database import SQLDatabase
|
||||
|
||||
db = SQLDatabase.from_databricks(catalog="samples", schema="nyctaxi")
|
||||
```
|
||||
|
||||
See [Databricks SQL Agent](https://docs.databricks.com/en/large-language-models/langchain.html#databricks-sql-agent) for how to connect Databricks SQL with your LangChain Agent as a powerful querying tool.
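
A minimal sketch of that setup, assuming a chat endpoint is available (the endpoint name is a placeholder; `create_sql_agent` is the standard LangChain SQL agent constructor):

```python
from langchain.sql_database import SQLDatabase
from langchain_community.agent_toolkits import create_sql_agent
from langchain_community.chat_models import ChatDatabricks

db = SQLDatabase.from_databricks(catalog="samples", schema="nyctaxi")
llm = ChatDatabricks(endpoint="databricks-meta-llama-3-70b-instruct")

# The agent plans and executes SQL queries against the connected schema.
agent = create_sql_agent(llm=llm, db=db, verbose=True)
agent.invoke({"input": "What is the longest trip distance?"})
```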
|
||||
|
||||
Open Models
|
||||
-----------
|
||||
|
||||
To directly integrate Databricks's open models hosted on HuggingFace, you can use the [HuggingFace Integration](/docs/integrations/platforms/huggingface) of LangChain.
|
||||
|
||||
```
|
||||
from langchain_huggingface import HuggingFaceEndpoint
|
||||
|
||||
llm = HuggingFaceEndpoint(
|
||||
repo_id="databricks/dbrx-instruct",
|
||||
task="text-generation",
|
||||
max_new_tokens=512,
|
||||
do_sample=False,
|
||||
repetition_penalty=1.03,
|
||||
)
|
||||
llm.invoke("What is DBRX model?")
|
||||
```
|
||||
|
||||
@@ -28,6 +28,16 @@ import os
|
||||
os.environ["WATSONX_APIKEY"] = "your IBM watsonx.ai api key"
|
||||
```
|
||||
|
||||
## Chat Model
|
||||
|
||||
### ChatWatsonx
|
||||
|
||||
See a [usage example](/docs/integrations/chat/ibm_watsonx).
|
||||
|
||||
```python
|
||||
from langchain_ibm import ChatWatsonx
|
||||
```
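
A minimal instantiation sketch (the URL, project ID, and model ID below are placeholders; the API key is read from the `WATSONX_APIKEY` environment variable set above):

```python
from langchain_ibm import ChatWatsonx

chat = ChatWatsonx(
    model_id="ibm/granite-13b-chat-v2",  # placeholder model ID
    url="https://us-south.ml.cloud.ibm.com",  # your watsonx.ai region endpoint
    project_id="YOUR_PROJECT_ID",  # placeholder project ID
)
print(chat.invoke("Hello, how are you?").content)
```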
|
||||
|
||||
## LLMs
|
||||
|
||||
### WatsonxLLM
|
||||
|
||||
@@ -2,27 +2,29 @@
|
||||
|
||||
The `LangChain` integrations related to [Oracle Cloud Infrastructure](https://www.oracle.com/artificial-intelligence/).
|
||||
|
||||
## LLMs
|
||||
|
||||
## OCI Generative AI
|
||||
> Oracle Cloud Infrastructure (OCI) [Generative AI](https://docs.oracle.com/en-us/iaas/Content/generative-ai/home.htm) is a fully managed service that provides a set of state-of-the-art,
|
||||
> customizable large language models (LLMs) that cover a wide range of use cases, and which are available through a single API.
|
||||
> Using the OCI Generative AI service you can access ready-to-use pretrained models, or create and host your own fine-tuned
|
||||
> custom models based on your own data on dedicated AI clusters.
|
||||
|
||||
To use, you should have the latest `oci` Python SDK and the `langchain-community` package installed.
|
||||
|
||||
```bash
|
||||
pip install -U oci langchain-community
|
||||
```
|
||||
|
||||
See [chat](/docs/integrations/chat/oci_generative_ai), [complete](/docs/integrations/llms/oci_generative_ai), and [embedding](/docs/integrations/text_embedding/oci_generative_ai) usage examples.
|
||||
|
||||
```python
|
||||
from langchain_community.chat_models import ChatOCIGenAI
|
||||
|
||||
from langchain_community.llms import OCIGenAI
|
||||
|
||||
from langchain_community.embeddings import OCIGenAIEmbeddings
|
||||
```
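
A minimal chat-model sketch (the model ID and compartment OCID are placeholders you must replace with your own values):

```python
from langchain_community.chat_models import ChatOCIGenAI
from langchain_core.messages import HumanMessage

chat = ChatOCIGenAI(
    model_id="cohere.command-r-16k",  # placeholder model ID
    service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
    compartment_id="MY_OCID",  # placeholder compartment OCID
)
print(chat.invoke([HumanMessage(content="Hello!")]).content)
```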
|
||||
|
||||
## OCI Data Science Model Deployment Endpoint
|
||||
|
||||
> [OCI Data Science](https://docs.oracle.com/en-us/iaas/data-science/using/home.htm) is a
|
||||
> fully managed and serverless platform for data science teams. Using the OCI Data Science
|
||||
@@ -47,12 +49,3 @@ from langchain_community.llms import OCIModelDeploymentVLLM
|
||||
from langchain_community.llms import OCIModelDeploymentTGI
|
||||
```
|
||||
|
||||
## Text Embedding Models
|
||||
|
||||
### OCI Generative AI
|
||||
|
||||
See [usage examples](/docs/integrations/text_embedding/oci_generative_ai).
|
||||
|
||||
```python
|
||||
from langchain_community.embeddings import OCIGenAIEmbeddings
|
||||
```
|
||||
docs/docs/integrations/providers/pebblo/index.md
@@ -0,0 +1,21 @@
|
||||
# Pebblo
|
||||
|
||||
[Pebblo](https://www.daxa.ai/pebblo) enables developers to safely load and retrieve data so they can promote their Gen AI app to deployment without worrying about the organization’s compliance and security requirements. The Pebblo SafeLoader identifies semantic topics and entities found in the loaded data, and the Pebblo SafeRetriever enforces identity and semantic controls on the retrieved context. The results are summarized in the UI or a PDF report.
|
||||
|
||||
|
||||
## Pebblo Overview
|
||||
|
||||
`Pebblo` provides a safe way to load and retrieve data for Gen AI applications.
|
||||
It includes:
|
||||
1. **Identity-aware Safe Loader** that loads data and identifies semantic topics and entities.
|
||||
2. **SafeRetrieval** that enforces identity and semantic controls on the retrieved context.
|
||||
3. **User Data Report** that summarizes the data loaded and retrieved.
|
||||
|
||||
## Example Notebooks
|
||||
|
||||
For more detailed examples of using Pebblo, see the following notebooks:
|
||||
* [PebbloSafeLoader](/docs/integrations/document_loaders/pebblo) shows how to use the Pebblo loader to safely load data.
* [PebbloRetrievalQA](/docs/integrations/providers/pebblo/pebblo_retrieval_qa) shows how to use the Pebblo retrieval QA chain to safely retrieve data.
|
||||
@@ -0,0 +1,584 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3ce451e9-f8f1-4f27-8c6b-4a93a406d504",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Identity-enabled RAG using PebbloRetrievalQA\n",
|
||||
"\n",
|
||||
"> PebbloRetrievalQA is a Retrieval chain with Identity & Semantic Enforcement for question-answering\n",
|
||||
"against a vector database.\n",
|
||||
"\n",
|
||||
"This notebook covers how to retrieve documents using Identity & Semantic Enforcement (Deny Topics/Entities).\n",
|
||||
"For more details on Pebblo and its SafeRetriever feature visit [Pebblo documentation](https://daxa-ai.github.io/pebblo/retrieval_chain/)\n",
|
||||
"\n",
|
||||
"### Steps:\n",
|
||||
"\n",
|
||||
"1. **Loading Documents:**\n",
|
||||
"We will load documents with authorization and semantic metadata into an in-memory Qdrant vectorstore. This vectorstore will be used as a retriever in PebbloRetrievalQA. \n",
|
||||
"\n",
|
||||
"> **Note:** It is recommended to use [PebbloSafeLoader](https://daxa-ai.github.io/pebblo/rag) as the counterpart for loading documents with authentication and semantic metadata on the ingestion side. `PebbloSafeLoader` guarantees the secure and efficient loading of documents while maintaining the integrity of the metadata.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"2. **Testing Enforcement Mechanisms**:\n",
|
||||
" We will test Identity and Semantic Enforcement separately. For each use case, we will define a specific \"ask\" function with the required contexts (*auth_context* and *semantic_context*) and then pose our questions.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4ee16b6b-5dac-4b5c-bb69-3ec87398a33c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"### Dependencies\n",
|
||||
"\n",
|
||||
"We'll use an OpenAI LLM, OpenAI embeddings and a Qdrant vector store in this walkthrough.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "e68494fa-f387-4481-9a6c-58294865d7b7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet langchain langchain_core langchain-community langchain-openai qdrant_client"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "61498d51-0c38-40e2-adcd-19dfdf4d37ef",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Identity-aware Data Ingestion\n",
|
||||
"\n",
|
||||
"Here we are using Qdrant as a vector database; however, you can use any of the supported vector databases.\n",
|
||||
"\n",
|
||||
"**PebbloRetrievalQA chain supports the following vector databases:**\n",
|
||||
"- Qdrant\n",
|
||||
"- Pinecone\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"**Load vector database with authorization and semantic information in metadata:**\n",
|
||||
"\n",
|
||||
"In this step, we capture the authorization and semantic information of the source document into the `authorized_identities`, `pebblo_semantic_topics`, and `pebblo_semantic_entities` fields within the metadata of the VectorDB entry for each chunk. \n",
|
||||
"\n",
|
||||
"\n",
|
||||
"*NOTE: To use the PebbloRetrievalQA chain, you must always place authorization and semantic metadata in the specified fields. These fields must contain a list of strings.*"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "ae4fcbc1-bdc3-40d2-b2df-8c82cad1f89c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Vectordb loaded.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.vectorstores.qdrant import Qdrant\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"from langchain_openai.embeddings import OpenAIEmbeddings\n",
|
||||
"from langchain_openai.llms import OpenAI\n",
|
||||
"\n",
|
||||
"llm = OpenAI()\n",
|
||||
"embeddings = OpenAIEmbeddings()\n",
|
||||
"collection_name = \"pebblo-identity-and-semantic-rag\"\n",
|
||||
"\n",
|
||||
"page_content = \"\"\"\n",
|
||||
"**ACME Corp Financial Report**\n",
|
||||
"\n",
|
||||
"**Overview:**\n",
|
||||
"ACME Corp, a leading player in the merger and acquisition industry, presents its financial report for the fiscal year ending December 31, 2020. \n",
|
||||
"Despite a challenging economic landscape, ACME Corp demonstrated robust performance and strategic growth.\n",
|
||||
"\n",
|
||||
"**Financial Highlights:**\n",
|
||||
"Revenue soared to $50 million, marking a 15% increase from the previous year, driven by successful deal closures and expansion into new markets. \n",
|
||||
"Net profit reached $12 million, showcasing a healthy margin of 24%.\n",
|
||||
"\n",
|
||||
"**Key Metrics:**\n",
|
||||
"Total assets surged to $80 million, reflecting a 20% growth, highlighting ACME Corp's strong financial position and asset base. \n",
|
||||
"Additionally, the company maintained a conservative debt-to-equity ratio of 0.5, ensuring sustainable financial stability.\n",
|
||||
"\n",
|
||||
"**Future Outlook:**\n",
|
||||
"ACME Corp remains optimistic about the future, with plans to capitalize on emerging opportunities in the global M&A landscape. \n",
|
||||
"The company is committed to delivering value to shareholders while maintaining ethical business practices.\n",
|
||||
"\n",
|
||||
"**Bank Account Details:**\n",
|
||||
"For inquiries or transactions, please refer to ACME Corp's US bank account:\n",
|
||||
"Account Number: 123456789012\n",
|
||||
"Bank Name: Fictitious Bank of America\n",
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
"documents = [\n",
|
||||
" Document(\n",
|
||||
" **{\n",
|
||||
" \"page_content\": page_content,\n",
|
||||
" \"metadata\": {\n",
|
||||
" \"pebblo_semantic_topics\": [\"financial-report\"],\n",
|
||||
" \"pebblo_semantic_entities\": [\"us-bank-account-number\"],\n",
|
||||
" \"authorized_identities\": [\"finance-team\", \"exec-leadership\"],\n",
|
||||
" \"page\": 0,\n",
|
||||
" \"source\": \"https://drive.google.com/file/d/xxxxxxxxxxxxx/view\",\n",
|
||||
" \"title\": \"ACME Corp Financial Report.pdf\",\n",
|
||||
" },\n",
|
||||
" }\n",
|
||||
" )\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"vectordb = Qdrant.from_documents(\n",
|
||||
" documents,\n",
|
||||
" embeddings,\n",
|
||||
" location=\":memory:\",\n",
|
||||
" collection_name=collection_name,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"print(\"Vectordb loaded.\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f630bb8b-67ba-41f9-8715-76d006207e75",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Retrieval with Identity Enforcement\n",
|
||||
"\n",
|
||||
"PebbloRetrievalQA chain uses a SafeRetrieval to enforce that the snippets used for in-context are retrieved only from the documents authorized for the user. \n",
|
||||
"To achieve this, the Gen-AI application needs to provide an authorization context for this retrieval chain. \n",
|
||||
"This *auth_context* should be filled with the identity and authorization groups of the user accessing the Gen-AI app.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Here is the sample code for the `PebbloRetrievalQA` with `user_auth`(List of user authorizations, which may include their User ID and \n",
|
||||
" the groups they are part of) from the user accessing the RAG application, passed in `auth_context`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "e978bee6-3a8c-459f-ab82-d380d7499b36",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.chains import PebbloRetrievalQA\n",
|
||||
"from langchain_community.chains.pebblo_retrieval.models import AuthContext, ChainInput\n",
|
||||
"\n",
|
||||
"# Initialize PebbloRetrievalQA chain\n",
|
||||
"qa_chain = PebbloRetrievalQA.from_chain_type(\n",
|
||||
" llm=llm,\n",
|
||||
" retriever=vectordb.as_retriever(),\n",
|
||||
" app_name=\"pebblo-identity-rag\",\n",
|
||||
" description=\"Identity Enforcement app using PebbloRetrievalQA\",\n",
|
||||
" owner=\"ACME Corp\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def ask(question: str, auth_context: dict):\n",
|
||||
" \"\"\"\n",
|
||||
" Ask a question to the PebbloRetrievalQA chain\n",
|
||||
" \"\"\"\n",
|
||||
" auth_context_obj = AuthContext(**auth_context) if auth_context else None\n",
|
||||
" chain_input_obj = ChainInput(query=question, auth_context=auth_context_obj)\n",
|
||||
" return qa_chain.invoke(chain_input_obj.dict())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7a267e96-70cb-468f-b830-83b65e9b7f6f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### 1. Questions by Authorized User\n",
|
||||
"\n",
|
||||
"We ingested data for authorized identities `[\"finance-team\", \"exec-leadership\"]`, so a user with the authorized identity/group `finance-team` should receive the correct answer."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"id": "2688fc18-1eac-45a5-be55-aabbe6b25af5",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Question: Share the financial performance of ACME Corp for the year 2020\n",
|
||||
"\n",
|
||||
"Answer: \n",
|
||||
"Revenue: $50 million (15% increase from previous year)\n",
|
||||
"Net profit: $12 million (24% margin)\n",
|
||||
"Total assets: $80 million (20% growth)\n",
|
||||
"Debt-to-equity ratio: 0.5\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"auth = {\n",
|
||||
" \"user_id\": \"finance-user@acme.org\",\n",
|
||||
" \"user_auth\": [\n",
|
||||
" \"finance-team\",\n",
|
||||
" ],\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"question = \"Share the financial performance of ACME Corp for the year 2020\"\n",
|
||||
"resp = ask(question, auth)\n",
|
||||
"print(f\"Question: {question}\\n\\nAnswer: {resp['result']}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b4db6566-6562-4a49-b19c-6d99299b374e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### 2. Questions by Unauthorized User\n",
|
||||
"\n",
|
||||
"Since the user's authorized identity/group `eng-support` is not included in the authorized identities `[\"finance-team\", \"exec-leadership\"]`, we should not receive an answer."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"id": "2d736ce3-6e05-48d3-a5e1-fb4e7cccc1ee",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Question: Share the financial performance of ACME Corp for the year 2020\n",
|
||||
"\n",
|
||||
"Answer: I don't know.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"auth = {\n",
|
||||
" \"user_id\": \"eng-user@acme.org\",\n",
|
||||
" \"user_auth\": [\n",
|
||||
" \"eng-support\",\n",
|
||||
" ],\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"question = \"Share the financial performance of ACME Corp for the year 2020\"\n",
|
||||
"resp = ask(question, auth)\n",
|
||||
"print(f\"Question: {question}\\n\\nAnswer: {resp['result']}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "33a8afe1-3071-4118-9714-a17cba809ee4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### 3. Using PromptTemplate to provide additional instructions\n",
|
||||
"You can use PromptTemplate to provide additional instructions to the LLM for generating a custom response."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"id": "59c055ba-fdd1-48c6-9bc9-2793eb47438d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.prompts import PromptTemplate\n",
|
||||
"\n",
|
||||
"prompt_template = PromptTemplate.from_template(\n",
|
||||
" \"\"\"\n",
|
||||
"Answer the question using the provided context. \n",
|
||||
"If no context is provided, just say \"I'm sorry, but that information is unavailable, or Access to it is restricted.\".\n",
|
||||
"\n",
|
||||
"Question: {question}\n",
|
||||
"\"\"\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"question = \"Share the financial performance of ACME Corp for the year 2020\"\n",
|
||||
"prompt = prompt_template.format(question=question)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c4d27c00-73d9-4ce8-bc70-29535deaf0e2",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### 3.1 Questions by Authorized User"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 21,
|
||||
"id": "e68a13a4-b735-421d-9655-2a9a087ba9e5",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Question: Share the financial performance of ACME Corp for the year 2020\n",
|
||||
"\n",
|
||||
"Answer: \n",
|
||||
"Revenue soared to $50 million, marking a 15% increase from the previous year, and net profit reached $12 million, showcasing a healthy margin of 24%. Total assets also grew by 20% to $80 million, and the company maintained a conservative debt-to-equity ratio of 0.5.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"auth = {\n",
|
||||
" \"user_id\": \"finance-user@acme.org\",\n",
|
||||
" \"user_auth\": [\n",
|
||||
" \"finance-team\",\n",
|
||||
" ],\n",
|
||||
"}\n",
|
||||
"resp = ask(prompt, auth)\n",
|
||||
"print(f\"Question: {question}\\n\\nAnswer: {resp['result']}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7b97a9ca-bdc6-400a-923d-65a8536658be",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### 3.2 Questions by Unauthorized Users"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"id": "438e48c6-96a2-4d5e-81db-47f8c8f37739",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Question: Share the financial performance of ACME Corp for the year 2020\n",
|
||||
"\n",
|
||||
"Answer: \n",
|
||||
"I'm sorry, but that information is unavailable, or Access to it is restricted.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"auth = {\n",
|
||||
" \"user_id\": \"eng-user@acme.org\",\n",
|
||||
" \"user_auth\": [\n",
|
||||
" \"eng-support\",\n",
|
||||
" ],\n",
|
||||
"}\n",
|
||||
"resp = ask(prompt, auth)\n",
|
||||
"print(f\"Question: {question}\\n\\nAnswer: {resp['result']}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4306cab3-d070-405f-a23b-5c6011a61c50",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Retrieval with Semantic Enforcement"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1c3757cf-832f-483e-aafe-cb09b5130ec0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The PebbloRetrievalQA chain uses SafeRetrieval to ensure that the snippets used in context are retrieved only from documents that comply with the\n",
|
||||
"provided semantic context.\n",
|
||||
"To achieve this, the Gen-AI application must provide a semantic context for this retrieval chain.\n",
|
||||
"This `semantic_context` should include the topics and entities that should be denied for the user accessing the Gen-AI app.\n",
|
||||
"\n",
|
||||
"Below is a sample code for PebbloRetrievalQA with `topics_to_deny` and `entities_to_deny`. These are passed in `semantic_context` to the chain input."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 28,
|
||||
"id": "daf37bf7-9a16-4102-8893-5b698cae1b07",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import List, Optional\n",
|
||||
"\n",
|
||||
"from langchain_community.chains import PebbloRetrievalQA\n",
|
||||
"from langchain_community.chains.pebblo_retrieval.models import (\n",
|
||||
" ChainInput,\n",
|
||||
" SemanticContext,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Initialize PebbloRetrievalQA chain\n",
|
||||
"qa_chain = PebbloRetrievalQA.from_chain_type(\n",
|
||||
" llm=llm,\n",
|
||||
" retriever=vectordb.as_retriever(),\n",
|
||||
" app_name=\"pebblo-semantic-rag\",\n",
|
||||
" description=\"Semantic Enforcement app using PebbloRetrievalQA\",\n",
|
||||
" owner=\"ACME Corp\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def ask(\n",
|
||||
" question: str,\n",
|
||||
" topics_to_deny: Optional[List[str]] = None,\n",
|
||||
" entities_to_deny: Optional[List[str]] = None,\n",
|
||||
"):\n",
|
||||
" \"\"\"\n",
|
||||
" Ask a question to the PebbloRetrievalQA chain\n",
|
||||
" \"\"\"\n",
|
||||
" semantic_context = dict()\n",
|
||||
" if topics_to_deny:\n",
|
||||
" semantic_context[\"pebblo_semantic_topics\"] = {\"deny\": topics_to_deny}\n",
|
||||
" if entities_to_deny:\n",
|
||||
" semantic_context[\"pebblo_semantic_entities\"] = {\"deny\": entities_to_deny}\n",
|
||||
"\n",
|
||||
" semantic_context_obj = (\n",
|
||||
" SemanticContext(**semantic_context) if semantic_context else None\n",
|
||||
" )\n",
|
||||
" chain_input_obj = ChainInput(query=question, semantic_context=semantic_context_obj)\n",
|
||||
" return qa_chain.invoke(chain_input_obj.dict())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9718819b-f5cd-4212-9947-d18cd507c8b7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### 1. Without semantic enforcement\n",
|
||||
"\n",
|
||||
"Since no semantic enforcement is applied, the system should return the answer without excluding any context due to the semantic labels associated with the context.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 29,
|
||||
"id": "69158be1-f223-4d14-b61f-f4afdf5af526",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Topics to deny: []\n",
|
||||
"Entities to deny: []\n",
|
||||
"Question: Share the financial performance of ACME Corp for the year 2020\n",
|
||||
"Answer: \n",
|
||||
"Revenue for ACME Corp increased by 15% to $50 million in 2020, with a net profit of $12 million and a strong asset base of $80 million. The company also maintained a conservative debt-to-equity ratio of 0.5.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"topic_to_deny = []\n",
|
||||
"entities_to_deny = []\n",
|
||||
"question = \"Share the financial performance of ACME Corp for the year 2020\"\n",
|
||||
"resp = ask(question, topics_to_deny=topic_to_deny, entities_to_deny=entities_to_deny)\n",
|
||||
"print(\n",
|
||||
" f\"Topics to deny: {topic_to_deny}\\nEntities to deny: {entities_to_deny}\\n\"\n",
|
||||
" f\"Question: {question}\\nAnswer: {resp['result']}\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c8789c58-0d64-404e-bc09-92f6952022ac",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### 2. Deny financial-report topic\n",
|
||||
"\n",
|
||||
"Data has been ingested with the topics: `[\"financial-report\"]`.\n",
|
||||
"Therefore, an app that denies the `financial-report` topic should not receive an answer."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 30,
|
||||
"id": "9b17b2fc-eefb-4229-a41e-2f943d2eb48e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Topics to deny: ['financial-report']\n",
|
||||
"Entities to deny: []\n",
|
||||
"Question: Share the financial performance of ACME Corp for the year 2020\n",
|
||||
"Answer: \n",
|
||||
"\n",
|
||||
"Unfortunately, I do not have access to the financial performance of ACME Corp for the year 2020.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"topic_to_deny = [\"financial-report\"]\n",
|
||||
"entities_to_deny = []\n",
|
||||
"question = \"Share the financial performance of ACME Corp for the year 2020\"\n",
|
||||
"resp = ask(question, topics_to_deny=topic_to_deny, entities_to_deny=entities_to_deny)\n",
|
||||
"print(\n",
|
||||
" f\"Topics to deny: {topic_to_deny}\\nEntities to deny: {entities_to_deny}\\n\"\n",
|
||||
" f\"Question: {question}\\nAnswer: {resp['result']}\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "894f21b0-2913-4ef6-b5ed-cbca8f74214d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### 3. Deny us-bank-account-number entity\n",
|
||||
"Since the entity `us-bank-account-number` is denied, the system should not return the answer."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 31,
|
||||
"id": "2b8abce3-7af3-437f-8999-2866a4b9beda",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Topics to deny: []\n",
|
||||
"Entities to deny: ['us-bank-account-number']\n",
|
||||
"Question: Share the financial performance of ACME Corp for the year 2020\n",
|
||||
"Answer: I don't have information about ACME Corp's financial performance for 2020.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"topic_to_deny = []\n",
|
||||
"entities_to_deny = [\"us-bank-account-number\"]\n",
|
||||
"question = \"Share the financial performance of ACME Corp for the year 2020\"\n",
|
||||
"resp = ask(question, topics_to_deny=topic_to_deny, entities_to_deny=entities_to_deny)\n",
|
||||
"print(\n",
|
||||
" f\"Topics to deny: {topic_to_deny}\\nEntities to deny: {entities_to_deny}\\n\"\n",
|
||||
" f\"Question: {question}\\nAnswer: {resp['result']}\"\n",
|
||||
")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.18"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
docs/docs/integrations/text_embedding/ascend.ipynb
@@ -0,0 +1,183 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "a636f6f3-00d7-4248-8c36-3da51190e882",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[-0.04053403 -0.05560051 -0.04385472 ... 0.09371872 0.02846981\n",
|
||||
" -0.00576814]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.embeddings import AscendEmbeddings\n",
|
||||
"\n",
|
||||
"model = AscendEmbeddings(\n",
|
||||
" model_path=\"/root/.cache/modelscope/hub/yangjhchs/acge_text_embedding\",\n",
|
||||
" device_id=0,\n",
|
||||
" query_instruction=\"Represend this sentence for searching relevant passages: \",\n",
|
||||
")\n",
|
||||
"emb = model.embed_query(\"hellow\")\n",
|
||||
"print(emb)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "8d29ddaa-eef3-4a4e-93d8-0f1c13525fb4",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"We strongly recommend passing in an `attention_mask` since your input_ids may be padded. See https://huggingface.co/docs/transformers/troubleshooting#incorrect-output-when-padding-tokens-arent-masked.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[[-0.00348254 0.03098977 -0.00203087 ... 0.08492374 0.03970494\n",
|
||||
" -0.03372753]\n",
|
||||
" [-0.02198593 -0.01601127 0.00215684 ... 0.06065163 0.00126425\n",
|
||||
" -0.03634358]]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"doc_embs = model.embed_documents(\n",
|
||||
" [\"This is a content of the document\", \"This is another document\"]\n",
|
||||
")\n",
|
||||
"print(doc_embs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "797a720d-c478-4254-be2c-975bc4529f57",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"<coroutine object Embeddings.aembed_query at 0x7f9fac699cb0>"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"model.aembed_query(\"hellow\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "57e62e53-4d2c-4532-9b77-a46bc3da1130",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"array([-0.04053403, -0.05560051, -0.04385472, ..., 0.09371872,\n",
|
||||
" 0.02846981, -0.00576814], dtype=float32)"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"await model.aembed_query(\"hellow\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "7e260457-8b50-4ca3-8f76-8a76d8bba8c8",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"<coroutine object Embeddings.aembed_documents at 0x7fa093ff1a80>"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"model.aembed_documents(\n",
|
||||
" [\"This is a content of the document\", \"This is another document\"]\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "ce954b94-aaac-4d2c-80be-b2988c16af6d",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"array([[-0.00348254, 0.03098977, -0.00203087, ..., 0.08492374,\n",
|
||||
" 0.03970494, -0.03372753],\n",
|
||||
" [-0.02198593, -0.01601127, 0.00215684, ..., 0.06065163,\n",
|
||||
" 0.00126425, -0.03634358]], dtype=float32)"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"await model.aembed_documents(\n",
|
||||
" [\"This is a content of the document\", \"This is another document\"]\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7823d69d-de79-4f95-90dd-38f4bdeb9bcc",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.14"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -20,6 +20,16 @@
|
||||
"Let's load the Azure OpenAI Embedding class with environment variables set to indicate to use Azure endpoints."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "228faf0c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet langchain_openai"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
@@ -180,9 +190,9 @@
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "poetry-venv",
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "poetry-venv"
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
@@ -194,12 +204,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "7377c2ccc78bc62c2683122d48c8cd1fb85a53850a1b1fc29736ed39852c9885"
|
||||
}
|
||||
"version": "3.10.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
docs/docs/integrations/text_embedding/databricks.ipynb
@@ -0,0 +1,199 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Databricks\n",
|
||||
"\n",
|
||||
"> [Databricks](https://www.databricks.com/) Lakehouse Platform unifies data, analytics, and AI on one platform.\n",
|
||||
"\n",
|
||||
"This notebook provides a quick overview for getting started with Databricks [embedding models](/docs/concepts/#embedding-models). For detailed documentation of all DatabricksEmbeddings features and configurations head to the [API reference](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.databricks.DatabricksEmbeddings.html).\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"\n",
|
||||
"`DatabricksEmbeddings` class wraps an embedding model endpoint hosted on [Databricks Model Serving](https://docs.databricks.com/en/machine-learning/model-serving/index.html). This example notebook shows how to wrap your serving endpoint and use it as a embedding model in your LangChain application.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"### Supported Methods\n",
|
||||
"\n",
|
||||
"`DatabricksEmbeddings` supports all methods of `Embeddings` class including async APIs.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"### Endpoint Requirement\n",
|
||||
"\n",
|
||||
"The serving endpoint `DatabricksEmbeddings` wraps must have OpenAI-compatible embedding input/output format ([reference](https://mlflow.org/docs/latest/llms/deployments/index.html#embeddings)). As long as the input format is compatible, `DatabricksEmbeddings` can be used for any endpoint type hosted on [Databricks Model Serving](https://docs.databricks.com/en/machine-learning/model-serving/index.html):\n",
|
||||
"\n",
|
||||
"1. Foundation Models - Curated list of state-of-the-art foundation models such as BAAI General Embedding (BGE). These endpoint are ready to use in your Databricks workspace without any set up.\n",
|
||||
"2. Custom Models - You can also deploy custom embedding models to a serving endpoint via MLflow with\n",
|
||||
"your choice of framework such as LangChain, Pytorch, Transformers, etc.\n",
|
||||
"3. External Models - Databricks endpoints can serve models that are hosted outside Databricks as a proxy, such as proprietary model service like OpenAI text-embedding-3.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To access Databricks models you'll need to create a Databricks account, set up credentials (only if you are outside Databricks workspace), and install required packages.\n",
|
||||
"\n",
|
||||
"### Credentials (only if you are outside Databricks)\n",
|
||||
"\n",
|
||||
"If you are running LangChain app inside Databricks, you can skip this step.\n",
|
||||
"\n",
|
||||
"Otherwise, you need manually set the Databricks workspace hostname and personal access token to `DATABRICKS_HOST` and `DATABRICKS_TOKEN` environment variables, respectively. See [Authentication Documentation](https://docs.databricks.com/en/dev-tools/auth/index.html#databricks-personal-access-tokens) for how to get an access token."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"DATABRICKS_HOST\"] = \"https://your-workspace.cloud.databricks.com\"\n",
|
||||
"os.environ[\"DATABRICKS_TOKEN\"] = getpass.getpass(\"Enter your Databricks access token: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain Databricks integration lives in the `langchain-community` package. Also, `mlflow >= 2.9 ` is required to run the code in this notebook."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-community mlflow>=2.9.0"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We first demonstrates how to query BGE model hosted as Foundation Models endpoint with `DatabricksEmbeddings`.\n",
|
||||
"\n",
|
||||
"For other type of endpoints, there are some difference in how to set up the endpoint itself, however, once the endpoint is ready, there is no difference in how to query it."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.embeddings import DatabricksEmbeddings\n",
|
||||
"\n",
|
||||
"embeddings = DatabricksEmbeddings(\n",
|
||||
" endpoint=\"databricks-bge-large-en\",\n",
|
||||
" # Specify parameters for embedding queries and documents if needed\n",
|
||||
" # query_params={...},\n",
|
||||
" # document_params={...},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Embed single text"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[0.051055908203125, 0.007221221923828125, 0.003879547119140625]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"embeddings.embed_query(\"hello\")[:3]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Embed documents"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"documents = [\"This is a dummy document.\", \"This is another dummy document.\"]\n",
|
||||
"response = embeddings.embed_documents(documents)\n",
|
||||
"print([e[:3] for e in response]) # Show first 3 elements of each embedding"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Wrapping Other Types of Endpoints\n",
|
||||
"\n",
|
||||
"The example above uses an embedding model hosted as a Foundation Models API. To learn about how to use the other endpoint types, please refer to the documentation for `ChatDatabricks`. While the model type is different, required steps are the same.\n",
|
||||
"\n",
|
||||
"* [Custom Model Endpoint](https://python.langchain.com/v0.2/docs/integrations/chat/databricks/#wrapping-custom-model-endpoint)\n",
|
||||
"* [External Models](https://python.langchain.com/v0.2/docs/integrations/chat/databricks/#wrapping-external-models)"
|
||||
]
|
||||
},
|
||||
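{
"cell_type": "markdown",
"metadata": {},
"source": [
"Once such an endpoint is serving, wrapping it looks the same as the Foundation Models example above (a sketch; the endpoint name is a placeholder for your own serving endpoint):\n",
"\n",
"```python\n",
"custom_embeddings = DatabricksEmbeddings(endpoint=\"your-custom-endpoint-name\")\n",
"```\n"
]
},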
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all ChatDatabricks features and configurations head to the API reference: https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.databricks.DatabricksEmbeddings.html"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.12"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
@@ -5,9 +5,15 @@
|
||||
"id": "1c0cf975",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Jina\n",
|
||||
"\n",
|
||||
"Let's load the Jina Embedding class."
|
||||
"# Jina"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "da922b13-eaa8-4cdc-98dd-cf8f3d2e6ffa",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Install requirements"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -20,6 +26,14 @@
|
||||
"pip install -U langchain-community"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7911b286-130d-4971-b77c-7c7a077115b6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Import libraries"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
@@ -34,6 +48,14 @@
|
||||
"from PIL import Image"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "59aa1c02-1216-43eb-8473-8e0468f0ddb6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Embed text and queries with Jina embedding models through JinaAI API"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
@@ -43,9 +65,7 @@
|
||||
"source": [
|
||||
"text_embeddings = JinaEmbeddings(\n",
|
||||
" jina_api_key=\"jina_*\", model_name=\"jina-embeddings-v2-base-en\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"image_embeddings = JinaEmbeddings(jina_api_key=\"jina_*\", model_name=\"jina-clip-v1\")"
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -55,15 +75,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"text = \"This is a test document.\"\n",
|
||||
"\n",
|
||||
"image = \"https://avatars.githubusercontent.com/u/126733545?v=4\"\n",
|
||||
"\n",
|
||||
"description = \"Logo of a parrot and a chain on green background\"\n",
|
||||
"\n",
|
||||
"im = Image.open(requests.get(image, stream=True).raw)\n",
|
||||
"print(\"Image:\")\n",
|
||||
"display(im)"
|
||||
"text = \"This is a test document.\""
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -106,6 +118,40 @@
|
||||
"print(doc_result)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "338ea747-040e-4ed4-8ddf-9b2285987885",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Embed images and queries with Jina CLIP through JinaAI API"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "575b5867-59fb-4fd1-911b-afee2eaca088",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"multimodal_embeddings = JinaEmbeddings(jina_api_key=\"jina_*\", model_name=\"jina-clip-v1\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a9b335f5-fa99-4931-95f6-7b187c0e2f30",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"image = \"https://avatars.githubusercontent.com/u/126733545?v=4\"\n",
|
||||
"\n",
|
||||
"description = \"Logo of a parrot and a chain on green background\"\n",
|
||||
"\n",
|
||||
"im = Image.open(requests.get(image, stream=True).raw)\n",
|
||||
"print(\"Image:\")\n",
|
||||
"display(im)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
@@ -113,7 +159,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"image_result = image_embeddings.embed_images([image])"
|
||||
"image_result = multimodal_embeddings.embed_images([image])"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -133,7 +179,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"description_result = image_embeddings.embed_documents([description])"
|
||||
"description_result = multimodal_embeddings.embed_documents([description])"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -167,14 +213,6 @@
|
||||
"source": [
|
||||
"print(cosine_similarity)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7f280807-a02b-4d4e-8ebd-01be33117999",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
@@ -193,7 +231,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.2"
|
||||
"version": "3.9.13"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -7,12 +7,14 @@
|
||||
"source": [
|
||||
"# OVHcloud\n",
|
||||
"\n",
|
||||
"> In order to use this model you need to create a new token on the AI Endpoints website: https://endpoints.ai.cloud.ovh.net/.\n",
|
||||
"\n",
|
||||
"This notebook explains how to use OVHCloudEmbeddings, which is included in the langchain_community package, to embed texts in langchain."
|
||||
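"\n",
"Once created, the token is passed to `OVHCloudEmbeddings` at instantiation (a minimal sketch; the model name is an illustrative placeholder, and the parameter names are assumptions based on the integration's constructor):\n",
"\n",
"```python\n",
"from langchain_community.embeddings.ovhcloud import OVHCloudEmbeddings\n",
"\n",
"embedder = OVHCloudEmbeddings(\n",
"    model_name=\"multilingual-e5-base\",\n",
"    region=\"kepler\",\n",
"    access_token=\"<your AI Endpoints token>\",\n",
")\n",
"```"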
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": 4,
|
||||
"id": "3da0fce0",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -35,6 +37,20 @@
|
||||
"\"\"\" verify \"\"\"\n",
|
||||
"print(f\"Embedding generated by OVHCloudEmbeddings: {embed}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "47c9af05-4d25-40f2-9305-7bccf1e14c64",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Further reading\n",
|
||||
"- [Enhance your applications with AI Endpoints](https://blog.ovhcloud.com/enhance-your-applications-with-ai-endpoints/)\n",
|
||||
"- [How to use AI Endpoints and LangChain4j](https://blog.ovhcloud.com/how-to-use-ai-endpoints-and-langchain4j/)\n",
|
||||
"- [LLMs streaming with AI Endpoints and LangChain4j](https://blog.ovhcloud.com/llms-streaming-with-ai-endpoints-and-langchain4j/)\n",
|
||||
"- [How to use AI Endpoints and LangChain to create a chatbot](https://blog.ovhcloud.com/how-to-use-ai-endpoints-and-langchain-to-create-a-chatbot/)\n",
|
||||
"- [How to use AI Endpoints, LangChain and Javascript to create a chatbot](https://blog.ovhcloud.com/how-to-use-ai-endpoints-langchain-and-javascript-to-create-a-chatbot/)\n",
|
||||
"- [RAG chatbot using AI Endpoints and LangChain](https://blog.ovhcloud.com/rag-chatbot-using-ai-endpoints-and-langchain/)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
|
||||
@@ -43,12 +43,14 @@
|
||||
"import os\n",
|
||||
"\n",
|
||||
"sambastudio_base_url = \"<Your SambaStudio environment URL>\"\n",
|
||||
"sambastudio_base_uri = \"<Your SambaStudio environment URI>\"\n",
|
||||
"sambastudio_project_id = \"<Your SambaStudio project id>\"\n",
|
||||
"sambastudio_endpoint_id = \"<Your SambaStudio endpoint id>\"\n",
|
||||
"sambastudio_api_key = \"<Your SambaStudio endpoint API key>\"\n",
|
||||
"\n",
|
||||
"# Set the environment variables\n",
|
||||
"os.environ[\"SAMBASTUDIO_EMBEDDINGS_BASE_URL\"] = sambastudio_base_url\n",
|
||||
"os.environ[\"SAMBASTUDIO_EMBEDDINGS_BASE_URI\"] = sambastudio_base_uri\n",
|
||||
"os.environ[\"SAMBASTUDIO_EMBEDDINGS_PROJECT_ID\"] = sambastudio_project_id\n",
|
||||
"os.environ[\"SAMBASTUDIO_EMBEDDINGS_ENDPOINT_ID\"] = sambastudio_endpoint_id\n",
|
||||
"os.environ[\"SAMBASTUDIO_EMBEDDINGS_API_KEY\"] = sambastudio_api_key"
|
||||
@@ -79,6 +81,50 @@
|
||||
"results = embeddings.embed_documents(texts)\n",
|
||||
"print(results)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can manually pass the endpoint parameters and manually set the batch size you have in your SambaStudio embeddings endpoint"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"embeddings = SambaStudioEmbeddings(\n",
|
||||
" sambastudio_embeddings_base_url=sambastudio_base_url,\n",
|
||||
" sambastudio_embeddings_base_uri=sambastudio_base_uri,\n",
|
||||
" sambastudio_embeddings_project_id=sambastudio_project_id,\n",
|
||||
" sambastudio_embeddings_endpoint_id=sambastudio_endpoint_id,\n",
|
||||
" sambastudio_embeddings_api_key=sambastudio_api_key,\n",
|
||||
" batch_size=32,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Or You can use an embedding model expert included in your deployed CoE"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"embeddings = SambaStudioEmbeddings(\n",
|
||||
" batch_size=1,\n",
|
||||
" model_kwargs={\n",
|
||||
" \"select_expert\": \"e5-mistral-7b-instruct\",\n",
|
||||
" },\n",
|
||||
")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
"source": [
|
||||
"# Bing Search\n",
|
||||
"\n",
|
||||
"[Microsoft Bing](https://www.bing.com/), commonly referred to as `Bing` or `Bing Search`, is a web search engine owned and operated by `Microsoft`."
|
||||
"> [Bing Search](https://learn.microsoft.com/en-us/bing/search-apis/bing-web-search/) is an Azure service and enables safe, ad-free, location-aware search results, surfacing relevant information from billions of web documents. Help your users find what they're looking for from the world-wide-web by harnessing Bing's ability to comb billions of webpages, images, videos, and news with a single API call. "
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -30,7 +30,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -186,7 +186,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -195,7 +195,7 @@
|
||||
"BingSearchResults(api_wrapper=BingSearchAPIWrapper(bing_subscription_key='<your subscription key>', bing_search_url='https://api.bing.microsoft.com/v7.0/search', k=10, search_kwargs={}))"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -213,24 +213,27 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'snippet': 'This chart shows the 14 day <b>weather</b> trend for 33.74°N 89.59°E with daily <b>weather</b> symbols, minimum and maximum temperatures, precipitation amount and probability.. The deviance is coloured within the temperature graph. The stronger the ups and downs, the more uncertain the forecast will be.', 'title': '14 Day Weather 33.74°N 89.59°E - meteoblue', 'link': 'https://www.meteoblue.com/en/weather/14-days/33.739N89.594E6216_Asia%2FShanghai'}\n",
|
||||
"{'snippet': 'Get the monthly <b>weather</b> forecast for Huangpu District, <b>Shanghai</b>, China, including daily high/low, historical averages, to help you plan ahead.', 'title': 'Huangpu District, Shanghai, China Monthly Weather | AccuWeather', 'link': 'https://www.accuweather.com/en/cn/huangpu-district/60782/june-weather/60782'}\n",
|
||||
"{'snippet': '<b>Shanghai</b> Hongqiao Airport is 60 miles from 31°31'42.7"N, 120°24'12.7"E, so the actual climate in 31°31'42.7"N, 120°24'12.7"E can vary a bit. Based on <b>weather</b> reports collected during 1992–2021. Showing: All Year January February March April May June July August September October November December', 'title': 'Climate & Weather Averages in 31°31'42.7"N, 120°24'12.7"E, China', 'link': 'https://www.timeanddate.com/weather/@31.52853,120.40355/climate'}\n",
|
||||
"{'snippet': 'Air Quality gives information using <b>weather</b> conditions, pollutants, and research from The <b>Weather</b> Channel and <b>weather</b>.com ... Today's Air Quality-<b>Shanghai</b>, People's Republic of China. 76.', 'title': 'Shanghai, People's Republic of China Weather', 'link': 'https://weather.com/forecast/air-quality/l/80415bb74d7ded500f89b569c51da53325719ddea6e1394485ad846e812e61d2'}\n"
|
||||
"{'snippet': '<b>Shanghai</b>, <b>Shanghai</b>, China <b>Weather</b> Forecast, with current conditions, wind, air quality, and what to expect for the next 3 days.', 'title': 'Shanghai, Shanghai, China Weather Forecast | AccuWeather', 'link': 'https://www.accuweather.com/en/cn/shanghai/106577/weather-forecast/106577'}\n",
|
||||
"{'snippet': 'Current <b>weather</b> <b>in Shanghai</b> and forecast for today, tomorrow, and next 14 days', 'title': 'Weather for Shanghai, Shanghai Municipality, China - timeanddate.com', 'link': 'https://www.timeanddate.com/weather/china/shanghai'}\n",
|
||||
"{'snippet': '<b>Shanghai</b> 14 Day Extended Forecast. <b>Weather</b> Today <b>Weather</b> Hourly 14 Day Forecast Yesterday/Past <b>Weather</b> Climate (Averages) Currently: 73 °F. Rain showers. Partly sunny. (<b>Weather</b> station: <b>Shanghai</b> Hongqiao Airport, China). See more current <b>weather</b>.', 'title': 'Shanghai, Shanghai Municipality, China 14 day weather forecast', 'link': 'https://www.timeanddate.com/weather/china/shanghai/ext'}\n",
|
||||
"{'snippet': '<b>Shanghai</b> - <b>Weather</b> warnings issued 14-day forecast. <b>Weather</b> warnings issued. Forecast - <b>Shanghai</b>. Day by day forecast. Last updated today at 18:00. Tonight, A clear sky and a gentle breeze. Clear Sky.', 'title': 'Shanghai - BBC Weather', 'link': 'https://www.bbc.com/weather/1796236'}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import json\n",
|
||||
"\n",
|
||||
"# .invoke wraps utility.results\n",
|
||||
"response = tool.invoke(\"What is the weather in Shanghai\")\n",
|
||||
"for item in list(response):\n",
|
||||
"response = tool.invoke(\"What is the weather in Shanghai?\")\n",
|
||||
"response = json.loads(response.replace(\"'\", '\"'))\n",
|
||||
"for item in response:\n",
|
||||
" print(item)"
|
||||
]
|
||||
},
|
||||
@@ -255,7 +258,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -269,7 +272,7 @@
|
||||
"Invoking: `bing_search_results_json` with `{'query': 'latest burning man floods'}`\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[0m\u001b[36;1m\u001b[1;3m[{'snippet': 'Some festivalgoers have shared stories of their successful 6-mile hikes away from <b>Burning</b> <b>Man</b>. The worst of the rain Sunday is expected between 12 p.m. and 4 p.m. local time (3 p.m. to 7 p.m. ET ...', 'title': 'Live updates: Burning Man festival rain strands thousands in ... - CNN', 'link': 'https://www.cnn.com/us/live-news/nevada-desert-burning-man-weather-rain-09-03-23/index.html'}, {'snippet': 'Black Rock Forest, where around 70,000 <b>Burning</b> <b>Man</b> attendees are gathered for the festival, is in the northwest. "Flash <b>flooding</b> caused by excessive rainfall" is possible in parts of eastern ...', 'title': 'Burning Man flooding keeps thousands stranded at Nevada site as ...', 'link': 'https://www.nbcnews.com/news/us-news/live-blog/live-updates-burning-man-flooding-keeps-thousands-stranded-nevada-site-rcna103193'}, {'snippet': 'Thousands of <b>Burning</b> <b>Man</b> attendees finally made their mass exodus after intense rain over the weekend flooded camp sites and filled them with thick, ankle-deep mud – stranding more than 70,000 ...', 'title': 'Burning Man attendees make a mass exodus after a dramatic weekend ... - CNN', 'link': 'https://www.cnn.com/2023/09/05/us/burning-man-storms-shelter-exodus-tuesday/index.html'}, {'snippet': 'You’re going to get stuck,” hosts on <b>Burning</b> <b>Man</b> Information Radio, broadcasting from within the event, told festivalgoers early on Sept. 4. According to NBC News, the Pershing County Sheriff ...', 'title': 'Burning Man attendees make mass exodus after being stranded in ... - TODAY', 'link': 'https://www.today.com/news/what-is-burning-man-flood-death-rcna103231'}]\u001b[0m\u001b[32;1m\u001b[1;3mThe latest Burning Man festival experienced heavy rain and flooding, which resulted in thousands of festivalgoers being stranded. Some attendees had to hike for miles to safety. The rain caused flash flooding in parts of the festival site, including the Black Rock Forest where the event takes place. Campsites were flooded, and thick mud made movement difficult. Eventually, after the intense rain over the weekend, attendees were able to make a mass exodus from the festival. The Pershing County Sheriff's Office warned festivalgoers about the flooding and encouraged them to leave for safety.\u001b[0m\n",
|
||||
"\u001b[0m\u001b[36;1m\u001b[1;3m[{'snippet': 'Live Updates. Thousands stranded at <b>Burning</b> <b>Man</b> festival after heavy rains. By Maureen Chowdhury, Steve Almasyand Matt Meyer, CNN. Updated 9:00 PM EDT, Sun September 3, 2023. Link Copied!', 'title': 'Thousands stranded at Burning Man festival after heavy rains', 'link': 'https://www.cnn.com/us/live-news/nevada-desert-burning-man-weather-rain-09-03-23/index.html'}, {'snippet': 'Black Rock Forest, where around 70,000 <b>Burning</b> <b>Man</b> attendees are gathered for the festival, is in the northwest. "Flash <b>flooding</b> caused by excessive rainfall" is possible in parts of eastern ...', 'title': 'Burning Man flooding keeps thousands stranded at Nevada site as ...', 'link': 'https://www.nbcnews.com/news/us-news/live-blog/live-updates-burning-man-flooding-keeps-thousands-stranded-nevada-site-rcna103193'}, {'snippet': 'Thousands of <b>Burning</b> <b>Man</b> attendees finally made their mass exodus after intense rain over the weekend flooded camp sites and filled them with thick, ankle-deep mud – stranding more than 70,000 ...', 'title': 'Burning Man attendees make a mass exodus after a dramatic weekend ... - CNN', 'link': 'https://www.cnn.com/2023/09/05/us/burning-man-storms-shelter-exodus-tuesday/index.html'}, {'snippet': 'FILE - In this satellite photo provided by Maxar Technologies, an overview of <b>Burning</b> <b>Man</b> festival in Black Rock, Nev on Monday, Aug. 28, 2023. Authorities in Nevada were investigating a death at the site of the <b>Burning</b> <b>Man</b> festival where thousands of attendees remained stranded as <b>flooding</b> from storms swept through the Nevada desert.', 'title': 'Wait times to exit Burning Man drop after flooding left tens of ...', 'link': 'https://apnews.com/article/burning-man-flooding-nevada-stranded-0726190c9f8378935e2a3cce7f154785'}]\u001b[0m\u001b[32;1m\u001b[1;3mIn the latest Burning Man festival, heavy rains caused flooding and resulted in thousands of attendees being stranded. The festival took place in Black Rock Forest, Nevada, and around 70,000 people were gathered for the event. The excessive rainfall led to flash flooding in some parts of the area. As a result, camp sites were filled with ankle-deep mud, making it difficult for people to leave. Authorities were investigating a death at the festival site, which was affected by the flooding. However, in the following days, thousands of Burning Man attendees were able to make a mass exodus after the rain subsided.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
@@ -278,10 +281,10 @@
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'input': 'What happened in the latest burning man floods?',\n",
|
||||
" 'output': \"The latest Burning Man festival experienced heavy rain and flooding, which resulted in thousands of festivalgoers being stranded. Some attendees had to hike for miles to safety. The rain caused flash flooding in parts of the festival site, including the Black Rock Forest where the event takes place. Campsites were flooded, and thick mud made movement difficult. Eventually, after the intense rain over the weekend, attendees were able to make a mass exodus from the festival. The Pershing County Sheriff's Office warned festivalgoers about the flooding and encouraged them to leave for safety.\"}"
|
||||
" 'output': 'In the latest Burning Man festival, heavy rains caused flooding and resulted in thousands of attendees being stranded. The festival took place in Black Rock Forest, Nevada, and around 70,000 people were gathered for the event. The excessive rainfall led to flash flooding in some parts of the area. As a result, camp sites were filled with ankle-deep mud, making it difficult for people to leave. Authorities were investigating a death at the festival site, which was affected by the flooding. However, in the following days, thousands of Burning Man attendees were able to make a mass exodus after the rain subsided.'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 15,
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -291,7 +294,7 @@
|
||||
"import os\n",
|
||||
"\n",
|
||||
"from langchain import hub\n",
|
||||
"from langchain.agents import AgentExecutor, create_openai_functions_agent\n",
|
||||
"from langchain.agents import AgentExecutor, create_tool_calling_agent\n",
|
||||
"from langchain_openai import AzureChatOpenAI\n",
|
||||
"\n",
|
||||
"os.environ[\"AZURE_OPENAI_API_KEY\"] = getpass.getpass()\n",
|
||||
@@ -310,7 +313,7 @@
|
||||
")\n",
|
||||
"tool = BingSearchResults(api_wrapper=api_wrapper)\n",
|
||||
"tools = [tool]\n",
|
||||
"agent = create_openai_functions_agent(llm, tools, prompt)\n",
|
||||
"agent = create_tool_calling_agent(llm, tools, prompt)\n",
|
||||
"agent_executor = AgentExecutor(\n",
|
||||
" agent=agent,\n",
|
||||
" tools=tools,\n",
|
||||
|
||||
@@ -23,12 +23,12 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet wikibase-rest-api-client mediawikiapi"
|
||||
"%pip install --upgrade --quiet \"wikibase-rest-api-client<0.2\" mediawikiapi"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"execution_count": 2,
|
||||
"id": "955988a1-ebc2-4c9a-9298-c493fe842de1",
|
||||
"metadata": {
|
||||
"execution": {
|
||||
@@ -39,26 +39,6 @@
|
||||
"shell.execute_reply.started": "2024-03-06T22:55:15.973114Z"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.tools.wikidata.tool import WikidataAPIWrapper, WikidataQueryRun\n",
|
||||
"\n",
|
||||
"wikidata = WikidataQueryRun(api_wrapper=WikidataAPIWrapper())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "9926a8a7-3e4e-4a97-ba43-7e5a274b9561",
|
||||
"metadata": {
|
||||
"execution": {
|
||||
"iopub.execute_input": "2024-03-06T22:54:38.551998Z",
|
||||
"iopub.status.busy": "2024-03-06T22:54:38.551266Z",
|
||||
"iopub.status.idle": "2024-03-06T22:54:51.913177Z",
|
||||
"shell.execute_reply": "2024-03-06T22:54:51.911636Z",
|
||||
"shell.execute_reply.started": "2024-03-06T22:54:38.551955Z"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
@@ -77,7 +57,7 @@
|
||||
"sport: athletics\n",
|
||||
"place of birth: Maida Vale, Warrington Lodge\n",
|
||||
"educated at: King's College, Princeton University, Sherborne School, Hazlehurst Community Primary School\n",
|
||||
"employer: Victoria University of Manchester, Government Communications Headquarters, University of Cambridge, National Physical Laboratory\n",
|
||||
"employer: Victoria University of Manchester, Government Communications Headquarters, University of Cambridge, National Physical Laboratory (United Kingdom)\n",
|
||||
"place of death: Wilmslow\n",
|
||||
"field of work: cryptanalysis, computer science, mathematics, logic, cryptography\n",
|
||||
"cause of death: cyanide poisoning\n",
|
||||
@@ -98,13 +78,17 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.tools.wikidata.tool import WikidataAPIWrapper, WikidataQueryRun\n",
|
||||
"\n",
|
||||
"wikidata = WikidataQueryRun(api_wrapper=WikidataAPIWrapper())\n",
|
||||
"\n",
|
||||
"print(wikidata.run(\"Alan Turing\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "2762aa55-92bd-4e50-b433-8c5c37da465f",
|
||||
"id": "7188d62f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
@@ -126,7 +110,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.12"
|
||||
"version": "3.10.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
178
docs/docs/integrations/tools/zenguard.ipynb
Normal file
@@ -0,0 +1,178 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# ZenGuard AI\n",
|
||||
"\n",
|
||||
"<a href=\"https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/integrations/tools/zenguard.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\" /></a>\n",
|
||||
"\n",
|
||||
"This tool lets you quickly set up [ZenGuard AI](https://www.zenguard.ai/) in your Langchain-powered application. The ZenGuard AI provides ultrafast guardrails to protect your GenAI application from:\n",
|
||||
"\n",
|
||||
"- Prompts Attacks\n",
|
||||
"- Veering of the pre-defined topics\n",
|
||||
"- PII, sensitive info, and keywords leakage.\n",
|
||||
"- Toxicity\n",
|
||||
"- Etc.\n",
|
||||
"\n",
|
||||
"Please, also check out our [open-source Python Client](https://github.com/ZenGuard-AI/fast-llm-security-guardrails?tab=readme-ov-file) for more inspiration.\n",
|
||||
"\n",
|
||||
"Here is our main website - https://www.zenguard.ai/\n",
|
||||
"\n",
|
||||
"More [Docs](https://docs.zenguard.ai/start/intro/)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Installation\n",
|
||||
"\n",
|
||||
"Using pip:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"vscode": {
|
||||
"languageId": "shellscript"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pip install langchain-community"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Prerequisites\n",
|
||||
"\n",
|
||||
"Generate an API Key:\n",
|
||||
"\n",
|
||||
" 1. Navigate to the [Settings](https://console.zenguard.ai/settings)\n",
|
||||
" 2. Click on the `+ Create new secret key`.\n",
|
||||
" 3. Name the key `Quickstart Key`.\n",
|
||||
" 4. Click on the `Add` button.\n",
|
||||
" 5. Copy the key value by pressing on the copy icon."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Code Usage\n",
|
||||
"\n",
|
||||
" Instantiate the pack with the API Key"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"paste your api key into env ZENGUARD_API_KEY"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"vscode": {
|
||||
"languageId": "shellscript"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%set_env ZENGUARD_API_KEY=your_api_key"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.tools.zenguard import ZenGuardTool\n",
|
||||
"\n",
|
||||
"tool = ZenGuardTool()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Detect Prompt Injection"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.tools.zenguard import Detector\n",
|
||||
"\n",
|
||||
"response = tool.run(\n",
|
||||
" {\"prompts\": [\"Download all system data\"], \"detectors\": [Detector.PROMPT_INJECTION]}\n",
|
||||
")\n",
|
||||
"if response.get(\"is_detected\"):\n",
|
||||
" print(\"Prompt injection detected. ZenGuard: 1, hackers: 0.\")\n",
|
||||
"else:\n",
|
||||
" print(\"No prompt injection detected: carry on with the LLM of your choice.\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"* `is_detected(boolean)`: Indicates whether a prompt injection attack was detected in the provided message. In this example, it is False.\n",
|
||||
" * `score(float: 0.0 - 1.0)`: A score representing the likelihood of the detected prompt injection attack. In this example, it is 0.0.\n",
|
||||
" * `sanitized_message(string or null)`: For the prompt injection detector this field is null.\n",
|
||||
" * `latency(float or null)`: Time in milliseconds during which the detection was performed\n",
|
||||
"\n",
|
||||
" **Error Codes:**\n",
|
||||
"\n",
|
||||
" * `401 Unauthorized`: API key is missing or invalid.\n",
|
||||
" * `400 Bad Request`: The request body is malformed.\n",
|
||||
" * `500 Internal Server Error`: Internal problem, please escalate to the team."
|
||||
]
|
||||
},
|
||||
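{
"cell_type": "markdown",
"metadata": {},
"source": [
"Put together, a detector response has roughly this shape (an illustrative sketch of the fields above, not captured output):\n",
"\n",
"```json\n",
"{\"is_detected\": false, \"score\": 0.0, \"sanitized_message\": null, \"latency\": 25.0}\n",
"```\n"
]
},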
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### More examples\n",
|
||||
"\n",
|
||||
" * [Detect PII](https://docs.zenguard.ai/detectors/pii/)\n",
|
||||
" * [Detect Allowed Topics](https://docs.zenguard.ai/detectors/allowed-topics/)\n",
|
||||
" * [Detect Banned Topics](https://docs.zenguard.ai/detectors/banned-topics/)\n",
|
||||
" * [Detect Keywords](https://docs.zenguard.ai/detectors/keywords/)\n",
|
||||
" * [Detect Secrets](https://docs.zenguard.ai/detectors/secrets/)\n",
|
||||
" * [Detect Toxicity](https://docs.zenguard.ai/detectors/toxicity/)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -143,6 +143,28 @@
|
||||
" }\n",
|
||||
" ]\n",
|
||||
"}\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Additionally, if you are running a MongoDB M10 cluster with server version 6.0+, you can leverage the `MongoDBAtlasVectorSearch.create_index`. To add the above index its usage would look like this.\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"from langchain_community.embeddings.openai import OpenAIEmbeddings\n",
|
||||
"from langchain_mongodb.vectorstores import MongoDBAtlasVectorSearch\n",
|
||||
"from pymongo import MongoClient\n",
|
||||
"\n",
|
||||
"mongo_client = MongoClient(\"<YOUR-CONNECTION-STRING>\")\n",
|
||||
"collection = mongo_client[\"<db_name>\"][\"<collection_name>\"]\n",
|
||||
"embeddings = OpenAIEmbeddings()\n",
|
||||
"\n",
|
||||
"vectorstore = MongoDBAtlasVectorSearch(\n",
|
||||
" collection=collection,\n",
|
||||
" embedding=embeddings,\n",
|
||||
" index_name=\"<ATLAS_VECTOR_SEARCH_INDEX_NAME>\",\n",
|
||||
" relevance_score_fn=\"cosine\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Creates an index using the index_name provided and relevance_score_fn type\n",
|
||||
"vectorstore.create_index(dimensions=1536)\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
@@ -296,6 +318,16 @@
|
||||
" }\n",
|
||||
" ]\n",
|
||||
"}\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"You can also update the index programmatically using the `MongoDBAtlasVectorSearch.create_index` method.\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"vectorstore.create_index(\n",
|
||||
" dimensions=1536,\n",
|
||||
" filters=[{\"type\":\"filter\", \"path\":\"page\"}],\n",
|
||||
" update=True\n",
|
||||
")\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -8,9 +8,10 @@ sidebar_class_name: hidden
|
||||
**LangChain** is a framework for developing applications powered by large language models (LLMs).
|
||||
|
||||
LangChain simplifies every stage of the LLM application lifecycle:
|
||||
- **Development**: Build your applications using LangChain's open-source [building blocks](/docs/concepts#langchain-expression-language-lcel) and [components](/docs/concepts). Hit the ground running using [third-party integrations](/docs/integrations/platforms/) and [Templates](/docs/templates).
|
||||
- **Development**: Build your applications using LangChain's open-source [building blocks](/docs/concepts#langchain-expression-language-lcel), [components](/docs/concepts), and [third-party integrations](/docs/integrations/platforms/).
|
||||
Use [LangGraph](/docs/concepts/#langgraph) to build stateful agents with first-class streaming and human-in-the-loop support.
|
||||
- **Productionization**: Use [LangSmith](https://docs.smith.langchain.com/) to inspect, monitor and evaluate your chains, so that you can continuously optimize and deploy with confidence.
|
||||
- **Deployment**: Turn any chain into an API with [LangServe](/docs/langserve).
|
||||
- **Deployment**: Turn your LangGraph applications into production-ready APIs and Assistants with [LangGraph Cloud](https://langchain-ai.github.io/langgraph/cloud/).
|
||||
|
||||
import ThemedImage from '@theme/ThemedImage';
|
||||
import useBaseUrl from '@docusaurus/useBaseUrl';
|
||||
@@ -18,8 +19,8 @@ import useBaseUrl from '@docusaurus/useBaseUrl';
|
||||
<ThemedImage
|
||||
alt="Diagram outlining the hierarchical organization of the LangChain framework, displaying the interconnected parts across multiple layers."
|
||||
sources={{
|
||||
light: useBaseUrl('/svg/langchain_stack.svg'),
|
||||
dark: useBaseUrl('/svg/langchain_stack_dark.svg'),
|
||||
light: useBaseUrl('/svg/langchain_stack_062024.svg'),
|
||||
dark: useBaseUrl('/svg/langchain_stack_062024_dark.svg'),
|
||||
}}
|
||||
title="LangChain Framework Overview"
|
||||
/>
|
||||
@@ -30,7 +31,7 @@ Concretely, the framework consists of the following open-source libraries:
|
||||
- **`langchain-community`**: Third party integrations.
|
||||
- Partner packages (e.g. **`langchain-openai`**, **`langchain-anthropic`**, etc.): Some integrations have been further split into their own lightweight packages that only depend on **`langchain-core`**.
|
||||
- **`langchain`**: Chains, agents, and retrieval strategies that make up an application's cognitive architecture.
|
||||
- **[LangGraph](https://langchain-ai.github.io/langgraph)**: Build robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph.
|
||||
- **[LangGraph](https://langchain-ai.github.io/langgraph)**: Build robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph. Integrates smoothly with LangChain, but can be used without it.
|
||||
- **[LangServe](/docs/langserve)**: Deploy LangChain chains as REST APIs.
|
||||
- **[LangSmith](https://docs.smith.langchain.com)**: A developer platform that lets you debug, test, evaluate, and monitor LLM applications.
|
||||
|
||||
@@ -43,15 +44,17 @@ These docs focus on the Python LangChain library. [Head here](https://js.langcha
|
||||
|
||||
## [Tutorials](/docs/tutorials)
|
||||
|
||||
If you're looking to build something specific or are more of a hands-on learner, check out our [tutorials](/docs/tutorials).
|
||||
If you're looking to build something specific or are more of a hands-on learner, check out our [tutorials section](/docs/tutorials).
|
||||
This is the best place to get started.
|
||||
|
||||
These are the best ones to get started with:
|
||||
|
||||
- [Build a Simple LLM Application](/docs/tutorials/llm_chain)
|
||||
- [Build a Chatbot](/docs/tutorials/chatbot)
|
||||
- [Build an Agent](/docs/tutorials/agents)
|
||||
- [Introduction to LangGraph](https://langchain-ai.github.io/langgraph/tutorials/introduction/)
|
||||
|
||||
Explore the full list of tutorials [here](/docs/tutorials).
|
||||
Explore the full list of LangChain tutorials [here](/docs/tutorials), and check out other [LangGraph tutorials here](https://langchain-ai.github.io/langgraph/tutorials/).
|
||||
|
||||
|
||||
## [How-to guides](/docs/how_to)
|
||||
@@ -60,10 +63,14 @@ Explore the full list of tutorials [here](/docs/tutorials).
|
||||
These how-to guides don’t cover topics in depth – you’ll find that material in the [Tutorials](/docs/tutorials) and the [API Reference](https://api.python.langchain.com/en/latest/).
|
||||
However, these guides will help you quickly accomplish common tasks.
|
||||
|
||||
Check out [LangGraph-specific how-tos here](https://langchain-ai.github.io/langgraph/how-tos/).
|
||||
|
||||
## [Conceptual guide](/docs/concepts)
|
||||
|
||||
Introductions to all the key parts of LangChain you’ll need to know! [Here](/docs/concepts) you'll find high level explanations of all LangChain concepts.
|
||||
|
||||
For a deeper dive into LangGraph concepts, check out [this page](https://langchain-ai.github.io/langgraph/concepts/).
|
||||
|
||||
## [API reference](https://api.python.langchain.com)
|
||||
Head to the reference section for full documentation of all classes and methods in the LangChain Python packages.
|
||||
|
||||
@@ -73,10 +80,7 @@ Head to the reference section for full documentation of all classes and methods
|
||||
Trace and evaluate your language model applications and intelligent agents to help you move from prototype to production.
|
||||
|
||||
### [🦜🕸️ LangGraph](https://langchain-ai.github.io/langgraph)
|
||||
Build stateful, multi-actor applications with LLMs, built on top of (and intended to be used with) LangChain primitives.
|
||||
|
||||
### [🦜🏓 LangServe](/docs/langserve)
|
||||
Deploy LangChain runnables and chains as REST APIs.
|
||||
Build stateful, multi-actor applications with LLMs. Integrates smoothly with LangChain, but can be used without it.
|
||||
|
||||
|
||||
## Additional resources
|
||||
|
||||
@@ -21,6 +21,16 @@
|
||||
"source": [
|
||||
"# Build an Agent\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
"This guide assumes familiarity with the following concepts:\n",
|
||||
"\n",
|
||||
"- [Chat Models](/docs/concepts/#chat-models)\n",
|
||||
"- [Tools](/docs/concepts/#tools)\n",
|
||||
"- [Agents](/docs/concepts/#agents)\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"By themselves, language models can't take actions - they just output text.\n",
|
||||
"A big use case for LangChain is creating **agents**.\n",
|
||||
"Agents are systems that use LLMs as reasoning engines to determine which actions to take and the inputs to pass them.\n",
|
||||
@@ -28,16 +38,6 @@
|
||||
"\n",
|
||||
"In this tutorial we will build an agent that can interact with a search engine. You will be able to ask this agent questions, watch it call the search tool, and have conversations with it.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"## Concepts\n",
|
||||
"\n",
|
||||
"In following this tutorial, you will learn how to:\n",
|
||||
"\n",
|
||||
"- Use [language models](/docs/concepts/#chat-models), in particular their tool calling ability\n",
|
||||
"- Use a Search [Tool](/docs/concepts/#tools) to look up information from the Internet\n",
|
||||
"- Compose a [LangGraph Agent](/docs/concepts/#agents), which use an LLM to determine actions and then execute them\n",
|
||||
"- Debug and trace your application using [LangSmith](/docs/concepts/#langsmith)\n",
|
||||
"\n",
|
||||
"## End-to-end agent\n",
|
||||
"\n",
|
||||
"The code snippet below represents a fully functional agent that uses an LLM to decide which tools to use. It is equipped with a generic search tool. It has conversational memory - meaning that it can be used as a multi-turn chatbot.\n",
|
||||
@@ -343,7 +343,7 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"ContentString: [{'id': 'toolu_01VTP7DUvSfgtYxsq9x4EwMp', 'input': {'query': 'weather san francisco'}, 'name': 'tavily_search_results_json', 'type': 'tool_use'}]\n",
|
||||
"ContentString: \n",
|
||||
"ToolCalls: [{'name': 'tavily_search_results_json', 'args': {'query': 'weather san francisco'}, 'id': 'toolu_01VTP7DUvSfgtYxsq9x4EwMp'}]\n"
|
||||
]
|
||||
}
|
||||
@@ -362,7 +362,7 @@
|
||||
"source": [
|
||||
"We can see that there's now no text content, but there is a tool call! It wants us to call the Tavily Search tool.\n",
|
||||
"\n",
|
||||
"This isn't calling that tool yet - it's just telling us to. In order to actually calll it, we'll want to create our agent."
|
||||
"This isn't calling that tool yet - it's just telling us to. In order to actually call it, we'll want to create our agent."
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -25,6 +25,16 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
"This guide assumes familiarity with the following concepts:\n",
|
||||
"\n",
|
||||
"- [Chat Models](/docs/concepts/#chat-models)\n",
|
||||
"- [Prompt Templates](/docs/concepts/#prompt-templates)\n",
|
||||
"- [Chat History](/docs/concepts/#chat-history)\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"\n",
|
||||
"We'll go over an example of how to design and implement an LLM-powered chatbot. \n",
|
||||
@@ -39,18 +49,6 @@
|
||||
"\n",
|
||||
"This tutorial will cover the basics which will be helpful for those two more advanced topics, but feel free to skip directly to there should you choose.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"## Concepts\n",
|
||||
"\n",
|
||||
"Here are a few of the high-level components we'll be working with:\n",
|
||||
"\n",
|
||||
"- [`Chat Models`](/docs/concepts/#chat-models). The chatbot interface is based around messages rather than raw text, and therefore is best suited to Chat Models rather than text LLMs.\n",
|
||||
"- [`Prompt Templates`](/docs/concepts/#prompt-templates), which simplify the process of assembling prompts that combine default messages, user input, chat history, and (optionally) additional retrieved context.\n",
|
||||
"- [`Chat History`](/docs/concepts/#chat-history), which allows a chatbot to \"remember\" past interactions and take them into account when responding to followup questions. \n",
|
||||
"- Debugging and tracing your application using [LangSmith](/docs/concepts/#langsmith)\n",
|
||||
"\n",
|
||||
"We'll cover how to fit the above components together to create a powerful conversational chatbot.\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"### Jupyter Notebook\n",
|
||||
@@ -145,7 +143,7 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Hello Bob! How can I assist you today?', response_metadata={'token_usage': {'completion_tokens': 10, 'prompt_tokens': 12, 'total_tokens': 22}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-8ecc8a9f-8b32-49ad-8e41-5caa26282f76-0', usage_metadata={'input_tokens': 12, 'output_tokens': 10, 'total_tokens': 22})"
|
||||
"AIMessage(content='Hello Bob! How can I assist you today?', response_metadata={'token_usage': {'completion_tokens': 10, 'prompt_tokens': 12, 'total_tokens': 22}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-d939617f-0c3b-45e9-a93f-13dafecbd4b5-0', usage_metadata={'input_tokens': 12, 'output_tokens': 10, 'total_tokens': 22})"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
@@ -174,7 +172,7 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"I'm sorry, I don't have access to that information.\", response_metadata={'token_usage': {'completion_tokens': 13, 'prompt_tokens': 12, 'total_tokens': 25}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-4e0066e8-0dcc-4aea-b4f9-b9029c81724f-0', usage_metadata={'input_tokens': 12, 'output_tokens': 13, 'total_tokens': 25})"
|
||||
"AIMessage(content=\"I'm sorry, I don't have access to personal information unless you provide it to me. How may I assist you today?\", response_metadata={'token_usage': {'completion_tokens': 26, 'prompt_tokens': 12, 'total_tokens': 38}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-47bc8c20-af7b-4fd2-9345-f0e9fdf18ce3-0', usage_metadata={'input_tokens': 12, 'output_tokens': 26, 'total_tokens': 38})"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
@@ -206,7 +204,7 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Your name is Bob. How can I assist you today, Bob?', response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 35, 'total_tokens': 49}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-c377d868-1bfe-491a-82fb-1f9122939796-0', usage_metadata={'input_tokens': 35, 'output_tokens': 14, 'total_tokens': 49})"
|
||||
"AIMessage(content='Your name is Bob. How can I help you, Bob?', response_metadata={'token_usage': {'completion_tokens': 13, 'prompt_tokens': 35, 'total_tokens': 48}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-9f90291b-4df9-41dc-9ecf-1ee1081f4490-0', usage_metadata={'input_tokens': 35, 'output_tokens': 13, 'total_tokens': 48})"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
@@ -263,7 +261,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"After that, we can import the relevant classes and set up our chain which wraps the model and adds in this message history. A key part here is the function we pass into as the `get_session_history`. This function is expected to take in a `session_id` and return a Message History object. This `session_id` is used to distinguish between separate conversations, and should be passed in as part of the config when calling the new chain (we'll show how to do that."
|
||||
"After that, we can import the relevant classes and set up our chain which wraps the model and adds in this message history. A key part here is the function we pass into as the `get_session_history`. This function is expected to take in a `session_id` and return a Message History object. This `session_id` is used to distinguish between separate conversations, and should be passed in as part of the config when calling the new chain (we'll show how to do that)."
|
||||
]
|
||||
},
|
||||
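{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal in-memory version of such a function might look like this (a sketch, assuming histories are kept in a plain dict keyed by session id):\n",
"\n",
"```python\n",
"from langchain_community.chat_message_histories import ChatMessageHistory\n",
"\n",
"store = {}\n",
"\n",
"\n",
"def get_session_history(session_id: str) -> ChatMessageHistory:\n",
"    # Create a fresh history for an unseen session id, then reuse it on later calls\n",
"    if session_id not in store:\n",
"        store[session_id] = ChatMessageHistory()\n",
"    return store[session_id]\n",
"```\n"
]
},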
{
|
||||
@@ -309,17 +307,10 @@
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Parent run 9bdaa45d-604e-4891-9b0a-28754985f10b not found for run 271bd46a-f980-407a-af8a-9399420bce8d. Treating as a root run.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Hello Bob! How can I assist you today?'"
|
||||
"'Hi Bob! How can I assist you today?'"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
@@ -341,17 +332,10 @@
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Parent run 16482292-535c-449d-8a9d-d0fccf5112eb not found for run 7f2e501a-d5b4-4d8c-924b-aae9eb9d7267. Treating as a root run.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Your name is Bob. How can I assist you today, Bob?'"
|
||||
"'Your name is Bob. How can I help you today, Bob?'"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
@@ -380,17 +364,10 @@
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Parent run c14d7130-04c5-445f-9e22-442f7c7e8f07 not found for run 946beadc-5cf1-468f-bac4-ca5ddc10ea73. Treating as a root run.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"I'm sorry, I don't know your name as you have not provided it.\""
|
||||
"\"I'm sorry, I cannot determine your name as I am an AI assistant and do not have access to that information.\""
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
@@ -421,13 +398,6 @@
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Parent run 4f61611c-3875-4b2d-9f89-af452866d55a not found for run 066a30b1-bbb0-4fee-a035-7fdb41c28d91. Treating as a root run.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
@@ -550,17 +520,10 @@
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Parent run 51e624b3-19fd-435f-b580-2a3e4f2d0dc9 not found for run b411f007-b2ad-48c3-968c-aa5ecbb58aea. Treating as a root run.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Hello Jim! How can I assist you today?'"
|
||||
"'Hello, Jim! How can I assist you today?'"
|
||||
]
|
||||
},
|
||||
"execution_count": 16,
|
||||
@@ -582,13 +545,6 @@
 "execution_count": 17,
 "metadata": {},
 "outputs": [
-{
-"name": "stderr",
-"output_type": "stream",
-"text": [
-"Parent run a30b22cd-698f-48a1-94a0-1a172242e292 not found for run 52b0b60d-5d2a-4610-a572-037602792ad6. Treating as a root run.\n"
-]
-},
 {
 "data": {
 "text/plain": [
@@ -700,13 +656,6 @@
 "execution_count": 22,
 "metadata": {},
 "outputs": [
-{
-"name": "stderr",
-"output_type": "stream",
-"text": [
-"Parent run d02b7778-4a91-4831-ace9-b33bb456dc90 not found for run ee0a20dd-5b9e-4862-b3c9-8e2e72b8eb82. Treating as a root run.\n"
-]
-},
 {
 "data": {
 "text/plain": [
@@ -732,13 +681,6 @@
 "execution_count": 23,
 "metadata": {},
 "outputs": [
-{
-"name": "stderr",
-"output_type": "stream",
-"text": [
-"Parent run 12422d4c-6494-490e-845e-08dcc1c6a4b9 not found for run a82eb759-f51d-4488-871b-6e2d601b4128. Treating as a root run.\n"
-]
-},
 {
 "data": {
 "text/plain": [
@@ -783,7 +725,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 34,
+"execution_count": 24,
 "metadata": {},
 "outputs": [
 {
@@ -798,7 +740,7 @@
 " AIMessage(content='yes!')]"
 ]
 },
-"execution_count": 34,
+"execution_count": 24,
 "metadata": {},
 "output_type": "execute_result"
 }
@@ -843,16 +785,16 @@
 },
 {
 "cell_type": "code",
-"execution_count": 35,
+"execution_count": 25,
 "metadata": {},
 "outputs": [
 {
 "data": {
 "text/plain": [
-"\"I'm sorry, I don't have access to personal information. How can I assist you today?\""
+"\"I'm sorry, but I don't have access to your personal information. How can I assist you today?\""
 ]
 },
-"execution_count": 35,
+"execution_count": 25,
 "metadata": {},
 "output_type": "execute_result"
 }
@@ -886,7 +828,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 36,
+"execution_count": 26,
 "metadata": {},
 "outputs": [
 {
@@ -895,7 +837,7 @@
 "'You asked \"what\\'s 2 + 2?\"'"
 ]
 },
-"execution_count": 36,
+"execution_count": 26,
 "metadata": {},
 "output_type": "execute_result"
 }
@@ -919,7 +861,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 37,
+"execution_count": 27,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -934,23 +876,16 @@
 },
 {
 "cell_type": "code",
-"execution_count": 38,
+"execution_count": 28,
 "metadata": {},
 "outputs": [
-{
-"name": "stderr",
-"output_type": "stream",
-"text": [
-"Parent run e1bb2af3-192b-4bd1-8734-6d2dff1d80b6 not found for run 0c734998-cf16-4708-8658-043a6c7b4a91. Treating as a root run.\n"
-]
-},
 {
 "data": {
 "text/plain": [
-"\"I'm sorry, I don't have access to your name. How can I assist you today?\""
+"\"I'm sorry, I don't have access to that information. How can I assist you today?\""
 ]
 },
-"execution_count": 38,
+"execution_count": 28,
 "metadata": {},
 "output_type": "execute_result"
 }
@@ -976,23 +911,16 @@
 },
 {
 "cell_type": "code",
-"execution_count": 39,
+"execution_count": 29,
 "metadata": {},
 "outputs": [
-{
-"name": "stderr",
-"output_type": "stream",
-"text": [
-"Parent run 181a1f04-9176-4837-80e8-ce74866775a2 not found for run ad402c5a-8341-4c62-ac58-cdf923b3b9ec. Treating as a root run.\n"
-]
-},
 {
 "data": {
 "text/plain": [
-"\"You haven't asked a math problem yet. Feel free to ask any math question you have, and I'll do my best to help you with it.\""
+"\"You haven't asked a math problem yet. Feel free to ask any math-related question you have, and I'll be happy to help you with it.\""
 ]
 },
-"execution_count": 39,
+"execution_count": 29,
 "metadata": {},
 "output_type": "execute_result"
 }
@@ -1031,25 +959,14 @@
 },
 {
 "cell_type": "code",
-"execution_count": 31,
+"execution_count": 30,
 "metadata": {},
 "outputs": [
-{
-"name": "stderr",
-"output_type": "stream",
-"text": [
-"Parent run e0ee52b6-1261-4f2d-98ca-f78c9019684b not found for run 0f6d7995-c32c-4bdb-b7a6-b3d932c13389. Treating as a root run.\n"
-]
-},
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"|Sure|,| Todd|!| Here|'s| a| joke| for| you|:\n",
-"\n",
-"|Why| don|'t| scientists| trust| atoms|?\n",
-"\n",
-"|Because| they| make| up| everything|!||"
+"|Hi| Todd|!| Sure|,| here|'s| a| joke| for| you|:| Why| couldn|'t| the| bicycle| find| its| way| home|?| Because| it| lost| its| bearings|!| 😄||"
 ]
 }
 ],
@@ -1086,9 +1003,9 @@
 ],
 "metadata": {
 "kernelspec": {
-"display_name": "poetry-venv-2",
+"display_name": "Python 3 (ipykernel)",
 "language": "python",
-"name": "poetry-venv-2"
+"name": "python3"
 },
 "language_info": {
 "codemirror_mode": {
@@ -1100,7 +1017,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.11.9"
+"version": "3.10.4"
 }
 },
 "nbformat": 4,

@@ -17,18 +17,21 @@
 "source": [
 "# Build an Extraction Chain\n",
 "\n",
+":::info Prerequisites\n",
+"\n",
+"This guide assumes familiarity with the following concepts:\n",
+"\n",
+"- [Chat Models](/docs/concepts/#chat-models)\n",
+"- [Tools](/docs/concepts/#tools)\n",
+"- [Tool calling](/docs/concepts/#function-tool-calling)\n",
+"\n",
+":::\n",
+"\n",
 "In this tutorial, we will build a chain to extract structured information from unstructured text. \n",
 "\n",
 ":::{.callout-important}\n",
-"This tutorial will only work with models that support **function/tool calling**\n",
-":::\n",
-"\n",
-"## Concepts\n",
-"\n",
-"Concepts we will cover are:\n",
-"- Using [language models](/docs/concepts/#chat-models)\n",
-"- Using [function/tool calling](/docs/concepts/#function-tool-calling)\n",
-"- Debugging and tracing your application using [LangSmith](/docs/concepts/#langsmith)\n"
+"This tutorial will only work with models that support **tool calling**\n",
+":::"
 ]
 },
 {
@@ -231,7 +231,7 @@
 "id": "d508b79d",
 "metadata": {},
 "source": [
-"More commonly, we can \"chain\" the model with this output parser. This means this output parser will get called everytime in this chain. This chain takes on the input type of the language model (string or list of message) and returns the output type of the output parser (string).\n",
+"More commonly, we can \"chain\" the model with this output parser. This means this output parser will get called every time in this chain. This chain takes on the input type of the language model (string or list of message) and returns the output type of the output parser (string).\n",
 "\n",
 "We can easily create the chain using the `|` operator. The `|` operator is used in LangChain to combine two elements together."
 ]
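
The second hunk above describes the `|` composition in prose only. A minimal sketch of what such a chained pipeline looks like in practice — assuming the `langchain-openai` package is installed and `OPENAI_API_KEY` is set; the model name below is illustrative, not the tutorial's exact choice:

```python
# Minimal sketch of chaining a chat model with an output parser via `|`.
# Assumes `langchain-openai` is installed and OPENAI_API_KEY is set;
# the model name is illustrative.
from langchain_core.output_parsers import StrOutputParser
from langchain_openai import ChatOpenAI

model = ChatOpenAI(model="gpt-3.5-turbo")
parser = StrOutputParser()

# `model | parser` composes both into one runnable: it takes the model's
# input type (a string or a list of messages) and returns the parser's
# output type (a plain string).
chain = model | parser

print(chain.invoke("Translate 'hello' to French."))
```

Because every component in such a chain shares the same `Runnable` interface, the composed `chain` also supports `batch` and `stream` in addition to `invoke`.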
Some files were not shown because too many files have changed in this diff.