Mirror of https://github.com/hwchase17/langchain.git, synced 2026-02-21 06:33:41 +00:00
Compare commits: v0.0.347 ... v0.0.349-r (29 commits)
| SHA1 |
|---|
| d9bfdc95ea |
| 2fa81739b6 |
| 84a57f5350 |
| f5befe3b89 |
| c24f277b7c |
| 1ef13661b9 |
| 6fbfc375b9 |
| 6da0cfea0e |
| 0797358c1b |
| 300305e5e5 |
| b32fcb550d |
| b3f226e8f8 |
| ba083887e5 |
| 37bee92b8a |
| 1d7e5c51aa |
| 477b274a62 |
| 02ee0073cf |
| ff0d5514c1 |
| 1d725327eb |
| 7be3eb6fbd |
| a05230a4ba |
| 18aba7fdef |
| 52052cc7b9 |
| e4d6e55c5e |
| eb209e7ee3 |
| b2280fd874 |
| 7186faefb2 |
| 76f30f5297 |
| 54040b00a4 |
13  .github/workflows/_all_ci.yml (vendored)
@@ -48,13 +48,12 @@ jobs:
  compile-integration-tests:
    uses: ./.github/workflows/_compile_integration_test.yml
    if: ${{ inputs.working-directory != 'libs/core' }}
    with:
      working-directory: ${{ inputs.working-directory }}
    secrets: inherit

  pydantic-compatibility:
    uses: ./.github/workflows/_pydantic_compatibility.yml
  dependencies:
    uses: ./.github/workflows/_dependencies.yml
    with:
      working-directory: ${{ inputs.working-directory }}
    secrets: inherit

@@ -68,7 +67,6 @@ jobs:
          - "3.9"
          - "3.10"
          - "3.11"
    if: ${{ inputs.working-directory == 'libs/langchain' }}
    name: Python ${{ matrix.python-version }} extended tests
    defaults:
      run:

@@ -88,12 +86,7 @@ jobs:
        shell: bash
        run: |
          echo "Running extended tests, installing dependencies with poetry..."
          poetry install -E extended_testing

      - name: Install langchain core editable
        shell: bash
        run: |
          poetry run pip install -e ../core
          poetry install -E extended_testing --with test

      - name: Run extended tests
        run: make extended_tests

@@ -38,7 +38,7 @@ jobs:

      - name: Install integration dependencies
        shell: bash
        run: poetry install --with=test_integration
        run: poetry install --with=test_integration,test

      - name: Check integration tests compile
        shell: bash

@@ -1,4 +1,4 @@
name: pydantic v1/v2 compatibility
name: dependencies

on:
  workflow_call:

@@ -28,7 +28,7 @@ jobs:
          - "3.9"
          - "3.10"
          - "3.11"
    name: Pydantic v1/v2 compatibility - Python ${{ matrix.python-version }}
    name: dependencies - Python ${{ matrix.python-version }}
    steps:
      - uses: actions/checkout@v4

@@ -44,6 +44,14 @@ jobs:
        shell: bash
        run: poetry install

      - name: Check imports with base dependencies
        shell: bash
        run: poetry run make check_imports

      - name: Install test dependencies
        shell: bash
        run: poetry install --with test

      - name: Install langchain editable
        working-directory: ${{ inputs.working-directory }}
        if: ${{ inputs.langchain-location }}
29  .github/workflows/_lint.yml (vendored)
@@ -90,4 +90,31 @@ jobs:
      - name: Analysing the code with our lint
        working-directory: ${{ inputs.working-directory }}
        run: |
          make lint
          make lint_package

      - name: Install test dependencies
        # Also installs dev/lint/test/typing dependencies, to ensure we have
        # type hints for as many of our libraries as possible.
        # This helps catch errors that require dependencies to be spotted, for example:
        # https://github.com/langchain-ai/langchain/pull/10249/files#diff-935185cd488d015f026dcd9e19616ff62863e8cde8c0bee70318d3ccbca98341
        #
        # If you change this configuration, make sure to change the `cache-key`
        # in the `poetry_setup` action above to stop using the old cache.
        # It doesn't matter how you change it, any change will cause a cache-bust.
        working-directory: ${{ inputs.working-directory }}
        run: |
          poetry install --with test

      - name: Get .mypy_cache to speed up mypy
        uses: actions/cache@v3
        env:
          SEGMENT_DOWNLOAD_TIMEOUT_MIN: "2"
        with:
          path: |
            ${{ env.WORKDIR }}/.mypy_cache
          key: mypy-test-${{ runner.os }}-${{ runner.arch }}-py${{ matrix.python-version }}-${{ inputs.working-directory }}-${{ hashFiles(format('{0}/poetry.lock', env.WORKDIR)) }}

      - name: Analysing the code with our lint
        working-directory: ${{ inputs.working-directory }}
        run: |
          make lint_tests
13  .github/workflows/langchain_community_release.yml (vendored, new file)
@@ -0,0 +1,13 @@
---
name: libs/community Release

on:
  workflow_dispatch: # Allows to trigger the workflow manually in GitHub UI

jobs:
  release:
    uses:
      ./.github/workflows/_release.yml
    with:
      working-directory: libs/community
    secrets: inherit
12  .github/workflows/scheduled_test.yml (vendored)
@@ -52,13 +52,7 @@ jobs:
        shell: bash
        run: |
          echo "Running scheduled tests, installing dependencies with poetry..."
          poetry install --with=test_integration
          poetry run pip install google-cloud-aiplatform
          poetry run pip install "boto3>=1.28.57"
          if [[ ${{ matrix.python-version }} != "3.8" ]]
          then
            poetry run pip install fireworks-ai
          fi
          poetry install --with=test_integration,test

      - name: Run tests
        shell: bash

@@ -68,7 +62,9 @@ jobs:
          AZURE_OPENAI_API_VERSION: ${{ secrets.AZURE_OPENAI_API_VERSION }}
          AZURE_OPENAI_API_BASE: ${{ secrets.AZURE_OPENAI_API_BASE }}
          AZURE_OPENAI_API_KEY: ${{ secrets.AZURE_OPENAI_API_KEY }}
          AZURE_OPENAI_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_DEPLOYMENT_NAME }}
          AZURE_OPENAI_CHAT_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_CHAT_DEPLOYMENT_NAME }}
          AZURE_OPENAI_LLM_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_LLM_DEPLOYMENT_NAME }}
          AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_NAME }}
          FIREWORKS_API_KEY: ${{ secrets.FIREWORKS_API_KEY }}
        run: |
          make scheduled_tests
1  .github/workflows/templates_ci.yml (vendored)
@@ -33,5 +33,4 @@ jobs:
      ./.github/workflows/_lint.yml
    with:
      working-directory: templates
      langchain-location: ../libs/langchain
    secrets: inherit
3  .gitignore (vendored)
@@ -167,8 +167,7 @@ docs/node_modules/
docs/.docusaurus/
docs/.cache-loader/
docs/_dist
docs/api_reference/api_reference.rst
docs/api_reference/experimental_api_reference.rst
docs/api_reference/*api_reference.rst
docs/api_reference/_build
docs/api_reference/*/
!docs/api_reference/_static/
2  Makefile
@@ -41,7 +41,7 @@ spell_fix:
# LINTING AND FORMATTING
######################

lint:
lint lint_package lint_tests:
    poetry run ruff docs templates cookbook
    poetry run ruff format docs templates cookbook --diff
    poetry run ruff --select I docs templates cookbook
@@ -31,7 +31,7 @@
"source": [
"import re\n",
"\n",
"from IPython.display import Image\n",
"from IPython.display import Image, display\n",
"from steamship import Block, Steamship"
]
},
@@ -180,7 +180,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.3"
"version": "3.10.12"
}
},
"nbformat": 4,
@@ -37,7 +37,8 @@
"source": [
"#!pip install qianfan\n",
"#!pip install bce-python-sdk\n",
"#!pip install elasticsearch == 7.11.0"
"#!pip install elasticsearch == 7.11.0\n",
"#!pip install sentence-transformers"
]
},
{
@@ -54,8 +55,10 @@
"metadata": {},
"outputs": [],
"source": [
"import sentence_transformers\n",
"from baidubce.auth.bce_credentials import BceCredentials\n",
"from baidubce.bce_client_configuration import BceClientConfiguration\n",
"from langchain.chains.retrieval_qa import RetrievalQA\n",
"from langchain.document_loaders.baiducloud_bos_directory import BaiduBOSDirectoryLoader\n",
"from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n",
"from langchain.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint\n",
@@ -161,15 +164,22 @@
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"version": "3.9.17"
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12"
},
"orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "aee8b7b246df8f9039afb4144a1f6fd8d2ca17a180786b69acc140d282b71a49"
@@ -177,5 +187,5 @@
}
},
"nbformat": 4,
"nbformat_minor": 2
"nbformat_minor": 4
}
@@ -133,7 +133,7 @@
"from tqdm import tqdm\n",
"\n",
"for i in tqdm(range(len(title_embeddings))):\n",
" title = titles[i].replace(\"'\", \"''\")\n",
" title = song_titles[i].replace(\"'\", \"''\")\n",
" embedding = title_embeddings[i]\n",
" sql_command = (\n",
" f'UPDATE \"Track\" SET \"embeddings\" = ARRAY{embedding} WHERE \"Name\" ='\n",
@@ -681,9 +681,9 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.18"
"version": "3.10.12"
}
},
"nbformat": 4,
"nbformat_minor": 2
"nbformat_minor": 4
}
@@ -187,7 +187,7 @@
" for key in path:\n",
" try:\n",
" current = current[key]\n",
" except:\n",
" except KeyError:\n",
" return None\n",
" return current\n",
"\n",
@@ -196,11 +196,13 @@ def _load_package_modules(
    return modules_by_namespace


def _construct_doc(pkg: str, members_by_namespace: Dict[str, ModuleMembers]) -> str:
def _construct_doc(
    package_namespace: str, members_by_namespace: Dict[str, ModuleMembers]
) -> str:
    """Construct the contents of the reference.rst file for the given package.

    Args:
        pkg: The package name
        package_namespace: The package top level namespace
        members_by_namespace: The members of the package, dict organized by top level
                              module contains a list of classes and functions
                              inside of the top level namespace.

@@ -210,7 +212,7 @@ def _construct_doc(pkg: str, members_by_namespace: Dict[str, ModuleMembers]) ->
    """
    full_doc = f"""\
=======================
``{pkg}`` API Reference
``{package_namespace}`` API Reference
=======================

"""
@@ -222,13 +224,13 @@ def _construct_doc(pkg: str, members_by_namespace: Dict[str, ModuleMembers]) ->
        functions = _members["functions"]
        if not (classes or functions):
            continue
        section = f":mod:`{pkg}.{module}`"
        section = f":mod:`{package_namespace}.{module}`"
        underline = "=" * (len(section) + 1)
        full_doc += f"""\
{section}
{underline}

.. automodule:: {pkg}.{module}
.. automodule:: {package_namespace}.{module}
    :no-members:
    :no-inherited-members:

@@ -238,7 +240,7 @@ def _construct_doc(pkg: str, members_by_namespace: Dict[str, ModuleMembers]) ->
        full_doc += f"""\
Classes
--------------
.. currentmodule:: {pkg}
.. currentmodule:: {package_namespace}

.. autosummary::
    :toctree: {module}
@@ -270,7 +272,7 @@ Classes
        full_doc += f"""\
Functions
--------------
.. currentmodule:: {pkg}
.. currentmodule:: {package_namespace}

.. autosummary::
    :toctree: {module}
@@ -282,57 +284,57 @@ Functions
    return full_doc


def _document_langchain_experimental() -> None:
    """Document the langchain_experimental package."""
    # Generate experimental_api_reference.rst
    exp_members = _load_package_modules(EXP_DIR)
    exp_doc = ".. _experimental_api_reference:\n\n" + _construct_doc(
        "langchain_experimental", exp_members
    )
    with open(EXP_WRITE_FILE, "w") as f:
        f.write(exp_doc)
def _build_rst_file(package_name: str = "langchain") -> None:
    """Create a rst file for building of documentation.

    Args:
        package_name: Can be either "langchain" or "core" or "experimental".
    """
    package_members = _load_package_modules(_package_dir(package_name))
    with open(_out_file_path(package_name), "w") as f:
        f.write(
            _doc_first_line(package_name)
            + _construct_doc(package_namespace[package_name], package_members)
        )


def _document_langchain_core() -> None:
    """Document the langchain_core package."""
    # Generate core_api_reference.rst
    core_members = _load_package_modules(CORE_DIR)
    core_doc = ".. _core_api_reference:\n\n" + _construct_doc(
        "langchain_core", core_members
    )
    with open(CORE_WRITE_FILE, "w") as f:
        f.write(core_doc)
package_namespace = {
    "langchain": "langchain",
    "experimental": "langchain_experimental",
    "core": "langchain_core",
}


def _document_langchain() -> None:
    """Document the main langchain package."""
    # load top level module members
    lc_members = _load_package_modules(PKG_DIR)
def _package_dir(package_name: str = "langchain") -> Path:
    """Return the path to the directory containing the documentation."""
    return ROOT_DIR / "libs" / package_name / package_namespace[package_name]

    # Add additional packages
    tools = _load_package_modules(PKG_DIR, "tools")
    agents = _load_package_modules(PKG_DIR, "agents")
    schema = _load_package_modules(PKG_DIR, "schema")

    lc_members.update(
        {
            "agents.output_parsers": agents["output_parsers"],
            "agents.format_scratchpad": agents["format_scratchpad"],
            "tools.render": tools["render"],
        }
    )
def _out_file_path(package_name: str = "langchain") -> Path:
    """Return the path to the file containing the documentation."""
    name_prefix = {
        "langchain": "",
        "experimental": "experimental_",
        "core": "core_",
    }
    return HERE / f"{name_prefix[package_name]}api_reference.rst"

    lc_doc = ".. _api_reference:\n\n" + _construct_doc("langchain", lc_members)

    with open(WRITE_FILE, "w") as f:
        f.write(lc_doc)
def _doc_first_line(package_name: str = "langchain") -> str:
    """Return the path to the file containing the documentation."""
    prefix = {
        "langchain": "",
        "experimental": "experimental",
        "core": "core",
    }
    return f".. {prefix[package_name]}_api_reference:\n\n"


def main() -> None:
    """Generate the reference.rst file for each package."""
    _document_langchain()
    _document_langchain_experimental()
    _document_langchain_core()
    """Generate the api_reference.rst file for each package."""
    _build_rst_file(package_name="core")
    _build_rst_file(package_name="langchain")
    _build_rst_file(package_name="experimental")


if __name__ == "__main__":
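This refactor collapses three near-identical `_document_*` functions into one table-driven `_build_rst_file`. A minimal sketch of how the new helpers compose, assuming the module-level constants (`ROOT_DIR`, `HERE`) defined earlier in the same script; the comments trace hypothetical values, not output of the diff:

```python
# For package_name="core":
#   _package_dir("core")    -> ROOT_DIR / "libs" / "core" / "langchain_core"
#   _out_file_path("core")  -> HERE / "core_api_reference.rst"
#   _doc_first_line("core") -> ".. core_api_reference:\n\n"
# so one call renders and writes the whole core API reference page:
_build_rst_file(package_name="core")
```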
File diff suppressed because it is too large.
@@ -91,7 +91,7 @@
"with patch(\"openai.resources.chat.completions.Completions.create\", side_effect=error):\n",
" try:\n",
" print(openai_llm.invoke(\"Why did the chicken cross the road?\"))\n",
" except:\n",
" except RateLimitError:\n",
" print(\"Hit error\")"
]
},
@@ -114,7 +114,7 @@
"with patch(\"openai.resources.chat.completions.Completions.create\", side_effect=error):\n",
" try:\n",
" print(llm.invoke(\"Why did the chicken cross the road?\"))\n",
" except:\n",
" except RateLimitError:\n",
" print(\"Hit error\")"
]
},
@@ -156,7 +156,7 @@
"with patch(\"openai.resources.chat.completions.Completions.create\", side_effect=error):\n",
" try:\n",
" print(chain.invoke({\"animal\": \"kangaroo\"}))\n",
" except:\n",
" except RateLimitError:\n",
" print(\"Hit error\")"
]
},
@@ -190,10 +190,10 @@
")\n",
"\n",
"chain = prompt | llm\n",
"with patch(\"openai.ChatCompletion.create\", side_effect=error):\n",
"with patch(\"openai.resources.chat.completions.Completions.create\", side_effect=error):\n",
" try:\n",
" print(chain.invoke({\"animal\": \"kangaroo\"}))\n",
" except:\n",
" except RateLimitError:\n",
" print(\"Hit error\")"
]
},
@@ -291,7 +291,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.5"
"version": "3.10.12"
}
},
"nbformat": 4,
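These cells replace bare `except:` clauses with the specific exception being simulated, so unrelated failures are no longer swallowed. A minimal sketch of the pattern under the OpenAI v1 client (whose `openai.resources.chat.completions.Completions.create` path the cells patch); the request/response objects are placeholders only assumed to satisfy the constructor:

```python
import httpx
from openai import RateLimitError

# Build the error the notebook simulates via unittest.mock.patch.
request = httpx.Request("GET", "/")
response = httpx.Response(429, request=request)
error = RateLimitError("rate limit", response=response, body="")
```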
@@ -5,13 +5,13 @@
"id": "465cfbef-5bba-4b3b-b02d-fe2eba39db17",
"metadata": {},
"source": [
"# Evaluating Structured Output: JSON Evaluators\n",
"# JSON Evaluators\n",
"\n",
"Evaluating [extraction](https://python.langchain.com/docs/use_cases/extraction) and function calling applications often comes down to validation that the LLM's string output can be parsed correctly and how it compares to a reference object. The following JSON validators provide provide functionality to check your model's output in a consistent way.\n",
"Evaluating [extraction](https://python.langchain.com/docs/use_cases/extraction) and function calling applications often comes down to validation that the LLM's string output can be parsed correctly and how it compares to a reference object. The following `JSON` validators provide functionality to check your model's output consistently.\n",
"\n",
"## JsonValidityEvaluator\n",
"\n",
"The `JsonValidityEvaluator` is designed to check the validity of a JSON string prediction.\n",
"The `JsonValidityEvaluator` is designed to check the validity of a `JSON` string prediction.\n",
"\n",
"### Overview:\n",
"- **Requires Input?**: No\n",
@@ -377,7 +377,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.2"
"version": "3.10.12"
}
},
"nbformat": 4,
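As context for the cells above, a minimal sketch of the evaluator this notebook documents, assuming the `langchain.evaluation` loader API of this release:

```python
from langchain.evaluation import load_evaluator

# JsonValidityEvaluator needs no input or reference, only a prediction.
evaluator = load_evaluator("json_validity")
result = evaluator.evaluate_strings(prediction='{"name": "John", "age": 30}')
print(result)  # score 1 for parseable JSON; 0 plus an error message otherwise
```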
@@ -8,9 +8,12 @@
"# String Distance\n",
"[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/guides/evaluation/string/string_distance.ipynb)\n",
"\n",
"One of the simplest ways to compare an LLM or chain's string output against a reference label is by using string distance measurements such as Levenshtein or postfix distance. This can be used alongside approximate/fuzzy matching criteria for very basic unit testing.\n",
">In information theory, linguistics, and computer science, the [Levenshtein distance (Wikipedia)](https://en.wikipedia.org/wiki/Levenshtein_distance) is a string metric for measuring the difference between two sequences. Informally, the Levenshtein distance between two words is the minimum number of single-character edits (insertions, deletions or substitutions) required to change one word into the other. It is named after the Soviet mathematician Vladimir Levenshtein, who considered this distance in 1965.\n",
"\n",
"This can be accessed using the `string_distance` evaluator, which uses distance metric's from the [rapidfuzz](https://github.com/maxbachmann/RapidFuzz) library.\n",
"\n",
"One of the simplest ways to compare an LLM or chain's string output against a reference label is by using string distance measurements such as `Levenshtein` or `postfix` distance. This can be used alongside approximate/fuzzy matching criteria for very basic unit testing.\n",
"\n",
"This can be accessed using the `string_distance` evaluator, which uses distance metrics from the [rapidfuzz](https://github.com/maxbachmann/RapidFuzz) library.\n",
"\n",
"**Note:** The returned scores are _distances_, meaning lower is typically \"better\".\n",
"\n",
@@ -213,9 +216,9 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.2"
"version": "3.10.12"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
}
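To ground the Levenshtein prose above: "kitten" to "sitting" takes three single-character edits, so its Levenshtein distance is 3. A minimal worked example of the evaluator, assuming the `langchain.evaluation` loader API of this release; the score shown is illustrative, since rapidfuzz reports a normalized distance:

```python
from langchain.evaluation import load_evaluator

evaluator = load_evaluator("string_distance")
result = evaluator.evaluate_strings(
    prediction="The job is completely done.",
    reference="The job is done",
)
print(result)  # e.g. {"score": 0.11...}; lower means closer to the reference
```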
@@ -93,7 +93,7 @@
"with patch(\"openai.resources.chat.completions.Completions.create\", side_effect=error):\n",
" try:\n",
" print(openai_llm.invoke(\"Why did the chicken cross the road?\"))\n",
" except:\n",
" except RateLimitError:\n",
" print(\"Hit error\")"
]
},
@@ -116,7 +116,7 @@
"with patch(\"openai.resources.chat.completions.Completions.create\", side_effect=error):\n",
" try:\n",
" print(llm.invoke(\"Why did the chicken cross the road?\"))\n",
" except:\n",
" except RateLimitError:\n",
" print(\"Hit error\")"
]
},
@@ -158,7 +158,7 @@
"with patch(\"openai.resources.chat.completions.Completions.create\", side_effect=error):\n",
" try:\n",
" print(chain.invoke({\"animal\": \"kangaroo\"}))\n",
" except:\n",
" except RateLimitError:\n",
" print(\"Hit error\")"
]
},
@@ -284,6 +284,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.callbacks.manager import CallbackManager\n",
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
"from langchain.llms import LlamaCpp\n",
"\n",
"llm = LlamaCpp(\n",
@@ -8,6 +8,8 @@
"\n",
"[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/guides/privacy/presidio_data_anonymization/index.ipynb)\n",
"\n",
">[Presidio](https://microsoft.github.io/presidio/) (Origin from Latin praesidium ‘protection, garrison’) helps to ensure sensitive data is properly managed and governed. It provides fast identification and anonymization modules for private entities in text and images such as credit card numbers, names, locations, social security numbers, bitcoin wallets, US phone numbers, financial data and more.\n",
"\n",
"## Use case\n",
"\n",
"Data anonymization is crucial before passing information to a language model like GPT-4 because it helps protect privacy and maintain confidentiality. If data is not anonymized, sensitive information such as names, addresses, contact numbers, or other identifiers linked to specific individuals could potentially be learned and misused. Hence, by obscuring or removing this personally identifiable information (PII), data can be used freely without compromising individuals' privacy rights or breaching data protection laws and regulations.\n",
@@ -530,7 +532,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
"version": "3.10.12"
}
},
"nbformat": 4,
@@ -7,12 +7,13 @@
"source": [
"# PromptLayer\n",
"\n",
">[PromptLayer](https://docs.promptlayer.com/introduction) is a platform for prompt engineering. It also helps with the LLM observability to visualize requests, version prompts, and track usage.\n",
">\n",
">While `PromptLayer` does have LLMs that integrate directly with LangChain (e.g. [`PromptLayerOpenAI`](https://python.langchain.com/docs/integrations/llms/promptlayer_openai)), using a callback is the recommended way to integrate `PromptLayer` with LangChain.\n",
"\n",
">[PromptLayer](https://promptlayer.com) is a an LLM observability platform that lets you visualize requests, version prompts, and track usage. In this guide we will go over how to setup the `PromptLayerCallbackHandler`. \n",
"In this guide, we will go over how to setup the `PromptLayerCallbackHandler`. \n",
"\n",
"While `PromptLayer` does have LLMs that integrate directly with LangChain (e.g. [`PromptLayerOpenAI`](https://python.langchain.com/docs/integrations/llms/promptlayer_openai)), this callback is the recommended way to integrate PromptLayer with LangChain.\n",
"\n",
"See [our docs](https://docs.promptlayer.com/languages/langchain) for more information."
"See [PromptLayer docs](https://docs.promptlayer.com/languages/langchain) for more information."
]
},
{
@@ -34,18 +34,18 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": null,
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"#!pip install langchain google-cloud-aiplatform"
"!pip install -U google-cloud-aiplatform"
]
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
@@ -55,43 +55,29 @@
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"chat = ChatVertexAI()"
]
},
{
"cell_type": "code",
"execution_count": 34,
"metadata": {},
"outputs": [],
"source": [
"system = \"You are a helpful assistant who translate English to French\"\n",
"human = \"Translate this sentence from English to French. I love programming.\"\n",
"prompt = ChatPromptTemplate.from_messages([(\"system\", system), (\"human\", human)])\n",
"messages = prompt.format_messages()"
]
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 2,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\" J'aime la programmation.\", additional_kwargs={}, example=False)"
"AIMessage(content=\" J'aime la programmation.\")"
]
},
"execution_count": 9,
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chat(messages)"
"system = \"You are a helpful assistant who translate English to French\"\n",
"human = \"Translate this sentence from English to French. I love programming.\"\n",
"prompt = ChatPromptTemplate.from_messages([(\"system\", system), (\"human\", human)])\n",
"\n",
"chat = ChatVertexAI()\n",
"\n",
"chain = prompt | chat\n",
"chain.invoke({})"
]
},
{
@@ -103,35 +89,29 @@
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [],
"source": [
"system = (\n",
" \"You are a helpful assistant that translates {input_language} to {output_language}.\"\n",
")\n",
"human = \"{text}\"\n",
"prompt = ChatPromptTemplate.from_messages([(\"system\", system), (\"human\", human)])"
]
},
{
"cell_type": "code",
"execution_count": 13,
"execution_count": 3,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=' 私はプログラミングが大好きです。', additional_kwargs={}, example=False)"
"AIMessage(content=' プログラミングが大好きです')"
]
},
"execution_count": 13,
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"system = (\n",
" \"You are a helpful assistant that translates {input_language} to {output_language}.\"\n",
")\n",
"human = \"{text}\"\n",
"prompt = ChatPromptTemplate.from_messages([(\"system\", system), (\"human\", human)])\n",
"\n",
"chain = prompt | chat\n",
"\n",
"chain.invoke(\n",
" {\n",
" \"input_language\": \"English\",\n",
@@ -162,20 +142,7 @@
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"chat = ChatVertexAI(\n",
" model_name=\"codechat-bison\", max_output_tokens=1000, temperature=0.5\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 20,
"execution_count": 4,
"metadata": {
"tags": []
},
@@ -185,20 +152,39 @@
"output_type": "stream",
"text": [
" ```python\n",
"def is_prime(x): \n",
" if (x <= 1): \n",
"def is_prime(n):\n",
" if n <= 1:\n",
" return False\n",
" for i in range(2, x): \n",
" if (x % i == 0): \n",
" for i in range(2, n):\n",
" if n % i == 0:\n",
" return False\n",
" return True\n",
"\n",
"def find_prime_numbers(n):\n",
" prime_numbers = []\n",
" for i in range(2, n + 1):\n",
" if is_prime(i):\n",
" prime_numbers.append(i)\n",
" return prime_numbers\n",
"\n",
"print(find_prime_numbers(100))\n",
"```\n",
"\n",
"Output:\n",
"\n",
"```\n",
"[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97]\n",
"```\n"
]
}
],
"source": [
"# For simple string in string out usage, we can use the `predict` method:\n",
"print(chat.predict(\"Write a Python function to identify all prime numbers\"))"
"chat = ChatVertexAI(\n",
" model_name=\"codechat-bison\", max_output_tokens=1000, temperature=0.5\n",
")\n",
"\n",
"message = chat.invoke(\"Write a Python function to identify all prime numbers\")\n",
"print(message.content)"
]
},
{
@@ -207,66 +193,47 @@
"source": [
"## Asynchronous calls\n",
"\n",
"We can make asynchronous calls via the `agenerate` and `ainvoke` methods."
"We can make asynchronous calls via the Runnables [Async Interface](/docs/expression_language/interface)"
]
},
{
"cell_type": "code",
"execution_count": 23,
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"# for running these examples in the notebook:\n",
"import asyncio\n",
"\n",
"# import nest_asyncio\n",
"# nest_asyncio.apply()"
]
},
{
"cell_type": "code",
"execution_count": 35,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"LLMResult(generations=[[ChatGeneration(text=\" J'aime la programmation.\", generation_info=None, message=AIMessage(content=\" J'aime la programmation.\", additional_kwargs={}, example=False))]], llm_output={}, run=[RunInfo(run_id=UUID('223599ef-38f8-4c79-ac6d-a5013060eb9d'))])"
]
},
"execution_count": 35,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chat = ChatVertexAI(\n",
" model_name=\"chat-bison\",\n",
" max_output_tokens=1000,\n",
" temperature=0.7,\n",
" top_p=0.95,\n",
" top_k=40,\n",
")\n",
"import nest_asyncio\n",
"\n",
"asyncio.run(chat.agenerate([messages]))"
"nest_asyncio.apply()"
]
},
{
"cell_type": "code",
"execution_count": 36,
"execution_count": 6,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=' अहं प्रोग्रामिंग प्रेमामि', additional_kwargs={}, example=False)"
"AIMessage(content=' Why do you love programming?')"
]
},
"execution_count": 36,
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"system = (\n",
" \"You are a helpful assistant that translates {input_language} to {output_language}.\"\n",
")\n",
"human = \"{text}\"\n",
"prompt = ChatPromptTemplate.from_messages([(\"system\", system), (\"human\", human)])\n",
"chain = prompt | chat\n",
"\n",
"asyncio.run(\n",
" chain.ainvoke(\n",
" {\n",
@@ -289,56 +256,51 @@
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import sys"
]
},
{
"cell_type": "code",
"execution_count": 32,
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
" 1. China (1,444,216,107)\n",
"2. India (1,393,409,038)\n",
"3. United States (332,403,650)\n",
"4. Indonesia (273,523,615)\n",
"5. Pakistan (220,892,340)\n",
"6. Brazil (212,559,409)\n",
"7. Nigeria (206,139,589)\n",
"8. Bangladesh (164,689,383)\n",
"9. Russia (145,934,462)\n",
"10. Mexico (128,932,488)\n",
"11. Japan (126,476,461)\n",
"12. Ethiopia (115,063,982)\n",
"13. Philippines (109,581,078)\n",
"14. Egypt (102,334,404)\n",
"15. Vietnam (97,338,589)"
" The five most populous countries in the world are:\n",
"1. China (1.4 billion)\n",
"2. India (1.3 billion)\n",
"3. United States (331 million)\n",
"4. Indonesia (273 million)\n",
"5. Pakistan (220 million)"
]
}
],
"source": [
"import sys\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [(\"human\", \"List out the 15 most populous countries in the world\")]\n",
" [(\"human\", \"List out the 5 most populous countries in the world\")]\n",
")\n",
"messages = prompt.format_messages()\n",
"for chunk in chat.stream(messages):\n",
"\n",
"chat = ChatVertexAI()\n",
"\n",
"chain = prompt | chat\n",
"\n",
"for chunk in chain.stream({}):\n",
" sys.stdout.write(chunk.content)\n",
" sys.stdout.flush()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "poetry-venv",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "poetry-venv"
"name": "python3"
},
"language_info": {
"codemirror_mode": {
@@ -350,7 +312,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
"version": "3.11.4"
},
"vscode": {
"interpreter": {
@@ -129,12 +129,6 @@
"**The above request should now appear on your [PromptLayer dashboard](https://www.promptlayer.com).**"
]
},
{
"cell_type": "markdown",
"id": "05e9e2fe",
"metadata": {},
"source": []
},
{
"attachments": {},
"cell_type": "markdown",
@@ -152,6 +146,8 @@
"metadata": {},
"outputs": [],
"source": [
"import promptlayer\n",
"\n",
"chat = PromptLayerChatOpenAI(return_pl_id=True)\n",
"chat_results = chat.generate([[HumanMessage(content=\"I am a cat and I want\")]])\n",
"\n",
@@ -172,7 +168,7 @@
],
"metadata": {
"kernelspec": {
"display_name": "base",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
@@ -186,7 +182,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.8 (default, Apr 13 2021, 12:59:45) \n[Clang 10.0.0 ]"
"version": "3.10.12"
},
"vscode": {
"interpreter": {
@@ -153,7 +153,7 @@
"source": [
"# Now all of the Tortoise's messages will take the AI message class\n",
"# which maps to the 'assistant' role in OpenAI's training format\n",
"alternating_sessions[0][\"messages\"][:3]"
"chat_sessions[0][\"messages\"][:3]"
]
},
{
@@ -191,7 +191,7 @@
}
],
"source": [
"training_data = convert_messages_for_finetuning(alternating_sessions)\n",
"training_data = convert_messages_for_finetuning(chat_sessions)\n",
"print(f\"Prepared {len(training_data)} dialogues for training\")"
]
},
@@ -416,7 +416,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.5"
"version": "3.10.12"
}
},
"nbformat": 4,
@@ -23,8 +23,18 @@
"source": [
"from langchain.document_loaders import ArcGISLoader\n",
"\n",
"url = \"https://maps1.vcgov.org/arcgis/rest/services/Beaches/MapServer/7\"\n",
"loader = ArcGISLoader(url)"
"URL = \"https://maps1.vcgov.org/arcgis/rest/services/Beaches/MapServer/7\"\n",
"loader = ArcGISLoader(URL)\n",
"\n",
"docs = loader.load()"
]
},
{
"cell_type": "markdown",
"id": "1e174ebd-bbbd-4a66-a644-51e0df12982d",
"metadata": {},
"source": [
"Let's measure loader latency."
]
},
{
@@ -261,7 +271,7 @@
"metadata": {},
"outputs": [],
"source": [
"loader_geom = ArcGISLoader(url, return_geometry=True)"
"loader_geom = ArcGISLoader(URL, return_geometry=True)"
]
},
{
@@ -30,6 +30,16 @@
"#!pip install datadog-api-client"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"DD_API_KEY = \"...\"\n",
"DD_APP_KEY = \"...\""
]
},
{
"cell_type": "code",
"execution_count": null,
@@ -73,7 +83,7 @@
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
@@ -87,10 +97,9 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.11"
},
"orig_nbformat": 4
"version": "3.10.12"
}
},
"nbformat": 4,
"nbformat_minor": 2
"nbformat_minor": 4
}
@@ -65,6 +65,16 @@
"%pip install langchain -q"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2ab73cc1-d8e0-4b6d-bb03-9522b112fce5",
"metadata": {},
"outputs": [],
"source": [
"etherscanAPIKey = \"...\""
]
},
{
"cell_type": "code",
"execution_count": 1,
@@ -74,7 +74,9 @@
"source": [
"# see https://python.langchain.com/docs/use_cases/summarization for more details\n",
"from langchain.chains.summarize import load_summarize_chain\n",
"from langchain.llms.fake import FakeListLLM\n",
"\n",
"llm = FakeListLLM()\n",
"chain = load_summarize_chain(llm, chain_type=\"map_reduce\")\n",
"chain.run(docs)"
]
@@ -96,7 +98,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.3"
"version": "3.10.12"
}
},
"nbformat": 4,
@@ -166,6 +166,9 @@
}
],
"source": [
"from langchain_core.documents import Document\n",
"\n",
"\n",
"def decode_to_str(item: tf.Tensor) -> str:\n",
" return item.numpy().decode(\"utf-8\")\n",
"\n",
@@ -12,6 +12,18 @@
"This example goes over how to use LangChain to interact with [Anyscale Endpoint](https://app.endpoints.anyscale.com/). "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "515070aa-e241-480e-8d9a-afdf52f35322",
"metadata": {},
"outputs": [],
"source": [
"ANYSCALE_API_BASE = \"...\"\n",
"ANYSCALE_API_KEY = \"...\"\n",
"ANYSCALE_MODEL_NAME = \"...\""
]
},
{
"cell_type": "code",
"execution_count": null,
@@ -160,7 +172,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.8"
"version": "3.10.12"
},
"vscode": {
"interpreter": {
@@ -112,6 +112,24 @@
"## Using NIBittensorLLM with Conversational Agent and Google Search Tool"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain.tools import Tool\n",
"from langchain.utilities import GoogleSearchAPIWrapper\n",
"\n",
"search = GoogleSearchAPIWrapper()\n",
"\n",
"tool = Tool(\n",
" name=\"Google Search\",\n",
" description=\"Search Google for recent results.\",\n",
" func=search.run,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
@@ -129,7 +147,7 @@
"\n",
"memory = ConversationBufferMemory(memory_key=\"chat_history\")\n",
"\n",
"\n",
"tools = [tool]\n",
"prefix = \"\"\"Answer prompt based on LLM if there is need to search something then use internet and observe internet result and give accurate reply of user questions also try to use authenticated sources\"\"\"\n",
"suffix = \"\"\"Begin!\n",
" {chat_history}\n",
@@ -137,14 +155,14 @@
" {agent_scratchpad}\"\"\"\n",
"\n",
"prompt = ZeroShotAgent.create_prompt(\n",
" tools,\n",
" tools=tools,\n",
" prefix=prefix,\n",
" suffix=suffix,\n",
" input_variables=[\"input\", \"chat_history\", \"agent_scratchpad\"],\n",
")\n",
"\n",
"llm = NIBittensorLLM(\n",
" system_prompt=\"Your task is to determine response based on user prompt\"\n",
" system_prompt=\"Your task is to determine a response based on user prompt\"\n",
")\n",
"\n",
"llm_chain = LLMChain(llm=llm, prompt=prompt)\n",
@@ -176,7 +194,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
"version": "3.10.12"
}
},
"nbformat": 4,
@@ -33,7 +33,13 @@
"\n",
"```\n",
"pip install mlflow>=2.9\n",
"```"
"```\n",
"\n",
"Also, we need `dbutils` for this example.\n",
"\n",
"```\n",
"pip install dbutils\n",
"```\n"
]
},
{
@@ -269,6 +275,8 @@
"\n",
"import os\n",
"\n",
"import dbutils\n",
"\n",
"os.environ[\"DATABRICKS_TOKEN\"] = dbutils.secrets.get(\"myworkspace\", \"api_token\")\n",
"\n",
"llm = Databricks(host=\"myworkspace.cloud.databricks.com\", endpoint_name=\"dolly\")\n",
@@ -606,7 +614,7 @@
"widgets": {}
},
"kernelspec": {
"display_name": "llm",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
@@ -620,10 +628,9 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.18"
},
"orig_nbformat": 4
"version": "3.10.12"
}
},
"nbformat": 4,
"nbformat_minor": 0
"nbformat_minor": 4
}
@@ -28,7 +28,6 @@
"cell_type": "markdown",
"id": "b50f0598",
"metadata": {
"jp-MarkdownHeadingCollapsed": true,
"tags": []
},
"source": [
@@ -113,7 +112,6 @@
"cell_type": "markdown",
"id": "4bf59c12",
"metadata": {
"jp-MarkdownHeadingCollapsed": true,
"tags": []
},
"source": [
@@ -231,6 +229,7 @@
"metadata": {},
"outputs": [],
"source": [
"import langchain\n",
"from langchain.cache import UpstashRedisCache\n",
"from upstash_redis import Redis\n",
"\n",
@@ -1589,7 +1588,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
"version": "3.10.12"
}
},
"nbformat": 4,
@@ -289,7 +289,7 @@
"source": [
"pipeline = load_pipeline()\n",
"llm = SelfHostedPipeline.from_pipeline(\n",
" pipeline=pipeline, hardware=gpu, model_reqs=model_reqs\n",
" pipeline=pipeline, hardware=gpu, model_reqs=[\"pip:./\", \"transformers\", \"torch\"]\n",
")"
]
},
@@ -308,6 +308,8 @@
"metadata": {},
"outputs": [],
"source": [
"import pickle\n",
"\n",
"rh.blob(pickle.dumps(pipeline), path=\"models/pipeline.pkl\").save().to(\n",
" gpu, path=\"models\"\n",
")\n",
@@ -332,7 +334,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.6"
"version": "3.10.12"
}
},
"nbformat": 4,
@@ -54,7 +54,6 @@ See a [usage example](/docs/integrations/chat/google_vertex_ai_palm).
from langchain.chat_models import ChatVertexAI
```

## Document Loaders
### Google BigQuery

@@ -132,8 +131,6 @@ See a [usage example and authorization instructions](/docs/integrations/document
from langchain.document_loaders import GoogleSpeechToTextLoader
```

## Vector Stores

### Google Vertex AI Vector Search

@@ -264,14 +261,9 @@ from langchain.tools import GooglePlacesTool

### Google Search

We need to install a python package.

```bash
pip install google-api-python-client
```

- Set up a Custom Search Engine, following [these instructions](https://stackoverflow.com/questions/37083058/programmatically-searching-google-in-python-using-custom-search)
- Get an API Key and Custom Search Engine ID from the previous step, and set them as environment variables `GOOGLE_API_KEY` and `GOOGLE_CSE_ID` respectively
- Get an API Key and Custom Search Engine ID from the previous step, and set them as environment variables
`GOOGLE_API_KEY` and `GOOGLE_CSE_ID` respectively.

```python
from langchain.utilities import GoogleSearchAPIWrapper
@@ -286,6 +278,74 @@ from langchain.agents import load_tools
tools = load_tools(["google-search"])
```

### Google Finance

We need to install a python package.

```bash
pip install google-search-results
```

See a [usage example and authorization instructions](/docs/integrations/tools/google_finance).

```python
from langchain.tools.google_finance import GoogleFinanceQueryRun
from langchain.utilities.google_finance import GoogleFinanceAPIWrapper
```

### Google Jobs

We need to install a python package.

```bash
pip install google-search-results
```

See a [usage example and authorization instructions](/docs/integrations/tools/google_jobs).

```python
from langchain.tools.google_jobs import GoogleJobsQueryRun
from langchain.utilities.google_finance import GoogleFinanceAPIWrapper
```

### Google Lens

See a [usage example and authorization instructions](/docs/integrations/tools/google_lens).

```python
from langchain.tools.google_lens import GoogleLensQueryRun
from langchain.utilities.google_lens import GoogleLensAPIWrapper
```

### Google Scholar

We need to install a python package.

```bash
pip install google-search-results
```

See a [usage example and authorization instructions](/docs/integrations/tools/google_scholar).

```python
from langchain.tools.google_scholar import GoogleScholarQueryRun
from langchain.utilities.google_scholar import GoogleScholarAPIWrapper
```

### Google Trends

We need to install a python package.

```bash
pip install google-search-results
```

See a [usage example and authorization instructions](/docs/integrations/tools/google_trends).

```python
from langchain.tools.google_trends import GoogleTrendsQueryRun
from langchain.utilities.google_trends import GoogleTrendsAPIWrapper
```

## Document Transformers

@@ -413,6 +473,14 @@ See a [usage example and authorization instructions](/docs/integrations/tools/se
from langchain.utilities import SerpAPIWrapper
```

### Serper.dev

See a [usage example and authorization instructions](/docs/integrations/tools/google_serper).

```python
from langchain.utilities import GoogleSerperAPIWrapper
```

### YouTube

>[YouTube Search](https://github.com/joetats/youtube_search) package searches `YouTube` videos avoiding using their heavily rate-limited API.
@@ -115,6 +115,17 @@ See a [usage example](/docs/integrations/text_embedding/instruct_embeddings).
from langchain.embeddings import HuggingFaceInstructEmbeddings
```

#### HuggingFaceBgeEmbeddings

>[BGE models on the HuggingFace](https://huggingface.co/BAAI/bge-large-en) are [the best open-source embedding models](https://huggingface.co/spaces/mteb/leaderboard).
>BGE model is created by the [Beijing Academy of Artificial Intelligence (BAAI)](https://www.baai.ac.cn/english.html). `BAAI` is a private non-profit organization engaged in AI research and development.

See a [usage example](/docs/integrations/text_embedding/bge_huggingface).

```python
from langchain.embeddings import HuggingFaceBgeEmbeddings
```

## Tools
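To make the new import concrete, a minimal sketch; the model name and the normalize flag are illustrative choices, not part of the diff:

```python
from langchain.embeddings import HuggingFaceBgeEmbeddings

embeddings = HuggingFaceBgeEmbeddings(
    model_name="BAAI/bge-large-en",  # assumption: the model the docs link to
    encode_kwargs={"normalize_embeddings": True},  # assumption: cosine-friendly vectors
)
vector = embeddings.embed_query("What is BGE?")
```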
@@ -151,6 +151,20 @@ See a [usage example](/docs/integrations/document_loaders/microsoft_powerpoint).
from langchain.document_loaders import UnstructuredPowerPointLoader
```

### Microsoft OneNote

First, let's install dependencies:

```bash
pip install bs4 msal
```

See a [usage example](/docs/integrations/document_loaders/onenote).

```python
from langchain.document_loaders.onenote import OneNoteLoader
```

## Vector stores

@@ -259,4 +273,25 @@ from langchain.agents.agent_toolkits import PowerBIToolkit
from langchain.utilities.powerbi import PowerBIDataset
```

## More

### Microsoft Presidio

>[Presidio](https://microsoft.github.io/presidio/) (Origin from Latin praesidium ‘protection, garrison’)
> helps to ensure sensitive data is properly managed and governed. It provides fast identification and
> anonymization modules for private entities in text and images such as credit card numbers, names,
> locations, social security numbers, bitcoin wallets, US phone numbers, financial data and more.

First, you need to install several python packages and download a `SpaCy` model.

```bash
pip install langchain-experimental openai presidio-analyzer presidio-anonymizer spacy Faker
python -m spacy download en_core_web_lg
```

See [usage examples](/docs/guides/privacy/presidio_data_anonymization/).

```python
from langchain_experimental.data_anonymizer import PresidioAnonymizer, PresidioReversibleAnonymizer
```
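A minimal sketch of what that anonymizer import enables, assuming the `langchain_experimental` API referenced above; the sample text is illustrative:

```python
from langchain_experimental.data_anonymizer import PresidioAnonymizer

anonymizer = PresidioAnonymizer()
# PII such as the name and phone number is replaced with fake values
# before the text is ever sent to an LLM.
print(anonymizer.anonymize("My name is Slim Shady, call me at 313-666-7440"))
```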
@@ -1,14 +1,23 @@
# DataForSEO

>[DataForSeo](https://dataforseo.com/) provides comprehensive SEO and digital marketing data solutions via API.

This page provides instructions on how to use the DataForSEO search APIs within LangChain.

## Installation and Setup

- Get a DataForSEO API Access login and password, and set them as environment variables (`DATAFORSEO_LOGIN` and `DATAFORSEO_PASSWORD` respectively). You can find it in your dashboard.
Get a [DataForSEO API Access login and password](https://app.dataforseo.com/register), and set them as environment variables
(`DATAFORSEO_LOGIN` and `DATAFORSEO_PASSWORD` respectively).

## Wrappers
### Utility
```python
import os

os.environ["DATAFORSEO_LOGIN"] = "your_login"
os.environ["DATAFORSEO_PASSWORD"] = "your_password"
```

## Utility

The DataForSEO utility wraps the API. To import this utility, use:

@@ -18,7 +27,7 @@ from langchain.utilities.dataforseo_api_search import DataForSeoAPIWrapper

For a detailed walkthrough of this wrapper, see [this notebook](/docs/integrations/tools/dataforseo).

### Tool
## Tool

You can also load this wrapper as a Tool to use with an Agent:

@@ -34,18 +43,3 @@ dataforseo = DataForSeoAPIWrapper(api_login="your_login", api_password="your_pass
result = dataforseo.run("Bill Gates")
print(result)
```

## Environment Variables

You can store your DataForSEO API Access login and password as environment variables. The wrapper will automatically check for these environment variables if no values are provided:

```python
import os

os.environ["DATAFORSEO_LOGIN"] = "your_login"
os.environ["DATAFORSEO_PASSWORD"] = "your_password"

dataforseo = DataForSeoAPIWrapper()
result = dataforseo.run("weather in Los Angeles")
print(result)
```
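To make the "load this wrapper as a Tool" note concrete, a minimal sketch; the tool name and description are illustrative, and the wrapper is assumed to read the environment variables set above:

```python
from langchain.agents import Tool
from langchain.utilities.dataforseo_api_search import DataForSeoAPIWrapper

search = DataForSeoAPIWrapper()  # reads DATAFORSEO_LOGIN / DATAFORSEO_PASSWORD
tool = Tool(
    name="DataForSEO Search",
    description="Search the web via the DataForSEO API.",
    func=search.run,
)
```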
@@ -1,49 +1,49 @@
# PromptLayer

This page covers how to use [PromptLayer](https://www.promptlayer.com) within LangChain.
It is broken into two parts: installation and setup, and then references to specific PromptLayer wrappers.
>[PromptLayer](https://docs.promptlayer.com/introduction) is a platform for prompt engineering.
> It also helps with LLM observability: visualizing requests, versioning prompts, and tracking usage.
>
>While `PromptLayer` does have LLMs that integrate directly with LangChain (e.g.
> [`PromptLayerOpenAI`](https://docs.promptlayer.com/languages/langchain)),
> using a callback is the recommended way to integrate `PromptLayer` with LangChain.

## Installation and Setup

If you want to work with PromptLayer:
- Install the promptlayer python library `pip install promptlayer`
- Create a PromptLayer account
To work with `PromptLayer`, we have to:
- Create a `PromptLayer` account
- Create an API token and set it as an environment variable (`PROMPTLAYER_API_KEY`)

## Wrappers
Install a Python package:

### LLM
```bash
pip install promptlayer
```

## Callback

See a [usage example](/docs/integrations/callbacks/promptlayer).

```python
import promptlayer  # Don't forget this import!
from langchain.callbacks import PromptLayerCallbackHandler
```
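For orientation, a minimal sketch of attaching the callback to an LLM call (the `pl_tags` keyword mirrors the tagging shown below; verify the handler's exact signature against the PromptLayer docs):

```python
import promptlayer  # Don't forget this import!
from langchain.callbacks import PromptLayerCallbackHandler
from langchain.llms import OpenAI

llm = OpenAI(callbacks=[PromptLayerCallbackHandler(pl_tags=["langchain"])])
llm("Tell me a joke")  # the request is logged to your PromptLayer dashboard
```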

## LLM

See a [usage example](/docs/integrations/llms/promptlayer_openai).

There exists a PromptLayer OpenAI LLM wrapper, which you can access with
```python
from langchain.llms import PromptLayerOpenAI
```

To tag your requests, use the argument `pl_tags` when initializing the LLM:

## Chat Models

See a [usage example](/docs/integrations/chat/promptlayer_chatopenai).

```python
from langchain.llms import PromptLayerOpenAI
llm = PromptLayerOpenAI(pl_tags=["langchain-requests", "chatbot"])
from langchain.chat_models import PromptLayerChatOpenAI
```

To get the PromptLayer request ID, use the argument `return_pl_id` when initializing the LLM:
```python
from langchain.llms import PromptLayerOpenAI
llm = PromptLayerOpenAI(return_pl_id=True)
```
This will add the PromptLayer request ID in the `generation_info` field of the `Generation` returned when using `.generate` or `.agenerate`.

For example:
```python
llm_results = llm.generate(["hello world"])
for res in llm_results.generations:
    print("pl request id: ", res[0].generation_info["pl_request_id"])
```
You can use the PromptLayer request ID to add a prompt, score, or other metadata to your request. [Read more about it here](https://magniv.notion.site/Track-4deee1b1f7a34c1680d085f82567dab9).

This LLM is identical to the [OpenAI](/docs/ecosystem/integrations/openai) LLM, except that
- all your requests will be logged to your PromptLayer account
- you can add `pl_tags` when instantiating to tag your requests on PromptLayer
- you can add `return_pl_id` when instantiating to return a PromptLayer request ID to use [while tracking requests](https://magniv.notion.site/Track-4deee1b1f7a34c1680d085f82567dab9).

PromptLayer also provides native wrappers for [`PromptLayerChatOpenAI`](/docs/integrations/chat/promptlayer_chatopenai) and `PromptLayerOpenAIChat`.
@@ -54,6 +54,15 @@
"Also, you'll need to create an [Activeloop](https://activeloop.ai/) account."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ORG_ID = \"...\""
]
},
{
"cell_type": "code",
"execution_count": null,
@@ -160,6 +160,8 @@
"outputs": [],
"source": [
"# Create Elasticsearch connection\n",
"from elasticsearch import Elasticsearch\n",
"\n",
"es_connection = Elasticsearch(\n",
"    hosts=[\"https://es_cluster_url:port\"], basic_auth=(\"user\", \"password\")\n",
")"
@@ -259,9 +261,9 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.3"
"version": "3.10.12"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
}
@@ -20,6 +20,8 @@
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"# Set API key\n",
"embaas_api_key = \"YOUR_API_KEY\"\n",
"# or set environment variable\n",
@@ -139,9 +141,9 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
"version": "3.10.12"
}
},
"nbformat": 4,
"nbformat_minor": 1
"nbformat_minor": 4
}
@@ -131,6 +131,8 @@
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"# if you are behind an explicit proxy, you can use the OPENAI_PROXY environment variable to pass through\n",
"os.environ[\"OPENAI_PROXY\"] = \"http://proxy.yourcompany.com:8080\""
]
@@ -138,7 +140,7 @@
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.11.1 64-bit",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
@@ -152,7 +154,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.1"
"version": "3.10.12"
},
"vscode": {
"interpreter": {
@@ -225,6 +225,8 @@
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"# if you are behind an explicit proxy, you can use the OPENAI_PROXY environment variable to pass through\n",
"os.environ[\"OPENAI_PROXY\"] = \"http://proxy.yourcompany.com:8080\""
]
@@ -246,7 +248,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
"version": "3.10.12"
},
"vscode": {
"interpreter": {
@@ -101,8 +101,10 @@
"# Or you can try the options below to display the image inline in this notebook\n",
"\n",
"try:\n",
"    import google.colab\n",
"\n",
"    IN_COLAB = True\n",
"except:\n",
"except ImportError:\n",
"    IN_COLAB = False\n",
"\n",
"if IN_COLAB:\n",
@@ -4,9 +4,13 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"# DataForSeo\n",
"# DataForSEO\n",
"\n",
"This notebook demonstrates how to use the `DataForSeo API` to obtain search engine results. The `DataForSeo API` retrieves `SERP` from most popular search engines like `Google`, `Bing`, `Yahoo`. It also allows to get SERPs from different search engine types like `Maps`, `News`, `Events`, etc.\n"
">[DataForSeo](https://dataforseo.com/) provides comprehensive SEO and digital marketing data solutions via API.\n",
">\n",
">The `DataForSeo API` retrieves `SERP` from the most popular search engines like `Google`, `Bing`, `Yahoo`. It also allows to get SERPs from different search engine types like `Maps`, `News`, `Events`, etc.\n",
"\n",
"This notebook demonstrates how to use the [DataForSeo API](https://dataforseo.com/apis) to obtain search engine results. "
]
},
{
@@ -187,7 +187,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
"version": "3.10.12"
},
"vscode": {
"interpreter": {
@@ -104,6 +104,8 @@
},
"outputs": [],
"source": [
"from IPython.display import display\n",
"\n",
"display(im)"
]
},
@@ -232,7 +234,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.3"
"version": "3.10.12"
}
},
"nbformat": 4,
@@ -145,6 +145,8 @@
"metadata": {},
"outputs": [],
"source": [
"import awadb\n",
"\n",
"awadb_client = awadb.Client()\n",
"ret = awadb_client.Load(\"langchain_awadb\")\n",
"if ret:\n",
@@ -178,7 +180,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.6"
"version": "3.10.12"
}
},
"nbformat": 4,
@@ -78,6 +78,7 @@
"outputs": [],
"source": [
"from langchain.document_loaders import TextLoader\n",
"from langchain.text_splitter import CharacterTextSplitter\n",
"\n",
"loader = TextLoader(\"../../../state_of_the_union.txt\")\n",
"documents = loader.load()\n",
@@ -145,15 +146,22 @@
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"version": "3.9.17"
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12"
},
"orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "aee8b7b246df8f9039afb4144a1f6fd8d2ca17a180786b69acc140d282b71a49"
@@ -161,5 +169,5 @@
}
},
"nbformat": 4,
"nbformat_minor": 2
"nbformat_minor": 4
}
@@ -23,7 +23,7 @@
"metadata": {},
"source": [
"> Note: \n",
">* This feature is in Public Preview and available for evaluation purposes, to validate functionality, and to gather feedback from public preview users. It is not recommended for production deployments as we may introduce breaking changes.\n",
">* This feature is Generally Available and ready for production deployments.\n",
">* The langchain version 0.0.305 ([release notes](https://github.com/langchain-ai/langchain/releases/tag/v0.0.305)) introduces the support for $vectorSearch MQL stage, which is available with MongoDB Atlas 6.0.11 and 7.0.2. Users utilizing earlier versions of MongoDB Atlas need to pin their LangChain version to <=0.0.304\n",
"> \n",
"> "
@@ -34,7 +34,7 @@
"id": "1b5ce18d",
"metadata": {},
"source": [
"In the notebook we will demonstrate how to perform `Retrieval Augmented Generation` (RAG) using MongoDB Atlas, OpenAI and Langchain. We will be performing Similarity Search and Question Answering over the PDF document for [GPT 4 technical report](https://arxiv.org/pdf/2303.08774.pdf) that came out in March 2023 and hence is not part of the OpenAI's Large Language Model(LLM)'s parametric memory, which had a knowledge cutoff of September 2021."
"In the notebook we will demonstrate how to perform `Retrieval Augmented Generation` (RAG) using MongoDB Atlas, OpenAI and Langchain. We will be performing Similarity Search, Similarity Search with Metadata Pre-Filtering, and Question Answering over the PDF document for [GPT 4 technical report](https://arxiv.org/pdf/2303.08774.pdf) that came out in March 2023 and hence is not part of the OpenAI's Large Language Model(LLM)'s parametric memory, which had a knowledge cutoff of September 2021."
]
},
{
@@ -52,6 +52,9 @@
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")"
]
},
@@ -99,19 +102,17 @@
"\n",
"DB_NAME = \"langchain_db\"\n",
"COLLECTION_NAME = \"test\"\n",
"ATLAS_VECTOR_SEARCH_INDEX_NAME = \"default\"\n",
"ATLAS_VECTOR_SEARCH_INDEX_NAME = \"index_name\"\n",
"\n",
"MONGODB_COLLECTION = client[DB_NAME][COLLECTION_NAME]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "cacb61e9",
"cell_type": "markdown",
"id": "eb0cc10f-b84e-4e5e-b445-eb61f10bf085",
"metadata": {},
"outputs": [],
"source": [
"# Create Vector Search Index"
"## Create Vector Search Index"
]
},
{
@@ -119,21 +120,21 @@
"id": "1f3ecc42",
"metadata": {},
"source": [
"Now, let's create a vector search index on your cluster. In the below example, `embedding` is the name of the field that contains the embedding vector. Please refer to the [documentation](https://www.mongodb.com/docs/atlas/atlas-search/field-types/knn-vector) to get more details on how to define an Atlas Vector Search index.\n",
"Now, let's create a vector search index on your cluster. In the below example, `embedding` is the name of the field that contains the embedding vector. Please refer to the [documentation](https://www.mongodb.com/docs/atlas/atlas-vector-search/create-index/) to get more details on how to define an Atlas Vector Search index.\n",
"You can name the index `{ATLAS_VECTOR_SEARCH_INDEX_NAME}` and create the index on the namespace `{DB_NAME}.{COLLECTION_NAME}`. Finally, write the following definition in the JSON editor on MongoDB Atlas:\n",
"\n",
"```json\n",
"{\n",
"  \"mappings\": {\n",
"    \"dynamic\": true,\n",
"    \"fields\": {\n",
"      \"embedding\": {\n",
"        \"dimensions\": 1536,\n",
"        \"similarity\": \"cosine\",\n",
"        \"type\": \"knnVector\"\n",
"      }\n",
"  \"name\": \"index_name\",\n",
"  \"type\": \"vectorSearch\",\n",
"  \"fields\":[\n",
"    {\n",
"      \"type\": \"vector\",\n",
"      \"path\": \"embedding\",\n",
"      \"numDimensions\": 1536,\n",
"      \"similarity\": \"cosine\"\n",
"    }\n",
"  }\n",
"  ]\n",
"}\n",
"```"
]
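Once the index exists, the notebook's variables plug into the vector store roughly like this (a hedged sketch; verify the exact `from_connection_string` signature against the LangChain reference, and `MONGODB_ATLAS_CLUSTER_URI` is assumed to be defined earlier in the notebook):

```python
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import MongoDBAtlasVectorSearch

vector_search = MongoDBAtlasVectorSearch.from_connection_string(
    MONGODB_ATLAS_CLUSTER_URI,                  # assumed: set earlier in the notebook
    f"{DB_NAME}.{COLLECTION_NAME}",             # namespace the index was created on
    OpenAIEmbeddings(),
    index_name=ATLAS_VECTOR_SEARCH_INDEX_NAME,  # must match "name" in the JSON above
)
```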
@@ -252,6 +253,65 @@
")"
]
},
{
"cell_type": "markdown",
"id": "02aef29c-5da0-41b8-b4fc-98fd71b94abf",
"metadata": {},
"source": [
"## Pre-filtering with Similarity Search"
]
},
{
"cell_type": "markdown",
"id": "f3b2d36d-d47a-482f-999d-85c23eb67eed",
"metadata": {},
"source": [
"Atlas Vector Search supports pre-filtering using MQL Operators for filtering. Below is an example index and query on the same data loaded above that allows you to do metadata filtering on the \"page\" field. You can update your existing index with the filter defined and do pre-filtering with vector search."
]
},
{
"cell_type": "markdown",
"id": "2b385a46-1e54-471f-95b2-202813d90bb2",
"metadata": {},
"source": [
"```json\n",
"{\n",
"  \"name\": \"index_name\",\n",
"  \"type\": \"vectorSearch\",\n",
"  \"fields\":[\n",
"    {\n",
"      \"type\": \"vector\",\n",
"      \"path\": \"embedding\",\n",
"      \"numDimensions\": 1536,\n",
"      \"similarity\": \"cosine\"\n",
"    },\n",
"    {\n",
"      \"type\": \"filter\",\n",
"      \"path\": \"page\"\n",
"    }\n",
"  ]\n",
"}\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "dfc8487d-14ec-42c9-9670-80fe02816196",
"metadata": {},
"outputs": [],
"source": [
"query = \"What were the compute requirements for training GPT 4\"\n",
"\n",
"results = vector_search.similarity_search_with_score(\n",
"    query=query, k=5, pre_filter={\"page\": {\"$eq\": 1}}\n",
")\n",
"\n",
"# Display results\n",
"for result in results:\n",
"    print(result)"
]
},
{
"cell_type": "markdown",
"id": "6d9a2dbe",
@@ -296,7 +356,7 @@
"source": [
"qa_retriever = vector_search.as_retriever(\n",
"    search_type=\"similarity\",\n",
"    search_kwargs={\"k\": 100, \"post_filter_pipeline\": [{\"$limit\": 25}]},\n",
"    search_kwargs={\"k\": 25},\n",
")"
]
},
@@ -99,7 +99,7 @@
"outputs": [],
"source": [
"results = ndb.similarity_search(\"Who was inspired by Ada Lovelace?\")\n",
"print(res.page_content)"
"print(results[0].page_content)"
]
}
],
@@ -119,7 +119,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
"version": "3.10.12"
}
},
"nbformat": 4,
@@ -344,7 +344,7 @@
"metadata": {},
"outputs": [],
"source": [
"#!pip install requests requests-aws4auth"
"#!pip install boto3 requests requests-aws4auth"
]
},
{
@@ -362,6 +362,8 @@
},
"outputs": [],
"source": [
"import boto3\n",
"from opensearchpy import RequestsHttpConnection\n",
"from requests_aws4auth import AWS4Auth\n",
"\n",
"service = \"aoss\"  # must set the service as 'aoss'\n",
@@ -404,6 +406,16 @@
"## Using AOS (Amazon OpenSearch Service)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4b02cd8d-f182-476b-935a-737f9c05d8e4",
"metadata": {},
"outputs": [],
"source": [
"#!pip install boto3"
]
},
{
"cell_type": "code",
"execution_count": null,
@@ -419,7 +431,9 @@
},
"outputs": [],
"source": [
"# This is just an example to show how to use AOS , you need to set proper values.\n",
"# This is just an example to show how to use Amazon OpenSearch Service, you need to set proper values.\n",
"import boto3\n",
"from opensearchpy import RequestsHttpConnection\n",
"\n",
"service = \"es\"  # must set the service as 'es'\n",
"region = \"us-east-2\"\n",
@@ -13,6 +13,16 @@
"We want it to be much more conversational."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7b9e9ef1-dc3c-4253-bd8b-5e95637bfe33",
"metadata": {},
"outputs": [],
"source": [
"OPENAI_API_KEY = \"...\""
]
},
{
"cell_type": "code",
"execution_count": 1,
@@ -575,7 +585,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
"version": "3.10.12"
}
},
"nbformat": 4,
@@ -27,6 +27,10 @@
}
],
"source": [
"from langchain.chains.llm import LLMChain\n",
"from langchain.chat_models.openai import ChatOpenAI\n",
"from langchain_core.prompts import PromptTemplate\n",
"\n",
"chat = ChatOpenAI(temperature=0)\n",
"prompt_template = \"Tell me a {adjective} joke\"\n",
"llm_chain = LLMChain(llm=chat, prompt=PromptTemplate.from_template(prompt_template))\n",
@@ -174,7 +178,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.3"
"version": "3.10.12"
},
"vscode": {
"interpreter": {
@@ -19,6 +19,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.embeddings import OpenAIEmbeddings\n",
"from langchain.retrievers import BM25Retriever, EnsembleRetriever\n",
"from langchain.vectorstores import FAISS"
]
@@ -81,7 +82,7 @@
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
@@ -95,10 +96,9 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.8"
},
"orig_nbformat": 4
"version": "3.10.12"
}
},
"nbformat": 4,
"nbformat_minor": 2
"nbformat_minor": 4
}
@@ -38,7 +38,7 @@
"source": [
"from langchain.document_loaders import TextLoader\n",
"from langchain.embeddings import OpenAIEmbeddings\n",
"from langchain.storage import InMemoryStore\n",
"from langchain.storage import InMemoryByteStore\n",
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
"from langchain.vectorstores import Chroma"
]
@@ -55,8 +55,8 @@
"    TextLoader(\"../../state_of_the_union.txt\"),\n",
"]\n",
"docs = []\n",
"for l in loaders:\n",
"    docs.extend(l.load())\n",
"for loader in loaders:\n",
"    docs.extend(loader.load())\n",
"text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000)\n",
"docs = text_splitter.split_documents(docs)"
]
@@ -83,12 +83,12 @@
"    collection_name=\"full_documents\", embedding_function=OpenAIEmbeddings()\n",
")\n",
"# The storage layer for the parent documents\n",
"store = InMemoryStore()\n",
"store = InMemoryByteStore()\n",
"id_key = \"doc_id\"\n",
"# The retriever (empty to start)\n",
"retriever = MultiVectorRetriever(\n",
"    vectorstore=vectorstore,\n",
"    base_store=store,\n",
"    byte_store=store,\n",
"    id_key=id_key,\n",
")\n",
"import uuid\n",
@@ -143,7 +143,7 @@
{
"data": {
"text/plain": [
"Document(page_content='Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \\n\\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court.', metadata={'doc_id': '80a5dccb-606f-437a-927a-54090fb0247d', 'source': '../../state_of_the_union.txt'})"
"Document(page_content='Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \\n\\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court.', metadata={'doc_id': '3f826cfe-78bd-468d-adb8-f5c2719255df', 'source': '../../state_of_the_union.txt'})"
]
},
"execution_count": 8,
@@ -271,12 +271,12 @@
"# The vectorstore to use to index the child chunks\n",
"vectorstore = Chroma(collection_name=\"summaries\", embedding_function=OpenAIEmbeddings())\n",
"# The storage layer for the parent documents\n",
"store = InMemoryStore()\n",
"store = InMemoryByteStore()\n",
"id_key = \"doc_id\"\n",
"# The retriever (empty to start)\n",
"retriever = MultiVectorRetriever(\n",
"    vectorstore=vectorstore,\n",
"    base_store=store,\n",
"    byte_store=store,\n",
"    id_key=id_key,\n",
")\n",
"doc_ids = [str(uuid.uuid4()) for _ in docs]"
@@ -338,7 +338,7 @@
{
"data": {
"text/plain": [
"Document(page_content=\"The document summarizes President Biden's State of the Union address. It highlights his nominations for the Supreme Court, his plans for border security and immigration reform, his commitment to protecting women's rights and LGBTQ+ rights, his bipartisan achievements, and his agenda for addressing the opioid epidemic, mental health, supporting veterans, and ending cancer. The document concludes with a message of optimism and unity for the American people.\", metadata={'doc_id': 'aa42f0b8-5119-44f9-808d-58c2b6b76e7b'})"
"Document(page_content=\"The document is a speech given by the President of the United States, highlighting various issues and priorities. The President discusses the nomination of Judge Ketanji Brown Jackson for the Supreme Court and emphasizes the importance of securing the border and fixing the immigration system. The President also mentions the need to protect women's rights, support LGBTQ+ Americans, pass the Equality Act, and sign bipartisan bills into law. Additionally, the President addresses the opioid epidemic, mental health, support for veterans, and the fight against cancer. The speech concludes with a message of unity and optimism for the future of the United States.\", metadata={'doc_id': '1f0bb74d-4878-43ae-9a5d-4c63fb308ca1'})"
]
},
"execution_count": 19,
@@ -447,9 +447,9 @@
{
"data": {
"text/plain": [
"[\"What was the author's initial reaction to the use of the IBM 1401 during his school years?\",\n",
" \"How did the author's interest in AI originate and evolve over time?\",\n",
" 'What led the author to switch his focus from AI to Lisp in grad school?']"
"[\"What was the author's initial career choice before deciding to switch to AI?\",\n",
" 'Why did the author become disillusioned with AI during his first year of grad school?',\n",
" 'What realization did the author have when visiting the Carnegie Institute?']"
]
},
"execution_count": 24,
@@ -483,12 +483,12 @@
"    collection_name=\"hypo-questions\", embedding_function=OpenAIEmbeddings()\n",
")\n",
"# The storage layer for the parent documents\n",
"store = InMemoryStore()\n",
"store = InMemoryByteStore()\n",
"id_key = \"doc_id\"\n",
"# The retriever (empty to start)\n",
"retriever = MultiVectorRetriever(\n",
"    vectorstore=vectorstore,\n",
"    base_store=store,\n",
"    byte_store=store,\n",
"    id_key=id_key,\n",
")\n",
"doc_ids = [str(uuid.uuid4()) for _ in docs]"
@@ -538,10 +538,10 @@
{
"data": {
"text/plain": [
"[Document(page_content=\"How did Robert's advice influence the narrator's decision to step down from Y Combinator?\", metadata={'doc_id': 'ea931756-68b8-4cd1-8752-e98d7e3c499f'}),\n",
" Document(page_content='What factors led to the decision of handing over the leadership of Y Combinator to someone else?', metadata={'doc_id': 'ea931756-68b8-4cd1-8752-e98d7e3c499f'}),\n",
" Document(page_content=\"How does the Bipartisan Infrastructure Law aim to transform America's economic competitiveness in the 21st Century?\", metadata={'doc_id': '63d98582-bd93-4818-b729-e0933d3d4cde'}),\n",
" Document(page_content='What measures have been taken to secure the border and fix the immigration system?', metadata={'doc_id': '3d2b150f-dcd3-4277-8734-0a15888fdae4'})]"
"[Document(page_content='Who is the nominee for the United States Supreme Court, and what is their background?', metadata={'doc_id': 'd4a82bd9-9001-4bd7-bff1-d8ba2dca9692'}),\n",
" Document(page_content='Why did Robert Morris suggest the narrator to quit Y Combinator?', metadata={'doc_id': 'aba9b00d-860b-4b93-8e80-87dc08fa461d'}),\n",
" Document(page_content='What events led to the narrator deciding to hand over Y Combinator to someone else?', metadata={'doc_id': 'aba9b00d-860b-4b93-8e80-87dc08fa461d'}),\n",
" Document(page_content=\"How does the Bipartisan Infrastructure Law aim to improve America's infrastructure?\", metadata={'doc_id': '822c2ba8-0abe-4f28-a72e-7eb8f477cc3d'})]"
]
},
"execution_count": 30,
@@ -572,7 +572,7 @@
{
"data": {
"text/plain": [
"9844"
"9194"
]
},
"execution_count": 32,
@@ -601,7 +601,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
"version": "3.11.4"
}
},
"nbformat": 4,
@@ -61,8 +61,8 @@
"    TextLoader(\"../../state_of_the_union.txt\"),\n",
"]\n",
"docs = []\n",
"for l in loaders:\n",
"    docs.extend(l.load())"
"for loader in loaders:\n",
"    docs.extend(loader.load())"
]
},
{
@@ -432,7 +432,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
"version": "3.10.12"
}
},
"nbformat": 4,
@@ -5,11 +5,33 @@
"id": "a6850189",
"metadata": {},
"source": [
"# NetworkX Graph QA\n",
"# NetworkX\n",
"\n",
">[NetworkX](https://networkx.org/) is a Python package for the creation, manipulation, and study of the structure, dynamics, and functions of complex networks.\n",
"\n",
"This notebook goes over how to do question answering over a graph data structure."
]
},
{
"cell_type": "markdown",
"id": "f96662d5-1b68-4b38-9da8-56bf3463b138",
"metadata": {},
"source": [
"## Setting up\n",
"\n",
"We have to install a Python package."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "21bd86df-9717-4a27-9233-1404c89cf442",
"metadata": {},
"outputs": [],
"source": [
"!pip install networkx"
]
},
{
"cell_type": "markdown",
"id": "9e516e3e",
@@ -295,7 +317,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
"version": "3.10.12"
}
},
"nbformat": 4,
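For context, graph QA with this integration looks roughly like the following sketch (class paths follow the LangChain docs of this era; treat them as assumptions to verify):

```python
from langchain.chains import GraphQAChain
from langchain.indexes import GraphIndexCreator
from langchain.llms import OpenAI

# Build a small knowledge graph (entity triples) from free text
index_creator = GraphIndexCreator(llm=OpenAI(temperature=0))
graph = index_creator.from_text("Intel is headquartered in Santa Clara.")

# Answer questions by traversing the extracted triples
chain = GraphQAChain.from_llm(OpenAI(temperature=0), graph=graph, verbose=True)
chain.run("Where is Intel headquartered?")
```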
@@ -15,6 +15,13 @@ tests:
test_watch:
	poetry run ptw --snapshot-update --now . -- -vv -x tests/unit_tests

check_imports: langchain_core/**/*.py
	for f in $^ ; do \
		python -c "from importlib.machinery import SourceFileLoader; SourceFileLoader('x', '$$f').load_module()" || exit 1; \
	done
extended_tests:
	poetry run pytest --only-extended $(TEST_FILE)
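For reference, the `check_imports` target above amounts to running the following per file, so a module whose top-level imports need optional extras fails fast (the path below is illustrative):

```python
# Standalone equivalent of one loop iteration of the check_imports target
from importlib.machinery import SourceFileLoader

# Loading the module executes its top-level imports; a missing dependency
# raises ImportError here instead of at user import time.
SourceFileLoader("x", "langchain_core/documents/base.py").load_module()
```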

######################
# LINTING AND FORMATTING
@@ -24,10 +31,12 @@ test_watch:
PYTHON_FILES=.
lint format: PYTHON_FILES=.
lint_diff format_diff: PYTHON_FILES=$(shell git diff --relative=libs/experimental --name-only --diff-filter=d master | grep -E '\.py$$|\.ipynb$$')
lint_package: PYTHON_FILES=langchain_core
lint_tests: PYTHON_FILES=tests

lint lint_diff:
lint lint_diff lint_package lint_tests:
	./scripts/check_pydantic.sh .
	./scripts/check_imports.sh
	./scripts/lint_imports.sh
	poetry run ruff .
	[ "$(PYTHON_FILES)" = "" ] || poetry run ruff format $(PYTHON_FILES) --diff
	[ "$(PYTHON_FILES)" = "" ] || poetry run ruff --select I $(PYTHON_FILES)
@@ -1,7 +1,7 @@
from __future__ import annotations

import json
from typing import Any, Literal, Sequence, Union
from typing import Any, List, Literal, Sequence, Union

from langchain_core.load.serializable import Serializable
from langchain_core.messages import (
@@ -40,6 +40,11 @@ class AgentAction(Serializable):
        """Return whether or not the class is serializable."""
        return True

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "agent"]

    @property
    def messages(self) -> Sequence[BaseMessage]:
        """Return the messages that correspond to this action."""
@@ -98,6 +103,11 @@ class AgentFinish(Serializable):
        """Return whether or not the class is serializable."""
        return True

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "agent"]

    @property
    def messages(self) -> Sequence[BaseMessage]:
        """Return the messages that correspond to this observation."""
@@ -105,7 +115,7 @@ class AgentFinish(Serializable):


def _convert_agent_action_to_messages(
    agent_action: AgentAction
    agent_action: AgentAction,
) -> Sequence[BaseMessage]:
    """Convert an agent action to a message.
@@ -1,6 +1,6 @@
from __future__ import annotations

from typing import Literal
from typing import List, Literal

from langchain_core.load.serializable import Serializable
from langchain_core.pydantic_v1 import Field
@@ -21,3 +21,8 @@ class Document(Serializable):
    def is_lc_serializable(cls) -> bool:
        """Return whether this class is serializable."""
        return True

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "document"]
@@ -3,6 +3,7 @@ import json
import os
from typing import Any, Dict, List, Optional

from langchain_core.load.mapping import SERIALIZABLE_MAPPING
from langchain_core.load.serializable import Serializable

DEFAULT_NAMESPACES = ["langchain", "langchain_core"]
@@ -62,8 +63,21 @@ class Reviver:
            if len(namespace) == 1 and namespace[0] == "langchain":
                raise ValueError(f"Invalid namespace: {value}")

            mod = importlib.import_module(".".join(namespace))
            cls = getattr(mod, name)
            # Get the importable path
            key = tuple(namespace + [name])
            if key not in SERIALIZABLE_MAPPING:
                raise ValueError(
                    "Trying to deserialize something that cannot "
                    "be deserialized in current version of langchain-core: "
                    f"{key}"
                )
            import_path = SERIALIZABLE_MAPPING[key]
            # Split into module and name
            import_dir, import_obj = import_path[:-1], import_path[-1]
            # Import module
            mod = importlib.import_module(".".join(import_dir))
            # Import class
            cls = getattr(mod, import_obj)

            # The class must be a subclass of Serializable.
            if not issubclass(cls, Serializable):
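The effect of this lookup is that only registered paths can be deserialized, and legacy `langchain.*` namespaces are redirected to their current homes. A minimal round-trip sketch (assuming the public `dumps`/`loads` helpers in `langchain_core.load`):

```python
from langchain_core.load import dumps, loads
from langchain_core.prompts import PromptTemplate

prompt = PromptTemplate.from_template("Tell me a {adjective} joke")
serialized = dumps(prompt)   # serialized under the legacy "langchain" namespace
restored = loads(serialized) # Reviver resolves the import path via SERIALIZABLE_MAPPING
assert restored == prompt
```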
478
libs/core/langchain_core/load/mapping.py
Normal file
@@ -0,0 +1,478 @@
# First value is the value that it is serialized as
# Second value is the path to load it from
SERIALIZABLE_MAPPING = {
    ("langchain", "schema", "messages", "AIMessage"): (
        "langchain_core",
        "messages",
        "ai",
        "AIMessage",
    ),
    ("langchain", "schema", "messages", "AIMessageChunk"): (
        "langchain_core",
        "messages",
        "ai",
        "AIMessageChunk",
    ),
    ("langchain", "schema", "messages", "BaseMessage"): (
        "langchain_core",
        "messages",
        "base",
        "BaseMessage",
    ),
    ("langchain", "schema", "messages", "BaseMessageChunk"): (
        "langchain_core",
        "messages",
        "base",
        "BaseMessageChunk",
    ),
    ("langchain", "schema", "messages", "ChatMessage"): (
        "langchain_core",
        "messages",
        "chat",
        "ChatMessage",
    ),
    ("langchain", "schema", "messages", "FunctionMessage"): (
        "langchain_core",
        "messages",
        "function",
        "FunctionMessage",
    ),
    ("langchain", "schema", "messages", "HumanMessage"): (
        "langchain_core",
        "messages",
        "human",
        "HumanMessage",
    ),
    ("langchain", "schema", "messages", "SystemMessage"): (
        "langchain_core",
        "messages",
        "system",
        "SystemMessage",
    ),
    ("langchain", "schema", "messages", "ToolMessage"): (
        "langchain_core",
        "messages",
        "tool",
        "ToolMessage",
    ),
    ("langchain", "schema", "agent", "AgentAction"): (
        "langchain_core",
        "agents",
        "AgentAction",
    ),
    ("langchain", "schema", "agent", "AgentFinish"): (
        "langchain_core",
        "agents",
        "AgentFinish",
    ),
    ("langchain", "schema", "prompt_template", "BasePromptTemplate"): (
        "langchain_core",
        "prompts",
        "base",
        "BasePromptTemplate",
    ),
    ("langchain", "chains", "llm", "LLMChain"): (
        "langchain",
        "chains",
        "llm",
        "LLMChain",
    ),
    ("langchain", "prompts", "prompt", "PromptTemplate"): (
        "langchain_core",
        "prompts",
        "prompt",
        "PromptTemplate",
    ),
    ("langchain", "prompts", "chat", "MessagesPlaceholder"): (
        "langchain_core",
        "prompts",
        "chat",
        "MessagesPlaceholder",
    ),
    ("langchain", "llms", "openai", "OpenAI"): (
        "langchain",
        "llms",
        "openai",
        "OpenAI",
    ),
    ("langchain", "prompts", "chat", "ChatPromptTemplate"): (
        "langchain_core",
        "prompts",
        "chat",
        "ChatPromptTemplate",
    ),
    ("langchain", "prompts", "chat", "HumanMessagePromptTemplate"): (
        "langchain_core",
        "prompts",
        "chat",
        "HumanMessagePromptTemplate",
    ),
    ("langchain", "prompts", "chat", "SystemMessagePromptTemplate"): (
        "langchain_core",
        "prompts",
        "chat",
        "SystemMessagePromptTemplate",
    ),
    ("langchain", "schema", "agent", "AgentActionMessageLog"): (
        "langchain_core",
        "agents",
        "AgentActionMessageLog",
    ),
    ("langchain", "schema", "agent", "OpenAIToolAgentAction"): (
        "langchain",
        "agents",
        "output_parsers",
        "openai_tools",
        "OpenAIToolAgentAction",
    ),
    ("langchain", "prompts", "chat", "BaseMessagePromptTemplate"): (
        "langchain_core",
        "prompts",
        "chat",
        "BaseMessagePromptTemplate",
    ),
    ("langchain", "schema", "output", "ChatGeneration"): (
        "langchain_core",
        "outputs",
        "chat_generation",
        "ChatGeneration",
    ),
    ("langchain", "schema", "output", "Generation"): (
        "langchain_core",
        "outputs",
        "generation",
        "Generation",
    ),
    ("langchain", "schema", "document", "Document"): (
        "langchain_core",
        "documents",
        "base",
        "Document",
    ),
    ("langchain", "output_parsers", "fix", "OutputFixingParser"): (
        "langchain",
        "output_parsers",
        "fix",
        "OutputFixingParser",
    ),
    ("langchain", "prompts", "chat", "AIMessagePromptTemplate"): (
        "langchain_core",
        "prompts",
        "chat",
        "AIMessagePromptTemplate",
    ),
    ("langchain", "output_parsers", "regex", "RegexParser"): (
        "langchain",
        "output_parsers",
        "regex",
        "RegexParser",
    ),
    ("langchain", "schema", "runnable", "DynamicRunnable"): (
        "langchain_core",
        "runnables",
        "configurable",
        "DynamicRunnable",
    ),
    ("langchain", "schema", "prompt", "PromptValue"): (
        "langchain_core",
        "prompt_values",
        "PromptValue",
    ),
    ("langchain", "schema", "runnable", "RunnableBinding"): (
        "langchain_core",
        "runnables",
        "base",
        "RunnableBinding",
    ),
    ("langchain", "schema", "runnable", "RunnableBranch"): (
        "langchain_core",
        "runnables",
        "branch",
        "RunnableBranch",
    ),
    ("langchain", "schema", "runnable", "RunnableWithFallbacks"): (
        "langchain_core",
        "runnables",
        "fallbacks",
        "RunnableWithFallbacks",
    ),
    ("langchain", "schema", "output_parser", "StrOutputParser"): (
        "langchain_core",
        "output_parsers",
        "string",
        "StrOutputParser",
    ),
    ("langchain", "chat_models", "openai", "ChatOpenAI"): (
        "langchain",
        "chat_models",
        "openai",
        "ChatOpenAI",
    ),
    ("langchain", "output_parsers", "list", "CommaSeparatedListOutputParser"): (
        "langchain_core",
        "output_parsers",
        "list",
        "CommaSeparatedListOutputParser",
    ),
    ("langchain", "schema", "runnable", "RunnableParallel"): (
        "langchain_core",
        "runnables",
        "base",
        "RunnableParallel",
    ),
    ("langchain", "chat_models", "azure_openai", "AzureChatOpenAI"): (
        "langchain",
        "chat_models",
        "azure_openai",
        "AzureChatOpenAI",
    ),
    ("langchain", "chat_models", "bedrock", "BedrockChat"): (
        "langchain",
        "chat_models",
        "bedrock",
        "BedrockChat",
    ),
    ("langchain", "chat_models", "anthropic", "ChatAnthropic"): (
        "langchain",
        "chat_models",
        "anthropic",
        "ChatAnthropic",
    ),
    ("langchain", "chat_models", "fireworks", "ChatFireworks"): (
        "langchain",
        "chat_models",
        "fireworks",
        "ChatFireworks",
    ),
    ("langchain", "chat_models", "google_palm", "ChatGooglePalm"): (
        "langchain",
        "chat_models",
        "google_palm",
        "ChatGooglePalm",
    ),
    ("langchain", "chat_models", "vertexai", "ChatVertexAI"): (
        "langchain",
        "chat_models",
        "vertexai",
        "ChatVertexAI",
    ),
    ("langchain", "schema", "output", "ChatGenerationChunk"): (
        "langchain_core",
        "outputs",
        "chat_generation",
        "ChatGenerationChunk",
    ),
    ("langchain", "schema", "messages", "ChatMessageChunk"): (
        "langchain_core",
        "messages",
        "chat",
        "ChatMessageChunk",
    ),
    ("langchain", "schema", "messages", "HumanMessageChunk"): (
        "langchain_core",
        "messages",
        "human",
        "HumanMessageChunk",
    ),
    ("langchain", "schema", "messages", "FunctionMessageChunk"): (
        "langchain_core",
        "messages",
        "function",
        "FunctionMessageChunk",
    ),
    ("langchain", "schema", "messages", "SystemMessageChunk"): (
        "langchain_core",
        "messages",
        "system",
        "SystemMessageChunk",
    ),
    ("langchain", "schema", "messages", "ToolMessageChunk"): (
        "langchain_core",
        "messages",
        "tool",
        "ToolMessageChunk",
    ),
    ("langchain", "schema", "output", "GenerationChunk"): (
        "langchain_core",
        "outputs",
        "generation",
        "GenerationChunk",
    ),
    ("langchain", "llms", "openai", "BaseOpenAI"): (
        "langchain",
        "llms",
        "openai",
        "BaseOpenAI",
    ),
    ("langchain", "llms", "bedrock", "Bedrock"): (
        "langchain",
        "llms",
        "bedrock",
        "Bedrock",
    ),
    ("langchain", "llms", "fireworks", "Fireworks"): (
        "langchain",
        "llms",
        "fireworks",
        "Fireworks",
    ),
    ("langchain", "llms", "google_palm", "GooglePalm"): (
        "langchain",
        "llms",
        "google_palm",
        "GooglePalm",
    ),
    ("langchain", "llms", "openai", "AzureOpenAI"): (
        "langchain",
        "llms",
        "openai",
        "AzureOpenAI",
    ),
    ("langchain", "llms", "replicate", "Replicate"): (
        "langchain",
        "llms",
        "replicate",
        "Replicate",
    ),
    ("langchain", "llms", "vertexai", "VertexAI"): (
        "langchain",
        "llms",
        "vertexai",
        "VertexAI",
    ),
    ("langchain", "output_parsers", "combining", "CombiningOutputParser"): (
        "langchain",
        "output_parsers",
        "combining",
        "CombiningOutputParser",
    ),
    ("langchain", "schema", "prompt_template", "BaseChatPromptTemplate"): (
        "langchain_core",
        "prompts",
        "chat",
        "BaseChatPromptTemplate",
    ),
    ("langchain", "prompts", "chat", "ChatMessagePromptTemplate"): (
        "langchain_core",
        "prompts",
        "chat",
        "ChatMessagePromptTemplate",
    ),
    ("langchain", "prompts", "few_shot_with_templates", "FewShotPromptWithTemplates"): (
        "langchain_core",
        "prompts",
        "few_shot_with_templates",
        "FewShotPromptWithTemplates",
    ),
    ("langchain", "prompts", "pipeline", "PipelinePromptTemplate"): (
        "langchain_core",
        "prompts",
        "pipeline",
        "PipelinePromptTemplate",
    ),
    ("langchain", "prompts", "base", "StringPromptTemplate"): (
        "langchain_core",
        "prompts",
        "string",
        "StringPromptTemplate",
    ),
    ("langchain", "prompts", "base", "StringPromptValue"): (
        "langchain_core",
        "prompt_values",
        "StringPromptValue",
    ),
    ("langchain", "prompts", "chat", "BaseStringMessagePromptTemplate"): (
        "langchain_core",
        "prompts",
        "chat",
        "BaseStringMessagePromptTemplate",
    ),
    ("langchain", "prompts", "chat", "ChatPromptValue"): (
        "langchain_core",
        "prompt_values",
        "ChatPromptValue",
    ),
    ("langchain", "prompts", "chat", "ChatPromptValueConcrete"): (
        "langchain_core",
        "prompt_values",
        "ChatPromptValueConcrete",
    ),
    ("langchain", "schema", "runnable", "HubRunnable"): (
        "langchain",
        "runnables",
        "hub",
        "HubRunnable",
    ),
    ("langchain", "schema", "runnable", "RunnableBindingBase"): (
        "langchain_core",
        "runnables",
        "base",
        "RunnableBindingBase",
    ),
    ("langchain", "schema", "runnable", "OpenAIFunctionsRouter"): (
        "langchain",
        "runnables",
        "openai_functions",
        "OpenAIFunctionsRouter",
    ),
    ("langchain", "schema", "runnable", "RouterRunnable"): (
        "langchain_core",
        "runnables",
        "router",
        "RouterRunnable",
    ),
    ("langchain", "schema", "runnable", "RunnablePassthrough"): (
        "langchain_core",
        "runnables",
        "passthrough",
        "RunnablePassthrough",
    ),
    ("langchain", "schema", "runnable", "RunnableSequence"): (
        "langchain_core",
        "runnables",
        "base",
        "RunnableSequence",
    ),
    ("langchain", "schema", "runnable", "RunnableEach"): (
        "langchain_core",
        "runnables",
        "base",
        "RunnableEach",
    ),
    ("langchain", "schema", "runnable", "RunnableEachBase"): (
        "langchain_core",
        "runnables",
        "base",
        "RunnableEachBase",
    ),
    ("langchain", "schema", "runnable", "RunnableConfigurableAlternatives"): (
        "langchain_core",
        "runnables",
        "configurable",
        "RunnableConfigurableAlternatives",
    ),
    ("langchain", "schema", "runnable", "RunnableConfigurableFields"): (
        "langchain_core",
        "runnables",
        "configurable",
        "RunnableConfigurableFields",
    ),
    ("langchain", "schema", "runnable", "RunnableWithMessageHistory"): (
        "langchain_core",
        "runnables",
        "history",
        "RunnableWithMessageHistory",
    ),
    ("langchain", "schema", "runnable", "RunnableAssign"): (
        "langchain_core",
        "runnables",
        "passthrough",
        "RunnableAssign",
    ),
    ("langchain", "schema", "runnable", "RunnableRetry"): (
        "langchain_core",
        "runnables",
        "retry",
        "RunnableRetry",
    ),
}
@@ -1,4 +1,4 @@
from typing import Any, Literal
from typing import Any, List, Literal

from langchain_core.messages.base import (
    BaseMessage,
@@ -17,6 +17,11 @@ class AIMessage(BaseMessage):

    type: Literal["ai"] = "ai"

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "messages"]


AIMessage.update_forward_refs()
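A quick way to see what these overrides do (a sketch; run against `langchain-core` to confirm): the serialized identifier keeps the legacy path even though the class now lives in `langchain_core.messages.ai`.

```python
from langchain_core.messages import AIMessage

# Serialization id stays on the legacy path so old payloads keep loading
assert AIMessage.get_lc_namespace() == ["langchain", "schema", "messages"]
```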
@@ -29,6 +34,11 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):
    # non-chunk variant.
    type: Literal["AIMessageChunk"] = "AIMessageChunk"  # type: ignore[assignment] # noqa: E501

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "messages"]

    def __add__(self, other: Any) -> BaseMessageChunk:  # type: ignore
        if isinstance(other, AIMessageChunk):
            if self.example != other.example:
@@ -31,6 +31,11 @@ class BaseMessage(Serializable):
        """Return whether this class is serializable."""
        return True

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "messages"]

    def __add__(self, other: Any) -> ChatPromptTemplate:
        from langchain_core.prompts.chat import ChatPromptTemplate

@@ -68,6 +73,11 @@ def merge_content(
class BaseMessageChunk(BaseMessage):
    """A Message chunk, which can be concatenated with other Message chunks."""

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "messages"]

    def _merge_kwargs_dict(
        self, left: Dict[str, Any], right: Dict[str, Any]
    ) -> Dict[str, Any]:
@@ -1,4 +1,4 @@
from typing import Any, Literal
from typing import Any, List, Literal

from langchain_core.messages.base import (
    BaseMessage,
@@ -15,6 +15,11 @@ class ChatMessage(BaseMessage):

    type: Literal["chat"] = "chat"

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "messages"]


ChatMessage.update_forward_refs()

@@ -27,6 +32,11 @@ class ChatMessageChunk(ChatMessage, BaseMessageChunk):
    # non-chunk variant.
    type: Literal["ChatMessageChunk"] = "ChatMessageChunk"  # type: ignore

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "messages"]

    def __add__(self, other: Any) -> BaseMessageChunk:  # type: ignore
        if isinstance(other, ChatMessageChunk):
            if self.role != other.role:
@@ -1,4 +1,4 @@
from typing import Any, Literal
from typing import Any, List, Literal

from langchain_core.messages.base import (
    BaseMessage,
@@ -15,6 +15,11 @@ class FunctionMessage(BaseMessage):

    type: Literal["function"] = "function"

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "messages"]


FunctionMessage.update_forward_refs()

@@ -27,6 +32,11 @@ class FunctionMessageChunk(FunctionMessage, BaseMessageChunk):
    # non-chunk variant.
    type: Literal["FunctionMessageChunk"] = "FunctionMessageChunk"  # type: ignore[assignment]

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "messages"]

    def __add__(self, other: Any) -> BaseMessageChunk:  # type: ignore
        if isinstance(other, FunctionMessageChunk):
            if self.name != other.name:
@@ -1,4 +1,4 @@
from typing import Literal
from typing import List, Literal

from langchain_core.messages.base import BaseMessage, BaseMessageChunk

@@ -13,6 +13,11 @@ class HumanMessage(BaseMessage):

    type: Literal["human"] = "human"

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "messages"]


HumanMessage.update_forward_refs()

@@ -24,3 +29,8 @@ class HumanMessageChunk(HumanMessage, BaseMessageChunk):
    # to make sure that the chunk variant can be discriminated from the
    # non-chunk variant.
    type: Literal["HumanMessageChunk"] = "HumanMessageChunk"  # type: ignore[assignment] # noqa: E501

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "messages"]
@@ -1,4 +1,4 @@
from typing import Literal
from typing import List, Literal

from langchain_core.messages.base import BaseMessage, BaseMessageChunk

@@ -10,6 +10,11 @@ class SystemMessage(BaseMessage):

    type: Literal["system"] = "system"

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "messages"]


SystemMessage.update_forward_refs()

@@ -21,3 +26,8 @@ class SystemMessageChunk(SystemMessage, BaseMessageChunk):
    # to make sure that the chunk variant can be discriminated from the
    # non-chunk variant.
    type: Literal["SystemMessageChunk"] = "SystemMessageChunk"  # type: ignore[assignment] # noqa: E501

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "messages"]
@@ -1,4 +1,4 @@
from typing import Any, Literal
from typing import Any, List, Literal

from langchain_core.messages.base import (
    BaseMessage,
@@ -15,6 +15,11 @@ class ToolMessage(BaseMessage):

    type: Literal["tool"] = "tool"

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "messages"]


ToolMessage.update_forward_refs()

@@ -27,6 +32,11 @@ class ToolMessageChunk(ToolMessage, BaseMessageChunk):
    # non-chunk variant.
    type: Literal["ToolMessageChunk"] = "ToolMessageChunk"  # type: ignore[assignment]

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "messages"]

    def __add__(self, other: Any) -> BaseMessageChunk:  # type: ignore
        if isinstance(other, ToolMessageChunk):
            if self.tool_call_id != other.tool_call_id:
@@ -26,6 +26,11 @@ class CommaSeparatedListOutputParser(ListOutputParser):
    def is_lc_serializable(cls) -> bool:
        return True

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "output_parsers", "list"]

    def get_format_instructions(self) -> str:
        return (
            "Your response should be a list of comma separated values, "
@@ -1,3 +1,5 @@
from typing import List

from langchain_core.output_parsers.transform import BaseTransformOutputParser


@@ -9,6 +11,11 @@ class StrOutputParser(BaseTransformOutputParser[str]):
        """Return whether this class is serializable."""
        return True

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "output_parser"]

    @property
    def _type(self) -> str:
        """Return the output parser type for serialization."""
@@ -1,6 +1,6 @@
from __future__ import annotations

from typing import Any, Dict, Literal
from typing import Any, Dict, List, Literal

from langchain_core.messages import BaseMessage, BaseMessageChunk
from langchain_core.outputs.generation import Generation
@@ -27,6 +27,11 @@ class ChatGeneration(Generation):
            raise ValueError("Error while initializing ChatGeneration") from e
        return values

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "output"]


class ChatGenerationChunk(ChatGeneration):
    """A ChatGeneration chunk, which can be concatenated with other
@@ -41,6 +46,11 @@ class ChatGenerationChunk(ChatGeneration):
    type: Literal["ChatGenerationChunk"] = "ChatGenerationChunk"  # type: ignore[assignment] # noqa: E501
    """Type is used exclusively for serialization purposes."""

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "output"]

    def __add__(self, other: ChatGenerationChunk) -> ChatGenerationChunk:
        if isinstance(other, ChatGenerationChunk):
            generation_info = (
libs/core/langchain_core/outputs/generation.py

@@ -1,6 +1,6 @@
from __future__ import annotations

from typing import Any, Dict, Literal, Optional
from typing import Any, Dict, List, Literal, Optional

from langchain_core.load import Serializable

@@ -24,10 +24,20 @@ class Generation(Serializable):
        """Return whether this class is serializable."""
        return True

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "output"]


class GenerationChunk(Generation):
    """A Generation chunk, which can be concatenated with other Generation chunks."""

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "output"]

    def __add__(self, other: GenerationChunk) -> GenerationChunk:
        if isinstance(other, GenerationChunk):
            generation_info = (
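The __add__ shown in context is what lets streaming callers fold successive chunks together. A small sketch:

    from langchain_core.outputs import GenerationChunk

    merged = GenerationChunk(text="Hello, ") + GenerationChunk(text="world")
    assert merged.text == "Hello, world"  # text concatenated, generation_info merged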
libs/core/langchain_core/prompt_values.py

@@ -24,6 +24,11 @@ class PromptValue(Serializable, ABC):
        """Return whether this class is serializable."""
        return True

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "prompt"]

    @abstractmethod
    def to_string(self) -> str:
        """Return prompt value as string."""

@@ -40,6 +45,11 @@ class StringPromptValue(PromptValue):
    """Prompt text."""
    type: Literal["StringPromptValue"] = "StringPromptValue"

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "prompts", "base"]

    def to_string(self) -> str:
        """Return prompt as string."""
        return self.text

@@ -66,6 +76,11 @@ class ChatPromptValue(PromptValue):
        """Return prompt as a list of messages."""
        return list(self.messages)

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "prompts", "chat"]


class ChatPromptValueConcrete(ChatPromptValue):
    """Chat prompt value which explicitly lists out the message types it accepts.

@@ -74,3 +89,8 @@ class ChatPromptValueConcrete(ChatPromptValue):
    messages: Sequence[AnyMessage]

    type: Literal["ChatPromptValueConcrete"] = "ChatPromptValueConcrete"

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "prompts", "chat"]
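PromptValue's two views (string and message list) are easiest to see end to end. A short sketch for context; the reprs in the comments are illustrative:

    from langchain_core.prompts import PromptTemplate

    value = PromptTemplate.from_template("Tell me about {topic}").invoke({"topic": "otters"})
    value.to_string()    # "Tell me about otters"
    value.to_messages()  # [HumanMessage(content="Tell me about otters")]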
libs/core/langchain_core/prompts/base.py

@@ -44,6 +44,11 @@ class BasePromptTemplate(RunnableSerializable[Dict, PromptValue], ABC):
        default_factory=dict
    )

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "prompt_template"]

    @classmethod
    def is_lc_serializable(cls) -> bool:
        """Return whether this class is serializable."""
libs/core/langchain_core/prompts/chat.py

@@ -43,6 +43,11 @@ class BaseMessagePromptTemplate(Serializable, ABC):
        """Return whether or not the class is serializable."""
        return True

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "prompts", "chat"]

    @abstractmethod
    def format_messages(self, **kwargs: Any) -> List[BaseMessage]:
        """Format messages from kwargs. Should return a list of BaseMessages.

@@ -82,6 +87,11 @@ class MessagesPlaceholder(BaseMessagePromptTemplate):
    variable_name: str
    """Name of variable to use as messages."""

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "prompts", "chat"]

    def __init__(self, variable_name: str, **kwargs: Any):
        return super().__init__(variable_name=variable_name, **kwargs)

@@ -132,6 +142,11 @@ class BaseStringMessagePromptTemplate(BaseMessagePromptTemplate, ABC):
    additional_kwargs: dict = Field(default_factory=dict)
    """Additional keyword arguments to pass to the prompt template."""

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "prompts", "chat"]

    @classmethod
    def from_template(
        cls: Type[MessagePromptTemplateT],

@@ -221,6 +236,11 @@ class ChatMessagePromptTemplate(BaseStringMessagePromptTemplate):
    role: str
    """Role of the message."""

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "prompts", "chat"]

    def format(self, **kwargs: Any) -> BaseMessage:
        """Format the prompt template.

@@ -239,6 +259,11 @@ class ChatMessagePromptTemplate(BaseStringMessagePromptTemplate):
class HumanMessagePromptTemplate(BaseStringMessagePromptTemplate):
    """Human message prompt template. This is a message sent from the user."""

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "prompts", "chat"]

    def format(self, **kwargs: Any) -> BaseMessage:
        """Format the prompt template.

@@ -255,6 +280,11 @@ class HumanMessagePromptTemplate(BaseStringMessagePromptTemplate):
class AIMessagePromptTemplate(BaseStringMessagePromptTemplate):
    """AI message prompt template. This is a message sent from the AI."""

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "prompts", "chat"]

    def format(self, **kwargs: Any) -> BaseMessage:
        """Format the prompt template.

@@ -273,6 +303,11 @@ class SystemMessagePromptTemplate(BaseStringMessagePromptTemplate):
    This is a message that is not sent to the user.
    """

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "prompts", "chat"]

    def format(self, **kwargs: Any) -> BaseMessage:
        """Format the prompt template.

@@ -368,6 +403,11 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
    validate_template: bool = False
    """Whether or not to try validating the template."""

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "prompts", "chat"]

    def __add__(self, other: Any) -> ChatPromptTemplate:
        """Combine two prompt templates.
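All of the chat prompt classes above pin the same ["langchain", "prompts", "chat"] path. For reference, the usual way they are composed (a sketch, not part of the diff):

    from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", "You are a helpful assistant."),
            MessagesPlaceholder(variable_name="history"),
            ("human", "{question}"),
        ]
    )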
libs/core/langchain_core/prompts/few_shot_with_templates.py

@@ -42,6 +42,11 @@ class FewShotPromptWithTemplates(StringPromptTemplate):
    validate_template: bool = False
    """Whether or not to try validating the template."""

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "prompts", "few_shot_with_templates"]

    @root_validator(pre=True)
    def check_examples_and_selector(cls, values: Dict) -> Dict:
        """Check that one and only one of examples/example_selector are provided."""
libs/core/langchain_core/prompts/pipeline.py

@@ -28,6 +28,11 @@ class PipelinePromptTemplate(BasePromptTemplate):
    pipeline_prompts: List[Tuple[str, BasePromptTemplate]]
    """A list of tuples, consisting of a string (`name`) and a Prompt Template."""

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "prompts", "pipeline"]

    @root_validator(pre=True)
    def get_input_variables(cls, values: Dict) -> Dict:
        """Get input variables."""
libs/core/langchain_core/prompts/prompt.py

@@ -54,6 +54,11 @@ class PromptTemplate(StringPromptTemplate):
            "template_format": self.template_format,
        }

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "prompts", "prompt"]

    input_variables: List[str]
    """A list of the names of the variables the prompt template expects."""
libs/core/langchain_core/prompts/string.py

@@ -151,6 +151,11 @@ def get_template_variables(template: str, template_format: str) -> List[str]:
class StringPromptTemplate(BasePromptTemplate, ABC):
    """String prompt that exposes the format method, returning a prompt."""

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "prompts", "base"]

    def format_prompt(self, **kwargs: Any) -> PromptValue:
        """Create Chat Messages."""
        return StringPromptValue(text=self.format(**kwargs))
libs/core/langchain_core/runnables/base.py

@@ -1349,6 +1349,11 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
    last: Runnable[Any, Output]
    """The last runnable in the sequence."""

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "runnable"]

    @property
    def steps(self) -> List[Runnable[Any, Any]]:
        """All the runnables that make up the sequence in order."""

@@ -1358,10 +1363,6 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
    def is_lc_serializable(cls) -> bool:
        return True

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        return cls.__module__.split(".")[:-1]

    class Config:
        arbitrary_types_allowed = True

@@ -1939,7 +1940,8 @@ class RunnableParallel(RunnableSerializable[Input, Dict[str, Any]]):

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        return cls.__module__.split(".")[:-1]
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "runnable"]

    class Config:
        arbitrary_types_allowed = True

@@ -2705,7 +2707,8 @@ class RunnableEachBase(RunnableSerializable[List[Input], List[Output]]):

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        return cls.__module__.split(".")[:-1]
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "runnable"]

    def _invoke(
        self,

@@ -2746,6 +2749,11 @@ class RunnableEach(RunnableEachBase[Input, Output]):
    with each element of the input sequence.
    """

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "runnable"]

    def bind(self, **kwargs: Any) -> RunnableEach[Input, Output]:
        return RunnableEach(bound=self.bound.bind(**kwargs))

@@ -2910,7 +2918,8 @@ class RunnableBindingBase(RunnableSerializable[Input, Output]):

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        return cls.__module__.split(".")[:-1]
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "runnable"]

    def _merge_configs(self, *configs: Optional[RunnableConfig]) -> RunnableConfig:
        config = merge_configs(self.config, *configs)

@@ -3086,6 +3095,11 @@ class RunnableBinding(RunnableBindingBase[Input, Output]):
    runnable_binding.invoke('Say "Parrot-MAGIC"')  # Should return `Parrot`
    """

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "runnable"]

    def bind(self, **kwargs: Any) -> Runnable[Input, Output]:
        """Bind additional kwargs to a Runnable, returning a new Runnable.
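Previously these runnables derived their namespace from cls.__module__, which changed when the code moved from langchain to langchain_core; pinning it keeps older payloads loadable. A sketch of the observable effect (the JSON layout is illustrative):

    import json

    from langchain_core.load import dumps
    from langchain_core.runnables import RunnablePassthrough

    chain = RunnablePassthrough() | RunnablePassthrough()  # builds a RunnableSequence
    payload = json.loads(dumps(chain))
    payload["id"]  # ["langchain", "schema", "runnable", "RunnableSequence"]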
libs/core/langchain_core/runnables/branch.py

@@ -132,8 +132,8 @@ class RunnableBranch(RunnableSerializable[Input, Output]):

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """The namespace of a RunnableBranch is the namespace of its default branch."""
        return cls.__module__.split(".")[:-1]
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "runnable"]

    def get_input_schema(
        self, config: Optional[RunnableConfig] = None
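RunnableBranch routes on the first matching condition and falls back to the default branch. A minimal usage sketch for context:

    from langchain_core.runnables import RunnableBranch

    branch = RunnableBranch(
        (lambda x: isinstance(x, str), lambda x: x.upper()),
        lambda x: str(x),  # default branch
    )
    branch.invoke("hello")  # "HELLO"
    branch.invoke(3)        # "3"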
libs/core/langchain_core/runnables/configurable.py

@@ -53,7 +53,8 @@ class DynamicRunnable(RunnableSerializable[Input, Output]):

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        return cls.__module__.split(".")[:-1]
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "runnable"]

    @property
    def InputType(self) -> Type[Input]:

@@ -217,6 +218,11 @@ class RunnableConfigurableFields(DynamicRunnable[Input, Output]):

    fields: Dict[str, AnyConfigurableField]

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "runnable"]

    @property
    def config_specs(self) -> List[ConfigurableFieldSpec]:
        return get_unique_config_specs(

@@ -318,6 +324,11 @@ class RunnableConfigurableAlternatives(DynamicRunnable[Input, Output]):
    of the form <which.id>==<alternative_key>, eg. a key named "temperature" used by
    the alternative named "gpt3" becomes "model==gpt3/temperature"."""

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "runnable"]

    @property
    def config_specs(self) -> List[ConfigurableFieldSpec]:
        with _enums_for_spec_lock:
libs/core/langchain_core/runnables/fallbacks.py

@@ -125,7 +125,8 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        return cls.__module__.split(".")[:-1]
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "runnable"]

    @property
    def runnables(self) -> Iterator[Runnable[Input, Output]]:
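RunnableWithFallbacks is normally built via .with_fallbacks(); a quick sketch of the intended behaviour:

    from langchain_core.runnables import RunnableLambda

    def always_fails(_: str) -> str:
        raise RuntimeError("boom")

    safe = RunnableLambda(always_fails).with_fallbacks(
        [RunnableLambda(lambda x: f"fallback: {x}")]
    )
    safe.invoke("hi")  # "fallback: hi"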
libs/core/langchain_core/runnables/history.py

@@ -86,6 +86,11 @@ class RunnableWithMessageHistory(RunnableBindingBase):
    output_messages_key: Optional[str] = None
    history_messages_key: Optional[str] = None

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "runnable"]

    def __init__(
        self,
        runnable: Runnable[
libs/core/langchain_core/runnables/passthrough.py

@@ -167,7 +167,8 @@ class RunnablePassthrough(RunnableSerializable[Other, Other]):

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        return cls.__module__.split(".")[:-1]
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "runnable"]

    @property
    def InputType(self) -> Any:

@@ -312,7 +313,8 @@ class RunnableAssign(RunnableSerializable[Dict[str, Any], Dict[str, Any]]):

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        return cls.__module__.split(".")[:-1]
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "runnable"]

    def get_input_schema(
        self, config: Optional[RunnableConfig] = None
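RunnableAssign is the class behind RunnablePassthrough.assign(), which merges computed keys into a dict input; for example:

    from langchain_core.runnables import RunnablePassthrough

    chain = RunnablePassthrough.assign(doubled=lambda d: d["x"] * 2)
    chain.invoke({"x": 2})  # {"x": 2, "doubled": 4}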
libs/core/langchain_core/runnables/retry.py

@@ -114,6 +114,11 @@ class RunnableRetry(RunnableBindingBase[Input, Output]):
    max_attempt_number: int = 3
    """The maximum number of attempts to retry the runnable."""

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "runnable"]

    @property
    def _kwargs_retrying(self) -> Dict[str, Any]:
        kwargs: Dict[str, Any] = dict()
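RunnableRetry is normally constructed through .with_retry(); a sketch of the retry-until-success behaviour:

    from langchain_core.runnables import RunnableLambda

    attempts = 0

    def flaky(x: str) -> str:
        global attempts
        attempts += 1
        if attempts < 3:
            raise ValueError("transient failure")
        return x.upper()

    retrying = RunnableLambda(flaky).with_retry(stop_after_attempt=3)
    retrying.invoke("ok")  # "OK", succeeding on the third attempt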
libs/core/langchain_core/runnables/router.py

@@ -77,7 +77,8 @@ class RouterRunnable(RunnableSerializable[RouterInput, Output]):

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        return cls.__module__.split(".")[:-1]
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "runnable"]

    def invoke(
        self, input: RouterInput, config: Optional[RunnableConfig] = None
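RouterRunnable dispatches on the "key" field of its input; a minimal sketch:

    from langchain_core.runnables import RouterRunnable, RunnableLambda

    router = RouterRunnable(
        {
            "upper": RunnableLambda(lambda x: x.upper()),
            "lower": RunnableLambda(lambda x: x.lower()),
        }
    )
    router.invoke({"key": "upper", "input": "hello"})  # "HELLO"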
libs/core/langchain_core/tracers/__init__.py

@@ -6,11 +6,16 @@ __all__ = [
    "Run",
    "RunLog",
    "RunLogPatch",
    "LogStreamCallbackHandler",
]

from langchain_core.tracers.base import BaseTracer
from langchain_core.tracers.evaluation import EvaluatorCallbackHandler
from langchain_core.tracers.langchain import LangChainTracer
from langchain_core.tracers.log_stream import RunLog, RunLogPatch
from langchain_core.tracers.log_stream import (
    LogStreamCallbackHandler,
    RunLog,
    RunLogPatch,
)
from langchain_core.tracers.schemas import Run
from langchain_core.tracers.stdout import ConsoleCallbackHandler
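With the export added, the handler becomes importable from the subpackage root:

    from langchain_core.tracers import LogStreamCallbackHandler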
libs/core/langchain_core/tracers/log_stream.py

@@ -1,5 +1,6 @@
from __future__ import annotations

import copy
import math
import threading
from collections import defaultdict

@@ -82,7 +83,7 @@ class RunLogPatch:
    def __add__(self, other: Union[RunLogPatch, Any]) -> RunLog:
        if type(other) == RunLogPatch:
            ops = self.ops + other.ops
            state = jsonpatch.apply_patch(None, ops)
            state = jsonpatch.apply_patch(None, copy.deepcopy(ops))
            return RunLog(*ops, state=state)

        raise TypeError(
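The deepcopy guards against aliasing: jsonpatch can leave the computed state sharing mutable objects with the stored ops, so mutating one corrupts the other. A small sketch of the failure mode being avoided (the op and values are illustrative):

    import copy

    import jsonpatch

    ops = [{"op": "add", "path": "", "value": {"logs": []}}]
    state = jsonpatch.apply_patch(None, copy.deepcopy(ops))
    state["logs"].append("entry")
    assert ops[0]["value"] == {"logs": []}  # the original ops stay pristine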
30 libs/core/poetry.lock (generated)

@@ -2508,6 +2508,31 @@ files = [
docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"]
test = ["argcomplete (>=3.0.3)", "mypy (>=1.6.0)", "pre-commit", "pytest (>=7.0,<7.5)", "pytest-mock", "pytest-mypy-testing"]

[[package]]
name = "types-jinja2"
version = "2.11.9"
description = "Typing stubs for Jinja2"
optional = false
python-versions = "*"
files = [
    {file = "types-Jinja2-2.11.9.tar.gz", hash = "sha256:dbdc74a40aba7aed520b7e4d89e8f0fe4286518494208b35123bcf084d4b8c81"},
    {file = "types_Jinja2-2.11.9-py3-none-any.whl", hash = "sha256:60a1e21e8296979db32f9374d8a239af4cb541ff66447bb915d8ad398f9c63b2"},
]

[package.dependencies]
types-MarkupSafe = "*"

[[package]]
name = "types-markupsafe"
version = "1.1.10"
description = "Typing stubs for MarkupSafe"
optional = false
python-versions = "*"
files = [
    {file = "types-MarkupSafe-1.1.10.tar.gz", hash = "sha256:85b3a872683d02aea3a5ac2a8ef590193c344092032f58457287fbf8e06711b1"},
    {file = "types_MarkupSafe-1.1.10-py3-none-any.whl", hash = "sha256:ca2bee0f4faafc45250602567ef38d533e877d2ddca13003b319c551ff5b3cc5"},
]

[[package]]
name = "types-python-dateutil"
version = "2.8.19.14"

@@ -2703,7 +2728,10 @@ files = [
docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"]
testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy (>=0.9.1)", "pytest-ruff"]

[extras]
extended-testing = ["jinja2"]

[metadata]
lock-version = "2.0"
python-versions = ">=3.8.1,<4.0"
content-hash = "b08d47f726dd194af0f801d300402b174c8db96a4184cc1136cb8e5a0e287190"
content-hash = "64fa7ef31713835d12d5213f04b52adf7423299d023f9558b8b4e65ce1e5262f"
libs/core/pyproject.toml

@@ -1,6 +1,6 @@
[tool.poetry]
name = "langchain-core"
version = "0.0.11"
version = "0.0.12"
description = "Building applications with LLMs through composability"
authors = []
license = "MIT"

@@ -14,19 +14,37 @@ pydantic = ">=1,<3"
langsmith = "~0.0.63"
tenacity = "^8.1.0"
jsonpatch = "^1.33"
anyio = ">=3,<5"
PyYAML = ">=5.3"
requests = "^2"
packaging = "^23.2"
jinja2 = {version = "^3", optional = true}

[tool.poetry.group.lint]
optional = true

[tool.poetry.group.lint.dependencies]
ruff = "^0.1.5"

[tool.poetry.group.typing]
optional = true

[tool.poetry.group.typing.dependencies]
mypy = "^0.991"
types-pyyaml = "^6.0.12.2"
types-requests = "^2.28.11.5"
types-jinja2 = "^2.11.9"

[tool.poetry.group.dev]
optional = true

[tool.poetry.group.dev.dependencies]
jupyter = "^1.0.0"
setuptools = "^67.6.1"

[tool.poetry.group.test]
optional = true

[tool.poetry.group.test.dependencies]
# The only dependencies that should be added are
# dependencies used for running tests (e.g., pytest, freezegun, response).

@@ -43,6 +61,9 @@ pytest-asyncio = "^0.21.1"
optional = true
dependencies = {}

[tool.poetry.extras]
extended_testing = ["jinja2"]

[tool.ruff]
select = [
    "E",  # pycodestyle
0 libs/core/tests/integration_tests/__init__.py (new file)

7 libs/core/tests/integration_tests/test_compile.py (new file)

@@ -0,0 +1,7 @@
import pytest


@pytest.mark.compile
def test_placeholder() -> None:
    """Used for compiling integration tests without running any real tests."""
    pass
87 libs/core/tests/unit_tests/conftest.py (new file)

@@ -0,0 +1,87 @@
"""Configuration for unit tests."""
from importlib import util
from typing import Dict, Sequence

import pytest
from pytest import Config, Function, Parser


def pytest_addoption(parser: Parser) -> None:
    """Add custom command line options to pytest."""
    parser.addoption(
        "--only-extended",
        action="store_true",
        help="Only run extended tests. Does not allow skipping any extended tests.",
    )
    parser.addoption(
        "--only-core",
        action="store_true",
        help="Only run core tests. Never runs any extended tests.",
    )


def pytest_collection_modifyitems(config: Config, items: Sequence[Function]) -> None:
    """Add implementations for handling custom markers.

    At the moment, this adds support for a custom `requires` marker.

    The `requires` marker is used to denote tests that require one or more packages
    to be installed to run. If the package is not installed, the test is skipped.

    The `requires` marker syntax is:

    .. code-block:: python

        @pytest.mark.requires("package1", "package2")
        def test_something():
            ...
    """
    # Mapping from the name of a package to whether it is installed or not.
    # Used to avoid repeated calls to `util.find_spec`
    required_pkgs_info: Dict[str, bool] = {}

    only_extended = config.getoption("--only-extended") or False
    only_core = config.getoption("--only-core") or False

    if only_extended and only_core:
        raise ValueError("Cannot specify both `--only-extended` and `--only-core`.")

    for item in items:
        requires_marker = item.get_closest_marker("requires")
        if requires_marker is not None:
            if only_core:
                item.add_marker(pytest.mark.skip(reason="Skipping not a core test."))
                continue

            # Iterate through the list of required packages
            required_pkgs = requires_marker.args
            for pkg in required_pkgs:
                # If we haven't yet checked whether the pkg is installed
                # let's check it and store the result.
                if pkg not in required_pkgs_info:
                    try:
                        installed = util.find_spec(pkg) is not None
                    except Exception:
                        installed = False
                    required_pkgs_info[pkg] = installed

                if not required_pkgs_info[pkg]:
                    if only_extended:
                        pytest.fail(
                            f"Package `{pkg}` is not installed but is required for "
                            f"extended tests. Please install the given package and "
                            f"try again.",
                        )

                    else:
                        # If the package is not installed, we immediately break
                        # and mark the test as skipped.
                        item.add_marker(
                            pytest.mark.skip(reason=f"Requires pkg: `{pkg}`")
                        )
                        break
        else:
            if only_extended:
                item.add_marker(
                    pytest.mark.skip(reason="Skipping not an extended test.")
                )
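For reference, the marker defined above is used like this in a test module (a hypothetical test, shown only to illustrate the hook):

    import pytest

    @pytest.mark.requires("jinja2")
    def test_uses_jinja2() -> None:
        import jinja2  # runs only when jinja2 is installed (or under --only-extended)

        assert jinja2.Template("{{ x }}").render(x="ok") == "ok"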
libs/core/tests/unit_tests/prompts/test_prompt.py

@@ -233,7 +233,7 @@ def test_partial() -> None:

@pytest.mark.requires("jinja2")
def test_prompt_jinja2_functionality(
    example_jinja2_prompt: Tuple[PromptTemplate, List[Dict[str, str]]]
    example_jinja2_prompt: Tuple[PromptTemplate, List[Dict[str, str]]],
) -> None:
    prefix = "Starting with {{ foo }}"
    suffix = "Ending with {{ bar }}"

@@ -256,7 +256,7 @@ def test_prompt_jinja2_functionality(

@pytest.mark.requires("jinja2")
def test_prompt_jinja2_missing_input_variables(
    example_jinja2_prompt: Tuple[PromptTemplate, List[Dict[str, str]]]
    example_jinja2_prompt: Tuple[PromptTemplate, List[Dict[str, str]]],
) -> None:
    """Test error is raised when input variables are not provided."""
    prefix = "Starting with {{ foo }}"

@@ -303,7 +303,7 @@ def test_prompt_jinja2_missing_input_variables(

@pytest.mark.requires("jinja2")
def test_prompt_jinja2_extra_input_variables(
    example_jinja2_prompt: Tuple[PromptTemplate, List[Dict[str, str]]]
    example_jinja2_prompt: Tuple[PromptTemplate, List[Dict[str, str]]],
) -> None:
    """Test error is raised when there are too many input variables."""
    prefix = "Starting with {{ foo }}"
Some files were not shown because too many files have changed in this diff.