mirror of
https://github.com/hwchase17/langchain.git
synced 2025-08-10 13:27:36 +00:00
sync wip with master (#32436)
Co-authored-by: Kanav Bansal <13186335+bansalkanav@users.noreply.github.com> Co-authored-by: Pranav Bhartiya <124018094+pranauww@users.noreply.github.com> Co-authored-by: Nelson Sproul <nelson.sproul@gmail.com> Co-authored-by: John Bledsoe <jmbledsoe@gmail.com>
This commit is contained in:
parent
ac2de920b1
commit
376f70be96
@ -9,6 +9,14 @@ This project utilizes [uv](https://docs.astral.sh/uv/) v0.5+ as a dependency man
|
||||
|
||||
Install `uv`: **[documentation on how to install it](https://docs.astral.sh/uv/getting-started/installation/)**.
|
||||
|
||||
### Windows Users
|
||||
|
||||
If you're on Windows and don't have `make` installed, you can install it via:
|
||||
- **Option 1**: Install via [Chocolatey](https://chocolatey.org/): `choco install make`
|
||||
- **Option 2**: Install via [Scoop](https://scoop.sh/): `scoop install make`
|
||||
- **Option 3**: Use [Windows Subsystem for Linux (WSL)](https://docs.microsoft.com/en-us/windows/wsl/)
|
||||
- **Option 4**: Use the direct `uv` commands shown in the sections below
|
||||
|
||||
## Different packages
|
||||
|
||||
This repository contains multiple packages:
|
||||
@ -48,7 +56,11 @@ uv sync
|
||||
Then verify dependency installation:
|
||||
|
||||
```bash
|
||||
# If you have `make` installed:
|
||||
make test
|
||||
|
||||
# If you don't have `make` (Windows alternative):
|
||||
uv run --group test pytest -n auto --disable-socket --allow-unix-socket tests/unit_tests
|
||||
```
|
||||
|
||||
## Testing
|
||||
@ -61,7 +73,11 @@ If you add new logic, please add a unit test.
|
||||
To run unit tests:
|
||||
|
||||
```bash
|
||||
# If you have `make` installed:
|
||||
make test
|
||||
|
||||
# If you don't have make (Windows alternative):
|
||||
uv run --group test pytest -n auto --disable-socket --allow-unix-socket tests/unit_tests
|
||||
```
|
||||
|
||||
There are also [integration tests and code-coverage](../testing.mdx) available.
|
||||
@ -72,7 +88,12 @@ If you are only developing `langchain_core`, you can simply install the dependen
|
||||
|
||||
```bash
|
||||
cd libs/core
|
||||
|
||||
# If you have `make` installed:
|
||||
make test
|
||||
|
||||
# If you don't have `make` (Windows alternative):
|
||||
uv run --group test pytest -n auto --disable-socket --allow-unix-socket tests/unit_tests
|
||||
```
|
||||
|
||||
## Formatting and linting
|
||||
@ -86,20 +107,37 @@ Formatting for this project is done via [ruff](https://docs.astral.sh/ruff/rules
|
||||
To run formatting for docs, cookbook and templates:
|
||||
|
||||
```bash
|
||||
# If you have `make` installed:
|
||||
make format
|
||||
|
||||
# If you don't have make (Windows alternative):
|
||||
uv run --all-groups ruff format .
|
||||
uv run --all-groups ruff check --fix .
|
||||
```
|
||||
|
||||
To run formatting for a library, run the same command from the relevant library directory:
|
||||
|
||||
```bash
|
||||
cd libs/{LIBRARY}
|
||||
|
||||
# If you have `make` installed:
|
||||
make format
|
||||
|
||||
# If you don't have make (Windows alternative):
|
||||
uv run --all-groups ruff format .
|
||||
uv run --all-groups ruff check --fix .
|
||||
```
|
||||
|
||||
Additionally, you can run the formatter only on the files that have been modified in your current branch as compared to the master branch using the format_diff command:
|
||||
|
||||
```bash
|
||||
# If you have `make` installed:
|
||||
make format_diff
|
||||
|
||||
# If you don't have `make` (Windows alternative):
|
||||
# First, get the list of modified files:
|
||||
git diff --relative=libs/langchain --name-only --diff-filter=d master | grep -E '\.py$|\.ipynb$' | xargs uv run --all-groups ruff format
|
||||
git diff --relative=libs/langchain --name-only --diff-filter=d master | grep -E '\.py$|\.ipynb$' | xargs uv run --all-groups ruff check --fix
|
||||
```
|
||||
|
||||
This is especially useful when you have made changes to a subset of the project and want to ensure your changes are properly formatted without affecting the rest of the codebase.
|
||||
@ -111,20 +149,40 @@ Linting for this project is done via a combination of [ruff](https://docs.astral
|
||||
To run linting for docs, cookbook and templates:
|
||||
|
||||
```bash
|
||||
# If you have `make` installed:
|
||||
make lint
|
||||
|
||||
# If you don't have `make` (Windows alternative):
|
||||
uv run --all-groups ruff check .
|
||||
uv run --all-groups ruff format . --diff
|
||||
uv run --all-groups mypy . --cache-dir .mypy_cache
|
||||
```
|
||||
|
||||
To run linting for a library, run the same command from the relevant library directory:
|
||||
|
||||
```bash
|
||||
cd libs/{LIBRARY}
|
||||
|
||||
# If you have `make` installed:
|
||||
make lint
|
||||
|
||||
# If you don't have `make` (Windows alternative):
|
||||
uv run --all-groups ruff check .
|
||||
uv run --all-groups ruff format . --diff
|
||||
uv run --all-groups mypy . --cache-dir .mypy_cache
|
||||
```
|
||||
|
||||
In addition, you can run the linter only on the files that have been modified in your current branch as compared to the master branch using the lint_diff command:
|
||||
|
||||
```bash
|
||||
# If you have `make` installed:
|
||||
make lint_diff
|
||||
|
||||
# If you don't have `make` (Windows alternative):
|
||||
# First, get the list of modified files:
|
||||
git diff --relative=libs/langchain --name-only --diff-filter=d master | grep -E '\.py$|\.ipynb$' | xargs uv run --all-groups ruff check
|
||||
git diff --relative=libs/langchain --name-only --diff-filter=d master | grep -E '\.py$|\.ipynb$' | xargs uv run --all-groups ruff format --diff
|
||||
git diff --relative=libs/langchain --name-only --diff-filter=d master | grep -E '\.py$|\.ipynb$' | xargs uv run --all-groups mypy --cache-dir .mypy_cache
|
||||
```
|
||||
|
||||
This can be very helpful when you've made changes to only certain parts of the project and want to ensure your changes meet the linting standards without having to check the entire codebase.
|
||||
@ -139,13 +197,21 @@ Note that `codespell` finds common typos, so it could have false-positive (corre
|
||||
To check spelling for this project:
|
||||
|
||||
```bash
|
||||
# If you have `make` installed:
|
||||
make spell_check
|
||||
|
||||
# If you don't have `make` (Windows alternative):
|
||||
uv run --all-groups codespell --toml pyproject.toml
|
||||
```
|
||||
|
||||
To fix spelling in place:
|
||||
|
||||
```bash
|
||||
# If you have `make` installed:
|
||||
make spell_fix
|
||||
|
||||
# If you don't have `make` (Windows alternative):
|
||||
uv run --all-groups codespell --toml pyproject.toml -w
|
||||
```
|
||||
|
||||
If codespell is incorrectly flagging a word, you can skip spellcheck for that word by adding it to the codespell config in the `pyproject.toml` file.
|
||||
|
@ -24,7 +24,7 @@
|
||||
"\n",
|
||||
":::tip\n",
|
||||
"\n",
|
||||
"The **default** implementation does **not** provide support for token-by-token streaming, but it ensures that the the model can be swapped in for any other model as it supports the same standard interface.\n",
|
||||
"The **default** implementation does **not** provide support for token-by-token streaming, but it ensures that the model can be swapped in for any other model as it supports the same standard interface.\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
|
@ -323,7 +323,7 @@
|
||||
"source": [
|
||||
"## RAG based approach\n",
|
||||
"\n",
|
||||
"Another simple idea is to chunk up the text, but instead of extracting information from every chunk, just focus on the the most relevant chunks.\n",
|
||||
"Another simple idea is to chunk up the text, but instead of extracting information from every chunk, just focus on the most relevant chunks.\n",
|
||||
"\n",
|
||||
":::caution\n",
|
||||
"It can be difficult to identify which chunks are relevant.\n",
|
||||
|
@ -104,7 +104,7 @@
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"`filter_messages` can be used in an imperatively (like above) or declaratively, making it easy to compose with other components in a chain:"
|
||||
"`filter_messages` can be used imperatively (like above) or declaratively, making it easy to compose with other components in a chain:"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -199,7 +199,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def _clear():\n",
|
||||
" \"\"\"Hacky helper method to clear content. See the `full` mode section to to understand why it works.\"\"\"\n",
|
||||
" \"\"\"Hacky helper method to clear content. See the `full` mode section to understand why it works.\"\"\"\n",
|
||||
" index([], record_manager, vectorstore, cleanup=\"full\", source_id_key=\"source\")"
|
||||
]
|
||||
},
|
||||
|
@ -88,7 +88,7 @@
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"`merge_message_runs` can be used in an imperatively (like above) or declaratively, making it easy to compose with other components in a chain:"
|
||||
"`merge_message_runs` can be used imperatively (like above) or declaratively, making it easy to compose with other components in a chain:"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -52,7 +52,7 @@
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | JS support | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| [ChatHuggingFace](https://python.langchain.com/api_reference/huggingface/chat_models/langchain_huggingface.chat_models.huggingface.ChatHuggingFace.html) | [langchain_huggingface](https://python.langchain.com/api_reference/huggingface/index.html) | ✅ | ❌ | ❌ |  |  |\n",
|
||||
"| [ChatHuggingFace](https://python.langchain.com/api_reference/huggingface/chat_models/langchain_huggingface.chat_models.huggingface.ChatHuggingFace.html) | [langchain-huggingface](https://python.langchain.com/api_reference/huggingface/index.html) | ✅ | ❌ | ❌ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
@ -61,7 +61,7 @@
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To access `langchain_huggingface` models you'll need to create a/an `Hugging Face` account, get an API key, and install the `langchain_huggingface` integration package.\n",
|
||||
"To access `langchain_huggingface` models you'll need to create a `Hugging Face` account, get an API key, and install the `langchain-huggingface` integration package.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
|
@ -24,7 +24,7 @@
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/mistral) | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| [ChatMistralAI](https://python.langchain.com/api_reference/mistralai/chat_models/langchain_mistralai.chat_models.ChatMistralAI.html) | [langchain_mistralai](https://python.langchain.com/api_reference/mistralai/index.html) | ❌ | beta | ✅ |  |  |\n",
|
||||
"| [ChatMistralAI](https://python.langchain.com/api_reference/mistralai/chat_models/langchain_mistralai.chat_models.ChatMistralAI.html) | [langchain-mistralai](https://python.langchain.com/api_reference/mistralai/index.html) | ❌ | beta | ✅ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
@ -34,7 +34,7 @@
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"To access `ChatMistralAI` models you'll need to create a Mistral account, get an API key, and install the `langchain_mistralai` integration package.\n",
|
||||
"To access `ChatMistralAI` models you'll need to create a Mistral account, get an API key, and install the `langchain-mistralai` integration package.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
@ -80,7 +80,7 @@
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain Mistral integration lives in the `langchain_mistralai` package:"
|
||||
"The LangChain Mistral integration lives in the `langchain-mistralai` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -90,7 +90,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain_mistralai"
|
||||
"%pip install -qU langchain-mistralai"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -41,7 +41,7 @@
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | JS support | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| [ChatNVIDIA](https://python.langchain.com/api_reference/nvidia_ai_endpoints/chat_models/langchain_nvidia_ai_endpoints.chat_models.ChatNVIDIA.html) | [langchain_nvidia_ai_endpoints](https://python.langchain.com/api_reference/nvidia_ai_endpoints/index.html) | ✅ | beta | ❌ |  |  |\n",
|
||||
"| [ChatNVIDIA](https://python.langchain.com/api_reference/nvidia_ai_endpoints/chat_models/langchain_nvidia_ai_endpoints.chat_models.ChatNVIDIA.html) | [langchain-nvidia-ai-endpoints](https://python.langchain.com/api_reference/nvidia_ai_endpoints/index.html) | ✅ | beta | ❌ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
@ -102,7 +102,7 @@
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain NVIDIA AI Endpoints integration lives in the `langchain_nvidia_ai_endpoints` package:"
|
||||
"The LangChain NVIDIA AI Endpoints integration lives in the `langchain-nvidia-ai-endpoints` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -69,7 +69,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain_agentql"
|
||||
"%pip install -qU langchain-agentql"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -310,7 +310,7 @@
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"\n",
|
||||
"chain = load_qa_chain(llm=OpenAI(), chain_type=\"map_reduce\")\n",
|
||||
"query = [\"Who are the autors?\"]\n",
|
||||
"query = [\"Who are the authors?\"]\n",
|
||||
"\n",
|
||||
"chain.run(input_documents=documents, question=query)"
|
||||
]
|
||||
|
@ -25,7 +25,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet azureml-fsspec, azure-ai-generative"
|
||||
"%pip install --upgrade --quiet azureml-fsspec azure-ai-generative"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -16,7 +16,7 @@
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | JS support|\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: |\n",
|
||||
"| [BSHTMLLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.html_bs.BSHTMLLoader.html) | [langchain_community](https://python.langchain.com/api_reference/community/index.html) | ✅ | ❌ | ❌ | \n",
|
||||
"| [BSHTMLLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.html_bs.BSHTMLLoader.html) | [langchain-community](https://python.langchain.com/api_reference/community/index.html) | ✅ | ❌ | ❌ | \n",
|
||||
"### Loader features\n",
|
||||
"| Source | Document Lazy Loading | Native Async Support\n",
|
||||
"| :---: | :---: | :---: | \n",
|
||||
@ -52,7 +52,7 @@
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"Install **langchain_community** and **bs4**."
|
||||
"Install **langchain-community** and **bs4**."
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -61,7 +61,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain_community bs4"
|
||||
"%pip install -qU langchain-community bs4"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -245,7 +245,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install -q --progress-bar off --no-warn-conflicts langchain-core langchain-huggingface langchain_milvus langchain python-dotenv"
|
||||
"%pip install -q --progress-bar off --no-warn-conflicts langchain-core langchain-huggingface langchain-milvus langchain python-dotenv"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -15,7 +15,7 @@
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/document_loaders/web_loaders/firecrawl/)|\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: |\n",
|
||||
"| [FireCrawlLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.firecrawl.FireCrawlLoader.html) | [langchain_community](https://python.langchain.com/api_reference/community/index.html) | ✅ | ❌ | ✅ | \n",
|
||||
"| [FireCrawlLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.firecrawl.FireCrawlLoader.html) | [langchain-community](https://python.langchain.com/api_reference/community/index.html) | ✅ | ❌ | ✅ | \n",
|
||||
"### Loader features\n",
|
||||
"| Source | Document Lazy Loading | Native Async Support\n",
|
||||
"| :---: | :---: | :---: | \n",
|
||||
|
@ -15,7 +15,7 @@
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/document_loaders/file_loaders/json/)|\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: |\n",
|
||||
"| [JSONLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.json_loader.JSONLoader.html) | [langchain_community](https://python.langchain.com/api_reference/community/index.html) | ✅ | ❌ | ✅ | \n",
|
||||
"| [JSONLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.json_loader.JSONLoader.html) | [langchain-community](https://python.langchain.com/api_reference/community/index.html) | ✅ | ❌ | ✅ | \n",
|
||||
"### Loader features\n",
|
||||
"| Source | Document Lazy Loading | Native Async Support\n",
|
||||
"| :---: | :---: | :---: | \n",
|
||||
@ -51,7 +51,7 @@
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"Install **langchain_community** and **jq**:"
|
||||
"Install **langchain-community** and **jq**:"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -60,7 +60,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain_community jq "
|
||||
"%pip install -qU langchain-community jq "
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -13,7 +13,7 @@
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | JS support|\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: |\n",
|
||||
"| [MathPixPDFLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.pdf.MathpixPDFLoader.html) | [langchain_community](https://python.langchain.com/api_reference/community/index.html) | ✅ | ❌ | ❌ | \n",
|
||||
"| [MathPixPDFLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.pdf.MathpixPDFLoader.html) | [langchain-community](https://python.langchain.com/api_reference/community/index.html) | ✅ | ❌ | ❌ | \n",
|
||||
"### Loader features\n",
|
||||
"| Source | Document Lazy Loading | Native Async Support\n",
|
||||
"| :---: | :---: | :---: | \n",
|
||||
@ -60,7 +60,7 @@
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"Install **langchain_community**."
|
||||
"Install **langchain-community**."
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -69,7 +69,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain_community"
|
||||
"%pip install -qU langchain-community"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -15,7 +15,7 @@
|
||||
"source": [
|
||||
"[Socrata](https://dev.socrata.com/foundry/data.sfgov.org/vw6y-z8j6) provides an API for city open data. \n",
|
||||
"\n",
|
||||
"For a dataset such as [SF crime](https://data.sfgov.org/Public-Safety/Police-Department-Incident-Reports-Historical-2003/tmnf-yvry), to to the `API` tab on top right. \n",
|
||||
"For a dataset such as [SF crime](https://data.sfgov.org/Public-Safety/Police-Department-Incident-Reports-Historical-2003/tmnf-yvry), see the `API` tab on top right. \n",
|
||||
"\n",
|
||||
"That provides you with the `dataset identifier`.\n",
|
||||
"\n",
|
||||
|
@ -15,7 +15,7 @@
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | JS support|\n",
|
||||
"|:-----------------------------------------------------------------------------------------------------------------------------------------------------| :--- | :---: | :---: | :---: |\n",
|
||||
"| [PDFMinerLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.pdf.PDFMinerLoader.html) | [langchain_community](https://python.langchain.com/api_reference/community/index.html) | ✅ | ❌ | ❌ |\n",
|
||||
"| [PDFMinerLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.pdf.PDFMinerLoader.html) | [langchain-community](https://python.langchain.com/api_reference/community/index.html) | ✅ | ❌ | ❌ |\n",
|
||||
"\n",
|
||||
"--------- \n",
|
||||
"\n",
|
||||
@ -60,7 +60,7 @@
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"Install **langchain_community** and **pdfminer**."
|
||||
"Install **langchain-community** and **pdfminer**."
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -82,7 +82,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install -qU langchain_community pdfminer.six"
|
||||
"%pip install -qU langchain-community pdfminer.six"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -938,7 +938,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install -qU langchain_openai"
|
||||
"%pip install -qU langchain-openai"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -13,7 +13,7 @@
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | JS support|\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: |\n",
|
||||
"| [PDFPlumberLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.pdf.PDFPlumberLoader.html) | [langchain_community](https://python.langchain.com/api_reference/community/index.html) | ✅ | ❌ | ❌ | \n",
|
||||
"| [PDFPlumberLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.pdf.PDFPlumberLoader.html) | [langchain-community](https://python.langchain.com/api_reference/community/index.html) | ✅ | ❌ | ❌ | \n",
|
||||
"### Loader features\n",
|
||||
"| Source | Document Lazy Loading | Native Async Support\n",
|
||||
"| :---: | :---: | :---: | \n",
|
||||
@ -47,7 +47,7 @@
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"Install **langchain_community**."
|
||||
"Install **langchain-community**."
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -56,7 +56,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain_community"
|
||||
"%pip install -qU langchain-community"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -117,7 +117,7 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The fields:\n",
|
||||
" - `es_host_url` is the endpoint to to MetadataIQ Elasticsearch database\n",
|
||||
" - `es_host_url` is the endpoint to MetadataIQ Elasticsearch database\n",
|
||||
" - `es_index_index` is the name of the index where PowerScale writes it file system metadata\n",
|
||||
" - `es_api_key` is the **encoded** version of your elasticsearch API key\n",
|
||||
" - `folder_path` is the path on PowerScale to be queried for changes"
|
||||
|
@ -15,7 +15,7 @@
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | JS support|\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: |\n",
|
||||
"| [PyMuPDFLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.pdf.PyMuPDFLoader.html) | [langchain_community](https://python.langchain.com/api_reference/community/index.html) | ✅ | ❌ | ❌ | \n",
|
||||
"| [PyMuPDFLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.pdf.PyMuPDFLoader.html) | [langchain-community](https://python.langchain.com/api_reference/community/index.html) | ✅ | ❌ | ❌ | \n",
|
||||
"\n",
|
||||
"--------- \n",
|
||||
"\n",
|
||||
@ -60,7 +60,7 @@
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"Install **langchain_community** and **pymupdf**."
|
||||
"Install **langchain-community** and **pymupdf**."
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -71,7 +71,7 @@
|
||||
"start_time": "2025-01-16T09:48:33.057015Z"
|
||||
}
|
||||
},
|
||||
"source": "%pip install -qU langchain_community pymupdf",
|
||||
"source": "%pip install -qU langchain-community pymupdf",
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
@ -569,7 +569,7 @@
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"%pip install -qU langchain_openai"
|
||||
"%pip install -qU langchain-openai"
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
|
@ -23,7 +23,7 @@
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | JS support |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: |\n",
|
||||
"| [PyMuPDF4LLMLoader](https://github.com/lakinduboteju/langchain-pymupdf4llm) | [langchain_pymupdf4llm](https://pypi.org/project/langchain-pymupdf4llm) | ✅ | ❌ | ❌ |\n",
|
||||
"| [PyMuPDF4LLMLoader](https://github.com/lakinduboteju/langchain-pymupdf4llm) | [langchain-pymupdf4llm](https://pypi.org/project/langchain-pymupdf4llm) | ✅ | ❌ | ❌ |\n",
|
||||
"\n",
|
||||
"### Loader features\n",
|
||||
"\n",
|
||||
@ -61,7 +61,7 @@
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"Install **langchain_community** and **langchain-pymupdf4llm**."
|
||||
"Install **langchain-community** and **langchain-pymupdf4llm**."
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -78,7 +78,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install -qU langchain_community langchain-pymupdf4llm"
|
||||
"%pip install -qU langchain-community langchain-pymupdf4llm"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -554,7 +554,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install -qU langchain_openai"
|
||||
"%pip install -qU langchain-openai"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -14,7 +14,7 @@
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | JS support|\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: |\n",
|
||||
"| [PyPDFDirectoryLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.pdf.PyPDFDirectoryLoader.html) | [langchain_community](https://python.langchain.com/api_reference/community/index.html) | ✅ | ❌ | ❌ | \n",
|
||||
"| [PyPDFDirectoryLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.pdf.PyPDFDirectoryLoader.html) | [langchain-community](https://python.langchain.com/api_reference/community/index.html) | ✅ | ❌ | ❌ | \n",
|
||||
"### Loader features\n",
|
||||
"| Source | Document Lazy Loading | Native Async Support\n",
|
||||
"| :---: | :---: | :---: | \n",
|
||||
@ -53,7 +53,7 @@
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"Install **langchain_community**."
|
||||
"Install **langchain-community**."
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -74,7 +74,7 @@
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": "%pip install -qU langchain_community pypdf pillow"
|
||||
"source": "%pip install -qU langchain-community pypdf pillow"
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
|
@ -15,7 +15,7 @@
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | JS support|\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: |\n",
|
||||
"| [PyPDFLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.pdf.PyPDFLoader.html) | [langchain_community](https://python.langchain.com/api_reference/community/index.html) | ✅ | ❌ | ❌ | \n",
|
||||
"| [PyPDFLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.pdf.PyPDFLoader.html) | [langchain-community](https://python.langchain.com/api_reference/community/index.html) | ✅ | ❌ | ❌ | \n",
|
||||
" \n",
|
||||
"--------- \n",
|
||||
"\n",
|
||||
@ -60,7 +60,7 @@
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"Install **langchain_community** and **pypdf**."
|
||||
"Install **langchain-community** and **pypdf**."
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -81,7 +81,7 @@
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": "%pip install -qU langchain_community pypdfium2"
|
||||
"source": "%pip install -qU langchain-community pypdfium2"
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
@ -802,7 +802,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install -qU langchain_openai"
|
||||
"%pip install -qU langchain-openai"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -15,7 +15,7 @@
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | JS support|\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: |\n",
|
||||
"| [PyPDFLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.pdf.PyPDFLoader.html) | [langchain_community](https://python.langchain.com/api_reference/community/index.html) | ✅ | ❌ | ❌ | \n",
|
||||
"| [PyPDFLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.pdf.PyPDFLoader.html) | [langchain-community](https://python.langchain.com/api_reference/community/index.html) | ✅ | ❌ | ❌ | \n",
|
||||
" \n",
|
||||
"--------- \n",
|
||||
"\n",
|
||||
@ -60,7 +60,7 @@
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"Install **langchain_community** and **pypdf**."
|
||||
"Install **langchain-community** and **pypdf**."
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -82,7 +82,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install -qU langchain_community pypdf"
|
||||
"%pip install -qU langchain-community pypdf"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -818,7 +818,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install -qU langchain_openai"
|
||||
"%pip install -qU langchain-openai"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -14,7 +14,7 @@
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/document_loaders/web_loaders/recursive_url_loader/)|\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: |\n",
|
||||
"| [RecursiveUrlLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.recursive_url_loader.RecursiveUrlLoader.html) | [langchain_community](https://python.langchain.com/api_reference/community/index.html) | ✅ | ❌ | ✅ | \n",
|
||||
"| [RecursiveUrlLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.recursive_url_loader.RecursiveUrlLoader.html) | [langchain-community](https://python.langchain.com/api_reference/community/index.html) | ✅ | ❌ | ✅ | \n",
|
||||
"### Loader features\n",
|
||||
"| Source | Document Lazy Loading | Native Async Support\n",
|
||||
"| :---: | :---: | :---: | \n",
|
||||
|
@ -11,7 +11,7 @@
|
||||
"\n",
|
||||
"This loader fetches the text from the Posts of Subreddits or Reddit users, using the `praw` Python package.\n",
|
||||
"\n",
|
||||
"Make a [Reddit Application](https://www.reddit.com/prefs/apps/) and initialize the loader with with your Reddit API credentials."
|
||||
"Make a [Reddit Application](https://www.reddit.com/prefs/apps/) and initialize the loader with your Reddit API credentials."
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -15,7 +15,7 @@
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/document_loaders/web_loaders/sitemap/)|\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: |\n",
|
||||
"| [SiteMapLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.sitemap.SitemapLoader.html#langchain_community.document_loaders.sitemap.SitemapLoader) | [langchain_community](https://python.langchain.com/api_reference/community/index.html) | ✅ | ❌ | ✅ | \n",
|
||||
"| [SiteMapLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.sitemap.SitemapLoader.html#langchain_community.document_loaders.sitemap.SitemapLoader) | [langchain-community](https://python.langchain.com/api_reference/community/index.html) | ✅ | ❌ | ✅ | \n",
|
||||
"### Loader features\n",
|
||||
"| Source | Document Lazy Loading | Native Async Support\n",
|
||||
"| :---: | :---: | :---: | \n",
|
||||
@ -51,7 +51,7 @@
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"Install **langchain_community**."
|
||||
"Install **langchain-community**."
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -16,7 +16,7 @@
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/document_loaders/file_loaders/unstructured/)|\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: |\n",
|
||||
"| [UnstructuredLoader](https://python.langchain.com/api_reference/unstructured/document_loaders/langchain_unstructured.document_loaders.UnstructuredLoader.html) | [langchain_unstructured](https://python.langchain.com/api_reference/unstructured/index.html) | ✅ | ❌ | ✅ | \n",
|
||||
"| [UnstructuredLoader](https://python.langchain.com/api_reference/unstructured/document_loaders/langchain_unstructured.document_loaders.UnstructuredLoader.html) | [langchain-unstructured](https://python.langchain.com/api_reference/unstructured/index.html) | ✅ | ❌ | ✅ | \n",
|
||||
"### Loader features\n",
|
||||
"| Source | Document Lazy Loading | Native Async Support\n",
|
||||
"| :---: | :---: | :---: | \n",
|
||||
|
@ -151,10 +151,10 @@
|
||||
"Red arrow magic !\n",
|
||||
"Something white\n",
|
||||
"Something Red\n",
|
||||
"This a a completly useless diagramm, cool !!\n",
|
||||
"This a completely useless diagram, cool !!\n",
|
||||
"\n",
|
||||
"But this is for example !\n",
|
||||
"This diagramm is a base of many pages in this file. But it is editable in file \\\"BG WITH CONTENT\\\"\n",
|
||||
"This diagram is a base of many pages in this file. But it is editable in file \\\"BG WITH CONTENT\\\"\n",
|
||||
"This is a page with something...\n",
|
||||
"\n",
|
||||
"WAW I have learned something !\n",
|
||||
@ -183,10 +183,10 @@
|
||||
"This is a title\n",
|
||||
"Something white\n",
|
||||
"Something Red\n",
|
||||
"This a a completly useless diagramm, cool !!\n",
|
||||
"This a completely useless diagram, cool !!\n",
|
||||
"\n",
|
||||
"But this is for example !\n",
|
||||
"This diagramm is a base of many pages in this file. But it is editable in file \\\"BG WITH CONTENT\\\"\n",
|
||||
"This diagram is a base of many pages in this file. But it is editable in file \\\"BG WITH CONTENT\\\"\n",
|
||||
"Another RED arrow wow\n",
|
||||
"Arrow with point but red\n",
|
||||
"Green line\n",
|
||||
@ -219,10 +219,10 @@
|
||||
"Red arrow magic !\n",
|
||||
"Something white\n",
|
||||
"Something Red\n",
|
||||
"This a a completly useless diagramm, cool !!\n",
|
||||
"This a completely useless diagram, cool !!\n",
|
||||
"\n",
|
||||
"But this is for example !\n",
|
||||
"This diagramm is a base of many pages in this file. But it is editable in file \\\"BG WITH CONTENT\\\"\n",
|
||||
"This diagram is a base of many pages in this file. But it is editable in file \\\"BG WITH CONTENT\\\"\n",
|
||||
"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor\n",
|
||||
"\\u00a0\\u00a0\\u00a0\\u00a0\\u00a0\\u00a0\\u00a0\\u00a0\\u00a0\\u00a0\\u00a0\\u00a0\\u00a0\\u00a0\\u00a0\\u00a0\\u00a0\\u00a0\\u00a0\\u00a0-\\u00a0incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in\n",
|
||||
"\n",
|
||||
@ -252,10 +252,10 @@
|
||||
"This is a title\n",
|
||||
"Something white\n",
|
||||
"Something Red\n",
|
||||
"This a a completly useless diagramm, cool !!\n",
|
||||
"This a completely useless diagram, cool !!\n",
|
||||
"\n",
|
||||
"But this is for example !\n",
|
||||
"This diagramm is a base of many pages in this file. But it is editable in file \\\"BG WITH CONTENT\\\"\n",
|
||||
"This diagram is a base of many pages in this file. But it is editable in file \\\"BG WITH CONTENT\\\"\n",
|
||||
"\n",
|
||||
"------ Page 7 ------\n",
|
||||
"Title page : Useful ↔ Useless page\n",
|
||||
@ -276,10 +276,10 @@
|
||||
"This is a title\n",
|
||||
"Something white\n",
|
||||
"Something Red\n",
|
||||
"This a a completly useless diagramm, cool !!\n",
|
||||
"This a completely useless diagram, cool !!\n",
|
||||
"\n",
|
||||
"But this is for example !\n",
|
||||
"This diagramm is a base of many pages in this file. But it is editable in file \\\"BG WITH CONTENT\\\"\n",
|
||||
"This diagram is a base of many pages in this file. But it is editable in file \\\"BG WITH CONTENT\\\"\n",
|
||||
"Title of this document : BLABLABLA\n",
|
||||
"\n",
|
||||
"------ Page 8 ------\n",
|
||||
@ -359,10 +359,10 @@
|
||||
"Red arrow magic !\n",
|
||||
"Something white\n",
|
||||
"Something Red\n",
|
||||
"This a a completly useless diagramm, cool !!\n",
|
||||
"This a completely useless diagram, cool !!\n",
|
||||
"\n",
|
||||
"But this is for example !\n",
|
||||
"This diagramm is a base of many pages in this file. But it is editable in file \\\"BG WITH CONTENT\\\"\n",
|
||||
"This diagram is a base of many pages in this file. But it is editable in file \\\"BG WITH CONTENT\\\"\n",
|
||||
"Useful\\u2194 Useless page\\u00a0\n",
|
||||
"\n",
|
||||
"Tests of some exotics characters :\\u00a0\\u00e3\\u00e4\\u00e5\\u0101\\u0103 \\u00fc\\u2554\\u00a0\\u00a0\\u00bc \\u00c7 \\u25d8\\u25cb\\u2642\\u266b\\u2640\\u00ee\\u2665\n",
|
||||
@ -444,10 +444,10 @@
|
||||
"Red arrow magic !\n",
|
||||
"Something white\n",
|
||||
"Something Red\n",
|
||||
"This a a completly useless diagramm, cool !!\n",
|
||||
"This a completely useless diagram, cool !!\n",
|
||||
"\n",
|
||||
"But this is for example !\n",
|
||||
"This diagramm is a base of many pages in this file. But it is editable in file \\\"BG WITH CONTENT\\\"\n",
|
||||
"This diagram is a base of many pages in this file. But it is editable in file \\\"BG WITH CONTENT\\\"\n",
|
||||
"Only connectors on this page. This is the CoNNeCtor page\n"
|
||||
]
|
||||
}
|
||||
|
@ -20,7 +20,7 @@
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | JS support|\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: |\n",
|
||||
"| [WebBaseLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.web_base.WebBaseLoader.html) | [langchain_community](https://python.langchain.com/api_reference/community/index.html) | ✅ | ❌ | ❌ | \n",
|
||||
"| [WebBaseLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.web_base.WebBaseLoader.html) | [langchain-community](https://python.langchain.com/api_reference/community/index.html) | ✅ | ❌ | ❌ | \n",
|
||||
"### Loader features\n",
|
||||
"| Source | Document Lazy Loading | Native Async Support\n",
|
||||
"| :---: | :---: | :---: | \n",
|
||||
@ -44,7 +44,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain_community beautifulsoup4"
|
||||
"%pip install -qU langchain-community beautifulsoup4"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -261,7 +261,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU upstash_redis"
|
||||
"%pip install -qU upstash-redis"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -1543,7 +1543,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install -qU langchain_astradb\n",
|
||||
"%pip install -qU langchain-astradb\n",
|
||||
"\n",
|
||||
"import getpass\n",
|
||||
"\n",
|
||||
@ -2683,7 +2683,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain_couchbase"
|
||||
"%pip install -qU langchain-couchbase"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -34,7 +34,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet langchain_aws"
|
||||
"%pip install --upgrade --quiet langchain-aws"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -22,7 +22,7 @@
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/llms/cohere/) | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| [Cohere](https://python.langchain.com/api_reference/community/llms/langchain_community.llms.cohere.Cohere.html) | [langchain_community](https://python.langchain.com/api_reference/community/index.html) | ❌ | beta | ✅ |  |  |\n"
|
||||
"| [Cohere](https://python.langchain.com/api_reference/community/llms/langchain_community.llms.cohere.Cohere.html) | [langchain-community](https://python.langchain.com/api_reference/community/index.html) | ❌ | beta | ✅ |  |  |\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -22,7 +22,7 @@
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/v0.1/docs/integrations/llms/fireworks/) | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| [Fireworks](https://python.langchain.com/api_reference/fireworks/llms/langchain_fireworks.llms.Fireworks.html#langchain_fireworks.llms.Fireworks) | [langchain_fireworks](https://python.langchain.com/api_reference/fireworks/index.html) | ❌ | ❌ | ✅ |  |  |"
|
||||
"| [Fireworks](https://python.langchain.com/api_reference/fireworks/llms/langchain_fireworks.llms.Fireworks.html#langchain_fireworks.llms.Fireworks) | [langchain-fireworks](https://python.langchain.com/api_reference/fireworks/index.html) | ❌ | ❌ | ✅ |  |  |"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -59,7 +59,7 @@
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"You need to install the `langchain_fireworks` python package for the rest of the notebook to work."
|
||||
"You need to install the `langchain-fireworks` python package for the rest of the notebook to work."
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -29,7 +29,7 @@
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | JS support | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| [NVIDIA](https://python.langchain.com/api_reference/nvidia_ai_endpoints/llms/langchain_nvidia_ai_endpoints.chat_models.ChatNVIDIA.html) | [langchain_nvidia_ai_endpoints](https://python.langchain.com/api_reference/nvidia_ai_endpoints/index.html) | ✅ | beta | ❌ |  |  |\n",
|
||||
"| [NVIDIA](https://python.langchain.com/api_reference/nvidia_ai_endpoints/llms/langchain_nvidia_ai_endpoints.chat_models.ChatNVIDIA.html) | [langchain-nvidia-ai-endpoints](https://python.langchain.com/api_reference/nvidia_ai_endpoints/index.html) | ✅ | beta | ❌ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
@ -71,7 +71,7 @@
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain NVIDIA AI Endpoints integration lives in the `langchain_nvidia_ai_endpoints` package:"
|
||||
"The LangChain NVIDIA AI Endpoints integration lives in the `langchain-nvidia-ai-endpoints` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -67,7 +67,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain_community wikipedia"
|
||||
"%pip install -qU langchain-community wikipedia"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -31,7 +31,7 @@
|
||||
"\n",
|
||||
"| Class | Package | Local | JS support | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: |\n",
|
||||
"| [AstraDBByteStore](https://python.langchain.com/api_reference/astradb/storage/langchain_astradb.storage.AstraDBByteStore.html) | [langchain_astradb](https://python.langchain.com/api_reference/astradb/index.html) | ❌ | ❌ |  |  |\n",
|
||||
"| [AstraDBByteStore](https://python.langchain.com/api_reference/astradb/storage/langchain_astradb.storage.AstraDBByteStore.html) | [langchain-astradb](https://python.langchain.com/api_reference/astradb/index.html) | ❌ | ❌ |  |  |\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
@ -60,7 +60,7 @@
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain AstraDB integration lives in the `langchain_astradb` package:"
|
||||
"The LangChain AstraDB integration lives in the `langchain-astradb` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -69,7 +69,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain_astradb"
|
||||
"%pip install -qU langchain-astradb"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -29,7 +29,7 @@
|
||||
"\n",
|
||||
"| Class | Package | Local | [JS support](https://js.langchain.com/docs/integrations/stores/cassandra_storage) | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: |\n",
|
||||
"| [CassandraByteStore](https://python.langchain.com/api_reference/community/storage/langchain_community.storage.cassandra.CassandraByteStore.html) | [langchain_community](https://python.langchain.com/api_reference/community/index.html) | ✅ | ✅ |  |  |\n",
|
||||
"| [CassandraByteStore](https://python.langchain.com/api_reference/community/storage/langchain_community.storage.cassandra.CassandraByteStore.html) | [langchain-community](https://python.langchain.com/api_reference/community/index.html) | ✅ | ✅ |  |  |\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
@ -44,7 +44,7 @@
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain `CassandraByteStore` integration lives in the `langchain_community` package. You'll also need to install the `cassio` package or the `cassandra-driver` package as a peer dependency depending on which initialization method you're using:"
|
||||
"The LangChain `CassandraByteStore` integration lives in the `langchain-community` package. You'll also need to install the `cassio` package or the `cassandra-driver` package as a peer dependency depending on which initialization method you're using:"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -53,7 +53,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain_community\n",
|
||||
"%pip install -qU langchain-community\n",
|
||||
"%pip install -qU cassandra-driver\n",
|
||||
"%pip install -qU cassio"
|
||||
]
|
||||
|
@ -29,7 +29,7 @@
|
||||
"\n",
|
||||
"| Class | Package | Local | JS support | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: |\n",
|
||||
"| [ElasticsearchEmbeddingsCache](https://python.langchain.com/api_reference/elasticsearch/cache/langchain_elasticsearch.cache.ElasticsearchEmbeddingsCache.html) | [langchain_elasticsearch](https://python.langchain.com/api_reference/elasticsearch/index.html) | ✅ | ❌ |  |  |\n",
|
||||
"| [ElasticsearchEmbeddingsCache](https://python.langchain.com/api_reference/elasticsearch/cache/langchain_elasticsearch.cache.ElasticsearchEmbeddingsCache.html) | [langchain-elasticsearch](https://python.langchain.com/api_reference/elasticsearch/index.html) | ✅ | ❌ |  |  |\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
@ -42,7 +42,7 @@
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain `ElasticsearchEmbeddingsCache` integration lives in the `__package_name__` package:"
|
||||
"The LangChain `ElasticsearchEmbeddingsCache` integration lives in the `langchain-elasticsearch` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -51,7 +51,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain_elasticsearch"
|
||||
"%pip install -qU langchain-elasticsearch"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -29,7 +29,7 @@
|
||||
"\n",
|
||||
"| Class | Package | Local | [JS support](https://js.langchain.com/docs/integrations/stores/in_memory/) | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: |\n",
|
||||
"| [InMemoryByteStore](https://python.langchain.com/api_reference/core/stores/langchain_core.stores.InMemoryByteStore.html) | [langchain_core](https://python.langchain.com/api_reference/core/index.html) | ✅ | ✅ |  |  |"
|
||||
"| [InMemoryByteStore](https://python.langchain.com/api_reference/core/stores/langchain_core.stores.InMemoryByteStore.html) | [langchain-core](https://python.langchain.com/api_reference/core/index.html) | ✅ | ✅ |  |  |"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -38,7 +38,7 @@
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain `InMemoryByteStore` integration lives in the `langchain_core` package:"
|
||||
"The LangChain `InMemoryByteStore` integration lives in the `langchain-core` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -47,7 +47,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain_core"
|
||||
"%pip install -qU langchain-core"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -29,7 +29,7 @@
|
||||
"\n",
|
||||
"| Class | Package | Local | [JS support](https://js.langchain.com/docs/integrations/stores/ioredis_storage) | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: |\n",
|
||||
"| [RedisStore](https://python.langchain.com/api_reference/community/storage/langchain_community.storage.redis.RedisStore.html) | [langchain_community](https://python.langchain.com/api_reference/community/index.html) | ✅ | ✅ |  |  |\n",
|
||||
"| [RedisStore](https://python.langchain.com/api_reference/community/storage/langchain_community.storage.redis.RedisStore.html) | [langchain-community](https://python.langchain.com/api_reference/community/index.html) | ✅ | ✅ |  |  |\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
@ -42,7 +42,7 @@
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain `RedisStore` integration lives in the `langchain_community` package:"
|
||||
"The LangChain `RedisStore` integration lives in the `langchain-community` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -51,7 +51,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain_community redis"
|
||||
"%pip install -qU langchain-community redis"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -31,7 +31,7 @@
|
||||
"\n",
|
||||
"| Class | Package | Local | [JS support](https://js.langchain.com/docs/integrations/stores/upstash_redis_storage) | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: |\n",
|
||||
"| [UpstashRedisByteStore](https://python.langchain.com/api_reference/community/storage/langchain_community.storage.upstash_redis.UpstashRedisByteStore.html) | [langchain_community](https://python.langchain.com/api_reference/community/index.html) | ❌ | ✅ |  |  |\n",
|
||||
"| [UpstashRedisByteStore](https://python.langchain.com/api_reference/community/storage/langchain_community.storage.upstash_redis.UpstashRedisByteStore.html) | [langchain-community](https://python.langchain.com/api_reference/community/index.html) | ❌ | ✅ |  |  |\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
@ -60,7 +60,7 @@
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain Upstash integration lives in the `langchain_community` package. You'll also need to install the `upstash-redis` package as a peer dependency:"
|
||||
"The LangChain Upstash integration lives in the `langchain-community` package. You'll also need to install the `upstash-redis` package as a peer dependency:"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -69,7 +69,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain_community upstash-redis"
|
||||
"%pip install -qU langchain-community upstash-redis"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -411,7 +411,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet langchain faiss-cpu tiktoken langchain_community\n",
|
||||
"%pip install --upgrade --quiet langchain faiss-cpu tiktoken langchain-community\n",
|
||||
"\n",
|
||||
"from operator import itemgetter\n",
|
||||
"\n",
|
||||
|
@ -55,7 +55,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --quiet -U langchain_agentql"
|
||||
"%pip install --quiet -U langchain-agentql"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -85,7 +85,7 @@
|
||||
"Install the following Python modules:\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"pip install ipykernel python-dotenv cassio langchain_openai langchain langchain-community langchainhub\n",
|
||||
"pip install ipykernel python-dotenv cassio langchain-openai langchain langchain-community langchainhub\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"### .env file\n",
|
||||
|
@ -51,7 +51,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-community langchain_openai"
|
||||
"%pip install -qU langchain-community langchain-openai"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -19,7 +19,7 @@
|
||||
"\n",
|
||||
"> [Firestore](https://cloud.google.com/firestore) is a serverless document-oriented database that scales to meet any demand. Extend your database application to build AI-powered experiences leveraging Firestore's Langchain integrations.\n",
|
||||
"\n",
|
||||
"This notebook goes over how to use [Firestore](https://cloud.google.com/firestore) to to store vectors and query them using the `FirestoreVectorStore` class.\n",
|
||||
"This notebook goes over how to use [Firestore](https://cloud.google.com/firestore) to store vectors and query them using the `FirestoreVectorStore` class.\n",
|
||||
"\n",
|
||||
"[](https://colab.research.google.com/github/googleapis/langchain-google-firestore-python/blob/main/docs/vectorstores.ipynb)"
|
||||
]
|
||||
|
@ -36,7 +36,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"pip install -qU langchain_milvus"
|
||||
"pip install -qU langchain-milvus"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -9,13 +9,13 @@
|
||||
"\n",
|
||||
"> An implementation of LangChain vectorstore abstraction using `postgres` as the backend and utilizing the `pgvector` extension.\n",
|
||||
"\n",
|
||||
"The code lives in an integration package called: [langchain_postgres](https://github.com/langchain-ai/langchain-postgres/).\n",
|
||||
"The code lives in an integration package called: [langchain-postgres](https://github.com/langchain-ai/langchain-postgres/).\n",
|
||||
"\n",
|
||||
"## Status\n",
|
||||
"\n",
|
||||
"This code has been ported over from `langchain_community` into a dedicated package called `langchain-postgres`. The following changes have been made:\n",
|
||||
"This code has been ported over from `langchain-community` into a dedicated package called `langchain-postgres`. The following changes have been made:\n",
|
||||
"\n",
|
||||
"* langchain_postgres works only with psycopg3. Please update your connnecion strings from `postgresql+psycopg2://...` to `postgresql+psycopg://langchain:langchain@...` (yes, it's the driver name is `psycopg` not `psycopg3`, but it'll use `psycopg3`.\n",
|
||||
"* `langchain-postgres` works only with psycopg3. Please update your connnecion strings from `postgresql+psycopg2://...` to `postgresql+psycopg://langchain:langchain@...` (yes, it's the driver name is `psycopg` not `psycopg3`, but it'll use `psycopg3`.\n",
|
||||
"* The schema of the embedding store and collection have been changed to make add_documents work correctly with user specified ids.\n",
|
||||
"* One has to pass an explicit connection object now.\n",
|
||||
"\n",
|
||||
@ -35,7 +35,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pip install -qU langchain_postgres"
|
||||
"pip install -qU langchain-postgres"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -43,7 +43,7 @@
|
||||
"id": "0dd87fcc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can run the following command to spin up a a postgres container with the `pgvector` extension:"
|
||||
"You can run the following command to spin up a postgres container with the `pgvector` extension:"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -63,7 +63,7 @@
|
||||
"source": [
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"There are no credentials needed to run this notebook, just make sure you downloaded the `langchain_postgres` package and correctly started the postgres container."
|
||||
"There are no credentials needed to run this notebook, just make sure you downloaded the `langchain-postgres` package and correctly started the postgres container."
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -10,7 +10,7 @@
|
||||
"\n",
|
||||
"This notebook goes over how to use the `PGVectorStore` API.\n",
|
||||
"\n",
|
||||
"The code lives in an integration package called: [langchain_postgres](https://github.com/langchain-ai/langchain-postgres/)."
|
||||
"The code lives in an integration package called: [langchain-postgres](https://github.com/langchain-ai/langchain-postgres/)."
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -61,7 +61,7 @@
|
||||
"source": [
|
||||
"## Credentials\n",
|
||||
"\n",
|
||||
"There are no credentials needed to run this notebook, just make sure you downloaded the `langchain_sqlserver` package\n",
|
||||
"There are no credentials needed to run this notebook, just make sure you downloaded the `langchain-sqlserver` package\n",
|
||||
"If you want to get best in-class automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
|
||||
]
|
||||
},
|
||||
|
@ -193,7 +193,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You should additionally not pass `ToolMessages` back to to a model if they are not preceded by an `AIMessage` with tool calls. For example, this will fail:"
|
||||
"You should additionally not pass `ToolMessages` back to a model if they are not preceded by an `AIMessage` with tool calls. For example, this will fail:"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -321,7 +321,7 @@ const FEATURE_TABLES = {
|
||||
},
|
||||
{
|
||||
name: "VertexAILLM",
|
||||
link: "google_vertexai",
|
||||
link: "google_vertex_ai_palm",
|
||||
package: "langchain-google-vertexai",
|
||||
apiLink: "https://python.langchain.com/api_reference/google_vertexai/llms/langchain_google_vertexai.llms.VertexAI.html"
|
||||
},
|
||||
@ -776,7 +776,7 @@ const FEATURE_TABLES = {
|
||||
},
|
||||
{
|
||||
name: "Reddit",
|
||||
link: "RedditPostsLoader",
|
||||
link: "reddit",
|
||||
loaderName: "RedditPostsLoader",
|
||||
apiLink: "https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.reddit.RedditPostsLoader.html"
|
||||
},
|
||||
|
@ -97,7 +97,7 @@ def merge_lists(left: Optional[list], *others: Optional[list]) -> Optional[list]
|
||||
to_merge = [
|
||||
i
|
||||
for i, e_left in enumerate(merged)
|
||||
if e_left["index"] == e["index"]
|
||||
if "index" in e_left and e_left["index"] == e["index"]
|
||||
]
|
||||
if to_merge:
|
||||
# TODO: Remove this once merge_dict is updated with special
|
||||
|
@ -1173,6 +1173,11 @@ def test_tool_message_str() -> None:
|
||||
("foo", [["bar"]], ["foo", "bar"]),
|
||||
(["foo"], ["bar"], ["foobar"]),
|
||||
(["foo"], [["bar"]], ["foo", "bar"]),
|
||||
(
|
||||
[{"text": "foo"}],
|
||||
[[{"index": 0, "text": "bar"}]],
|
||||
[{"text": "foo"}, {"index": 0, "text": "bar"}],
|
||||
),
|
||||
],
|
||||
)
|
||||
def test_merge_content(
|
||||
|
@ -6,15 +6,7 @@ import json
|
||||
import warnings
|
||||
from collections.abc import AsyncIterator, Iterator, Mapping, Sequence
|
||||
from operator import itemgetter
|
||||
from typing import (
|
||||
Any,
|
||||
Callable,
|
||||
Literal,
|
||||
Optional,
|
||||
TypedDict,
|
||||
Union,
|
||||
cast,
|
||||
)
|
||||
from typing import Any, Callable, Literal, Optional, TypedDict, Union, cast
|
||||
|
||||
from langchain_core._api import deprecated
|
||||
from langchain_core.callbacks import (
|
||||
@ -46,10 +38,7 @@ from langchain_core.messages import (
|
||||
ToolMessage,
|
||||
ToolMessageChunk,
|
||||
)
|
||||
from langchain_core.output_parsers import (
|
||||
JsonOutputParser,
|
||||
PydanticOutputParser,
|
||||
)
|
||||
from langchain_core.output_parsers import JsonOutputParser, PydanticOutputParser
|
||||
from langchain_core.output_parsers.base import OutputParserLike
|
||||
from langchain_core.output_parsers.openai_tools import (
|
||||
JsonOutputKeyToolsParser,
|
||||
@ -60,23 +49,13 @@ from langchain_core.output_parsers.openai_tools import (
|
||||
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
|
||||
from langchain_core.runnables import Runnable, RunnableMap, RunnablePassthrough
|
||||
from langchain_core.tools import BaseTool
|
||||
from langchain_core.utils import (
|
||||
from_env,
|
||||
get_pydantic_field_names,
|
||||
secret_from_env,
|
||||
)
|
||||
from langchain_core.utils import from_env, get_pydantic_field_names, secret_from_env
|
||||
from langchain_core.utils.function_calling import (
|
||||
convert_to_openai_function,
|
||||
convert_to_openai_tool,
|
||||
)
|
||||
from langchain_core.utils.pydantic import is_basemodel_subclass
|
||||
from pydantic import (
|
||||
BaseModel,
|
||||
ConfigDict,
|
||||
Field,
|
||||
SecretStr,
|
||||
model_validator,
|
||||
)
|
||||
from pydantic import BaseModel, ConfigDict, Field, SecretStr, model_validator
|
||||
from typing_extensions import Self
|
||||
|
||||
from langchain_groq.version import __version__
|
||||
@ -122,7 +101,7 @@ class ChatGroq(BaseChatModel):
|
||||
|
||||
See the `Groq documentation
|
||||
<https://console.groq.com/docs/reasoning#reasoning>`__ for more
|
||||
details and a list of supported reasoning models.
|
||||
details and a list of supported models.
|
||||
model_kwargs: Dict[str, Any]
|
||||
Holds any model parameters valid for create call not
|
||||
explicitly specified.
|
||||
@ -328,20 +307,15 @@ class ChatGroq(BaseChatModel):
|
||||
overridden in ``reasoning_effort``.
|
||||
|
||||
See the `Groq documentation <https://console.groq.com/docs/reasoning#reasoning>`__
|
||||
for more details and a list of supported reasoning models.
|
||||
for more details and a list of supported models.
|
||||
"""
|
||||
reasoning_effort: Optional[Literal["none", "default"]] = Field(default=None)
|
||||
reasoning_effort: Optional[str] = Field(default=None)
|
||||
"""The level of effort the model will put into reasoning. Groq will default to
|
||||
enabling reasoning if left undefined. If set to ``none``, ``reasoning_format`` will
|
||||
not apply and ``reasoning_content`` will not be returned.
|
||||
|
||||
- ``'none'``: Disable reasoning. The model will not use any reasoning tokens when
|
||||
generating a response.
|
||||
- ``'default'``: Enable reasoning.
|
||||
enabling reasoning if left undefined.
|
||||
|
||||
See the `Groq documentation
|
||||
<https://console.groq.com/docs/reasoning#options-for-reasoning-effort>`__ for more
|
||||
details and a list of models that support setting a reasoning effort.
|
||||
details and a list of options and models that support setting a reasoning effort.
|
||||
"""
|
||||
model_kwargs: dict[str, Any] = Field(default_factory=dict)
|
||||
"""Holds any model parameters valid for `create` call not explicitly specified."""
|
||||
@ -601,6 +575,11 @@ class ChatGroq(BaseChatModel):
|
||||
generation_info["system_fingerprint"] = system_fingerprint
|
||||
service_tier = params.get("service_tier") or self.service_tier
|
||||
generation_info["service_tier"] = service_tier
|
||||
reasoning_effort = (
|
||||
params.get("reasoning_effort") or self.reasoning_effort
|
||||
)
|
||||
if reasoning_effort:
|
||||
generation_info["reasoning_effort"] = reasoning_effort
|
||||
logprobs = choice.get("logprobs")
|
||||
if logprobs:
|
||||
generation_info["logprobs"] = logprobs
|
||||
@ -644,6 +623,11 @@ class ChatGroq(BaseChatModel):
|
||||
generation_info["system_fingerprint"] = system_fingerprint
|
||||
service_tier = params.get("service_tier") or self.service_tier
|
||||
generation_info["service_tier"] = service_tier
|
||||
reasoning_effort = (
|
||||
params.get("reasoning_effort") or self.reasoning_effort
|
||||
)
|
||||
if reasoning_effort:
|
||||
generation_info["reasoning_effort"] = reasoning_effort
|
||||
logprobs = choice.get("logprobs")
|
||||
if logprobs:
|
||||
generation_info["logprobs"] = logprobs
|
||||
@ -714,6 +698,9 @@ class ChatGroq(BaseChatModel):
|
||||
"system_fingerprint": response.get("system_fingerprint", ""),
|
||||
}
|
||||
llm_output["service_tier"] = params.get("service_tier") or self.service_tier
|
||||
reasoning_effort = params.get("reasoning_effort") or self.reasoning_effort
|
||||
if reasoning_effort:
|
||||
llm_output["reasoning_effort"] = reasoning_effort
|
||||
return ChatResult(generations=generations, llm_output=llm_output)
|
||||
|
||||
def _create_message_dicts(
|
||||
|
@ -6,9 +6,9 @@ build-backend = "pdm.backend"
|
||||
authors = []
|
||||
license = { text = "MIT" }
|
||||
requires-python = ">=3.9"
|
||||
dependencies = ["langchain-core<1.0.0,>=0.3.68", "groq<1,>=0.29.0"]
|
||||
dependencies = ["langchain-core<1.0.0,>=0.3.72", "groq<1,>=0.30.0"]
|
||||
name = "langchain-groq"
|
||||
version = "0.3.6"
|
||||
version = "0.3.7"
|
||||
description = "An integration package connecting Groq and LangChain"
|
||||
readme = "README.md"
|
||||
|
||||
@ -114,4 +114,4 @@ asyncio_mode = "auto"
|
||||
"tests/**/*.py" = [
|
||||
"S101", # Tests need assertions
|
||||
"S311", # Standard pseudo-random generators are not suitable for cryptographic purposes
|
||||
]
|
||||
]
|
||||
|
@ -24,7 +24,10 @@ from tests.unit_tests.fake.callbacks import (
|
||||
FakeCallbackHandlerWithChatStart,
|
||||
)
|
||||
|
||||
MODEL_NAME = "llama-3.3-70b-versatile"
|
||||
DEFAULT_MODEL_NAME = "openai/gpt-oss-20b"
|
||||
|
||||
# gpt-oss doesn't support `reasoning_effort`
|
||||
REASONING_MODEL_NAME = "deepseek-r1-distill-llama-70b"
|
||||
|
||||
|
||||
#
|
||||
@ -34,7 +37,7 @@ MODEL_NAME = "llama-3.3-70b-versatile"
|
||||
def test_invoke() -> None:
|
||||
"""Test Chat wrapper."""
|
||||
chat = ChatGroq(
|
||||
model=MODEL_NAME,
|
||||
model=DEFAULT_MODEL_NAME,
|
||||
temperature=0.7,
|
||||
base_url=None,
|
||||
groq_proxy=None,
|
||||
@ -55,7 +58,7 @@ def test_invoke() -> None:
|
||||
@pytest.mark.scheduled
|
||||
async def test_ainvoke() -> None:
|
||||
"""Test ainvoke tokens from ChatGroq."""
|
||||
chat = ChatGroq(model=MODEL_NAME, max_tokens=10)
|
||||
chat = ChatGroq(model=DEFAULT_MODEL_NAME, max_tokens=10)
|
||||
|
||||
result = await chat.ainvoke("Welcome to the Groqetship!", config={"tags": ["foo"]})
|
||||
assert isinstance(result, BaseMessage)
|
||||
@ -65,7 +68,7 @@ async def test_ainvoke() -> None:
|
||||
@pytest.mark.scheduled
|
||||
def test_batch() -> None:
|
||||
"""Test batch tokens from ChatGroq."""
|
||||
chat = ChatGroq(model=MODEL_NAME, max_tokens=10)
|
||||
chat = ChatGroq(model=DEFAULT_MODEL_NAME, max_tokens=10)
|
||||
|
||||
result = chat.batch(["Hello!", "Welcome to the Groqetship!"])
|
||||
for token in result:
|
||||
@ -76,7 +79,7 @@ def test_batch() -> None:
|
||||
@pytest.mark.scheduled
|
||||
async def test_abatch() -> None:
|
||||
"""Test abatch tokens from ChatGroq."""
|
||||
chat = ChatGroq(model=MODEL_NAME, max_tokens=10)
|
||||
chat = ChatGroq(model=DEFAULT_MODEL_NAME, max_tokens=10)
|
||||
|
||||
result = await chat.abatch(["Hello!", "Welcome to the Groqetship!"])
|
||||
for token in result:
|
||||
@ -87,7 +90,7 @@ async def test_abatch() -> None:
|
||||
@pytest.mark.scheduled
|
||||
async def test_stream() -> None:
|
||||
"""Test streaming tokens from Groq."""
|
||||
chat = ChatGroq(model=MODEL_NAME, max_tokens=10)
|
||||
chat = ChatGroq(model=DEFAULT_MODEL_NAME, max_tokens=10)
|
||||
|
||||
for token in chat.stream("Welcome to the Groqetship!"):
|
||||
assert isinstance(token, BaseMessageChunk)
|
||||
@ -97,7 +100,7 @@ async def test_stream() -> None:
|
||||
@pytest.mark.scheduled
|
||||
async def test_astream() -> None:
|
||||
"""Test streaming tokens from Groq."""
|
||||
chat = ChatGroq(model=MODEL_NAME, max_tokens=10)
|
||||
chat = ChatGroq(model=DEFAULT_MODEL_NAME, max_tokens=10)
|
||||
|
||||
full: Optional[BaseMessageChunk] = None
|
||||
chunks_with_token_counts = 0
|
||||
@ -136,7 +139,7 @@ async def test_astream() -> None:
|
||||
def test_generate() -> None:
|
||||
"""Test sync generate."""
|
||||
n = 1
|
||||
chat = ChatGroq(model=MODEL_NAME, max_tokens=10)
|
||||
chat = ChatGroq(model=DEFAULT_MODEL_NAME, max_tokens=10)
|
||||
message = HumanMessage(content="Hello", n=1)
|
||||
response = chat.generate([[message], [message]])
|
||||
assert isinstance(response, LLMResult)
|
||||
@ -155,7 +158,7 @@ def test_generate() -> None:
|
||||
async def test_agenerate() -> None:
|
||||
"""Test async generation."""
|
||||
n = 1
|
||||
chat = ChatGroq(model=MODEL_NAME, max_tokens=10, n=1)
|
||||
chat = ChatGroq(model=DEFAULT_MODEL_NAME, max_tokens=10, n=1)
|
||||
message = HumanMessage(content="Hello")
|
||||
response = await chat.agenerate([[message], [message]])
|
||||
assert isinstance(response, LLMResult)
|
||||
@ -178,7 +181,7 @@ def test_invoke_streaming() -> None:
|
||||
"""Test that streaming correctly invokes on_llm_new_token callback."""
|
||||
callback_handler = FakeCallbackHandler()
|
||||
chat = ChatGroq(
|
||||
model=MODEL_NAME,
|
||||
model=DEFAULT_MODEL_NAME,
|
||||
max_tokens=2,
|
||||
streaming=True,
|
||||
temperature=0,
|
||||
@ -195,7 +198,7 @@ async def test_agenerate_streaming() -> None:
|
||||
"""Test that streaming correctly invokes on_llm_new_token callback."""
|
||||
callback_handler = FakeCallbackHandlerWithChatStart()
|
||||
chat = ChatGroq(
|
||||
model=MODEL_NAME,
|
||||
model=DEFAULT_MODEL_NAME,
|
||||
max_tokens=10,
|
||||
streaming=True,
|
||||
temperature=0,
|
||||
@ -222,7 +225,7 @@ async def test_agenerate_streaming() -> None:
|
||||
def test_reasoning_output_invoke() -> None:
|
||||
"""Test reasoning output from ChatGroq with invoke."""
|
||||
chat = ChatGroq(
|
||||
model="deepseek-r1-distill-llama-70b",
|
||||
model=REASONING_MODEL_NAME,
|
||||
reasoning_format="parsed",
|
||||
)
|
||||
message = [
|
||||
@ -241,7 +244,7 @@ def test_reasoning_output_invoke() -> None:
|
||||
def test_reasoning_output_stream() -> None:
|
||||
"""Test reasoning output from ChatGroq with stream."""
|
||||
chat = ChatGroq(
|
||||
model="deepseek-r1-distill-llama-70b",
|
||||
model=REASONING_MODEL_NAME,
|
||||
reasoning_format="parsed",
|
||||
)
|
||||
message = [
|
||||
@ -271,7 +274,7 @@ def test_reasoning_output_stream() -> None:
|
||||
def test_reasoning_effort_none() -> None:
|
||||
"""Test that no reasoning output is returned if effort is set to none."""
|
||||
chat = ChatGroq(
|
||||
model="qwen/qwen3-32b", # Only qwen3 currently supports reasoning_effort
|
||||
model="qwen/qwen3-32b", # Only qwen3 currently supports reasoning_effort = none
|
||||
reasoning_effort="none",
|
||||
)
|
||||
message = HumanMessage(content="What is the capital of France?")
|
||||
@ -281,6 +284,79 @@ def test_reasoning_effort_none() -> None:
|
||||
assert "<think>" not in response.content and "<think/>" not in response.content
|
||||
|
||||
|
||||
@pytest.mark.parametrize("effort", ["low", "medium", "high"])
|
||||
def test_reasoning_effort_levels(effort: str) -> None:
|
||||
"""Test reasoning effort options for different levels."""
|
||||
# As of now, only the new gpt-oss models support `'low'`, `'medium'`, and `'high'`
|
||||
chat = ChatGroq(
|
||||
model=DEFAULT_MODEL_NAME,
|
||||
reasoning_effort=effort,
|
||||
)
|
||||
message = HumanMessage(content="What is the capital of France?")
|
||||
response = chat.invoke([message])
|
||||
assert isinstance(response, AIMessage)
|
||||
assert isinstance(response.content, str)
|
||||
assert len(response.content) > 0
|
||||
assert response.response_metadata.get("reasoning_effort") == effort
|
||||
|
||||
|
||||
@pytest.mark.parametrize("effort", ["low", "medium", "high"])
|
||||
def test_reasoning_effort_invoke_override(effort: str) -> None:
|
||||
"""Test that reasoning_effort in invoke() overrides class-level setting."""
|
||||
# Create chat with no reasoning effort at class level
|
||||
chat = ChatGroq(
|
||||
model=DEFAULT_MODEL_NAME,
|
||||
)
|
||||
message = HumanMessage(content="What is the capital of France?")
|
||||
|
||||
# Override reasoning_effort in invoke()
|
||||
response = chat.invoke([message], reasoning_effort=effort)
|
||||
assert isinstance(response, AIMessage)
|
||||
assert isinstance(response.content, str)
|
||||
assert len(response.content) > 0
|
||||
assert response.response_metadata.get("reasoning_effort") == effort
|
||||
|
||||
|
||||
def test_reasoning_effort_invoke_override_different_level() -> None:
|
||||
"""Test that reasoning_effort in invoke() overrides class-level setting."""
|
||||
# Create chat with reasoning effort at class level
|
||||
chat = ChatGroq(
|
||||
model=DEFAULT_MODEL_NAME, # openai/gpt-oss-20b supports reasoning_effort
|
||||
reasoning_effort="high",
|
||||
)
|
||||
message = HumanMessage(content="What is the capital of France?")
|
||||
|
||||
# Override reasoning_effort to 'low' in invoke()
|
||||
response = chat.invoke([message], reasoning_effort="low")
|
||||
assert isinstance(response, AIMessage)
|
||||
assert isinstance(response.content, str)
|
||||
assert len(response.content) > 0
|
||||
# Should reflect the overridden value, not the class-level setting
|
||||
assert response.response_metadata.get("reasoning_effort") == "low"
|
||||
|
||||
|
||||
def test_reasoning_effort_streaming() -> None:
|
||||
"""Test that reasoning_effort is captured in streaming response metadata."""
|
||||
chat = ChatGroq(
|
||||
model=DEFAULT_MODEL_NAME,
|
||||
reasoning_effort="medium",
|
||||
)
|
||||
message = HumanMessage(content="What is the capital of France?")
|
||||
|
||||
chunks = list(chat.stream([message]))
|
||||
assert len(chunks) > 0
|
||||
|
||||
# Find the final chunk with finish_reason
|
||||
final_chunk = None
|
||||
for chunk in chunks:
|
||||
if chunk.response_metadata.get("finish_reason"):
|
||||
final_chunk = chunk
|
||||
break
|
||||
|
||||
assert final_chunk is not None
|
||||
assert final_chunk.response_metadata.get("reasoning_effort") == "medium"
|
||||
|
||||
|
||||
#
|
||||
# Misc tests
|
||||
#
|
||||
@ -300,7 +376,7 @@ def test_streaming_generation_info() -> None:
|
||||
|
||||
callback = _FakeCallback()
|
||||
chat = ChatGroq(
|
||||
model=MODEL_NAME,
|
||||
model="llama-3.1-8b-instant", # Use a model that properly streams content
|
||||
max_tokens=2,
|
||||
temperature=0,
|
||||
callbacks=[callback],
|
||||
@ -314,7 +390,7 @@ def test_streaming_generation_info() -> None:
|
||||
|
||||
def test_system_message() -> None:
|
||||
"""Test ChatGroq wrapper with system message."""
|
||||
chat = ChatGroq(model=MODEL_NAME, max_tokens=10)
|
||||
chat = ChatGroq(model=DEFAULT_MODEL_NAME, max_tokens=10)
|
||||
system_message = SystemMessage(content="You are to chat with the user.")
|
||||
human_message = HumanMessage(content="Hello")
|
||||
response = chat.invoke([system_message, human_message])
|
||||
@ -324,7 +400,7 @@ def test_system_message() -> None:
|
||||
|
||||
def test_tool_choice() -> None:
|
||||
"""Test that tool choice is respected."""
|
||||
llm = ChatGroq(model=MODEL_NAME)
|
||||
llm = ChatGroq(model=DEFAULT_MODEL_NAME)
|
||||
|
||||
class MyTool(BaseModel):
|
||||
name: str
|
||||
@ -332,7 +408,7 @@ def test_tool_choice() -> None:
|
||||
|
||||
with_tool = llm.bind_tools([MyTool], tool_choice="MyTool")
|
||||
|
||||
resp = with_tool.invoke("Who was the 27 year old named Erick?")
|
||||
resp = with_tool.invoke("Who was the 27 year old named Erick? Use the tool.")
|
||||
assert isinstance(resp, AIMessage)
|
||||
assert resp.content == "" # should just be tool call
|
||||
tool_calls = resp.additional_kwargs["tool_calls"]
|
||||
@ -354,7 +430,7 @@ def test_tool_choice() -> None:
|
||||
|
||||
def test_tool_choice_bool() -> None:
|
||||
"""Test that tool choice is respected just passing in True."""
|
||||
llm = ChatGroq(model=MODEL_NAME)
|
||||
llm = ChatGroq(model=DEFAULT_MODEL_NAME)
|
||||
|
||||
class MyTool(BaseModel):
|
||||
name: str
|
||||
@ -362,7 +438,7 @@ def test_tool_choice_bool() -> None:
|
||||
|
||||
with_tool = llm.bind_tools([MyTool], tool_choice=True)
|
||||
|
||||
resp = with_tool.invoke("Who was the 27 year old named Erick?")
|
||||
resp = with_tool.invoke("Who was the 27 year old named Erick? Use the tool.")
|
||||
assert isinstance(resp, AIMessage)
|
||||
assert resp.content == "" # should just be tool call
|
||||
tool_calls = resp.additional_kwargs["tool_calls"]
|
||||
@ -379,7 +455,7 @@ def test_tool_choice_bool() -> None:
|
||||
@pytest.mark.xfail(reason="Groq tool_choice doesn't currently force a tool call")
|
||||
def test_streaming_tool_call() -> None:
|
||||
"""Test that tool choice is respected."""
|
||||
llm = ChatGroq(model=MODEL_NAME)
|
||||
llm = ChatGroq(model=DEFAULT_MODEL_NAME)
|
||||
|
||||
class MyTool(BaseModel):
|
||||
name: str
|
||||
@ -417,7 +493,7 @@ def test_streaming_tool_call() -> None:
|
||||
@pytest.mark.xfail(reason="Groq tool_choice doesn't currently force a tool call")
|
||||
async def test_astreaming_tool_call() -> None:
|
||||
"""Test that tool choice is respected."""
|
||||
llm = ChatGroq(model=MODEL_NAME)
|
||||
llm = ChatGroq(model=DEFAULT_MODEL_NAME)
|
||||
|
||||
class MyTool(BaseModel):
|
||||
name: str
|
||||
@ -462,7 +538,9 @@ def test_json_mode_structured_output() -> None:
|
||||
setup: str = Field(description="question to set up a joke")
|
||||
punchline: str = Field(description="answer to resolve the joke")
|
||||
|
||||
chat = ChatGroq(model=MODEL_NAME).with_structured_output(Joke, method="json_mode")
|
||||
chat = ChatGroq(model=DEFAULT_MODEL_NAME).with_structured_output(
|
||||
Joke, method="json_mode"
|
||||
)
|
||||
result = chat.invoke(
|
||||
"Tell me a joke about cats, respond in JSON with `setup` and `punchline` keys"
|
||||
)
|
||||
@ -476,38 +554,38 @@ def test_setting_service_tier_class() -> None:
|
||||
message = HumanMessage(content="Welcome to the Groqetship")
|
||||
|
||||
# Initialization
|
||||
chat = ChatGroq(model=MODEL_NAME, service_tier="auto")
|
||||
chat = ChatGroq(model=DEFAULT_MODEL_NAME, service_tier="auto")
|
||||
assert chat.service_tier == "auto"
|
||||
response = chat.invoke([message])
|
||||
assert isinstance(response, BaseMessage)
|
||||
assert isinstance(response.content, str)
|
||||
assert response.response_metadata.get("service_tier") == "auto"
|
||||
|
||||
chat = ChatGroq(model=MODEL_NAME, service_tier="flex")
|
||||
chat = ChatGroq(model=DEFAULT_MODEL_NAME, service_tier="flex")
|
||||
assert chat.service_tier == "flex"
|
||||
response = chat.invoke([message])
|
||||
assert response.response_metadata.get("service_tier") == "flex"
|
||||
|
||||
chat = ChatGroq(model=MODEL_NAME, service_tier="on_demand")
|
||||
chat = ChatGroq(model=DEFAULT_MODEL_NAME, service_tier="on_demand")
|
||||
assert chat.service_tier == "on_demand"
|
||||
response = chat.invoke([message])
|
||||
assert response.response_metadata.get("service_tier") == "on_demand"
|
||||
|
||||
chat = ChatGroq(model=MODEL_NAME)
|
||||
chat = ChatGroq(model=DEFAULT_MODEL_NAME)
|
||||
assert chat.service_tier == "on_demand"
|
||||
response = chat.invoke([message])
|
||||
assert response.response_metadata.get("service_tier") == "on_demand"
|
||||
|
||||
with pytest.raises(ValueError):
|
||||
ChatGroq(model=MODEL_NAME, service_tier=None) # type: ignore[arg-type]
|
||||
ChatGroq(model=DEFAULT_MODEL_NAME, service_tier=None) # type: ignore[arg-type]
|
||||
with pytest.raises(ValueError):
|
||||
ChatGroq(model=MODEL_NAME, service_tier="invalid") # type: ignore[arg-type]
|
||||
ChatGroq(model=DEFAULT_MODEL_NAME, service_tier="invalid") # type: ignore[arg-type]
|
||||
|
||||
|
||||
def test_setting_service_tier_request() -> None:
|
||||
"""Test setting service tier defined at request level."""
|
||||
message = HumanMessage(content="Welcome to the Groqetship")
|
||||
chat = ChatGroq(model=MODEL_NAME)
|
||||
chat = ChatGroq(model=DEFAULT_MODEL_NAME)
|
||||
|
||||
response = chat.invoke(
|
||||
[message],
|
||||
@ -537,7 +615,7 @@ def test_setting_service_tier_request() -> None:
|
||||
|
||||
# If an `invoke` call is made with no service tier, we fall back to the class level
|
||||
# setting
|
||||
chat = ChatGroq(model=MODEL_NAME, service_tier="auto")
|
||||
chat = ChatGroq(model=DEFAULT_MODEL_NAME, service_tier="auto")
|
||||
response = chat.invoke(
|
||||
[message],
|
||||
)
|
||||
@ -564,7 +642,7 @@ def test_setting_service_tier_request() -> None:
|
||||
|
||||
def test_setting_service_tier_streaming() -> None:
|
||||
"""Test service tier settings for streaming calls."""
|
||||
chat = ChatGroq(model=MODEL_NAME, service_tier="flex")
|
||||
chat = ChatGroq(model=DEFAULT_MODEL_NAME, service_tier="flex")
|
||||
chunks = list(chat.stream("Why is the sky blue?", service_tier="auto"))
|
||||
|
||||
assert chunks[-1].response_metadata.get("service_tier") == "auto"
|
||||
@ -572,7 +650,7 @@ def test_setting_service_tier_streaming() -> None:
|
||||
|
||||
async def test_setting_service_tier_request_async() -> None:
|
||||
"""Test async setting of service tier at the request level."""
|
||||
chat = ChatGroq(model=MODEL_NAME, service_tier="flex")
|
||||
chat = ChatGroq(model=DEFAULT_MODEL_NAME, service_tier="flex")
|
||||
response = await chat.ainvoke("Hello!", service_tier="on_demand")
|
||||
|
||||
assert response.response_metadata.get("service_tier") == "on_demand"
|
||||
|
@ -1,5 +1,5 @@
|
||||
version = 1
|
||||
revision = 2
|
||||
revision = 3
|
||||
requires-python = ">=3.9"
|
||||
resolution-markers = [
|
||||
"python_full_version >= '3.13' and platform_python_implementation == 'PyPy'",
|
||||
@ -226,7 +226,7 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "groq"
|
||||
version = "0.29.0"
|
||||
version = "0.30.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "anyio" },
|
||||
@ -236,9 +236,9 @@ dependencies = [
|
||||
{ name = "sniffio" },
|
||||
{ name = "typing-extensions" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/8c/d6/db0c61bad6ff6a2ba7083a943c62790f37969cb337d96a5a914cd3ac4311/groq-0.29.0.tar.gz", hash = "sha256:109dc4d696c05d44e4c2cd157652c4c6600c3e96f093f6e158facb5691e37847", size = 133989, upload-time = "2025-06-25T23:40:11.508Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/a9/b1/72ca20dc9b977b7f604648e8944c77b267bddeb90d8e16bda0cf0e397844/groq-0.30.0.tar.gz", hash = "sha256:919466e48fcbebef08fed3f71debb0f96b0ea8d2ec77842c384aa843019f6e2c", size = 134928, upload-time = "2025-07-11T20:28:36.583Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/a2/0b/ef7a92ec5ec23a7012975ed59ca3cef541d50a9f0d2dea947fe2723d011f/groq-0.29.0-py3-none-any.whl", hash = "sha256:03515ec46be1ef1feef0cd9d876b6f30a39ee2742e76516153d84acd7c97f23a", size = 130814, upload-time = "2025-06-25T23:40:10.391Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/19/b8/5b90edf9fbd795597220e3d1b5534d845e69a73ffe1fdeb967443ed2a6cf/groq-0.30.0-py3-none-any.whl", hash = "sha256:6d9609a7778ba56432f45c1bac21b005f02c6c0aca9c1c094e65536f162c1e83", size = 131056, upload-time = "2025-07-11T20:28:35.591Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -389,7 +389,7 @@ typing = [
|
||||
|
||||
[[package]]
|
||||
name = "langchain-groq"
|
||||
version = "0.3.6"
|
||||
version = "0.3.7"
|
||||
source = { editable = "." }
|
||||
dependencies = [
|
||||
{ name = "groq" },
|
||||
@ -425,7 +425,7 @@ typing = [
|
||||
|
||||
[package.metadata]
|
||||
requires-dist = [
|
||||
{ name = "groq", specifier = ">=0.29.0,<1" },
|
||||
{ name = "groq", specifier = ">=0.30.0,<1" },
|
||||
{ name = "langchain-core", editable = "../../core" },
|
||||
]
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user