Mirror of https://github.com/hwchase17/langchain.git, synced 2026-02-04 00:00:34 +00:00.

Compare commits: harrison/m ... cc/openai_ (48 commits)
Commits in this range (by SHA1): f4ff68e52e, 844b8b87d7, 61e329637b, 68bc73fefc, b8fed06409, ef9b5a9e18, 5e9eb19a83, c409f723a2, 3d9e694f73, c921d08b18, 3f653011e6, ee13a3b6fa, 61129557c0, 4899857042, 041b196145, dd8057a034, b94f23883f, 2d031031e3, 0bb7a823c5, df0a8562a9, e9b0b84675, 79bc8259e5, 7c0d1cb324, eb8d32aff2, 78d036a093, 6572656cd2, e1f034c795, b1a02f971b, b24f90dabe, 3c19cafab0, 7c1b59d26a, 3460c48af6, 7e740e5e1f, 7ab615409c, ce369125f3, 679a9e7c8f, 26038608a4, 7546372461, 72bb858eec, 8da2bec1c3, b8d0a95163, 172e1cdf29, 6004ba7a0d, e928672306, 67fc58011a, a3a95805eb, 354f5d1c7a, 0d66cc2638
````diff
@@ -5,26 +5,31 @@ This project includes a [dev container](https://containers.dev/), which lets you
 You can use the dev container configuration in this folder to build and run the app without needing to install any of its tools locally! You can use it in [GitHub Codespaces](https://github.com/features/codespaces) or the [VS Code Dev Containers extension](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers).
 
 ## GitHub Codespaces
 
 [](https://codespaces.new/langchain-ai/langchain)
 
 You may use the button above, or follow these steps to open this repo in a Codespace:
 
-1. Click the **Code** drop-down menu at the top of https://github.com/langchain-ai/langchain.
+1. Click the **Code** drop-down menu at the top of <https://github.com/langchain-ai/langchain>.
 1. Click on the **Codespaces** tab.
 1. Click **Create codespace on master**.
 
 For more info, check out the [GitHub documentation](https://docs.github.com/en/free-pro-team@latest/github/developing-online-with-codespaces/creating-a-codespace#creating-a-codespace).
 
 ## VS Code Dev Containers
 
 [](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/langchain-ai/langchain)
 
-Note: If you click the link above you will open the main repo (langchain-ai/langchain) and not your local cloned repo. This is fine if you only want to run and test the library, but if you want to contribute you can use the link below and replace with your username and cloned repo name:
-
-```
-https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/<yourusername>/<yourclonedreponame>
-```
+> [!NOTE]
+> If you click the link above you will open the main repo (`langchain-ai/langchain`) and *not* your local cloned repo. This is fine if you only want to run and test the library, but if you want to contribute you can use the link below and replace with your username and cloned repo name:
+
+```txt
+https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/<YOUR_USERNAME>/<YOUR_CLONED_REPO_NAME>
+```
 
 Then you will have a local cloned repo where you can contribute and then create pull requests.
 
-If you already have VS Code and Docker installed, you can use the button above to get started. This will cause VS Code to automatically install the Dev Containers extension if needed, clone the source code into a container volume, and spin up a dev container for use.
+If you already have VS Code and Docker installed, you can use the button above to get started. This will use VSCode to automatically install the Dev Containers extension if needed, clone the source code into a container volume, and spin up a dev container for use.
 
 Alternatively you can also follow these steps to open this repo in a container using the VS Code Dev Containers extension:
@@ -40,5 +45,5 @@ You can learn more in the [Dev Containers documentation](https://code.visualstud
 
 ## Tips and tricks
 
-* If you are working with the same repository folder in a container and Windows, you'll want consistent line endings (otherwise you may see hundreds of changes in the SCM view). The `.gitattributes` file in the root of this repo will disable line ending conversion and should prevent this. See [tips and tricks](https://code.visualstudio.com/docs/devcontainers/tips-and-tricks#_resolving-git-line-ending-issues-in-containers-resulting-in-many-modified-files) for more info.
-* If you'd like to review the contents of the image used in this dev container, you can check it out in the [devcontainers/images](https://github.com/devcontainers/images/tree/main/src/python) repo.
+- If you are working with the same repository folder in a container and Windows, you'll want consistent line endings (otherwise you may see hundreds of changes in the SCM view). The `.gitattributes` file in the root of this repo will disable line ending conversion and should prevent this. See [tips and tricks](https://code.visualstudio.com/docs/devcontainers/tips-and-tricks#_resolving-git-line-ending-issues-in-containers-resulting-in-many-modified-files) for more info.
+- If you'd like to review the contents of the image used in this dev container, you can check it out in the [devcontainers/images](https://github.com/devcontainers/images/tree/main/src/python) repo.
````
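For reference, the conversion-disabling rule such a `.gitattributes` file usually carries is a single pattern, e.g. `* -text` to turn line-ending conversion off outright (illustrative only; the repo's actual `.gitattributes` contents are not shown in this diff).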
````diff
@@ -1,36 +1,58 @@
 // For format details, see https://aka.ms/devcontainer.json. For config options, see the
 // README at: https://github.com/devcontainers/templates/tree/main/src/docker-existing-docker-compose
 {
-    // Name for the dev container
-    "name": "langchain",
-
-    // Point to a Docker Compose file
-    "dockerComposeFile": "./docker-compose.yaml",
-
-    // Required when using Docker Compose. The name of the service to connect to once running
-    "service": "langchain",
-
-    // The optional 'workspaceFolder' property is the path VS Code should open by default when
-    // connected. This is typically a file mount in .devcontainer/docker-compose.yml
-    "workspaceFolder": "/workspaces/langchain",
-
-    // Prevent the container from shutting down
-    "overrideCommand": true
-
-    // Features to add to the dev container. More info: https://containers.dev/features
-    // "features": {
-    //     "ghcr.io/devcontainers-contrib/features/poetry:2": {}
-    // }
-
-    // Use 'forwardPorts' to make a list of ports inside the container available locally.
-    // "forwardPorts": [],
-
-    // Uncomment the next line to run commands after the container is created.
-    // "postCreateCommand": "cat /etc/os-release",
-
-    // Configure tool-specific properties.
-    // "customizations": {},
-
-    // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
-    // "remoteUser": "root"
+    // Name for the dev container
+    "name": "langchain",
+    // Point to a Docker Compose file
+    "dockerComposeFile": "./docker-compose.yaml",
+    // Required when using Docker Compose. The name of the service to connect to once running
+    "service": "langchain",
+    // The optional 'workspaceFolder' property is the path VS Code should open by default when
+    // connected. This is typically a file mount in .devcontainer/docker-compose.yml
+    "workspaceFolder": "/workspaces/langchain",
+    "mounts": [
+        "source=langchain-workspaces,target=/workspaces/langchain,type=volume"
+    ],
+    // Prevent the container from shutting down
+    "overrideCommand": true,
+    // Features to add to the dev container. More info: https://containers.dev/features
+    "features": {
+        "ghcr.io/devcontainers/features/git:1": {},
+        "ghcr.io/devcontainers/features/github-cli:1": {}
+    },
+    "containerEnv": {
+        "UV_LINK_MODE": "copy"
+    },
+    // Use 'forwardPorts' to make a list of ports inside the container available locally.
+    // "forwardPorts": [],
+    // Run commands after the container is created
+    "postCreateCommand": "uv sync && echo 'LangChain (Python) dev environment ready!'",
+    // Configure tool-specific properties.
+    "customizations": {
+        "vscode": {
+            "extensions": [
+                "ms-python.python",
+                "ms-python.debugpy",
+                "ms-python.mypy-type-checker",
+                "ms-python.isort",
+                "unifiedjs.vscode-mdx",
+                "davidanson.vscode-markdownlint",
+                "ms-toolsai.jupyter",
+                "GitHub.copilot",
+                "GitHub.copilot-chat"
+            ],
+            "settings": {
+                "python.defaultInterpreterPath": ".venv/bin/python",
+                "python.formatting.provider": "none",
+                "[python]": {
+                    "editor.formatOnSave": true,
+                    "editor.codeActionsOnSave": {
+                        "source.organizeImports": true
+                    }
+                }
+            }
+        }
+    },
+    // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
+    // "remoteUser": "root"
 }
````
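A configuration like the one above also works outside VS Code: assuming the Dev Containers CLI is installed (`npm install -g @devcontainers/cli`), running `devcontainer up --workspace-folder .` from the repo root builds and starts the same container headlessly.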
````diff
@@ -4,26 +4,9 @@ services:
     build:
       dockerfile: libs/langchain/dev.Dockerfile
       context: ..
-    volumes:
-      # Update this to wherever you want VS Code to mount the folder of your project
-      - ..:/workspaces/langchain:cached
     networks:
       - langchain-network
-    # environment:
-    #   MONGO_ROOT_USERNAME: root
-    #   MONGO_ROOT_PASSWORD: example123
-    # depends_on:
-    #   - mongo
-  # mongo:
-  #   image: mongo
-  #   restart: unless-stopped
-  #   environment:
-  #     MONGO_INITDB_ROOT_USERNAME: root
-  #     MONGO_INITDB_ROOT_PASSWORD: example123
-  #   ports:
-  #     - "27017:27017"
-  #   networks:
-  #     - langchain-network
 
 networks:
   langchain-network:
````
`.editorconfig` (new file, 52 lines)

````diff
@@ -0,0 +1,52 @@
+# top-most EditorConfig file
+root = true
+
+# All files
+[*]
+charset = utf-8
+end_of_line = lf
+insert_final_newline = true
+trim_trailing_whitespace = true
+
+# Python files
+[*.py]
+indent_style = space
+indent_size = 4
+max_line_length = 88
+
+# JSON files
+[*.json]
+indent_style = space
+indent_size = 2
+
+# YAML files
+[*.{yml,yaml}]
+indent_style = space
+indent_size = 2
+
+# Markdown files
+[*.md]
+indent_style = space
+indent_size = 2
+trim_trailing_whitespace = false
+
+# Configuration files
+[*.{toml,ini,cfg}]
+indent_style = space
+indent_size = 4
+
+# Shell scripts
+[*.sh]
+indent_style = space
+indent_size = 2
+
+# Makefile
+[Makefile]
+indent_style = tab
+indent_size = 4
+
+# Jupyter notebooks
+[*.ipynb]
+# Jupyter may include trailing whitespace in cell
+# outputs that's semantically meaningful
+trim_trailing_whitespace = false
````
`.github/copilot-instructions.md` (vendored, 328 lines changed)

````diff
@@ -1,72 +1,116 @@
-### 1. Avoid Breaking Changes (Stable Public Interfaces)
+# Global Development Guidelines for LangChain Projects
 
-* Carefully preserve **function signatures**, argument positions, and names for any exported/public methods.
-* Be cautious when **renaming**, **removing**, or **reordering** arguments — even small changes can break downstream consumers.
-* Use keyword-only arguments or clearly mark experimental features to isolate unstable APIs.
+## Core Development Principles
 
-Bad:
+### 1. Maintain Stable Public Interfaces ⚠️ CRITICAL
+
+**Always attempt to preserve function signatures, argument positions, and names for exported/public methods.**
+
+❌ **Bad - Breaking Change:**
 
 ```python
 def get_user(id, verbose=False):  # Changed from `user_id`
     pass
 ```
 
-Good:
+✅ **Good - Stable Interface:**
 
 ```python
-def get_user(user_id: str, verbose: bool = False):  # Maintains stable interface
+def get_user(user_id: str, verbose: bool = False) -> User:
+    """Retrieve user by ID with optional verbose output."""
     pass
 ```
 
-🧠 *Ask yourself:* “Would this change break someone's code if they used it last week?”
-
----
+**Before making ANY changes to public APIs:**
+
+- Check if the function/class is exported in `__init__.py`
+- Look for existing usage patterns in tests and examples
+- Use keyword-only arguments for new parameters: `*, new_param: str = "default"`
+- Mark experimental features clearly with docstring warnings (using reStructuredText, like `.. warning::`)
+
+🧠 *Ask yourself:* "Would this change break someone's code if they used it last week?"
````
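The checklist above mentions keyword-only parameters and reStructuredText warnings; as a hedged sketch of both at once (the `fetch_mode` parameter and the `User` stub are hypothetical illustrations, not a LangChain API):

```python
class User: ...  # hypothetical stand-in for a real model


def get_user(user_id: str, verbose: bool = False, *, fetch_mode: str = "lazy") -> User:
    """Retrieve a user by ID.

    .. warning::
        ``fetch_mode`` is experimental and may change without notice.

    Args:
        user_id: Identifier of the user to retrieve.
        verbose: Whether to emit verbose output.
        fetch_mode: Experimental fetch strategy (``'lazy'`` or ``'eager'``).
    """
    ...
```

Because `fetch_mode` is keyword-only, existing positional call sites keep working unchanged.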
````diff
-### 2. Simplify Code and Use Clear Variable Names
-
-* Prefer descriptive, **self-explanatory variable names**. Avoid overly short or cryptic identifiers.
-* Break up overly long or deeply nested functions for **readability and maintainability**.
-* Avoid unnecessary abstraction or premature optimization.
-* All generated Python code must include type hints and return types.
+### 2. Code Quality Standards
 
-Bad:
+**All Python code MUST include type hints and return types.**
+
+❌ **Bad:**
 
 ```python
 def p(u, d):
     return [x for x in u if x not in d]
 ```
 
-Good:
+✅ **Good:**
 
 ```python
-def filter_unknown_users(users: List[str], known_users: Set[str]) -> List[str]:
+def filter_unknown_users(users: list[str], known_users: set[str]) -> list[str]:
+    """Filter out users that are not in the known users set.
+
+    Args:
+        users: List of user identifiers to filter.
+        known_users: Set of known/valid user identifiers.
+
+    Returns:
+        List of users that are not in the known_users set.
+    """
     return [user for user in users if user not in known_users]
 ```
 
----
+**Style Requirements:**
 
-### 3. Ensure Unit Tests Cover New and Updated Functionality
+- Use descriptive, **self-explanatory variable names**. Avoid overly short or cryptic identifiers.
+- Attempt to break up complex functions (>20 lines) into smaller, focused functions where it makes sense
+- Avoid unnecessary abstraction or premature optimization
+- Follow existing patterns in the codebase you're modifying
 
-* Every new feature or bugfix should be **covered by a unit test**.
-* Test edge cases and failure conditions.
-* Use `pytest`, `unittest`, or the project's existing framework consistently.
+### 3. Testing Requirements
 
-Checklist:
+**Every new feature or bugfix MUST be covered by unit tests.**
 
-* [ ] Does the test suite fail if your new logic is broken?
-* [ ] Are all expected behaviors exercised (happy path, invalid input, etc)?
-* [ ] Do tests use fixtures or mocks where needed?
+**Test Organization:**
 
----
+- Unit tests: `tests/unit_tests/` (no network calls allowed)
+- Integration tests: `tests/integration_tests/` (network calls permitted)
+- Use `pytest` as the testing framework
 
-### 4. Look for Suspicious or Risky Code
+**Test Quality Checklist:**
 
-* Watch out for:
+- [ ] Tests fail when your new logic is broken
+- [ ] Happy path is covered
+- [ ] Edge cases and error conditions are tested
+- [ ] Use fixtures/mocks for external dependencies
+- [ ] Tests are deterministic (no flaky tests)
 
-  * Use of `eval()`, `exec()`, or `pickle` on user-controlled input.
-  * Silent failure modes (`except: pass`).
-  * Unreachable code or commented-out blocks.
-  * Race conditions or resource leaks (file handles, sockets, threads).
+Checklist questions:
 
-Bad:
+- [ ] Does the test suite fail if your new logic is broken?
+- [ ] Are all expected behaviors exercised (happy path, invalid input, etc)?
+- [ ] Do tests use fixtures or mocks where needed?
+
+```python
+def test_filter_unknown_users():
+    """Test filtering unknown users from a list."""
+    users = ["alice", "bob", "charlie"]
+    known_users = {"alice", "bob"}
+
+    result = filter_unknown_users(users, known_users)
+
+    assert result == ["charlie"]
+    assert len(result) == 1
+```
+
+### 4. Security and Risk Assessment
+
+**Security Checklist:**
+
+- No `eval()`, `exec()`, or `pickle` on user-controlled input
+- Proper exception handling (no bare `except:`) and use a `msg` variable for error messages
+- Remove unreachable/commented code before committing
+- Race conditions or resource leaks (file handles, sockets, threads).
+- Ensure proper resource cleanup (file handles, connections)
+
+❌ **Bad:**
 
 ```python
 def load_config(path):
@@ -74,7 +118,7 @@ def load_config(path):
     return eval(f.read())  # ⚠️ Never eval config
 ```
 
-Good:
+✅ **Good:**
 
 ```python
 import json
@@ -84,68 +128,198 @@ def load_config(path: str) -> dict:
     return json.load(f)
 ```
````
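The ✅ example above is split across two diff hunks; assuming the elided middle is only the `with open(...)` line, a stitched-together, self-contained version of the safe loader reads:

```python
import json


def load_config(path: str) -> dict:
    """Load configuration from a JSON file.

    Using ``json.load`` parses data only; unlike ``eval``, it cannot
    execute arbitrary code embedded in the config file.
    """
    with open(path) as f:
        return json.load(f)
```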
````diff
 ---
 
-### 5. Use Google-Style Docstrings (with Args section)
+### 5. Documentation Standards
 
-* All public functions should include a **Google-style docstring**.
-* Include an `Args:` section where relevant.
-* Types should NOT be written in the docstring — use type hints instead.
+**Use Google-style docstrings with Args section for all public functions.**
 
-Bad:
+❌ **Insufficient Documentation:**
 
 ```python
 def send_email(to, msg):
     """Send an email to a recipient."""
 ```
 
-Good:
+✅ **Complete Documentation:**
 
 ```python
-def send_email(to: str, msg: str) -> None:
+def send_email(to: str, msg: str, *, priority: str = "normal") -> bool:
     """
-    Sends an email to a recipient.
+    Send an email to a recipient with specified priority.
 
     Args:
         to: The email address of the recipient.
-        msg: The message body.
+        msg: The message body to send.
+        priority: Email priority level (``'low'``, ``'normal'``, ``'high'``).
+
+    Returns:
+        True if email was sent successfully, False otherwise.
+
+    Raises:
+        InvalidEmailError: If the email address format is invalid.
+        SMTPConnectionError: If unable to connect to email server.
     """
 ```
 
-📌 *Tip:* Keep descriptions concise but clear. Only document return values if non-obvious.
+**Documentation Guidelines:**
 
+- Types go in function signatures, NOT in docstrings
+- Focus on "why" rather than "what" in descriptions
+- Document all parameters, return values, and exceptions
+- Keep descriptions concise but clear
+- Use reStructuredText for docstrings to enable rich formatting
+
+### 6. Architectural Improvements
+
+**When you encounter code that could be improved, suggest better designs:**
+
+❌ **Poor Design:**
+
+```python
+def process_data(data, db_conn, email_client, logger):
+    # Function doing too many things
+    validated = validate_data(data)
+    result = db_conn.save(validated)
+    email_client.send_notification(result)
+    logger.log(f"Processed {len(data)} items")
+    return result
+```
+
+✅ **Better Design:**
+
+```python
+@dataclass
+class ProcessingResult:
+    """Result of data processing operation."""
+    items_processed: int
+    success: bool
+    errors: List[str] = field(default_factory=list)
+
+class DataProcessor:
+    """Handles data validation, storage, and notification."""
+
+    def __init__(self, db_conn: Database, email_client: EmailClient):
+        self.db = db_conn
+        self.email = email_client
+
+    def process(self, data: List[dict]) -> ProcessingResult:
+        """Process and store data with notifications."""
+        validated = self._validate_data(data)
+        result = self.db.save(validated)
+        self._notify_completion(result)
+        return result
+```
+
+**Design Improvement Areas:**
+
+If there's a **cleaner**, **more scalable**, or **simpler** design, highlight it and suggest improvements that would:
+
+- Reduce code duplication through shared utilities
+- Make unit testing easier
+- Improve separation of concerns (single responsibility)
+- Make unit testing easier through dependency injection
+- Add clarity without adding complexity
+- Prefer dataclasses for structured data
+
+## Development Tools & Commands
+
+### Package Management
+
+```bash
+# Add package
+uv add package-name
+
+# Sync project dependencies
+uv sync
+uv lock
+```
+
+### Testing
+
+```bash
+# Run unit tests (no network)
+make test
+
+# Don't run integration tests, as API keys must be set
+
+# Run specific test file
+uv run --group test pytest tests/unit_tests/test_specific.py
+```
+
+### Code Quality
+
+```bash
+# Lint code
+make lint
+
+# Format code
+make format
+
+# Type checking
+uv run --group lint mypy .
+```
+
+### Dependency Management Patterns
+
+**Local Development Dependencies:**
+
+```toml
+[tool.uv.sources]
+langchain-core = { path = "../core", editable = true }
+langchain-tests = { path = "../standard-tests", editable = true }
+```
+
+**For tools, use the `@tool` decorator from `langchain_core.tools`:**
+
+```python
+from langchain_core.tools import tool
+
+@tool
+def search_database(query: str) -> str:
+    """Search the database for relevant information.
+
+    Args:
+        query: The search query string.
+    """
+    # Implementation here
+    return results
+```
+
+## Commit Standards
+
+**Use Conventional Commits format for PR titles:**
+
+- `feat(core): add multi-tenant support`
+- `fix(cli): resolve flag parsing error`
+- `docs: update API usage examples`
+- `docs(openai): update API usage examples`
+
+## Framework-Specific Guidelines
+
+- Follow the existing patterns in `langchain-core` for base abstractions
+- Use `langchain_core.callbacks` for execution tracking
+- Implement proper streaming support where applicable
+- Avoid deprecated components like legacy `LLMChain`
+
+### Partner Integrations
+
+- Follow the established patterns in existing partner libraries
+- Implement standard interfaces (`BaseChatModel`, `BaseEmbeddings`, etc.)
+- Include comprehensive integration tests
+- Document API key requirements and authentication
 
 ---
 
-### 6. Propose Better Designs When Applicable
+## Quick Reference Checklist
 
-* If there's a **cleaner**, **more scalable**, or **simpler** design, highlight it.
-* Suggest improvements, even if they require some refactoring — especially if the new code would:
+Before submitting code changes:
 
-  * Reduce duplication
-  * Make unit testing easier
-  * Improve separation of concerns
-  * Add clarity without adding complexity
-
-Instead of:
-
-```python
-def save(data, db_conn):
-    # manually serializes fields
-```
-
-You might suggest:
-
-```python
-# Suggest using dataclasses or Pydantic for automatic serialization and validation
-```
````
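As a hedged sketch of what that removed suggestion points at (`Record` and `db_conn.insert` are illustrative names, not a real LangChain API), a dataclass plus `asdict()` replaces hand-rolled field serialization:

```python
from dataclasses import asdict, dataclass


@dataclass
class Record:
    """Structured payload; asdict() gives dict serialization for free."""

    user_id: str
    score: float


def save(record: Record, db_conn) -> None:
    """Persist a record without manually serializing each field."""
    # `db_conn.insert` is a hypothetical DB client method used for illustration.
    db_conn.insert(asdict(record))
```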
````diff
-### 7. Misc
-
-* When suggesting package installation commands, use `uv pip install` as this project uses `uv`.
-* When creating tools for agents, use the @tool decorator from langchain_core.tools. The tool's docstring serves as its functional description for the agent.
-* Avoid suggesting deprecated components, such as the legacy LLMChain.
-* We use Conventional Commits format for pull request titles. Example PR titles:
-  * feat(core): add multi-tenant support
-  * fix(cli): resolve flag parsing error
-  * docs: update API usage examples
-  * docs(openai): update API usage examples
+- [ ] **Breaking Changes**: Verified no public API changes
+- [ ] **Type Hints**: All functions have complete type annotations
+- [ ] **Tests**: New functionality is fully tested
+- [ ] **Security**: No dangerous patterns (eval, silent failures, etc.)
+- [ ] **Documentation**: Google-style docstrings for public functions
+- [ ] **Code Quality**: `make lint` and `make format` pass
+- [ ] **Architecture**: Suggested improvements where applicable
+- [ ] **Commit Message**: Follows Conventional Commits format
````
`.github/scripts/check_diff.py` (vendored, 1 line changed)

````diff
@@ -16,6 +16,7 @@ LANGCHAIN_DIRS = [
     "libs/core",
     "libs/text-splitters",
     "libs/langchain",
+    "libs/langchain_v1",
 ]
 
 # when set to True, we are ignoring core dependents
````

`.github/scripts/prep_api_docs_build.py` (vendored, 2 lines changed)

````diff
@@ -73,6 +73,7 @@ def main():
             for p in package_yaml["packages"]
             if (p["repo"].startswith("langchain-ai/") or p.get("include_in_api_ref"))
             and p["repo"] != "langchain-ai/langchain"
+            and p["name"] != "langchain-ai21"  # Skip AI21 due to dependency conflicts
         ])
 
     # Move libraries to their new locations
@@ -82,6 +83,7 @@ def main():
             if not p.get("disabled", False)
             and (p["repo"].startswith("langchain-ai/") or p.get("include_in_api_ref"))
             and p["repo"] != "langchain-ai/langchain"
+            and p["name"] != "langchain-ai21"  # Skip AI21 due to dependency conflicts
         ])
 
     # Delete ones without a pyproject.toml
````
`.github/workflows/_compile_integration_test.yml` (vendored, 12 lines changed)

````diff
@@ -1,4 +1,4 @@
-name: compile-integration-test
+name: '🔗 Compile Integration Tests'
 
 on:
   workflow_call:
@@ -25,24 +25,24 @@ jobs:
         working-directory: ${{ inputs.working-directory }}
     runs-on: ubuntu-latest
     timeout-minutes: 20
-    name: "uv run pytest -m compile tests/integration_tests #${{ inputs.python-version }}"
+    name: 'Python ${{ inputs.python-version }}'
     steps:
       - uses: actions/checkout@v4
 
-      - name: Set up Python ${{ inputs.python-version }} + uv
+      - name: '🐍 Set up Python ${{ inputs.python-version }} + UV'
        uses: "./.github/actions/uv_setup"
        with:
          python-version: ${{ inputs.python-version }}
 
-      - name: Install integration dependencies
+      - name: '📦 Install Integration Dependencies'
        shell: bash
        run: uv sync --group test --group test_integration
 
-      - name: Check integration tests compile
+      - name: '🔗 Check Integration Tests Compile'
        shell: bash
        run: uv run pytest -m compile tests/integration_tests
 
-      - name: Ensure the tests did not create any additional files
+      - name: '🧹 Verify Clean Working Directory'
        shell: bash
        run: |
          set -eu
````
`.github/workflows/_integration_test.yml` (vendored, 10 lines changed)

````diff
@@ -1,4 +1,4 @@
-name: Integration Tests
+name: '🚀 Integration Tests'
 
 on:
   workflow_dispatch:
@@ -24,20 +24,20 @@ jobs:
       run:
         working-directory: ${{ inputs.working-directory }}
     runs-on: ubuntu-latest
-    name: Python ${{ inputs.python-version }}
+    name: '🚀 Integration Tests (Python ${{ inputs.python-version }})'
     steps:
       - uses: actions/checkout@v4
 
-      - name: Set up Python ${{ inputs.python-version }} + uv
+      - name: '🐍 Set up Python ${{ inputs.python-version }} + UV'
        uses: "./.github/actions/uv_setup"
        with:
          python-version: ${{ inputs.python-version }}
 
-      - name: Install dependencies
+      - name: '📦 Install Integration Dependencies'
        shell: bash
        run: uv sync --group test --group test_integration
 
-      - name: Run integration tests
+      - name: '🚀 Run Integration Tests'
        shell: bash
        env:
          AI21_API_KEY: ${{ secrets.AI21_API_KEY }}
````
`.github/workflows/_lint.yml` (vendored, 22 lines changed)

````diff
@@ -1,4 +1,6 @@
-name: lint
+name: '🧹 Code Linting'
+# Runs code quality checks using ruff, mypy, and other linting tools
+# Checks both package code and test code for consistency
 
 on:
   workflow_call:
@@ -24,19 +26,21 @@ env:
   UV_FROZEN: "true"
 
 jobs:
+  # Linting job - runs quality checks on package and test code
   build:
-    name: "make lint #${{ inputs.python-version }}"
+    name: 'Python ${{ inputs.python-version }}'
     runs-on: ubuntu-latest
     timeout-minutes: 20
     steps:
-      - uses: actions/checkout@v4
+      - name: '📋 Checkout Code'
+        uses: actions/checkout@v4
 
-      - name: Set up Python ${{ inputs.python-version }} + uv
+      - name: '🐍 Set up Python ${{ inputs.python-version }} + UV'
        uses: "./.github/actions/uv_setup"
        with:
          python-version: ${{ inputs.python-version }}
 
-      - name: Install dependencies
+      - name: '📦 Install Lint & Typing Dependencies'
        # Also installs dev/lint/test/typing dependencies, to ensure we have
        # type hints for as many of our libraries as possible.
        # This helps catch errors that require dependencies to be spotted, for example:
@@ -49,12 +53,12 @@ jobs:
        run: |
          uv sync --group lint --group typing
 
-      - name: Analysing the code with our lint
+      - name: '🔍 Analyze Package Code with Linters'
        working-directory: ${{ inputs.working-directory }}
        run: |
          make lint_package
 
-      - name: Install unit test dependencies
+      - name: '📦 Install Unit Test Dependencies'
        # Also installs dev/lint/test/typing dependencies, to ensure we have
        # type hints for as many of our libraries as possible.
        # This helps catch errors that require dependencies to be spotted, for example:
@@ -67,13 +71,13 @@ jobs:
        working-directory: ${{ inputs.working-directory }}
        run: |
          uv sync --inexact --group test
-      - name: Install unit+integration test dependencies
+      - name: '📦 Install Unit + Integration Test Dependencies'
        if: ${{ startsWith(inputs.working-directory, 'libs/partners/') }}
        working-directory: ${{ inputs.working-directory }}
        run: |
          uv sync --inexact --group test --group test_integration
 
-      - name: Analysing the code with our lint
+      - name: '🔍 Analyze Test Code with Linters'
        working-directory: ${{ inputs.working-directory }}
        run: |
          make lint_tests
````
`.github/workflows/_release.yml` (vendored, 8 lines changed)

````diff
@@ -1,5 +1,5 @@
-name: Release
-run-name: Release ${{ inputs.working-directory }} by @${{ github.actor }}
+name: '🚀 Package Release'
+run-name: '🚀 Release ${{ inputs.working-directory }} by @${{ github.actor }}'
 on:
   workflow_call:
     inputs:
@@ -18,7 +18,7 @@ on:
         required: false
         type: boolean
         default: false
-        description: "Release from a non-master branch (danger!)"
+        description: "Release from a non-master branch (danger!) - Only use for hotfixes"
 
 env:
   PYTHON_VERSION: "3.11"
@@ -26,6 +26,8 @@ env:
   UV_NO_SYNC: "true"
 
 jobs:
+  # Build the distribution package and extract version info
+  # Runs in isolated environment with minimal permissions for security
   build:
     if: github.ref == 'refs/heads/master' || inputs.dangerous-nonmaster-release
     environment: Scheduled testing
````
`.github/workflows/_test.yml` (vendored, 22 lines changed)

````diff
@@ -1,4 +1,6 @@
-name: test
+name: '🧪 Unit Testing'
+# Runs unit tests with both current and minimum supported dependency versions
+# to ensure compatibility across the supported range
 
 on:
   workflow_call:
@@ -20,31 +22,33 @@ env:
   UV_NO_SYNC: "true"
 
 jobs:
+  # Main test job - runs unit tests with current deps, then retests with minimum versions
   build:
     defaults:
       run:
         working-directory: ${{ inputs.working-directory }}
     runs-on: ubuntu-latest
     timeout-minutes: 20
-    name: "make test #${{ inputs.python-version }}"
+    name: 'Python ${{ inputs.python-version }}'
     steps:
-      - uses: actions/checkout@v4
+      - name: '📋 Checkout Code'
+        uses: actions/checkout@v4
 
-      - name: Set up Python ${{ inputs.python-version }} + uv
+      - name: '🐍 Set up Python ${{ inputs.python-version }} + UV'
        uses: "./.github/actions/uv_setup"
        id: setup-python
        with:
          python-version: ${{ inputs.python-version }}
-      - name: Install dependencies
+      - name: '📦 Install Test Dependencies'
        shell: bash
        run: uv sync --group test --dev
 
-      - name: Run core tests
+      - name: '🧪 Run Core Unit Tests'
        shell: bash
        run: |
          make test
 
-      - name: Get minimum versions
+      - name: '🔍 Calculate Minimum Dependency Versions'
        working-directory: ${{ inputs.working-directory }}
        id: min-version
        shell: bash
@@ -55,7 +59,7 @@ jobs:
          echo "min-versions=$min_versions" >> "$GITHUB_OUTPUT"
          echo "min-versions=$min_versions"
 
-      - name: Run unit tests with minimum dependency versions
+      - name: '🧪 Run Tests with Minimum Dependencies'
        if: ${{ steps.min-version.outputs.min-versions != '' }}
        env:
          MIN_VERSIONS: ${{ steps.min-version.outputs.min-versions }}
@@ -64,7 +68,7 @@ jobs:
          make tests
        working-directory: ${{ inputs.working-directory }}
 
-      - name: Ensure the tests did not create any additional files
+      - name: '🧹 Verify Clean Working Directory'
        shell: bash
        run: |
          set -eu
````
`.github/workflows/_test_doc_imports.yml` (vendored, 17 lines changed)

````diff
@@ -1,4 +1,4 @@
-name: test_doc_imports
+name: '📑 Documentation Import Testing'
 
 on:
   workflow_call:
@@ -18,29 +18,30 @@ jobs:
   build:
     runs-on: ubuntu-latest
     timeout-minutes: 20
-    name: "check doc imports #${{ inputs.python-version }}"
+    name: '🔍 Check Doc Imports (Python ${{ inputs.python-version }})'
     steps:
-      - uses: actions/checkout@v4
+      - name: '📋 Checkout Code'
+        uses: actions/checkout@v4
 
-      - name: Set up Python ${{ inputs.python-version }} + uv
+      - name: '🐍 Set up Python ${{ inputs.python-version }} + UV'
        uses: "./.github/actions/uv_setup"
        with:
          python-version: ${{ inputs.python-version }}
 
-      - name: Install dependencies
+      - name: '📦 Install Test Dependencies'
        shell: bash
        run: uv sync --group test
 
-      - name: Install langchain editable
+      - name: '📦 Install LangChain in Editable Mode'
        run: |
          VIRTUAL_ENV=.venv uv pip install langchain-experimental langchain-community -e libs/core libs/langchain
 
-      - name: Check doc imports
+      - name: '🔍 Validate Documentation Import Statements'
        shell: bash
        run: |
          uv run python docs/scripts/check_imports.py
 
-      - name: Ensure the test did not create any additional files
+      - name: '🧹 Verify Clean Working Directory'
        shell: bash
        run: |
          set -eu
````
`.github/workflows/_test_pydantic.yml` (vendored, 17 lines changed)

````diff
@@ -1,4 +1,4 @@
-name: test pydantic intermediate versions
+name: '🐍 Pydantic Version Testing'
 
 on:
   workflow_call:
@@ -31,29 +31,30 @@ jobs:
         working-directory: ${{ inputs.working-directory }}
     runs-on: ubuntu-latest
     timeout-minutes: 20
-    name: "make test # pydantic: ~=${{ inputs.pydantic-version }}, python: ${{ inputs.python-version }}, "
+    name: 'Pydantic ~=${{ inputs.pydantic-version }}'
     steps:
-      - uses: actions/checkout@v4
+      - name: '📋 Checkout Code'
+        uses: actions/checkout@v4
 
-      - name: Set up Python ${{ inputs.python-version }} + uv
+      - name: '🐍 Set up Python ${{ inputs.python-version }} + UV'
        uses: "./.github/actions/uv_setup"
        with:
          python-version: ${{ inputs.python-version }}
 
-      - name: Install dependencies
+      - name: '📦 Install Test Dependencies'
        shell: bash
        run: uv sync --group test
 
-      - name: Overwrite pydantic version
+      - name: '🔄 Install Specific Pydantic Version'
        shell: bash
        run: VIRTUAL_ENV=.venv uv pip install pydantic~=${{ inputs.pydantic-version }}
 
-      - name: Run core tests
+      - name: '🧪 Run Core Tests'
        shell: bash
        run: |
          make test
 
-      - name: Ensure the tests did not create any additional files
+      - name: '🧹 Verify Clean Working Directory'
        shell: bash
        run: |
          set -eu
````
`.github/workflows/_test_release.yml` (vendored, 10 lines changed)

````diff
@@ -1,4 +1,4 @@
-name: test-release
+name: '🧪 Test Release Package'
 
 on:
   workflow_call:
@@ -29,7 +29,7 @@ jobs:
     steps:
       - uses: actions/checkout@v4
 
-      - name: Set up Python + uv
+      - name: '🐍 Set up Python + UV'
        uses: "./.github/actions/uv_setup"
        with:
          python-version: ${{ env.PYTHON_VERSION }}
@@ -45,17 +45,17 @@ jobs:
      # > It is strongly advised to separate jobs for building [...]
      # > from the publish job.
      # https://github.com/pypa/gh-action-pypi-publish#non-goals
-      - name: Build project for distribution
+      - name: '📦 Build Project for Distribution'
        run: uv build
        working-directory: ${{ inputs.working-directory }}
 
-      - name: Upload build
+      - name: '⬆️ Upload Build Artifacts'
        uses: actions/upload-artifact@v4
        with:
          name: test-dist
          path: ${{ inputs.working-directory }}/dist/
 
-      - name: Check Version
+      - name: '🔍 Extract Version Information'
        id: check-version
        shell: python
        working-directory: ${{ inputs.working-directory }}
````
`.github/workflows/api_doc_build.yml` (vendored, 44 lines changed)

````diff
@@ -1,17 +1,20 @@
-name: API Docs Build
+name: '📚 API Documentation Build'
+# Runs daily or can be triggered manually for immediate updates
 
 on:
   workflow_dispatch:
   schedule:
-    - cron: '0 13 * * *'
+    - cron: '0 13 * * *'  # Daily at 1PM UTC
 
 env:
   PYTHON_VERSION: "3.11"
 
 jobs:
+  # Only runs on main repository to prevent unnecessary builds on forks
   build:
     if: github.repository == 'langchain-ai/langchain' || github.event_name != 'schedule'
     runs-on: ubuntu-latest
-    permissions: write-all
+    permissions:
+      contents: read
     steps:
       - uses: actions/checkout@v4
         with:
@@ -22,7 +25,7 @@ jobs:
           path: langchain-api-docs-html
           token: ${{ secrets.TOKEN_GITHUB_API_DOCS_HTML }}
 
-      - name: Get repos with yq
+      - name: '📋 Extract Repository List with yq'
        id: get-unsorted-repos
        uses: mikefarah/yq@master
        with:
@@ -41,7 +44,7 @@ jobs:
            | .repo
          ' langchain/libs/packages.yml
 
-      - name: Parse YAML and checkout repos
+      - name: '📋 Parse YAML & Checkout Repositories'
        env:
          REPOS_UNSORTED: ${{ steps.get-unsorted-repos.outputs.result }}
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -49,48 +52,59 @@ jobs:
          # Get unique repositories
          REPOS=$(echo "$REPOS_UNSORTED" | sort -u)
 
-          # Checkout each unique repository that is in langchain-ai org
+          # Checkout each unique repository
          for repo in $REPOS; do
+            # Validate repository format (allow any org with proper format)
+            if [[ ! "$repo" =~ ^[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+$ ]]; then
+              echo "Error: Invalid repository format: $repo"
+              exit 1
+            fi
+
            REPO_NAME=$(echo $repo | cut -d'/' -f2)
+
+            # Additional validation for repo name
+            if [[ ! "$REPO_NAME" =~ ^[a-zA-Z0-9_.-]+$ ]]; then
+              echo "Error: Invalid repository name: $REPO_NAME"
+              exit 1
+            fi
+
            echo "Checking out $repo to $REPO_NAME"
            git clone --depth 1 https://github.com/$repo.git $REPO_NAME
          done
 
-      - name: Setup Python ${{ env.PYTHON_VERSION }}
+      - name: '🐍 Setup Python ${{ env.PYTHON_VERSION }}'
        uses: actions/setup-python@v5
        id: setup-python
        with:
          python-version: ${{ env.PYTHON_VERSION }}
 
-      - name: Install initial py deps
+      - name: '📦 Install Initial Python Dependencies'
        working-directory: langchain
        run: |
          python -m pip install -U uv
          python -m uv pip install --upgrade --no-cache-dir pip setuptools pyyaml
 
-      - name: Move libs
+      - name: '📦 Organize Library Directories'
        run: python langchain/.github/scripts/prep_api_docs_build.py
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
 
-      - name: Rm old html
+      - name: '🧹 Remove Old HTML Files'
        run:
          rm -rf langchain-api-docs-html/api_reference_build/html
 
-      - name: Install dependencies
+      - name: '📦 Install Documentation Dependencies'
        working-directory: langchain
        run: |
          python -m uv pip install $(ls ./libs/partners | xargs -I {} echo "./libs/partners/{}") --overrides ./docs/vercel_overrides.txt
          python -m uv pip install libs/core libs/langchain libs/text-splitters libs/community libs/experimental libs/standard-tests
          python -m uv pip install -r docs/api_reference/requirements.txt
 
-      - name: Set Git config
+      - name: '🔧 Configure Git Settings'
        working-directory: langchain
        run: |
          git config --local user.email "actions@github.com"
          git config --local user.name "Github Actions"
 
-      - name: Build docs
+      - name: '📚 Build API Documentation'
        working-directory: langchain
        run: |
          python docs/api_reference/create_api_rst.py
````
`.github/workflows/check-broken-links.yml` (vendored, 8 lines changed)

````diff
@@ -1,4 +1,4 @@
-name: Check Broken Links
+name: '🔗 Check Broken Links'
 
 on:
   workflow_dispatch:
@@ -14,15 +14,15 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
-      - name: Use Node.js 18.x
+      - name: '🟢 Setup Node.js 18.x'
        uses: actions/setup-node@v4
        with:
          node-version: 18.x
          cache: "yarn"
          cache-dependency-path: ./docs/yarn.lock
-      - name: Install dependencies
+      - name: '📦 Install Node Dependencies'
        run: yarn install --immutable --mode=skip-build
        working-directory: ./docs
-      - name: Check broken links
+      - name: '🔍 Scan Documentation for Broken Links'
        run: yarn check-broken-links
        working-directory: ./docs
````
`.github/workflows/check_core_versions.yml` (vendored, 6 lines changed)

````diff
@@ -1,4 +1,6 @@
-name: Check `core` Version Equality
+name: '🔍 Check `core` Version Equality'
+# Ensures version numbers in pyproject.toml and version.py stay in sync
+# Prevents releases with mismatched version numbers
 
 on:
   pull_request:
@@ -16,7 +18,7 @@ jobs:
     steps:
       - uses: actions/checkout@v4
 
-      - name: Check version equality
+      - name: '✅ Verify pyproject.toml & version.py Match'
        run: |
          PYPROJECT_VERSION=$(grep -Po '(?<=^version = ")[^"]*' libs/core/pyproject.toml)
          VERSION_PY_VERSION=$(grep -Po '(?<=^VERSION = ")[^"]*' libs/core/langchain_core/version.py)
````
`.github/workflows/check_diffs.yml` (vendored, 42 lines changed)

````diff
@@ -1,4 +1,4 @@
-name: CI
+name: '🔧 CI'
 
 on:
   push:
@@ -6,6 +6,7 @@ on:
   pull_request:
   merge_group:
 
+# Optimizes CI performance by canceling redundant workflow runs
 # If another push to the same PR or branch happens while this workflow is still running,
 # cancel the earlier run in favor of the next run.
 #
@@ -24,16 +25,23 @@ env:
   UV_NO_SYNC: "true"
 
 jobs:
+  # This job analyzes which files changed and creates a dynamic test matrix
+  # to only run tests/lints for the affected packages, improving CI efficiency
   build:
+    name: 'Detect Changes & Set Matrix'
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
-      - uses: actions/setup-python@v5
+      - name: '📋 Checkout Code'
+        uses: actions/checkout@v4
+      - name: '🐍 Setup Python 3.11'
+        uses: actions/setup-python@v5
        with:
          python-version: '3.11'
-      - id: files
+      - name: '📂 Get Changed Files'
+        id: files
        uses: Ana06/get-changed-files@v2.3.0
-      - id: set-matrix
+      - name: '🔍 Analyze Changed Files & Generate Build Matrix'
+        id: set-matrix
        run: |
          python -m pip install packaging requests
          python .github/scripts/check_diff.py ${{ steps.files.outputs.all }} >> $GITHUB_OUTPUT
@@ -45,8 +53,8 @@ jobs:
       dependencies: ${{ steps.set-matrix.outputs.dependencies }}
       test-doc-imports: ${{ steps.set-matrix.outputs.test-doc-imports }}
       test-pydantic: ${{ steps.set-matrix.outputs.test-pydantic }}
+  # Run linting only on packages that have changed files
   lint:
     name: cd ${{ matrix.job-configs.working-directory }}
     needs: [ build ]
     if: ${{ needs.build.outputs.lint != '[]' }}
     strategy:
@@ -59,8 +67,8 @@ jobs:
       python-version: ${{ matrix.job-configs.python-version }}
     secrets: inherit
 
+  # Run unit tests only on packages that have changed files
   test:
     name: cd ${{ matrix.job-configs.working-directory }}
     needs: [ build ]
     if: ${{ needs.build.outputs.test != '[]' }}
     strategy:
@@ -73,8 +81,8 @@ jobs:
       python-version: ${{ matrix.job-configs.python-version }}
     secrets: inherit
 
+  # Test compatibility with different Pydantic versions for affected packages
   test-pydantic:
     name: cd ${{ matrix.job-configs.working-directory }}
     needs: [ build ]
     if: ${{ needs.build.outputs.test-pydantic != '[]' }}
     strategy:
@@ -95,12 +103,12 @@ jobs:
       job-configs: ${{ fromJson(needs.build.outputs.test-doc-imports) }}
       fail-fast: false
     uses: ./.github/workflows/_test_doc_imports.yml
-    secrets: inherit
     with:
       python-version: ${{ matrix.job-configs.python-version }}
+    secrets: inherit
 
+  # Verify integration tests compile without actually running them (faster feedback)
   compile-integration-tests:
     name: cd ${{ matrix.job-configs.working-directory }}
     needs: [ build ]
     if: ${{ needs.build.outputs.compile-integration-tests != '[]' }}
     strategy:
@@ -113,8 +121,9 @@ jobs:
       python-version: ${{ matrix.job-configs.python-version }}
     secrets: inherit
 
+  # Run extended test suites that require additional dependencies
   extended-tests:
-    name: "cd ${{ matrix.job-configs.working-directory }} / make extended_tests #${{ matrix.job-configs.python-version }}"
+    name: 'Extended Tests'
     needs: [ build ]
     if: ${{ needs.build.outputs.extended-tests != '[]' }}
     strategy:
@@ -130,12 +139,12 @@ jobs:
     steps:
       - uses: actions/checkout@v4
 
-      - name: Set up Python ${{ matrix.job-configs.python-version }} + uv
+      - name: '🐍 Set up Python ${{ matrix.job-configs.python-version }} + UV'
        uses: "./.github/actions/uv_setup"
        with:
          python-version: ${{ matrix.job-configs.python-version }}
 
-      - name: Install dependencies and run extended tests
+      - name: '📦 Install Dependencies & Run Extended Tests'
        shell: bash
        run: |
          echo "Running extended tests, installing dependencies with uv..."
@@ -144,7 +153,7 @@ jobs:
          VIRTUAL_ENV=.venv uv pip install -r extended_testing_deps.txt
          VIRTUAL_ENV=.venv make extended_tests
 
-      - name: Ensure the tests did not create any additional files
+      - name: '🧹 Verify Clean Working Directory'
        shell: bash
        run: |
          set -eu
@@ -156,8 +165,9 @@ jobs:
      # and `set -e` above will cause the step to fail.
      echo "$STATUS" | grep 'nothing to commit, working tree clean'
 
+  # Final status check - ensures all required jobs passed before allowing merge
   ci_success:
-    name: "CI Success"
+    name: '✅ CI Success'
     needs: [build, lint, test, compile-integration-tests, extended-tests, test-doc-imports, test-pydantic]
     if: |
       always()
@@ -167,7 +177,7 @@ jobs:
       RESULTS_JSON: ${{ toJSON(needs.*.result) }}
       EXIT_CODE: ${{!contains(needs.*.result, 'failure') && !contains(needs.*.result, 'cancelled') && '0' || '1'}}
     steps:
-      - name: "CI Success"
+      - name: '🎉 All Checks Passed'
        run: |
          echo $JOBS_JSON
          echo $RESULTS_JSON
````
`.github/workflows/check_new_docs.yml` (vendored, 4 lines changed)

````diff
@@ -1,4 +1,4 @@
-name: Integration Docs Lint
+name: '📑 Integration Docs Lint'
 
 on:
   push:
@@ -33,6 +33,6 @@ jobs:
             *.ipynb
             *.md
             *.mdx
-      - name: Check new docs
+      - name: '🔍 Check New Documentation Templates'
        run: |
          python docs/scripts/check_templates.py ${{ steps.files.outputs.added }}
````
`.github/workflows/codespell.yml` (vendored, 35 lines deleted)

````diff
@@ -1,35 +0,0 @@
-name: CI / cd . / make spell_check
-
-on:
-  push:
-    branches: [master, v0.1, v0.2]
-  pull_request:
-
-permissions:
-  contents: read
-
-jobs:
-  codespell:
-    name: (Check for spelling errors)
-    runs-on: ubuntu-latest
-
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v4
-
-      - name: Install Dependencies
-        run: |
-          pip install toml
-
-      - name: Extract Ignore Words List
-        run: |
-          # Use a Python script to extract the ignore words list from pyproject.toml
-          python .github/workflows/extract_ignored_words_list.py
-        id: extract_ignore_words
-
-      # - name: Codespell
-      #   uses: codespell-project/actions-codespell@v2
-      #   with:
-      #     skip: guide_imports.json,*.ambr,./cookbook/data/imdb_top_1000.csv,*.lock
-      #     ignore_words_list: ${{ steps.extract_ignore_words.outputs.ignore_words_list }}
-      #     exclude_file: ./.github/workflows/codespell-exclude
````
`.github/workflows/codspeed.yml` (vendored, 10 lines changed)

````diff
@@ -1,4 +1,4 @@
-name: CodSpeed
+name: '⚡ CodSpeed'
 
 on:
   push:
@@ -18,7 +18,7 @@ env:
 
 jobs:
   codspeed:
-    name: Run benchmarks
+    name: 'Benchmark'
     runs-on: ubuntu-latest
     strategy:
       matrix:
@@ -38,7 +38,7 @@ jobs:
       - uses: actions/checkout@v4
 
      # We have to use 3.12 as 3.13 is not yet supported
-      - name: Install uv
+      - name: '📦 Install UV Package Manager'
        uses: astral-sh/setup-uv@v6
        with:
          python-version: "3.12"
@@ -47,11 +47,11 @@ jobs:
        with:
          python-version: "3.12"
 
-      - name: Install dependencies
+      - name: '📦 Install Test Dependencies'
        run: uv sync --group test
        working-directory: ${{ matrix.working-directory }}
 
-      - name: Run benchmarks ${{ matrix.working-directory }}
+      - name: '⚡ Run Benchmarks: ${{ matrix.working-directory }}'
        uses: CodSpeedHQ/action@v3
        with:
          token: ${{ secrets.CODSPEED_TOKEN }}
````
`.github/workflows/people.yml` (vendored, 6 lines changed)

````diff
@@ -1,4 +1,4 @@
-name: LangChain People
+name: '👥 LangChain People'
 
 on:
   schedule:
@@ -14,13 +14,13 @@ jobs:
     permissions:
       contents: write
     steps:
-      - name: Dump GitHub context
+      - name: '📋 Dump GitHub Context'
        env:
          GITHUB_CONTEXT: ${{ toJson(github) }}
        run: echo "$GITHUB_CONTEXT"
      - uses: actions/checkout@v4
      # Ref: https://github.com/actions/runner/issues/2033
-      - name: Fix git safe.directory in container
+      - name: '🔧 Fix Git Safe Directory in Container'
        run: mkdir -p /home/runner/work/_temp/_github_home && printf "[safe]\n\tdirectory = /github/workspace" > /home/runner/work/_temp/_github_home/.gitconfig
      - uses: ./.github/actions/people
        with:
````
`.github/workflows/pr_lint.yml` (vendored, 9 lines changed)

````diff
@@ -4,6 +4,7 @@
 # Purpose:
 # Enforces Conventional Commits format for pull request titles to maintain a
 # clear, consistent, and machine-readable change history across our repository.
+# This helps with automated changelog generation and semantic versioning.
 #
 # Enforced Commit Message Format (Conventional Commits 1.0.0):
 #   <type>[optional scope]: <description>
@@ -45,7 +46,7 @@
 # • Conventional Commits spec: https://www.conventionalcommits.org/en/v1.0.0/
 # -----------------------------------------------------------------------------
 
-name: PR Title Lint
+name: '🏷️ PR Title Lint'
 
 permissions:
   pull-requests: read
@@ -55,11 +56,12 @@ on:
     types: [opened, edited, synchronize]
 
 jobs:
+  # Validates that PR title follows Conventional Commits specification
   lint-pr-title:
-    name: Validate PR Title
+    name: 'Validate PR Title Format'
     runs-on: ubuntu-latest
     steps:
-      - name: Validate PR Title
+      - name: '✅ Validate Conventional Commits Format'
        uses: amannn/action-semantic-pull-request@v5
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -81,6 +83,7 @@ jobs:
            core
            cli
            langchain
+            langchain_v1
            standard-tests
            text-splitters
            docs
````
`.github/workflows/run_notebooks.yml` (vendored, 18 lines changed)

````diff
@@ -1,4 +1,4 @@
-name: Run Notebooks
+name: '📝 Run Documentation Notebooks'
 
 on:
   workflow_dispatch:
@@ -24,43 +24,43 @@ jobs:
   build:
     runs-on: ubuntu-latest
     if: github.repository == 'langchain-ai/langchain' || github.event_name != 'schedule'
-    name: "Test docs"
+    name: '📑 Test Documentation Notebooks'
     steps:
       - uses: actions/checkout@v4
 
-      - name: Set up Python + uv
+      - name: '🐍 Set up Python + UV'
        uses: "./.github/actions/uv_setup"
        with:
          python-version: ${{ github.event.inputs.python_version || '3.11' }}
 
-      - name: 'Authenticate to Google Cloud'
+      - name: '🔐 Authenticate to Google Cloud'
        id: 'auth'
        uses: google-github-actions/auth@v2
        with:
          credentials_json: '${{ secrets.GOOGLE_CREDENTIALS }}'
 
-      - name: Configure AWS Credentials
+      - name: '🔐 Configure AWS Credentials'
        uses: aws-actions/configure-aws-credentials@v4
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ secrets.AWS_REGION }}
 
-      - name: Install dependencies
+      - name: '📦 Install Dependencies'
        run: |
          uv sync --group dev --group test
 
-      - name: Pre-download files
+      - name: '📦 Pre-download Test Files'
        run: |
          uv run python docs/scripts/cache_data.py
          curl -s https://raw.githubusercontent.com/lerocha/chinook-database/master/ChinookDatabase/DataSources/Chinook_Sqlite.sql | sqlite3 docs/docs/how_to/Chinook.db
          cp docs/docs/how_to/Chinook.db docs/docs/tutorials/Chinook.db
 
-      - name: Prepare notebooks
+      - name: '🔧 Prepare Notebooks for CI'
        run: |
          uv run python docs/scripts/prepare_notebooks_for_ci.py --comment-install-cells --working-directory ${{ github.event.inputs.working-directory || 'all' }}
 
-      - name: Run notebooks
+      - name: '🚀 Execute Notebooks'
        env:
          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
          FIREWORKS_API_KEY: ${{ secrets.FIREWORKS_API_KEY }}
````
#### `.github/workflows/scheduled_test.yml` (37 changes)

```diff
@@ -1,7 +1,7 @@
-name: Scheduled Tests
+name: '⏰ Scheduled Integration Tests'

 on:
-  workflow_dispatch:  # Allows to trigger the workflow manually in GitHub UI
+  workflow_dispatch:  # Allows maintainers to trigger the workflow manually in GitHub UI
     inputs:
       working-directory-force:
         type: string
@@ -10,7 +10,7 @@ on:
         type: string
         description: "Python version to use - defaults to 3.9 and 3.11 in matrix - example value: 3.9"
   schedule:
-    - cron: '0 13 * * *'
+    - cron: '0 13 * * *'  # Runs daily at 1PM UTC (9AM EDT/6AM PDT)

 permissions:
   contents: read
@@ -22,14 +22,16 @@ env:
   POETRY_LIBS: ("libs/partners/google-vertexai" "libs/partners/google-genai" "libs/partners/aws")

 jobs:
+  # Generate dynamic test matrix based on input parameters or defaults
+  # Only runs on the main repo (for scheduled runs) or when manually triggered
   compute-matrix:
     if: github.repository_owner == 'langchain-ai' || github.event_name != 'schedule'
     runs-on: ubuntu-latest
-    name: Compute matrix
+    name: '📋 Compute Test Matrix'
     outputs:
       matrix: ${{ steps.set-matrix.outputs.matrix }}
     steps:
-      - name: Set matrix
+      - name: '🔢 Generate Python & Library Matrix'
        id: set-matrix
        env:
          DEFAULT_LIBS: ${{ env.DEFAULT_LIBS }}
@@ -50,9 +52,11 @@ jobs:
          matrix="{\"python-version\": $python_version, \"working-directory\": $working_directory}"
          echo $matrix
          echo "matrix=$matrix" >> $GITHUB_OUTPUT
+  # Run integration tests against partner libraries with live API credentials
+  # Tests are run with both Poetry and UV depending on the library's setup
   build:
     if: github.repository_owner == 'langchain-ai' || github.event_name != 'schedule'
-    name: Python ${{ matrix.python-version }} - ${{ matrix.working-directory }}
+    name: '🐍 Python ${{ matrix.python-version }}: ${{ matrix.working-directory }}'
     runs-on: ubuntu-latest
     needs: [compute-matrix]
     timeout-minutes: 20
@@ -75,7 +79,7 @@ jobs:
          repository: langchain-ai/langchain-aws
          path: langchain-aws

-      - name: Move libs
+      - name: '📦 Organize External Libraries'
        run: |
          rm -rf \
            langchain/libs/partners/google-genai \
@@ -84,7 +88,7 @@ jobs:
          mv langchain-google/libs/vertexai langchain/libs/partners/google-vertexai
          mv langchain-aws/libs/aws langchain/libs/partners/aws

-      - name: Set up Python ${{ matrix.python-version }} with poetry
+      - name: '🐍 Set up Python ${{ matrix.python-version }} + Poetry'
        if: contains(env.POETRY_LIBS, matrix.working-directory)
        uses: "./langchain/.github/actions/poetry_setup"
        with:
@@ -93,40 +97,40 @@ jobs:
          working-directory: langchain/${{ matrix.working-directory }}
          cache-key: scheduled

-      - name: Set up Python ${{ matrix.python-version }} + uv
+      - name: '🐍 Set up Python ${{ matrix.python-version }} + UV'
        if: "!contains(env.POETRY_LIBS, matrix.working-directory)"
        uses: "./langchain/.github/actions/uv_setup"
        with:
          python-version: ${{ matrix.python-version }}

-      - name: 'Authenticate to Google Cloud'
+      - name: '🔐 Authenticate to Google Cloud'
        id: 'auth'
        uses: google-github-actions/auth@v2
        with:
          credentials_json: '${{ secrets.GOOGLE_CREDENTIALS }}'

-      - name: Configure AWS Credentials
+      - name: '🔐 Configure AWS Credentials'
        uses: aws-actions/configure-aws-credentials@v4
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ secrets.AWS_REGION }}

-      - name: Install dependencies (poetry)
+      - name: '📦 Install Dependencies (Poetry)'
        if: contains(env.POETRY_LIBS, matrix.working-directory)
        run: |
          echo "Running scheduled tests, installing dependencies with poetry..."
          cd langchain/${{ matrix.working-directory }}
          poetry install --with=test_integration,test

-      - name: Install dependencies (uv)
+      - name: '📦 Install Dependencies (UV)'
        if: "!contains(env.POETRY_LIBS, matrix.working-directory)"
        run: |
          echo "Running scheduled tests, installing dependencies with uv..."
          cd langchain/${{ matrix.working-directory }}
          uv sync --group test --group test_integration

-      - name: Run integration tests
+      - name: '🚀 Run Integration Tests'
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
@@ -155,14 +159,15 @@ jobs:
          cd langchain/${{ matrix.working-directory }}
          make integration_tests

-      - name: Remove external libraries
+      - name: '🧹 Clean up External Libraries'
+        # Clean up external libraries to avoid affecting git status check
        run: |
          rm -rf \
            langchain/libs/partners/google-genai \
            langchain/libs/partners/google-vertexai \
            langchain/libs/partners/aws

-      - name: Ensure tests did not create additional files
+      - name: '🧹 Verify Clean Working Directory'
        working-directory: langchain
        run: |
          set -eu
```
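Besides the daily cron, the dispatch path above can be exercised from the command line. Something like the following should work for maintainers, assuming the GitHub CLI; the `working-directory-force` input name is taken from the hunk above, and the library path is just an example:

```bash
# Manually kick off the scheduled integration tests for a single library
# (requires the GitHub CLI `gh`, run from a checkout of the repo)
gh workflow run scheduled_test.yml \
  -f working-directory-force=libs/partners/openai
```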
#### `.gitignore` (1 change)

```diff
@@ -1,5 +1,4 @@
 .vs/
-.vscode/
 .idea/
 # Byte-compiled / optimized / DLL files
 __pycache__/
```
#### `.markdownlint.json` (new file, 14 lines)

```diff
@@ -0,0 +1,14 @@
+{
+  "MD013": false,
+  "MD024": {
+    "siblings_only": true
+  },
+  "MD025": false,
+  "MD033": false,
+  "MD034": false,
+  "MD036": false,
+  "MD041": false,
+  "MD046": {
+    "style": "fenced"
+  }
+}
```
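The new config disables the line-length and HTML-related rules and enforces fenced code blocks. To try it locally, something like the following should work; this assumes `markdownlint-cli2` (which picks up `.markdownlint.json` automatically), whereas the repo itself only wires up the VS Code extension recommended further below:

```bash
# Lint all Markdown files against the new .markdownlint.json config,
# skipping node_modules (the "#" prefix negates a glob in markdownlint-cli2)
npx markdownlint-cli2 "**/*.md" "#node_modules"
```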
```diff
@@ -1,111 +1,111 @@
 repos:
   - repo: local
     hooks:
       - id: core
         name: format core
         language: system
         entry: make -C libs/core format
         files: ^libs/core/
         pass_filenames: false
       - id: langchain
         name: format langchain
         language: system
         entry: make -C libs/langchain format
         files: ^libs/langchain/
         pass_filenames: false
       - id: standard-tests
         name: format standard-tests
         language: system
         entry: make -C libs/standard-tests format
         files: ^libs/standard-tests/
         pass_filenames: false
       - id: text-splitters
         name: format text-splitters
         language: system
         entry: make -C libs/text-splitters format
         files: ^libs/text-splitters/
         pass_filenames: false
       - id: anthropic
         name: format partners/anthropic
         language: system
         entry: make -C libs/partners/anthropic format
         files: ^libs/partners/anthropic/
         pass_filenames: false
       - id: chroma
         name: format partners/chroma
         language: system
         entry: make -C libs/partners/chroma format
         files: ^libs/partners/chroma/
         pass_filenames: false
       - id: couchbase
         name: format partners/couchbase
         language: system
         entry: make -C libs/partners/couchbase format
         files: ^libs/partners/couchbase/
         pass_filenames: false
       - id: exa
         name: format partners/exa
         language: system
         entry: make -C libs/partners/exa format
         files: ^libs/partners/exa/
         pass_filenames: false
       - id: fireworks
         name: format partners/fireworks
         language: system
         entry: make -C libs/partners/fireworks format
         files: ^libs/partners/fireworks/
         pass_filenames: false
       - id: groq
         name: format partners/groq
         language: system
         entry: make -C libs/partners/groq format
         files: ^libs/partners/groq/
         pass_filenames: false
       - id: huggingface
         name: format partners/huggingface
         language: system
         entry: make -C libs/partners/huggingface format
         files: ^libs/partners/huggingface/
         pass_filenames: false
       - id: mistralai
         name: format partners/mistralai
         language: system
         entry: make -C libs/partners/mistralai format
         files: ^libs/partners/mistralai/
         pass_filenames: false
       - id: nomic
         name: format partners/nomic
         language: system
         entry: make -C libs/partners/nomic format
         files: ^libs/partners/nomic/
         pass_filenames: false
       - id: ollama
         name: format partners/ollama
         language: system
         entry: make -C libs/partners/ollama format
         files: ^libs/partners/ollama/
         pass_filenames: false
       - id: openai
         name: format partners/openai
         language: system
         entry: make -C libs/partners/openai format
         files: ^libs/partners/openai/
         pass_filenames: false
       - id: prompty
         name: format partners/prompty
         language: system
         entry: make -C libs/partners/prompty format
         files: ^libs/partners/prompty/
         pass_filenames: false
       - id: qdrant
         name: format partners/qdrant
         language: system
         entry: make -C libs/partners/qdrant format
         files: ^libs/partners/qdrant/
         pass_filenames: false
       - id: root
         name: format docs, cookbook
         language: system
         entry: make format
         files: ^(docs|cookbook)/
         pass_filenames: false
```
```diff
@@ -13,7 +13,7 @@ build:

 # Build documentation in the docs/ directory with Sphinx
 sphinx:
-  configuration: docs/api_reference/conf.py
+  configuration: docs/api_reference/conf.py

 # If using Sphinx, optionally build your docs in additional formats such as PDF
 formats:
@@ -21,5 +21,5 @@ formats:

 # Optionally declare the Python requirements required to build your docs
 python:
-  install:
-    - requirements: docs/api_reference/requirements.txt
+  install:
+    - requirements: docs/api_reference/requirements.txt
```
#### `.vscode/extensions.json` (new file, 21 lines)

```diff
@@ -0,0 +1,21 @@
+{
+  "recommendations": [
+    "ms-python.python",
+    "charliermarsh.ruff",
+    "ms-python.mypy-type-checker",
+    "ms-toolsai.jupyter",
+    "ms-toolsai.jupyter-keymap",
+    "ms-toolsai.jupyter-renderers",
+    "ms-toolsai.vscode-jupyter-cell-tags",
+    "ms-toolsai.vscode-jupyter-slideshow",
+    "yzhang.markdown-all-in-one",
+    "davidanson.vscode-markdownlint",
+    "bierner.markdown-mermaid",
+    "bierner.markdown-preview-github-styles",
+    "eamodio.gitlens",
+    "github.vscode-pull-request-github",
+    "github.vscode-github-actions",
+    "redhat.vscode-yaml",
+    "editorconfig.editorconfig",
+  ],
+}
```
#### `.vscode/settings.json` (new file, 80 lines)

```diff
@@ -0,0 +1,80 @@
+{
+  "python.analysis.include": [
+    "libs/**",
+    "docs/**",
+    "cookbook/**"
+  ],
+  "python.analysis.exclude": [
+    "**/node_modules",
+    "**/__pycache__",
+    "**/.pytest_cache",
+    "**/.*",
+    "_dist/**",
+    "docs/_build/**",
+    "docs/api_reference/_build/**"
+  ],
+  "python.analysis.autoImportCompletions": true,
+  "python.analysis.typeCheckingMode": "basic",
+  "python.testing.cwd": "${workspaceFolder}",
+  "python.linting.enabled": true,
+  "python.linting.ruffEnabled": true,
+  "[python]": {
+    "editor.formatOnSave": true,
+    "editor.codeActionsOnSave": {
+      "source.organizeImports": "explicit",
+      "source.fixAll": "explicit"
+    },
+    "editor.defaultFormatter": "charliermarsh.ruff"
+  },
+  "editor.rulers": [
+    88
+  ],
+  "editor.tabSize": 4,
+  "editor.insertSpaces": true,
+  "editor.trimAutoWhitespace": true,
+  "files.trimTrailingWhitespace": true,
+  "files.insertFinalNewline": true,
+  "files.exclude": {
+    "**/__pycache__": true,
+    "**/.pytest_cache": true,
+    "**/*.pyc": true,
+    "**/.mypy_cache": true,
+    "**/.ruff_cache": true,
+    "_dist/**": true,
+    "docs/_build/**": true,
+    "docs/api_reference/_build/**": true,
+    "**/node_modules": true,
+    "**/.git": false
+  },
+  "search.exclude": {
+    "**/__pycache__": true,
+    "**/*.pyc": true,
+    "_dist/**": true,
+    "docs/_build/**": true,
+    "docs/api_reference/_build/**": true,
+    "**/node_modules": true,
+    "**/.git": true,
+    "uv.lock": true,
+    "yarn.lock": true
+  },
+  "git.autofetch": true,
+  "git.enableSmartCommit": true,
+  "jupyter.askForKernelRestart": false,
+  "jupyter.interactiveWindow.textEditor.executeSelection": true,
+  "[markdown]": {
+    "editor.wordWrap": "on",
+    "editor.quickSuggestions": {
+      "comments": "off",
+      "strings": "off",
+      "other": "off"
+    }
+  },
+  "[yaml]": {
+    "editor.tabSize": 2,
+    "editor.insertSpaces": true
+  },
+  "[json]": {
+    "editor.tabSize": 2,
+    "editor.insertSpaces": true
+  },
+}
```
```diff
@@ -7,5 +7,5 @@ Please see the following guides for migrating LangChain code:
 * Migrating from [LangChain 0.0.x Chains](https://python.langchain.com/docs/versions/migrating_chains/)
 * Upgrade to [LangGraph Memory](https://python.langchain.com/docs/versions/migrating_memory/)

-The [LangChain CLI](https://python.langchain.com/docs/versions/v0_3/#migrate-using-langchain-cli) can help you automatically upgrade your code to use non-deprecated imports.
+The [LangChain CLI](https://python.langchain.com/docs/versions/v0_3/#migrate-using-langchain-cli) can help you automatically upgrade your code to use non-deprecated imports.
 This will be especially helpful if you're still on either version 0.0.x or 0.1.x of LangChain.
```
#### `Makefile` (67 changes)

```diff
@@ -8,9 +8,6 @@ help: Makefile
 	@printf "\n\033[1mUsage: make <TARGETS> ...\033[0m\n\n\033[1mTargets:\033[0m\n\n"
 	@sed -n 's/^## //p' $< | awk -F':' '{printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' | sort | sed -e 's/^/ /'

-## all: Default target, shows help.
-all: help
-
 ## clean: Clean documentation and API documentation artifacts.
 clean: docs_clean api_docs_clean

@@ -19,49 +16,79 @@ clean: docs_clean api_docs_clean
 ######################

 ## docs_build: Build the documentation.
-docs_build:
+docs_build: docs_clean
+	@echo "📚 Building LangChain documentation..."
 	cd docs && make build
+	@echo "✅ Documentation build complete!"

 ## docs_clean: Clean the documentation build artifacts.
 docs_clean:
+	@echo "🧹 Cleaning documentation artifacts..."
 	cd docs && make clean
+	@echo "✅ LangChain documentation cleaned"

 ## docs_linkcheck: Run linkchecker on the documentation.
 docs_linkcheck:
-	uv run --no-group test linkchecker _dist/docs/ --ignore-url node_modules
+	@echo "🔗 Checking documentation links..."
+	@if [ -d _dist/docs ]; then \
+		uv run --group test linkchecker _dist/docs/ --ignore-url node_modules; \
+	else \
+		echo "⚠️ Documentation not built. Run 'make docs_build' first."; \
+		exit 1; \
+	fi
+	@echo "✅ Link check complete"

 ## api_docs_build: Build the API Reference documentation.
-api_docs_build:
-	uv run --no-group test python docs/api_reference/create_api_rst.py
-	cd docs/api_reference && uv run --no-group test make html
-	uv run --no-group test python docs/api_reference/scripts/custom_formatter.py docs/api_reference/_build/html/
+api_docs_build: clean
+	@echo "📖 Building API Reference documentation..."
+	uv pip install -e libs/cli
+	uv run --group docs python docs/api_reference/create_api_rst.py
+	cd docs/api_reference && uv run --group docs make html
+	uv run --group docs python docs/api_reference/scripts/custom_formatter.py docs/api_reference/_build/html/
+	@echo "✅ API documentation built"
+	@echo "🌐 Opening documentation in browser..."
+	open docs/api_reference/_build/html/reference.html

 API_PKG ?= text-splitters

-api_docs_quick_preview:
-	uv run --no-group test python docs/api_reference/create_api_rst.py $(API_PKG)
-	cd docs/api_reference && uv run make html
-	uv run --no-group test python docs/api_reference/scripts/custom_formatter.py docs/api_reference/_build/html/
+api_docs_quick_preview: clean
+	@echo "⚡ Building quick API preview for $(API_PKG)..."
+	uv run --group docs python docs/api_reference/create_api_rst.py $(API_PKG)
+	cd docs/api_reference && uv run --group docs make html
+	uv run --group docs python docs/api_reference/scripts/custom_formatter.py docs/api_reference/_build/html/
+	@echo "🌐 Opening preview in browser..."
+	open docs/api_reference/_build/html/reference.html

 ## api_docs_clean: Clean the API Reference documentation build artifacts.
 api_docs_clean:
+	@echo "🧹 Cleaning API documentation artifacts..."
 	find ./docs/api_reference -name '*_api_reference.rst' -delete
 	git clean -fdX ./docs/api_reference
 	rm -f docs/api_reference/index.md
+	@echo "✅ API documentation cleaned"

 ## api_docs_linkcheck: Run linkchecker on the API Reference documentation.
 api_docs_linkcheck:
-	uv run --no-group test linkchecker docs/api_reference/_build/html/index.html
+	@echo "🔗 Checking API documentation links..."
+	@if [ -f docs/api_reference/_build/html/index.html ]; then \
+		uv run --group test linkchecker docs/api_reference/_build/html/index.html; \
+	else \
+		echo "⚠️ API documentation not built. Run 'make api_docs_build' first."; \
+		exit 1; \
+	fi
+	@echo "✅ API link check complete"

 ## spell_check: Run codespell on the project.
 spell_check:
-	uv run --no-group test codespell --toml pyproject.toml
+	@echo "✏️ Checking spelling across project..."
+	uv run --group codespell codespell --toml pyproject.toml
+	@echo "✅ Spell check complete"

 ## spell_fix: Run codespell on the project and fix the errors.
 spell_fix:
-	uv run --no-group test codespell --toml pyproject.toml -w
+	@echo "✏️ Fixing spelling errors across project..."
+	uv run --group codespell codespell --toml pyproject.toml -w
+	@echo "✅ Spelling errors fixed"

 ######################
 # LINTING AND FORMATTING
@@ -69,6 +96,7 @@ spell_fix:

 ## lint: Run linting on the project.
 lint lint_package lint_tests:
+	@echo "🔍 Running code linting and checks..."
 	uv run --group lint ruff check docs cookbook
 	uv run --group lint ruff format docs cookbook cookbook --diff
 	git --no-pager grep 'from langchain import' docs cookbook | grep -vE 'from langchain import (hub)' && echo "Error: no importing langchain from root in docs, except for hub" && exit 1 || exit 0
@@ -76,11 +104,16 @@ lint lint_package lint_tests:
 	git --no-pager grep 'api.python.langchain.com' -- docs/docs ':!docs/docs/additional_resources/arxiv_references.mdx' ':!docs/docs/integrations/document_loaders/sitemap.ipynb' || exit 0 && \
 	echo "Error: you should link python.langchain.com/api_reference, not api.python.langchain.com in the docs" && \
 	exit 1
+	@echo "✅ Linting complete"

 ## format: Format the project files.
 format format_diff:
+	@echo "🎨 Formatting project files..."
 	uv run --group lint ruff format docs cookbook
 	uv run --group lint ruff check --fix docs cookbook
+	@echo "✅ Formatting complete"

 update-package-downloads:
+	@echo "📊 Updating package download statistics..."
 	uv run python docs/scripts/packages_yml_get_downloads.py
+	@echo "✅ Package downloads updated"
```
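Taken together, the updated targets suggest a local docs loop along these lines; the `API_PKG` value is just an example (the `ollama` usage appears in the contributing docs further below):

```bash
# Typical docs workflow with the new targets, run from the repo root
make docs_build                              # docs_clean + build, with progress output
make docs_linkcheck                          # now fails early if docs were never built
make api_docs_quick_preview API_PKG=ollama   # small, fast API-reference subset
```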
```diff
@@ -40,9 +40,10 @@ controllable agent workflows.
 ## Why use LangChain?

 LangChain helps developers build applications powered by LLMs through a standard
-interface for models, embeddings, vector stores, and more.
+interface for models, embeddings, vector stores, and more.

 Use LangChain for:

 - **Real-time data augmentation**. Easily connect LLMs to diverse data sources and
 external / internal systems, drawing from LangChain’s vast library of integrations with
 model providers, tools, vector stores, retrievers, and more.
@@ -52,9 +53,10 @@ frontier evolves, adapt quickly — LangChain’s abstractions keep you moving w
 losing momentum.

 ## LangChain’s ecosystem

 While the LangChain framework can be used standalone, it also integrates seamlessly
 with any LangChain product, giving developers a full suite of tools when building LLM
-applications.
+applications.

 To improve your LLM application development, pair LangChain with:

@@ -73,6 +75,7 @@ teams — and iterate quickly with visual prototyping in
 [LangGraph Studio](https://langchain-ai.github.io/langgraph/concepts/langgraph_studio/).

 ## Additional resources

 - [Tutorials](https://python.langchain.com/docs/tutorials/): Simple walkthroughs with
 guided examples on getting started with LangChain.
 - [How-to Guides](https://python.langchain.com/docs/how_to/): Quick, actionable code
```
#### `SECURITY.md` (35 changes)

```diff
@@ -11,6 +11,7 @@ When building such applications developers should remember to follow good securi
 * [**Defense in Depth**](https://en.wikipedia.org/wiki/Defense_in_depth_(computing)): No security technique is perfect. Fine-tuning and good chain design can reduce, but not eliminate, the odds that a Large Language Model (LLM) may make a mistake. It's best to combine multiple layered security approaches rather than relying on any single layer of defense to ensure security. For example: use both read-only permissions and sandboxing to ensure that LLMs are only able to access data that is explicitly meant for them to use.

 Risks of not doing so include, but are not limited to:
+
 * Data corruption or loss.
 * Unauthorized access to confidential information.
 * Compromised performance or availability of critical resources.
@@ -27,10 +28,10 @@ design and secure your applications.

 ## Reporting OSS Vulnerabilities

-LangChain is partnered with [huntr by Protect AI](https://huntr.com/) to provide
-a bounty program for our open source projects.
+LangChain is partnered with [huntr by Protect AI](https://huntr.com/) to provide
+a bounty program for our open source projects.

-Please report security vulnerabilities associated with the LangChain
+Please report security vulnerabilities associated with the LangChain
 open source projects [here](https://huntr.com/bounties/disclose/?target=https%3A%2F%2Fgithub.com%2Flangchain-ai%2Flangchain&validSearch=true).

 Before reporting a vulnerability, please review:
@@ -45,39 +46,39 @@ Before reporting a vulnerability, please review:

 The following packages and repositories are eligible for bug bounties:

-- langchain-core
-- langchain (see exceptions)
-- langchain-community (see exceptions)
-- langgraph
-- langserve
+* langchain-core
+* langchain (see exceptions)
+* langchain-community (see exceptions)
+* langgraph
+* langserve

 ### Out of Scope Targets

 All out of scope targets defined by huntr as well as:

-- **langchain-experimental**: This repository is for experimental code and is not
+* **langchain-experimental**: This repository is for experimental code and is not
   eligible for bug bounties (see [package warning](https://pypi.org/project/langchain-experimental/)), bug reports to it will be marked as interesting or waste of
   time and published with no bounty attached.
-- **tools**: Tools in either langchain or langchain-community are not eligible for bug
+* **tools**: Tools in either langchain or langchain-community are not eligible for bug
   bounties. This includes the following directories
-  - libs/langchain/langchain/tools
-  - libs/community/langchain_community/tools
-  - Please review the [Best Practices](#best-practices)
+  * libs/langchain/langchain/tools
+  * libs/community/langchain_community/tools
+  * Please review the [Best Practices](#best-practices)
   for more details, but generally tools interact with the real world. Developers are
   expected to understand the security implications of their code and are responsible
   for the security of their tools.
-- Code documented with security notices. This will be decided done on a case by
+* Code documented with security notices. This will be decided on a case by
   case basis, but likely will not be eligible for a bounty as the code is already
   documented with guidelines for developers that should be followed for making their
   application secure.
-- Any LangSmith related repositories or APIs (see [Reporting LangSmith Vulnerabilities](#reporting-langsmith-vulnerabilities)).
+* Any LangSmith related repositories or APIs (see [Reporting LangSmith Vulnerabilities](#reporting-langsmith-vulnerabilities)).

 ## Reporting LangSmith Vulnerabilities

 Please report security vulnerabilities associated with LangSmith by email to `security@langchain.dev`.

-- LangSmith site: https://smith.langchain.com
-- SDK client: https://github.com/langchain-ai/langsmith-sdk
+* LangSmith site: [https://smith.langchain.com](https://smith.langchain.com)
+* SDK client: [https://github.com/langchain-ai/langsmith-sdk](https://github.com/langchain-ai/langsmith-sdk)

 ### Other Security Concerns
```
```diff
@@ -20,11 +20,7 @@
    "cell_type": "markdown",
    "id": "5939a54c-3198-4ba4-8346-1cc088c473c0",
    "metadata": {},
-   "source": [
-    "##### You can embed text in the same VectorDB space as images, and retreive text and images as well based on input text or image.\n",
-    "##### Following link demonstrates that.\n",
-    "<a> https://python.langchain.com/v0.2/docs/integrations/text_embedding/open_clip/ </a>"
-   ]
+   "source": "##### You can embed text in the same VectorDB space as images, and retrieve text and images as well based on input text or image.\n##### Following link demonstrates that.\n<a> https://python.langchain.com/v0.2/docs/integrations/text_embedding/open_clip/ </a>"
   },
   {
    "cell_type": "markdown",
@@ -600,4 +596,4 @@
  },
  "nbformat": 4,
  "nbformat_minor": 5
-}
+}
```
```diff
@@ -552,9 +552,7 @@
    "cell_type": "markdown",
    "id": "77deb6a0-0950-450a-916a-f2a029676c20",
    "metadata": {},
-   "source": [
-    "**Appending all retreived documents in a single document**"
-   ]
+   "source": "**Appending all retrieved documents in a single document**"
   },
   {
    "cell_type": "code",
@@ -758,4 +756,4 @@
  },
  "nbformat": 4,
  "nbformat_minor": 5
-}
+}
```
```diff
@@ -1,9 +1,9 @@
-# we build the docs in these stages:
-# 1. install vercel and python dependencies
-# 2. copy files from "source dir" to "intermediate dir"
-# 2. generate files like model feat table, etc in "intermediate dir"
-# 3. copy files to their right spots (e.g. langserve readme) in "intermediate dir"
-# 4. build the docs from "intermediate dir" to "output dir"
+# We build the docs in these stages:
+# 1. Install vercel and python dependencies
+# 2. Copy files from "source dir" to "intermediate dir"
+# 2. Generate files like model feat table, etc in "intermediate dir"
+# 3. Copy files to their right spots (e.g. langserve readme) in "intermediate dir"
+# 4. Build the docs from "intermediate dir" to "output dir"

 SOURCE_DIR = docs/
 INTERMEDIATE_DIR = build/intermediate/docs
@@ -18,32 +18,45 @@ PORT ?= 3001
 clean:
 	rm -rf build

+clean-cache:
+	rm -rf build .venv/deps_installed
+
 install-vercel-deps:
 	yum -y -q update
 	yum -y -q install gcc bzip2-devel libffi-devel zlib-devel wget tar gzip rsync -y

 install-py-deps:
-	python3 -m venv .venv
-	$(PYTHON) -m pip install -q --upgrade pip
-	$(PYTHON) -m pip install -q --upgrade uv
-	$(PYTHON) -m uv pip install -q --pre -r vercel_requirements.txt
-	$(PYTHON) -m uv pip install -q --pre $$($(PYTHON) scripts/partner_deps_list.py) --overrides vercel_overrides.txt
+	@echo "📦 Installing Python dependencies..."
+	@if [ ! -d .venv ]; then python3 -m venv .venv; fi
+	@if [ ! -f .venv/deps_installed ]; then \
+		$(PYTHON) -m pip install -q --upgrade pip --disable-pip-version-check; \
+		$(PYTHON) -m pip install -q --upgrade uv; \
+		$(PYTHON) -m uv pip install -q --pre -r vercel_requirements.txt; \
+		$(PYTHON) -m uv pip install -q --pre $$($(PYTHON) scripts/partner_deps_list.py) --overrides vercel_overrides.txt; \
+		touch .venv/deps_installed; \
+	fi
+	@echo "✅ Dependencies installed"

 generate-files:
+	@echo "📄 Generating documentation files..."
 	mkdir -p $(INTERMEDIATE_DIR)
 	cp -rp $(SOURCE_DIR)/* $(INTERMEDIATE_DIR)

-	$(PYTHON) scripts/tool_feat_table.py $(INTERMEDIATE_DIR)
-
-	$(PYTHON) scripts/kv_store_feat_table.py $(INTERMEDIATE_DIR)
-
-	$(PYTHON) scripts/partner_pkg_table.py $(INTERMEDIATE_DIR)
-
-	curl https://raw.githubusercontent.com/langchain-ai/langserve/main/README.md | sed 's/<=/\<=/g' > $(INTERMEDIATE_DIR)/langserve.md
+	@if [ ! -f build/langserve_readme_cache.md ] || [ $$(find build/langserve_readme_cache.md -mtime +1 -print) ]; then \
+		echo "🌐 Downloading LangServe README..."; \
+		curl -s https://raw.githubusercontent.com/langchain-ai/langserve/main/README.md | sed 's/<=/\<=/g' > build/langserve_readme_cache.md; \
+	fi
+	cp build/langserve_readme_cache.md $(INTERMEDIATE_DIR)/langserve.md
 	cp ../SECURITY.md $(INTERMEDIATE_DIR)/security.md
-	$(PYTHON) scripts/resolve_local_links.py $(INTERMEDIATE_DIR)/langserve.md https://github.com/langchain-ai/langserve/tree/main/
+	@echo "🔧 Generating feature tables and processing links..."
+	$(PYTHON) scripts/tool_feat_table.py $(INTERMEDIATE_DIR) & \
+	$(PYTHON) scripts/kv_store_feat_table.py $(INTERMEDIATE_DIR) & \
+	$(PYTHON) scripts/partner_pkg_table.py $(INTERMEDIATE_DIR) & \
+	$(PYTHON) scripts/resolve_local_links.py $(INTERMEDIATE_DIR)/langserve.md https://github.com/langchain-ai/langserve/tree/main/ & \
+	wait
+	@echo "✅ Files generated"

 copy-infra:
+	@echo "📂 Copying infrastructure files..."
 	mkdir -p $(OUTPUT_NEW_DIR)
 	cp -r src $(OUTPUT_NEW_DIR)
 	cp vercel.json $(OUTPUT_NEW_DIR)
@@ -55,15 +68,22 @@ copy-infra:
 	cp -r static $(OUTPUT_NEW_DIR)
 	cp -r ../libs/cli/langchain_cli/integration_template $(OUTPUT_NEW_DIR)/src/theme
 	cp yarn.lock $(OUTPUT_NEW_DIR)
+	@echo "✅ Infrastructure files copied"

 render:
+	@echo "📓 Converting notebooks (this may take a while)..."
 	$(PYTHON) scripts/notebook_convert.py $(INTERMEDIATE_DIR) $(OUTPUT_NEW_DOCS_DIR)
+	@echo "✅ Notebooks converted"

 md-sync:
+	@echo "📝 Syncing markdown files..."
 	rsync -avmq --include="*/" --include="*.mdx" --include="*.md" --include="*.png" --include="*/_category_.yml" --exclude="*" $(INTERMEDIATE_DIR)/ $(OUTPUT_NEW_DOCS_DIR)
+	@echo "✅ Markdown files synced"

 append-related:
+	@echo "🔗 Appending related links..."
 	$(PYTHON) scripts/append_related_links.py $(OUTPUT_NEW_DOCS_DIR)
+	@echo "✅ Related links appended"

 generate-references:
 	$(PYTHON) scripts/generate_api_reference_links.py --docs_dir $(OUTPUT_NEW_DOCS_DIR)
@@ -71,6 +91,10 @@ generate-references:
 update-md: generate-files md-sync

 build: install-py-deps generate-files copy-infra render md-sync append-related
+	@echo ""
+	@echo "🎉 Documentation build complete!"
+	@echo "📖 To view locally, run: cd docs && make start"
+	@echo ""

 vercel-build: install-vercel-deps build generate-references
 	rm -rf docs
@@ -84,4 +108,9 @@ vercel-build: install-vercel-deps build generate-references
 	NODE_OPTIONS="--max-old-space-size=5000" yarn run docusaurus build

 start:
-	cd $(OUTPUT_NEW_DIR) && yarn && yarn start --port=$(PORT)
+	@echo "🚀 Starting documentation server on port $(PORT)..."
+	@echo "📖 Installing Node.js dependencies..."
+	cd $(OUTPUT_NEW_DIR) && yarn install --silent
+	@echo "🌐 Starting server at http://localhost:$(PORT)"
+	@echo "Press Ctrl+C to stop the server"
+	cd $(OUTPUT_NEW_DIR) && yarn start --port=$(PORT)
```
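The rewritten `install-py-deps` is a standard sentinel-file cache: the expensive install only runs when the marker file is missing, and `make clean-cache` deletes it to force a fresh install. A minimal sketch of the idiom with generic names (the requirements file here is hypothetical, not the actual target):

```bash
# Sentinel-file caching sketch: skip the install when the marker exists
if [ ! -f .venv/deps_installed ]; then
    python3 -m venv .venv                        # create the env on first run
    .venv/bin/pip install -r requirements.txt    # hypothetical requirements file
    touch .venv/deps_installed                   # mark success only after install
fi
```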
```diff
@@ -262,6 +262,8 @@ myst_enable_extensions = ["colon_fence"]

 # generate autosummary even if no references
 autosummary_generate = True
+# Don't fail on autosummary import warnings
+autosummary_ignore_module_all = False

 html_copy_source = False
 html_show_sourcelink = False
```
```diff
@@ -202,6 +202,12 @@ def _load_package_modules(
         if file_path.name.startswith("_"):
             continue

+        if "integration_template" in file_path.parts:
+            continue
+
+        if "project_template" in file_path.parts:
+            continue
+
         relative_module_name = file_path.relative_to(package_path)

         # Skip if any module part starts with an underscore
@@ -495,15 +501,7 @@ def _package_namespace(package_name: str) -> str:

 def _package_dir(package_name: str = "langchain") -> Path:
     """Return the path to the directory containing the documentation."""
-    if package_name in (
-        "langchain",
-        "experimental",
-        "community",
-        "core",
-        "cli",
-        "text-splitters",
-        "standard-tests",
-    ):
+    if (ROOT_DIR / "libs" / package_name).exists():
         return ROOT_DIR / "libs" / package_name / _package_namespace(package_name)
     else:
         return (
@@ -592,7 +590,12 @@ For the legacy API reference hosted on ReadTheDocs see [https://api.python.langc
     if integrations:
         integration_headers = [
             " ".join(
-                custom_names.get(x, x.title().replace("ai", "AI").replace("db", "DB"))
+                custom_names.get(
+                    x,
+                    x.title().replace("db", "DB")
+                    if dir_ == "langchain_v1"
+                    else x.title().replace("ai", "AI").replace("db", "DB"),
+                )
                 for x in dir_.split("-")
             )
             for dir_ in integrations
@@ -660,18 +663,12 @@ def main(dirs: Optional[list] = None) -> None:
     print("Starting to build API reference files.")
     if not dirs:
         dirs = [
-            dir_
-            for dir_ in os.listdir(ROOT_DIR / "libs")
-            if dir_ not in ("cli", "partners", "packages.yml")
-            and "pyproject.toml" in os.listdir(ROOT_DIR / "libs" / dir_)
+            p.parent.name
+            for p in (ROOT_DIR / "libs").rglob("pyproject.toml")
+            # Exclude packages that are not directly under libs/ or libs/partners/
+            if p.parent.parent.name in ("libs", "partners")
         ]
-        dirs += [
-            dir_
-            for dir_ in os.listdir(ROOT_DIR / "libs" / "partners")
-            if os.path.isdir(ROOT_DIR / "libs" / "partners" / dir_)
-            and "pyproject.toml" in os.listdir(ROOT_DIR / "libs" / "partners" / dir_)
-        ]
-    for dir_ in dirs:
+    for dir_ in sorted(dirs):
         # Skip any hidden directories
         # Some of these could be present by mistake in the code base
         # e.g., .pytest_cache from running tests from the wrong location.
@@ -682,7 +679,7 @@ def main(dirs: Optional[list] = None) -> None:
     print("Building package:", dir_)
     _build_rst_file(package_name=dir_)

-    _build_index(dirs)
+    _build_index(sorted(dirs))
     print("API reference files built.")
```
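For intuition, the new discovery logic in `main()` boils down to "every directory directly under `libs/` or `libs/partners/` that contains a `pyproject.toml`, sorted". A rough shell equivalent of that `rglob` plus parent filter, for illustration only:

```bash
# Approximate the new package discovery from the diff above
find libs -name pyproject.toml \
  | awk -F/ 'NF == 3 || (NF == 4 && $2 == "partners") { print $(NF-1) }' \
  | sort
```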
```diff
@@ -1,4 +1,4 @@
-# Contribute Code
+# Contribute code

 If you would like to add a new feature or update an existing one, please read the resources below before getting started:
```
````diff
@@ -3,7 +3,7 @@
 This guide walks through how to run the repository locally and check in your first code.
 For a [development container](https://containers.dev/), see the [.devcontainer folder](https://github.com/langchain-ai/langchain/tree/master/.devcontainer).

-## Dependency Management: `uv` and other env/dependency managers
+## Dependency management: `uv` and other env/dependency managers

 This project utilizes [uv](https://docs.astral.sh/uv/) v0.5+ as a dependency manager.
@@ -37,7 +37,7 @@ For this quickstart, start with `langchain`:
 cd libs/langchain
 ```

-## Local Development Dependencies
+## Local development dependencies

 Install development requirements (for running langchain, running examples, linting, formatting, tests, and coverage):
@@ -64,12 +64,6 @@ To run unit tests:
 make test
 ```

-To run unit tests in Docker:
-
-```bash
-make docker_tests
-```
-
 There are also [integration tests and code-coverage](../testing.mdx) available.

 ### Developing langchain_core
@@ -81,11 +75,11 @@ cd libs/core
 make test
 ```

-## Formatting and Linting
+## Formatting and linting

 Run these locally before submitting a PR; the CI system will check also.

-### Code Formatting
+### Code formatting

 Formatting for this project is done via [ruff](https://docs.astral.sh/ruff/rules/).
@@ -163,7 +157,7 @@ If codespell is incorrectly flagging a word, you can skip spellcheck for that wo
 ignore-words-list = 'momento,collison,ned,foor,reworkd,parth,whats,aapply,mysogyny,unsecure'
 ```

-## Working with Optional Dependencies
+## Working with optional dependencies

 `langchain`, `langchain-community`, and `langchain-experimental` rely on optional dependencies to keep these packages lightweight.
````
````diff
@@ -1,4 +1,4 @@
-# Contribute Documentation
+# Contribute documentation

 Documentation is a vital part of LangChain. We welcome both new documentation for new features and
 community improvements to our current documentation. Please read the resources below before getting started:
@@ -12,12 +12,11 @@ It covers a wide array of topics, including tutorials, use cases, integrations,
 and more, offering extensive guidance on building with LangChain.
 The content for this documentation lives in the `/docs` directory of the monorepo.
 2. In-code Documentation: This is documentation of the codebase itself, which is also
-used to generate the externally facing [API Reference](https://python.langchain.com/api_reference/langchain/index.html).
+used to generate the externally facing [API Reference](https://python.langchain.com/api_reference/).
 The content for the API reference is autogenerated by scanning the docstrings in the codebase. For this reason we ask that
 developers document their code well.

-The `API Reference` is largely autogenerated by [sphinx](https://www.sphinx-doc.org/en/master/)
-from the code and is hosted by [Read the Docs](https://readthedocs.org/).
+The API Reference is largely autogenerated by [sphinx](https://www.sphinx-doc.org/en/master/) from the code.

 We appreciate all contributions to the documentation, whether it be fixing a typo,
 adding a new tutorial or example and whether it be in the main documentation or the API Reference.
@@ -25,7 +24,7 @@ adding a new tutorial or example and whether it be in the main documentation or
 Similar to linting, we recognize documentation can be annoying. If you do not want
 to do it, please contact a project maintainer, and they can help you with it. We do not want this to be a blocker for good code getting contributed.

-## 📜 Main Documentation
+## 📜 Main documentation

 The content for the main documentation is located in the `/docs` directory of the monorepo.
@@ -42,7 +41,7 @@ After modifying the documentation:
 3. Make a pull request with the changes.
 4. You can preview and verify that the changes are what you wanted by clicking the `View deployment` or `Visit Preview` buttons on the pull request `Conversation` page. This will take you to a preview of the documentation changes.

-## ⚒️ Linting and Building Documentation Locally
+## ⚒️ Linting and building documentation locally

 After writing up the documentation, you may want to lint and build the documentation
 locally to ensure that it looks good and is free of errors.
@@ -57,20 +56,44 @@ The code that builds the documentation is located in the `/docs` directory of th

 In the following commands, the prefix `api_` indicates that those are operations for the API Reference.

-Before building the documentation, it is always a good idea to clean the build directory:
-
-```bash
-make docs_clean
-make api_docs_clean
-```
-
-Next, you can build the documentation as outlined below:
+You can build the documentation as outlined below:

 ```bash
 make docs_build
 make api_docs_build
 ```

+### Viewing documentation locally
+
+After building the main documentation, you can view it locally by starting a development server:
+
+```bash
+# For main documentation (after running `make docs_build`)
+cd docs && make start
+```
+
+This will start a development server where you can view the documentation in your browser. The exact url will be shown to you during the start process. The server will automatically reload when you make changes to the documentation files under the `build/` directory (e.g. for temporary tests - changes you wish to persist should be put under `docs/docs/`).
+
+:::tip
+
+You can specify a different port by setting the `PORT` environment variable:
+
+```bash
+cd docs && PORT=3000 make start
+```
+
+:::
+
+The API Reference documentation is built as static HTML files and will be automatically opened directly in your browser.
+
+You can also view the API Reference for a specific package by specifying the package name and installing the package if necessary dependencies:
+
+```bash
+# Opens the API Reference for the `ollama` package in your default browser
+uv pip install -e libs/partners/ollama
+make api_docs_quick_preview API_PKG=ollama
+```
+
 :::tip

 The `make api_docs_build` command takes a long time. If you're making cosmetic changes to the API docs and want to see how they look, use:
@@ -79,18 +102,28 @@ The `make api_docs_build` command takes a long time. If you're making cosmetic c
 make api_docs_quick_preview
 ```

-which will just build a small subset of the API reference.
+which will just build a small subset of the API reference (the `text-splitters` package).

 :::

-Finally, run the link checker to ensure all links are valid:
+Finally, run the link checker from the project root to ensure all links are valid:

 ```bash
 make docs_linkcheck
 make api_docs_linkcheck
 ```

+To clean up the documentation build artifacts, you can run:
+
+```bash
+make clean
+
+# Or to clean specific documentation artifacts
+make docs_clean
+make api_docs_clean
+```
+
-### Linting and Formatting
+### Formatting and linting

 The Main Documentation is linted from the **monorepo root**. To lint the main documentation, run the following from there:
@@ -104,9 +137,9 @@ If you have formatting-related errors, you can fix them automatically with:
 make format
 ```

-## ⌨️ In-code Documentation
+## ⌨️ In-code documentation

-The in-code documentation is largely autogenerated by [sphinx](https://www.sphinx-doc.org/en/master/) from the code and is hosted by [Read the Docs](https://readthedocs.org/).
+The in-code documentation is largely autogenerated by [sphinx](https://www.sphinx-doc.org/en/master/) from the code following [reStructuredText](https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html).

 For the API reference to be useful, the codebase must be well-documented. This means that all functions, classes, and methods should have a docstring that explains what they do, what the arguments are, and what the return value is. This is a good practice in general, but it is especially important for LangChain because the API reference is the primary resource for developers to understand how to use the codebase.
@@ -141,16 +174,16 @@ def my_function(arg1: int, arg2: str) -> float:
     return 3.14
 ```

-### Linting and Formatting
+### Formatting and linting

 The in-code documentation is linted from the directories belonging to the packages
 being documented.

-For example, if you're working on the `langchain-community` package, you would change
-the working directory to the `langchain-community` directory:
+For example, if you're working on the `langchain-ollama` package, you would change
+the working directory to the the package directory:

 ```bash
-cd [root]/libs/langchain-community
+cd [root]/libs/partners/ollama
 ```

 Then you can run the following commands to lint and format the in-code documentation:
@@ -160,9 +193,9 @@ make format
 make lint
 ```

-## Verify Documentation Changes
+## Verify documentation changes

 After pushing documentation changes to the repository, you can preview and verify that the changes are
 what you wanted by clicking the `View deployment` or `Visit Preview` buttons on the pull request `Conversation` page.
 This will take you to a preview of the documentation changes.
-This preview is created by [Vercel](https://vercel.com/docs/getting-started-with-vercel).
+This preview is created by [Vercel](https://vercel.com/docs/getting-started-with-vercel).
````
```diff
@@ -2,7 +2,7 @@
 sidebar_class_name: "hidden"
 ---

-# Documentation Style Guide
+# Documentation style guide

 As LangChain continues to grow, the amount of documentation required to cover the various concepts and integrations continues to grow too.
 This page provides guidelines for anyone writing documentation for LangChain and outlines some of our philosophies around
@@ -158,3 +158,5 @@ Be concise, including in code samples.
 - Use bullet points and numbered lists to break down information into easily digestible chunks
 - Use tables (especially for **Reference** sections) and diagrams often to present information visually
 - Include the table of contents for longer documentation pages to help readers navigate the content, but hide it for shorter pages
+
+Next, see the [documentation setup guide](setup.mdx) to get started with writing documentation for LangChain.
```
```diff
@@ -1,4 +1,4 @@
-# How-to Guides
+# How-to guides

 - [**Documentation**](documentation/index.mdx): Help improve our docs, including this one!
 - [**Code**](code/index.mdx): Help us write code, fix bugs, or improve our infrastructure.
```
```diff
@@ -3,7 +3,7 @@ pagination_prev: null
 pagination_next: contributing/how_to/integrations/package
 ---

-# Contribute Integrations
+# Contribute integrations

 Integrations are a core component of LangChain.
 LangChain provides standard interfaces for several different components (language models, vector stores, etc) that are crucial when building LLM applications.
@@ -16,7 +16,7 @@ LangChain provides standard interfaces for several different components (languag
 - **Best Practices:** Through their standard interface, LangChain components encourage and facilitate best practices (streaming, async, etc)


-## Components to Integrate
+## Components to integrate

 :::info

@@ -71,7 +71,7 @@ In order to contribute an integration, you should follow these steps:
 5. [Optional] Open and merge a PR to add documentation for your integration to the official LangChain docs.
 6. [Optional] Engage with the LangChain team for joint co-marketing ([see below](#co-marketing)).

-## Co-Marketing
+## Co-marketing

 With over 20 million monthly downloads, LangChain has a large audience of developers
 building LLM applications. Beyond just listing integrations, we aim to highlight
@@ -87,5 +87,5 @@ Here are some heuristics for types of content we are excited to promote:
 - **End-to-end applications:** End-to-end applications are great resources for developers looking to build. We prefer to highlight applications that are more complex/agentic in nature, and that use [LangGraph](https://github.com/langchain-ai/langgraph) as the orchestration framework. We get particularly excited about anything involving long-term memory, human-in-the-loop interaction patterns, or multi-agent architectures.
 - **Research:** We love highlighting novel research! Whether it is research built on top of LangChain or that integrates with it.

-## Further Reading
+## Further reading
 To get started, let's learn [how to implement an integration package](/docs/contributing/how_to/integrations/package/) for LangChain.
```
```diff
@@ -358,7 +358,7 @@ a schema for the LLM to fill out when calling the tool. Similar to the `name` an
 description (part of `Field(..., description="description")`) are passed to the LLM,
 and the values in these fields should be concise and LLM-usable.

-### Run Methods
+### Run methods

 `_run` is the main method that should be implemented in the subclass. This method
 takes in the arguments from `args_schema` and runs the tool, returning a string
@@ -469,6 +469,6 @@ import RetrieverSource from '/src/theme/integration_template/integration_templat

 ---

-## Next Steps
+## Next steps

 Now that you've implemented your package, you can move on to [testing your integration](../standard_tests) for your integration and successfully run them.
```
@@ -10,7 +10,7 @@ Unit tests run on every pull request, so they should be fast and reliable.
Integration tests run once a day, and they require more setup, so they should be reserved for confirming interface points with external services.

-## Unit Tests
+## Unit tests

Unit tests cover modular logic that does not require calls to outside APIs.
If you add new logic, please add a unit test.

@@ -27,19 +27,13 @@ To run unit tests:

```bash
make test
```

-To run unit tests in Docker:
-
-```bash
-make docker_tests
-```
-
To run a specific test:

```bash
TEST_FILE=tests/unit_tests/test_imports.py make test
```

-## Integration Tests
+## Integration tests

Integration tests cover logic that requires making calls to outside APIs (often integration with other services).
If you add support for a new external API, please add a new integration test.
@@ -12,7 +12,7 @@ More coming soon! We are working on tutorials to help you make your first contri
- [**Make your first docs PR**](tutorials/docs.mdx)

-## How-to Guides
+## How-to guides

- [**Documentation**](how_to/documentation/index.mdx): Help improve our docs, including this one!
- [**Code**](how_to/code/index.mdx): Help us write code, fix bugs, or improve our infrastructure.
@@ -50,7 +50,7 @@ There are other files in the root directory level, but their presence should be
## Documentation

The `/docs` directory contains the content for the documentation that is shown
-at https://python.langchain.com/ and the associated API Reference https://python.langchain.com/api_reference/langchain/index.html.
+at [python.langchain.com](https://python.langchain.com/) and the associated [API Reference](https://python.langchain.com/api_reference/).

See the [documentation](../how_to/documentation/index.mdx) guidelines to learn how to contribute to the documentation.
@@ -8,7 +8,7 @@ This tutorial will guide you through making a simple documentation edit, like co
---

-## Editing a Documentation Page on GitHub
+## Editing a documentation page on GitHub

Sometimes you want to make a small change, like fixing a typo, and the easiest way to do this is to use GitHub's editor directly.

@@ -42,10 +42,14 @@ Sometimes you want to make a small change, like fixing a typo, and the easiest w
- Give your PR a title like `docs: Fix typo in X section`.
- Follow the checklist in the PR description template.

-## Getting a Review
+## Getting a review

Once you've submitted the pull request, it will be reviewed by the maintainers. You may receive feedback or requests for changes. Keep an eye on the PR to address any comments.

Docs PRs are typically reviewed within a few days, but it may take longer depending on the complexity of the change and the availability of maintainers.

For more information on reviews, see the [Review Process](../reference/review_process.mdx).

+## More information
+
+See our [how-to guides](../how_to/documentation/index.mdx) for more information on contributing to documentation:
@@ -92,7 +92,7 @@ the support of DB2 vector store and vector search.
See detailed usage examples in the guide [here](/docs/integrations/vectorstores/db2).

-Installation: This is a seperate package for vector store feature only and can be run
+Installation: This is a separate package for vector store feature only and can be run
without the `langchain-ibm` package.

```bash
pip install -U langchain-db2
```
@@ -20,7 +20,7 @@ pip install langchain-predictionguard
|---|---|---|---------------------------------------------------------|-------------------------------------------------------------------------------|
|Chat|Build Chat Bots|[Chat](https://docs.predictionguard.com/api-reference/api-reference/chat-completions)| `from langchain_predictionguard import ChatPredictionGuard` | [ChatPredictionGuard.ipynb](/docs/integrations/chat/predictionguard) |
|Completions|Generate Text|[Completions](https://docs.predictionguard.com/api-reference/api-reference/completions)| `from langchain_predictionguard import PredictionGuard` | [PredictionGuard.ipynb](/docs/integrations/llms/predictionguard) |
-|Text Embedding|Embed String to Vectores|[Embeddings](https://docs.predictionguard.com/api-reference/api-reference/embeddings)| `from langchain_predictionguard import PredictionGuardEmbeddings` | [PredictionGuardEmbeddings.ipynb](/docs/integrations/text_embedding/predictionguard) |
+|Text Embedding|Embed String to Vectors|[Embeddings](https://docs.predictionguard.com/api-reference/api-reference/embeddings)| `from langchain_predictionguard import PredictionGuardEmbeddings` | [PredictionGuardEmbeddings.ipynb](/docs/integrations/text_embedding/predictionguard) |

## Getting Started
@@ -1,7 +1,6 @@
# PremAI

-[PremAI](https://premai.io/) is an all-in-one platform that simplifies the creation of robust, production-ready applications powered by Generative AI. By streamlining the development process, PremAI allows you to concentrate on enhancing user experience and driving overall growth for your application. You can quickly start using our platform [here](https://docs.premai.io/quick-start).
+[PremAI](https://premai.io/) is an all-in-one platform that simplifies the creation of robust, production-ready applications powered by Generative AI. By streamlining the development process, PremAI allows you to concentrate on enhancing user experience and driving overall growth for your application. You can quickly start using [our platform](https://docs.premai.io/quick-start).

## ChatPremAI

@@ -26,10 +25,9 @@ from langchain_community.chat_models import ChatPremAI

Once we have imported our required modules, let's set up our client. For now, let's assume that our `project_id` is `8`, but make sure you use your own project ID; otherwise it will throw an error.

To use LangChain with Prem, you do not need to pass any model name or set any parameters with our chat client. By default, it will use the model name and parameters set in the [LaunchPad](https://docs.premai.io/get-started/launchpad).

> Note: If you change the `model` or any other parameters like `temperature` or `max_tokens` while setting the client, it will override the existing default configurations that were used in LaunchPad.

```python
import os
@@ -43,9 +41,9 @@ chat = ChatPremAI(project_id=1234, model_name="gpt-4o")
```
### Chat Completions

`ChatPremAI` supports two methods: `invoke` (which is the same as `generate`) and `stream`.

The first one will give us a static result, whereas the second one will stream tokens one by one. Here's how you can generate chat-like completions:

```python
human_message = HumanMessage(content="Who are you?")
@@ -72,18 +70,17 @@ chat.invoke(
)
```

> If you place a system prompt here, it will override the system prompt that was set while deploying the application from the platform.

> You can find all the optional parameters [here](https://docs.premai.io/get-started/sdk#optional-parameters). Any parameters other than [these supported parameters](https://docs.premai.io/get-started/sdk#optional-parameters) will be automatically removed before calling the model.

### Native RAG Support with Prem Repositories

Prem Repositories allow users to upload documents (.txt, .pdf, etc.) and connect those repositories to the LLMs. You can think of Prem repositories as native RAG, where each repository can be considered a vector database. You can connect multiple repositories. You can learn more about repositories [here](https://docs.premai.io/get-started/repositories).

Repositories are also supported in LangChain PremAI. Here is how you can do it.

```python
query = "Which models are used for dense retrieval"
repository_ids = [1985,]
@@ -94,13 +91,13 @@ repositories = dict(
)
```

First, we start by defining our repository with some repository IDs. Make sure that the IDs are valid repository IDs. You can learn more about how to get the repository ID [here](https://docs.premai.io/get-started/repositories).

> Please note: Similar to `model_name`, when you invoke the argument `repositories`, you are potentially overriding the repositories connected in the LaunchPad.

Now, we connect the repository with our chat object to invoke RAG-based generations.

```python
import json

response = chat.invoke(query, max_tokens=100, repositories=repositories)
@@ -109,7 +106,7 @@ print(response.content)
print(json.dumps(response.response_metadata, indent=4))
```

This is what an output looks like:

```bash
Dense retrieval models typically include:
@@ -134,11 +131,11 @@ Dense retrieval models typically include:
```

So, this also means that you do not need to build your own RAG pipeline when using the Prem Platform. Prem uses its own RAG technology to deliver best-in-class performance for Retrieval Augmented Generation.

> Ideally, you do not need to connect repository IDs here to get Retrieval Augmented Generation. You can still get the same result if you have connected the repositories in the Prem platform.

### Streaming

In this section, let's see how we can stream tokens using LangChain and PremAI. Here's how you do it.

```python
import sys
@@ -163,16 +160,15 @@ for chunk in chat.stream(
```

This will stream tokens one after the other.

> Please note: As of now, RAG with streaming is not supported. However, we still support it with our API. You can learn more about that [here](https://docs.premai.io/get-started/chat-completion-sse).

## Prem Templates

Writing prompt templates can be super messy. Prompt templates are long, hard to manage, and must be continuously tweaked to improve while staying consistent throughout the application.

With **Prem**, writing and managing prompts can be super easy. The **_Templates_** tab inside the [launchpad](https://docs.premai.io/get-started/launchpad) helps you write as many prompts as you need and use them inside the SDK to run your application with those prompts. You can read more about Prompt Templates [here](https://docs.premai.io/get-started/prem-templates).

To use Prem Templates natively with LangChain, you need to pass an `id` to the `HumanMessage`. This `id` should be the name of the variable in your prompt template, and the `content` in `HumanMessage` should be the value of that variable.

For example, if your prompt template was this:

@@ -198,7 +194,7 @@ template_id = "78069ce8-xxxxx-xxxxx-xxxx-xxx"

```python
response = chat.invoke([human_message], template_id=template_id)
```
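
For illustration, here is a minimal sketch of constructing that message (the variable name `name` and its value are hypothetical; use the variable names from your own template):

```python
from langchain_core.messages import HumanMessage

# The message `id` carries the template variable's name;
# `content` carries the value to substitute for it.
human_message = HumanMessage(content="Shawn", id="name")
```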

Prem Templates are also available for streaming.
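
A minimal sketch of that (assuming `stream` accepts the same `template_id` keyword as `invoke`, per the sections above):

```python
import sys

# Stream a templated completion token by token.
for chunk in chat.stream([human_message], template_id=template_id):
    sys.stdout.write(chunk.content)
    sys.stdout.flush()
```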
## Prem Embeddings

@@ -215,7 +211,7 @@ if os.environ.get("PREMAI_API_KEY") is None:

We support many state-of-the-art embedding models. You can view our list of supported LLMs and embedding models [here](https://docs.premai.io/get-started/supported-models). For now, let's use the `text-embedding-3-large` model for this example.

```python
@@ -231,7 +227,7 @@ print(query_result[:5])
```

:::note
Setting the `model_name` argument is mandatory for PremAIEmbeddings, unlike chat.
:::

Finally, let's embed some sample documents:

@@ -254,11 +250,13 @@ print(doc_result[0][:5])

```python
print(f"Dimension of embeddings: {len(query_result)}")
```

Dimension of embeddings: 3072

```python
doc_result[:5]
```

>Result:
>
>[-0.02129288576543331,

@@ -269,20 +267,20 @@ doc_result[:5]

## Tool/Function Calling

LangChain PremAI supports tool/function calling. Tool/function calling allows a model to respond to a given prompt by generating output that matches a user-defined schema.

- You can learn all about tool calling in detail [in our documentation here](https://docs.premai.io/get-started/function-calling).
- You can learn more about LangChain tool calling in [this part of the docs](https://python.langchain.com/v0.1/docs/modules/model_io/chat/function_calling).

**NOTE:**

> The current version of LangChain ChatPremAI does not support function/tool calling with streaming. Streaming support along with function calling will come soon.

### Passing tools to model

In order to pass tools and let the LLM choose the tool it needs to call, we need to pass a tool schema. A tool schema is the function definition along with a proper docstring describing what the function does, what each argument of the function is, and so on. Below are some simple arithmetic functions with their schemas.

**NOTE:**

> When defining a function/tool schema, do not forget to add information about the function arguments; otherwise it will throw an error.

```python
@@ -320,27 +318,28 @@ def multiply(a: int, b: int) -> int:
```
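
The function bodies are elided by the hunk above; a representative sketch of what such schema-bearing tools look like (names and docstrings illustrative, not the file's exact contents):

```python
def add(a: int, b: int) -> int:
    """Add two integers.

    Args:
        a: First integer to add.
        b: Second integer to add.
    """
    return a + b


def multiply(a: int, b: int) -> int:
    """Multiply two integers.

    Args:
        a: First integer to multiply.
        b: Second integer to multiply.
    """
    return a * b
```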
### Binding tool schemas with our LLM

We will now use the `bind_tools` method to convert our functions into "tools" and bind them to the model. This means we are going to pass this tool information every time we invoke the model.

```python
tools = [add, multiply]
llm_with_tools = chat.bind_tools(tools)
```

After this, we get the response from the model, which is now bound to the tools.

```python
query = "What is 3 * 12? Also, what is 11 + 49?"

messages = [HumanMessage(query)]
ai_msg = llm_with_tools.invoke(messages)
```

As we can see, when our chat model is bound to tools, it calls the correct set of tools sequentially based on the given prompt.

```python
ai_msg.tool_calls
```

**Output**

```python
@@ -352,15 +351,15 @@ ai_msg.tool_calls
'id': 'call_MPKYGLHbf39csJIyb5BZ9xIk'}]
```

We append the message shown above to the LLM, which acts as context and makes the LLM aware of all the functions it has called.

```python
messages.append(ai_msg)
```

Tool calling happens in two phases:

1. In our first call, we gathered all the tools that the LLM decided to call, so that it can get the result as added context and give a more accurate, hallucination-free result.
2. In our second call, we parse the set of tools chosen by the LLM, run them (in our case, the functions we defined, with the LLM's extracted arguments), and pass the result to the LLM, as shown below.

@@ -373,12 +372,13 @@ for tool_call in ai_msg.tool_calls:

```python
messages.append(ToolMessage(tool_output, tool_call_id=tool_call["id"]))
```
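
The start of this loop is elided by the hunk; a representative sketch of the full dispatch step (the lookup dict is illustrative):

```python
from langchain_core.messages import ToolMessage

for tool_call in ai_msg.tool_calls:
    # Resolve the tool the LLM chose and run it with the extracted arguments.
    selected_tool = {"add": add, "multiply": multiply}[tool_call["name"].lower()]
    tool_output = selected_tool(**tool_call["args"])
    messages.append(ToolMessage(tool_output, tool_call_id=tool_call["id"]))
```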

Finally, we call the LLM (bound to the tools) with the function response added to its context.

```python
response = llm_with_tools.invoke(messages)
print(response.content)
```

**Output**

```txt
@@ -425,4 +425,4 @@ chain.invoke(query)
[multiply(a=3, b=12), add(a=11, b=49)]
```

Now, as done above, we parse this, run the functions, and call the LLM once again to get the result.
@@ -265,13 +265,7 @@
   "cell_type": "markdown",
   "id": "08437fa2",
   "metadata": {},
-   "source": [
-    "## Instantiation\n",
-    "\n",
-    "### Vector search\n",
-    "\n",
-    "Dense vector retrival using fake embeddings in this example."
-   ]
+   "source": "## Instantiation\n\n### Vector search\n\nDense vector retrieval using fake embeddings in this example."
  },
  {
   "cell_type": "code",
@@ -713,4 +707,4 @@
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
@@ -638,7 +638,7 @@
    "\n",
    "For guides on how to use this vector store for retrieval-augmented generation (RAG), see the following sections:\n",
    "\n",
-    "- [Tutorials](/docs/tutorials/)\n",
+    "- [Tutorials](/docs/tutorials/rag)\n",
    "- [How-to: Question and answer with RAG](https://python.langchain.com/docs/how_to/#qa-with-rag)\n",
    "- [Retrieval conceptual docs](https://python.langchain.com/docs/concepts/retrieval)"
   ]
docs/docs/integrations/vectorstores/pgvectorstore.ipynb (new file, 782 lines)
File diff suppressed because one or more lines are too long
@@ -447,11 +447,7 @@
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
-   "source": [
-    "We'll define a helper funciton to create a uuid for a document and associated vector embedding based on its timestamp. We'll use this function to create a uuid for each git log entry.\n",
-    "\n",
-    "Important note: If you are working with documents and want the current date and time associated with vector for time-based search, you can skip this step. A uuid will be automatically generated when the documents are ingested by default."
-   ]
+   "source": "We'll define a helper function to create a uuid for a document and associated vector embedding based on its timestamp. We'll use this function to create a uuid for each git log entry.\n\nImportant note: If you are working with documents and want the current date and time associated with vector for time-based search, you can skip this step. A uuid will be automatically generated when the documents are ingested by default."
  },
  {
   "cell_type": "code",
@@ -1729,4 +1725,4 @@
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
@@ -1,9 +1,6 @@
import sys
from pathlib import Path

from langchain_community import document_loaders
from langchain_core.document_loaders.base import BaseLoader

KV_STORE_TEMPLATE = """\
---
sidebar_class_name: hidden
@@ -175,8 +175,23 @@ def _modify_frontmatter(
 def _convert_notebook(
     notebook_path: Path, output_path: Path, intermediate_docs_dir: Path
 ) -> Path:
-    with open(notebook_path) as f:
-        nb = nbformat.read(f, as_version=4)
+    import json
+    import uuid
+
+    with open(notebook_path, "r", encoding="utf-8") as f:
+        nb_json = json.load(f)
+
+    # Fix missing and duplicate cell IDs before nbformat validation
+    seen_ids = set()
+    for cell in nb_json.get("cells", []):
+        if "id" not in cell or not cell.get("id") or cell.get("id") in seen_ids:
+            cell["id"] = str(uuid.uuid4())[:8]
+        seen_ids.add(cell["id"])
+
+    nb = nbformat.reads(json.dumps(nb_json), as_version=4)
+
+    # Upgrade notebook format
+    nb = nbformat.v4.upgrade(nb)

     body, resources = exporter.from_notebook_node(nb)
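
As a quick, standalone sanity check of the ID-fixing logic added above (the toy notebook dict is hypothetical):

```python
import uuid

nb_json = {"cells": [{"id": "a"}, {"id": "a"}, {}]}  # one duplicate, one missing ID

seen_ids = set()
for cell in nb_json.get("cells", []):
    if "id" not in cell or not cell.get("id") or cell.get("id") in seen_ids:
        cell["id"] = str(uuid.uuid4())[:8]
    seen_ids.add(cell["id"])

# Every cell now has a unique, non-empty ID.
assert len({c["id"] for c in nb_json["cells"]}) == 3
```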
@@ -10,6 +10,7 @@ from langchain_couchbase import CouchbaseSearchVectorStore
 from langchain_milvus import Milvus
 from langchain_mongodb import MongoDBAtlasVectorSearch
 from langchain_pinecone import PineconeVectorStore
+from langchain_postgres import PGVectorStore
 from langchain_qdrant import QdrantVectorStore

 vectorstore_list = [
@@ -22,6 +23,7 @@ vectorstore_list = [
 ]

 from_partners = [
+    ("PGVectorStore", PGVectorStore),
     ("Chroma", Chroma),
     ("AstraDBVectorStore", AstraDBVectorStore),
     ("QdrantVectorStore", QdrantVectorStore),
@@ -52,6 +54,17 @@ The table below lists the features for some of our most popular vector stores.

 def get_vectorstore_table():
     vectorstore_feat_table = {
+        "PGVectorStore": {
+            "Delete by ID": True,
+            "Filtering": True,
+            "similarity_search_by_vector": True,
+            "similarity_search_with_score": True,
+            "asearch": True,
+            "Passes Standard Tests": True,
+            "Multi Tenancy": False,
+            "Local/Cloud": "Local",
+            "IDs in add Documents": True,
+        },
         "FAISS": {
             "Delete by ID": True,
             "Filtering": True,
@@ -315,8 +315,8 @@ module.exports = {
       },
     ],
     link: {
-      type: "doc",
-      id: "integrations/stores/index",
+      type: "generated-index",
+      slug: "integrations/stores",
     },
   },
   {
@@ -1174,6 +1174,19 @@ const FEATURE_TABLES = {
       local: true,
       idsInAddDocuments: true,
     },
+    {
+      name: "PGVectorStore",
+      link: "pgvectorstore",
+      deleteById: true,
+      filtering: true,
+      searchByVector: true,
+      searchWithScore: true,
+      async: true,
+      passesStandardTests: true,
+      multiTenancy: false,
+      local: true,
+      idsInAddDocuments: true,
+    },
     {
       name: "PineconeVectorStore",
       link: "pinecone",
@@ -60,6 +60,13 @@ export default function VectorStoreTabs(props) {
       packageName: "langchain-postgres",
       default: false,
     },
+    {
+      value: "PGVectorStore",
+      label: "PGVectorStore",
+      text: `from langchain_postgres import PGEngine, PGVectorStore\n${useFakeEmbeddings ? fakeEmbeddingsString : ""}\n$engine = PGEngine.from_connection_string(\n    url="postgresql+psycopg://..."\n)\n\n${vectorStoreVarName} = PGVectorStore.create_sync(\n    engine=pg_engine,\n    table_name='test_table',\n    embedding_service=embedding\n)`,
+      packageName: "langchain-postgres",
+      default: false,
+    },
     {
       value: "Pinecone",
       label: "Pinecone",
@@ -1,6 +1,13 @@
 httpx
 grpcio
 aiohttp<3.11
-protobuf<3.21
+protobuf<5.0
 tenacity
 urllib3
 pypdf
+# Fix numpy conflicts between langchain-astradb and langchain-chroma
+numpy>=1.26.0,<2.0.0
+# Fix simsimd build error in langchain-weaviate
+simsimd>=5.0.0
+# Fix sentencepiece build error - use newer version that supports modern CMake
+sentencepiece>=0.2.1
@@ -67,12 +67,11 @@ def serve(
     ] = None,
 ) -> None:
     """Start the LangServe app, whether it's a template or an app."""
     # see if is a template
     try:
         project_dir = get_package_root()
         pyproject = project_dir / "pyproject.toml"
         get_langserve_export(pyproject)
-    except KeyError:
+    except (KeyError, FileNotFoundError):
         # not a template
         app_namespace.serve(port=port, host=host)
     else:
@@ -1,4 +1,4 @@
-.PHONY: all format lint test tests integration_tests docker_tests help extended_tests
+.PHONY: all format lint test tests integration_tests help extended_tests

 # Default target executed when no arguments are given to make.
 all: help
@@ -61,6 +61,7 @@ class __ModuleName__Loader(BaseLoader):
         .. code-block:: python

             TODO: Example output

     """  # noqa: E501

     # TODO: This method must be implemented to load documents.

@@ -61,6 +61,7 @@ class __ModuleName__Tool(BaseTool):  # type: ignore[override]
         .. code-block:: python

             # TODO: output of invocation

     """  # noqa: E501

     # TODO: Set tool name and description
@@ -56,7 +56,6 @@ select = [
     "C4",  # flake8-comprehensions
     "COM",  # flake8-commas
     "D",  # pydocstyle
     "DOC",  # pydoclint
     "E",  # pycodestyle error
     "EM",  # flake8-errmsg
     "F",  # pyflakes

libs/cli/uv.lock (generated, 1420 lines)
File diff suppressed because it is too large
@@ -1,4 +1,4 @@
-.PHONY: all format lint test tests test_watch integration_tests docker_tests help extended_tests
+.PHONY: all format lint test tests test_watch integration_tests help extended_tests

 # Default target executed when no arguments are given to make.
 all: help
@@ -70,6 +70,7 @@ def beta(
             @beta
             def the_function_to_annotate():
                 pass

     """

 def beta(

@@ -136,6 +136,7 @@ def deprecated(
             @deprecated('1.4.0')
             def the_function_to_deprecate():
                 pass

     """
     _validate_deprecation_params(
         removal, alternative, alternative_import, pending=pending

@@ -549,6 +550,7 @@ def rename_parameter(
         @_api.rename_parameter("3.1", "bad_name", "good_name")
         def func(good_name): ...

     """

     def decorator(f: Callable[_P, _R]) -> Callable[_P, _R]:

@@ -363,6 +363,7 @@ class Context:
             print(output["result"])  # Output: "hello"
             print(output["context"])  # Output: "What's your name?"
             print(output["input"])  # Output: "What's your name?"

     """

     @staticmethod
@@ -7,6 +7,8 @@ from typing import TYPE_CHECKING, Any, Optional, Union

 from typing_extensions import Self

+from langchain_core.messages.v1 import AIMessage, AIMessageChunk, MessageV1
+
 if TYPE_CHECKING:
     from collections.abc import Sequence
     from uuid import UUID
@@ -64,9 +66,11 @@ class LLMManagerMixin:

     def on_llm_new_token(
         self,
-        token: str,
+        token: Union[str, AIMessageChunk],
         *,
-        chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
+        chunk: Optional[
+            Union[GenerationChunk, ChatGenerationChunk, AIMessageChunk]
+        ] = None,
         run_id: UUID,
         parent_run_id: Optional[UUID] = None,
         **kwargs: Any,
@@ -75,8 +79,8 @@ class LLMManagerMixin:

         Args:
             token (str): The new token.
-            chunk (GenerationChunk | ChatGenerationChunk): The new generated chunk,
-                containing content and other information.
+            chunk (GenerationChunk | ChatGenerationChunk | AIMessageChunk): The new
+                generated chunk, containing content and other information.
             run_id (UUID): The run ID. This is the ID of the current run.
             parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
             kwargs (Any): Additional keyword arguments.
@@ -84,7 +88,7 @@ class LLMManagerMixin:

     def on_llm_end(
         self,
-        response: LLMResult,
+        response: Union[LLMResult, AIMessage],
         *,
         run_id: UUID,
         parent_run_id: Optional[UUID] = None,
@@ -93,7 +97,7 @@ class LLMManagerMixin:
         """Run when LLM ends running.

         Args:
-            response (LLMResult): The response which was generated.
+            response (LLMResult | AIMessage): The response which was generated.
             run_id (UUID): The run ID. This is the ID of the current run.
             parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
             kwargs (Any): Additional keyword arguments.
@@ -261,7 +265,7 @@ class CallbackManagerMixin:
     def on_chat_model_start(
         self,
         serialized: dict[str, Any],
-        messages: list[list[BaseMessage]],
+        messages: Union[list[list[BaseMessage]], list[list[MessageV1]]],
         *,
         run_id: UUID,
         parent_run_id: Optional[UUID] = None,
@@ -439,6 +443,9 @@ class BaseCallbackHandler(
     run_inline: bool = False
     """Whether to run the callback inline."""

+    accepts_new_messages: bool = False
+    """Whether the callback accepts new message format."""
+
     @property
     def ignore_llm(self) -> bool:
         """Whether to ignore LLM callbacks."""
@@ -509,7 +516,7 @@ class AsyncCallbackHandler(BaseCallbackHandler):
     async def on_chat_model_start(
         self,
         serialized: dict[str, Any],
-        messages: list[list[BaseMessage]],
+        messages: Union[list[list[BaseMessage]], list[list[MessageV1]]],
         *,
         run_id: UUID,
         parent_run_id: Optional[UUID] = None,
@@ -538,9 +545,11 @@ class AsyncCallbackHandler(BaseCallbackHandler):

     async def on_llm_new_token(
         self,
-        token: str,
+        token: Union[str, AIMessageChunk],
         *,
-        chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
+        chunk: Optional[
+            Union[GenerationChunk, ChatGenerationChunk, AIMessageChunk]
+        ] = None,
         run_id: UUID,
         parent_run_id: Optional[UUID] = None,
         tags: Optional[list[str]] = None,
@@ -550,8 +559,8 @@ class AsyncCallbackHandler(BaseCallbackHandler):

         Args:
             token (str): The new token.
-            chunk (GenerationChunk | ChatGenerationChunk): The new generated chunk,
-                containing content and other information.
+            chunk (GenerationChunk | ChatGenerationChunk | AIMessageChunk): The new
+                generated chunk, containing content and other information.
             run_id (UUID): The run ID. This is the ID of the current run.
             parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
             tags (Optional[list[str]]): The tags.
@@ -560,7 +569,7 @@ class AsyncCallbackHandler(BaseCallbackHandler):

     async def on_llm_end(
         self,
-        response: LLMResult,
+        response: Union[LLMResult, AIMessage],
         *,
         run_id: UUID,
         parent_run_id: Optional[UUID] = None,
@@ -570,7 +579,7 @@ class AsyncCallbackHandler(BaseCallbackHandler):
         """Run when LLM ends running.

         Args:
-            response (LLMResult): The response which was generated.
+            response (LLMResult | AIMessage): The response which was generated.
             run_id (UUID): The run ID. This is the ID of the current run.
             parent_run_id (UUID): The parent run ID. This is the ID of the parent run.
             tags (Optional[list[str]]): The tags.
@@ -594,8 +603,8 @@ class AsyncCallbackHandler(BaseCallbackHandler):
             parent_run_id: The parent run ID. This is the ID of the parent run.
             tags: The tags.
             kwargs (Any): Additional keyword arguments.
-                - response (LLMResult): The response which was generated before
-                    the error occurred.
+                - response (LLMResult | AIMessage): The response which was generated
+                    before the error occurred.
         """

     async def on_chain_start(
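
A minimal sketch of a handler opting into the new message format via the `accepts_new_messages` flag introduced above (the handler class is illustrative, not part of this diff):

```python
from typing import Any, Union
from uuid import UUID

from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.messages.v1 import AIMessageChunk


class V1TokenPrinter(BaseCallbackHandler):
    # Opt in: tokens may arrive as v1 AIMessageChunk instead of str,
    # and the callback manager skips the legacy-event conversion.
    accepts_new_messages = True

    def on_llm_new_token(
        self,
        token: Union[str, AIMessageChunk],
        *,
        run_id: UUID,
        **kwargs: Any,
    ) -> None:
        print(token if isinstance(token, str) else token.text, end="")
```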
@@ -53,6 +53,7 @@ class FileCallbackHandler(BaseCallbackHandler):
     When not used as a context manager, a deprecation warning will be issued
     on first use. The file will be opened immediately in ``__init__`` and closed
     in ``__del__`` or when ``close()`` is called explicitly.

     """

     def __init__(
@@ -11,6 +11,7 @@ from abc import ABC, abstractmethod
 from concurrent.futures import ThreadPoolExecutor
 from contextlib import asynccontextmanager, contextmanager
 from contextvars import copy_context
+from dataclasses import is_dataclass
 from typing import (
     TYPE_CHECKING,
     Any,
@@ -37,6 +38,8 @@ from langchain_core.callbacks.base import (
 )
 from langchain_core.callbacks.stdout import StdOutCallbackHandler
 from langchain_core.messages import BaseMessage, get_buffer_string
+from langchain_core.messages.v1 import AIMessage, AIMessageChunk
+from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, LLMResult
 from langchain_core.tracers.schemas import Run
 from langchain_core.utils.env import env_var_is_set

@@ -47,7 +50,7 @@ if TYPE_CHECKING:

     from langchain_core.agents import AgentAction, AgentFinish
     from langchain_core.documents import Document
-    from langchain_core.outputs import ChatGenerationChunk, GenerationChunk, LLMResult
+    from langchain_core.outputs import GenerationChunk
     from langchain_core.runnables.config import RunnableConfig

 logger = logging.getLogger(__name__)
@@ -105,6 +108,7 @@ def trace_as_chain_group(
         # Use the callback manager for the chain group
         res = llm.invoke(llm_input, {"callbacks": manager})
         manager.on_chain_end({"output": res})

     """  # noqa: E501
     from langchain_core.tracers.context import _get_trace_callbacks

@@ -186,6 +190,7 @@ async def atrace_as_chain_group(
         # Use the async callback manager for the chain group
         res = await llm.ainvoke(llm_input, {"callbacks": manager})
         await manager.on_chain_end({"output": res})

     """  # noqa: E501
     from langchain_core.tracers.context import _get_trace_callbacks

@@ -241,6 +246,22 @@ def shielded(func: Func) -> Func:
     return cast("Func", wrapped)


+def _convert_llm_events(
+    event_name: str, args: tuple[Any, ...], kwargs: dict[str, Any]
+) -> None:
+    if event_name == "on_chat_model_start" and isinstance(args[1], list):
+        for idx, item in enumerate(args[1]):
+            if is_dataclass(item):
+                args[1][idx] = item  # convert to old message
+    elif event_name == "on_llm_new_token" and is_dataclass(args[0]):
+        kwargs["chunk"] = ChatGenerationChunk(text=args[0].text, message=args[0])
+        args[0] = args[0].text
+    elif event_name == "on_llm_end" and is_dataclass(args[0]):
+        args[0] = LLMResult(
+            generations=[[ChatGeneration(text=args[0].text, message=args[0])]]
+        )
+
+
 def handle_event(
     handlers: list[BaseCallbackHandler],
     event_name: str,
@@ -269,6 +290,8 @@ def handle_event(
             if ignore_condition_name is None or not getattr(
                 handler, ignore_condition_name
             ):
+                if not handler.accepts_new_messages:
+                    _convert_llm_events(event_name, args, kwargs)
                 event = getattr(handler, event_name)(*args, **kwargs)
                 if asyncio.iscoroutine(event):
                     coros.append(event)
@@ -363,6 +386,8 @@ async def _ahandle_event_for_handler(
 ) -> None:
     try:
         if ignore_condition_name is None or not getattr(handler, ignore_condition_name):
+            if not handler.accepts_new_messages:
+                _convert_llm_events(event_name, args, kwargs)
             event = getattr(handler, event_name)
             if asyncio.iscoroutinefunction(event):
                 await event(*args, **kwargs)
@@ -670,9 +695,11 @@ class CallbackManagerForLLMRun(RunManager, LLMManagerMixin):

     def on_llm_new_token(
         self,
-        token: str,
+        token: Union[str, AIMessageChunk],
         *,
-        chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
+        chunk: Optional[
+            Union[GenerationChunk, ChatGenerationChunk, AIMessageChunk]
+        ] = None,
         **kwargs: Any,
     ) -> None:
         """Run when LLM generates a new token.
@@ -697,11 +724,11 @@ class CallbackManagerForLLMRun(RunManager, LLMManagerMixin):
             **kwargs,
         )

-    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
+    def on_llm_end(self, response: Union[LLMResult, AIMessage], **kwargs: Any) -> None:
         """Run when LLM ends running.

         Args:
-            response (LLMResult): The LLM result.
+            response (LLMResult | AIMessage): The LLM result.
             **kwargs (Any): Additional keyword arguments.
         """
         if not self.handlers:
@@ -727,8 +754,8 @@ class CallbackManagerForLLMRun(RunManager, LLMManagerMixin):
         Args:
             error (Exception or KeyboardInterrupt): The error.
             kwargs (Any): Additional keyword arguments.
-                - response (LLMResult): The response which was generated before
-                    the error occurred.
+                - response (LLMResult | AIMessage): The response which was generated
+                    before the error occurred.
         """
         if not self.handlers:
             return
@@ -766,9 +793,11 @@ class AsyncCallbackManagerForLLMRun(AsyncRunManager, LLMManagerMixin):

     async def on_llm_new_token(
         self,
-        token: str,
+        token: Union[str, AIMessageChunk],
         *,
-        chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
+        chunk: Optional[
+            Union[GenerationChunk, ChatGenerationChunk, AIMessageChunk]
+        ] = None,
         **kwargs: Any,
     ) -> None:
         """Run when LLM generates a new token.
@@ -794,11 +823,13 @@ class AsyncCallbackManagerForLLMRun(AsyncRunManager, LLMManagerMixin):
         )

     @shielded
-    async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
+    async def on_llm_end(
+        self, response: Union[LLMResult, AIMessage], **kwargs: Any
+    ) -> None:
         """Run when LLM ends running.

         Args:
-            response (LLMResult): The LLM result.
+            response (LLMResult | AIMessage): The LLM result.
             **kwargs (Any): Additional keyword arguments.
         """
         if not self.handlers:
@@ -825,11 +856,8 @@ class AsyncCallbackManagerForLLMRun(AsyncRunManager, LLMManagerMixin):
         Args:
             error (Exception or KeyboardInterrupt): The error.
             kwargs (Any): Additional keyword arguments.
-                - response (LLMResult): The response which was generated before
-                    the error occurred.
-
-
-
+                - response (LLMResult | AIMessage): The response which was generated
+                    before the error occurred.
         """
         if not self.handlers:
             return
@@ -2575,6 +2603,7 @@ async def adispatch_custom_event(
         behalf.

     .. versionadded:: 0.2.15

     """
     from langchain_core.runnables.config import (
         ensure_config,
@@ -2645,6 +2674,7 @@ def dispatch_custom_event(
         foo_.invoke({"a": "1"}, {"callbacks": [CustomCallbackManager()]})

     .. versionadded:: 0.2.15

     """
     from langchain_core.runnables.config import (
         ensure_config,
@@ -44,6 +44,7 @@ class UsageMetadataCallbackHandler(BaseCallbackHandler):
         'input_token_details': {'cache_read': 0, 'cache_creation': 0}}}

     .. versionadded:: 0.3.49

     """

     def __init__(self) -> None:
@@ -98,7 +99,7 @@ def get_usage_metadata_callback(

     Args:
         name (str): The name of the context variable. Defaults to
-            ``"usage_metadata_callback"``.
+            ``'usage_metadata_callback'``.

     Example:
         .. code-block:: python
@@ -127,6 +128,7 @@ def get_usage_metadata_callback(
         'input_token_details': {'cache_read': 0, 'cache_creation': 0}}}

     .. versionadded:: 0.3.49

     """
     from langchain_core.tracers.context import register_configure_hook
@@ -91,6 +91,7 @@ class BaseChatMessageHistory(ABC):
             def clear(self):
                 with open(os.path.join(storage_path, session_id), "w") as f:
                     f.write("[]")

     """

     messages: list[BaseMessage]

@@ -36,6 +36,7 @@ class LangSmithLoader(BaseLoader):
         # -> [Document("...", metadata={"inputs": {...}, "outputs": {...}, ...}), ...]

     .. versionadded:: 0.2.34

     """  # noqa: E501

     def __init__(
@@ -61,7 +62,7 @@ class LangSmithLoader(BaseLoader):
     Args:
         dataset_id: The ID of the dataset to filter by. Defaults to None.
         dataset_name: The name of the dataset to filter by. Defaults to None.
-        content_key: The inputs key to set as Document page content. ``"."`` characters
+        content_key: The inputs key to set as Document page content. ``'.'`` characters
             are interpreted as nested keys. E.g. ``content_key="first.second"`` will
             result in
             ``Document(page_content=format_content(example.inputs["first"]["second"]))``
@@ -102,6 +102,7 @@ class Blob(BaseMedia):
         # Read the blob as a byte stream
         with blob.as_bytes_io() as f:
             print(f.read())

     """

     data: Union[bytes, str, None] = None
@@ -265,6 +266,7 @@ class Document(BaseMedia):
             page_content="Hello, world!",
             metadata={"source": "https://example.com"}
         )

     """

     page_content: str

@@ -46,6 +46,7 @@ class FakeEmbeddings(Embeddings, BaseModel):

         2
         [-0.5670477847544458, -0.31403828652395727, -0.5840547508955257]

     """

     size: int
@@ -103,6 +104,7 @@ class DeterministicFakeEmbedding(Embeddings, BaseModel):

         2
         [-0.5670477847544458, -0.31403828652395727, -0.5840547508955257]

     """

     size: int
@@ -444,6 +444,9 @@ def index(
     scoped_full_cleanup_source_ids: set[str] = set()

     for doc_batch in _batch(batch_size, doc_iterator):
+        # Track original batch size before deduplication
+        original_batch_size = len(doc_batch)
+
         hashed_docs = list(
             _deduplicate_in_order(
                 [
@@ -452,6 +455,8 @@ def index(
                 ]
             )
         )
+        # Count documents removed by within-batch deduplication
+        num_skipped += original_batch_size - len(hashed_docs)

         source_ids: Sequence[Optional[str]] = [
             source_id_assigner(hashed_doc) for hashed_doc in hashed_docs
@@ -784,6 +789,9 @@ async def aindex(
     scoped_full_cleanup_source_ids: set[str] = set()

     async for doc_batch in _abatch(batch_size, async_doc_iterator):
+        # Track original batch size before deduplication
+        original_batch_size = len(doc_batch)
+
         hashed_docs = list(
             _deduplicate_in_order(
                 [
@@ -792,6 +800,8 @@ async def aindex(
                 ]
             )
         )
+        # Count documents removed by within-batch deduplication
+        num_skipped += original_batch_size - len(hashed_docs)

         source_ids: Sequence[Optional[str]] = [
             source_id_assigner(doc) for doc in hashed_docs
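
The accounting added above is easy to sanity-check in isolation (sketch; a simple stand-in plays the role of `_deduplicate_in_order`):

```python
def dedupe_in_order(items):
    """Drop repeats while preserving first-seen order."""
    seen = set()
    out = []
    for item in items:
        if item not in seen:
            seen.add(item)
            out.append(item)
    return out

doc_batch = ["a", "b", "a", "c", "b"]
original_batch_size = len(doc_batch)
hashed_docs = dedupe_in_order(doc_batch)
num_skipped = original_batch_size - len(hashed_docs)
assert (len(hashed_docs), num_skipped) == (3, 2)
```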
@@ -1,3 +1,4 @@
+import copy
 import re
 from collections.abc import Sequence
 from typing import Optional
@@ -51,6 +52,7 @@ def _parse_data_uri(uri: str) -> Optional[dict]:
             "mime_type": "image/jpeg",
             "data": "/9j/4AAQSkZJRg...",
         }

     """
     regex = r"^data:(?P<mime_type>[^;]+);base64,(?P<data>.+)$"
     match = re.match(regex, uri)
@@ -127,7 +129,10 @@ def _normalize_messages(messages: Sequence[BaseMessage]) -> list[BaseMessage]:
             and _is_openai_data_block(block)
         ):
             if formatted_message is message:
-                formatted_message = message.model_copy()
+                if isinstance(message, BaseMessage):
+                    formatted_message = message.model_copy()
+                else:
+                    formatted_message = copy.copy(message)
                 # Also shallow-copy content
                 formatted_message.content = list(formatted_message.content)
@@ -2,7 +2,6 @@

 from __future__ import annotations

-import warnings
 from abc import ABC, abstractmethod
 from collections.abc import Mapping, Sequence
 from functools import cache
@@ -26,8 +25,8 @@ from langchain_core.messages import (
     AnyMessage,
     BaseMessage,
     MessageLikeRepresentation,
-    get_buffer_string,
 )
+from langchain_core.messages.v1 import AIMessage as AIMessageV1
 from langchain_core.prompt_values import PromptValue
 from langchain_core.runnables import Runnable, RunnableSerializable
 from langchain_core.utils import get_pydantic_field_names
@@ -85,7 +84,9 @@ def _get_token_ids_default_method(text: str) -> list[int]:
 LanguageModelInput = Union[PromptValue, str, Sequence[MessageLikeRepresentation]]
 LanguageModelOutput = Union[BaseMessage, str]
 LanguageModelLike = Runnable[LanguageModelInput, LanguageModelOutput]
-LanguageModelOutputVar = TypeVar("LanguageModelOutputVar", BaseMessage, str)
+LanguageModelOutputVar = TypeVar(
+    "LanguageModelOutputVar", BaseMessage, str, AIMessageV1
+)


 def _get_verbosity() -> bool:
@@ -365,33 +366,6 @@ class BaseLanguageModel(
         """
         return len(self.get_token_ids(text))

-    def get_num_tokens_from_messages(
-        self,
-        messages: list[BaseMessage],
-        tools: Optional[Sequence] = None,
-    ) -> int:
-        """Get the number of tokens in the messages.
-
-        Useful for checking if an input fits in a model's context window.
-
-        **Note**: the base implementation of get_num_tokens_from_messages ignores
-        tool schemas.
-
-        Args:
-            messages: The message inputs to tokenize.
-            tools: If provided, sequence of dict, BaseModel, function, or BaseTools
-                to be converted to tool schemas.
-
-        Returns:
-            The sum of the number of tokens across the messages.
-        """
-        if tools is not None:
-            warnings.warn(
-                "Counting tokens in tool schemas is not yet supported. Ignoring tools.",
-                stacklevel=2,
-            )
-        return sum(self.get_num_tokens(get_buffer_string([m])) for m in messages)
-
     @classmethod
     def _all_required_field_names(cls) -> set:
         """DEPRECATED: Kept for backwards compatibility.
@@ -55,10 +55,13 @@ from langchain_core.messages import (
     HumanMessage,
     convert_to_messages,
     convert_to_openai_image_block,
+    get_buffer_string,
     is_data_content_block,
     message_chunk_to_message,
 )
+from langchain_core.messages import content_blocks as types
 from langchain_core.messages.ai import _LC_ID_PREFIX
+from langchain_core.messages.v1 import AIMessage as AIMessageV1
 from langchain_core.outputs import (
     ChatGeneration,
     ChatGenerationChunk,
@@ -220,6 +223,23 @@ def _format_ls_structured_output(ls_structured_output_format: Optional[dict]) ->
     return ls_structured_output_format_dict


+def _convert_to_v1(message: AIMessage) -> AIMessageV1:
+    """Best-effort conversion of a V0 AIMessage to V1."""
+    if isinstance(message.content, str):
+        content: list[types.ContentBlock] = []
+        if message.content:
+            content = [{"type": "text", "text": message.content}]
+
+        for tool_call in message.tool_calls:
+            content.append(tool_call)
+
+        return AIMessageV1(
+            content=content,
+            usage_metadata=message.usage_metadata,
+            response_metadata=message.response_metadata,
+        )
+
+
 class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
     """Base class for chat models.

@@ -328,6 +348,20 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         does not properly support streaming.
     """

+    output_version: str = "v0"
+    """Version of AIMessage output format to use.
+
+    This field is used to roll-out new output formats for chat model AIMessages
+    in a backwards-compatible way.
+
+    ``'v1'`` standardizes output format using a list of typed ContentBlock dicts. We
+    recommend this for new applications.
+
+    All chat models currently support the default of ``"v0"``.
+
+    .. versionadded:: 0.4
+    """
+
     @model_validator(mode="before")
     @classmethod
     def raise_deprecation(cls, values: dict) -> Any:
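
A hypothetical sketch of what opting into the new format looks like from user code (`ChatOpenAI` stands in for any `BaseChatModel` subclass; the exact content blocks returned depend on the provider):

```python
from langchain_openai import ChatOpenAI  # illustrative model

llm = ChatOpenAI(model="gpt-4o", output_version="v1")
msg = llm.invoke("Say hi")
# With "v1", `content` is a list of typed content block dicts, e.g.:
# [{"type": "text", "text": "Hi there!"}]
print(msg.content)
```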
@@ -1337,6 +1371,33 @@
         starter_dict["_type"] = self._llm_type
         return starter_dict

+    def get_num_tokens_from_messages(
+        self,
+        messages: list[BaseMessage],
+        tools: Optional[Sequence] = None,
+    ) -> int:
+        """Get the number of tokens in the messages.
+
+        Useful for checking if an input fits in a model's context window.
+
+        **Note**: the base implementation of get_num_tokens_from_messages ignores
+        tool schemas.
+
+        Args:
+            messages: The message inputs to tokenize.
+            tools: If provided, sequence of dict, BaseModel, function, or BaseTools
+                to be converted to tool schemas.
+
+        Returns:
+            The sum of the number of tokens across the messages.
+        """
+        if tools is not None:
+            warnings.warn(
+                "Counting tokens in tool schemas is not yet supported. Ignoring tools.",
+                stacklevel=2,
+            )
+        return sum(self.get_num_tokens(get_buffer_string([m])) for m in messages)
+
     def bind_tools(
         self,
         tools: Sequence[
@@ -1367,12 +1428,13 @@
         """Model wrapper that returns outputs formatted to match the given schema.

         Args:
-            schema:
-                The output schema. Can be passed in as:
-                    - an OpenAI function/tool schema,
-                    - a JSON Schema,
-                    - a TypedDict class,
-                    - or a Pydantic class.
+            schema: The output schema. Can be passed in as:
+
+                - an OpenAI function/tool schema,
+                - a JSON Schema,
+                - a TypedDict class,
+                - or a Pydantic class.

                 If ``schema`` is a Pydantic class then the model output will be a
                 Pydantic instance of that class, and the model-generated fields will be
                 validated by the Pydantic class. Otherwise the model output will be a
@@ -1386,7 +1448,7 @@
                 then both the raw model response (a BaseMessage) and the parsed model
                 response will be returned. If an error occurs during output parsing it
                 will be caught and returned as well. The final output is always a dict
-                with keys "raw", "parsed", and "parsing_error".
+                with keys ``'raw'``, ``'parsed'``, and ``'parsing_error'``.

         Returns:
             A Runnable that takes same inputs as a :class:`langchain_core.language_models.chat.BaseChatModel`.
@@ -1397,9 +1459,10 @@
             Otherwise, if ``include_raw`` is False then Runnable outputs a dict.

             If ``include_raw`` is True, then Runnable outputs a dict with keys:
-                - ``"raw"``: BaseMessage
-                - ``"parsed"``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above.
-                - ``"parsing_error"``: Optional[BaseException]
+
+                - ``'raw'``: BaseMessage
+                - ``'parsed'``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above.
+                - ``'parsing_error'``: Optional[BaseException]

         Example: Pydantic schema (include_raw=False):
             .. code-block:: python
@@ -1465,6 +1528,7 @@
         .. versionchanged:: 0.2.26

             Added support for TypedDict class.

         """  # noqa: E501
         _ = kwargs.pop("method", None)
         _ = kwargs.pop("strict", None)
@@ -1418,6 +1418,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
|
||||
.. code-block:: python
|
||||
|
||||
llm.save(file_path="path/llm.yaml")
|
||||
|
||||
"""
|
||||
# Convert file to Path object.
|
||||
save_path = Path(file_path)
|
||||
|
||||
libs/core/langchain_core/language_models/v1/__init__.py (Normal file, 1 line)
@@ -0,0 +1 @@
"""LangChain v1.0 chat models."""
libs/core/langchain_core/language_models/v1/chat_models.py (Normal file, 942 lines)
@@ -0,0 +1,942 @@
"""Chat models for conversational AI."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import copy
|
||||
import typing
|
||||
import warnings
|
||||
from abc import ABC, abstractmethod
|
||||
from collections.abc import AsyncIterator, Iterator, Sequence
|
||||
from operator import itemgetter
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Any,
|
||||
Callable,
|
||||
Literal,
|
||||
Optional,
|
||||
Union,
|
||||
cast,
|
||||
)
|
||||
|
||||
from pydantic import (
|
||||
BaseModel,
|
||||
ConfigDict,
|
||||
Field,
|
||||
)
|
||||
from typing_extensions import override
|
||||
|
||||
from langchain_core.callbacks import (
|
||||
AsyncCallbackManager,
|
||||
AsyncCallbackManagerForLLMRun,
|
||||
CallbackManager,
|
||||
CallbackManagerForLLMRun,
|
||||
)
|
||||
from langchain_core.language_models._utils import _normalize_messages
|
||||
from langchain_core.language_models.base import (
|
||||
BaseLanguageModel,
|
||||
LangSmithParams,
|
||||
LanguageModelInput,
|
||||
)
|
||||
from langchain_core.messages import (
|
||||
AIMessage,
|
||||
BaseMessage,
|
||||
convert_to_openai_image_block,
|
||||
get_buffer_string,
|
||||
is_data_content_block,
|
||||
)
|
||||
from langchain_core.messages.utils import (
|
||||
_convert_from_v1_message,
|
||||
convert_to_messages_v1,
|
||||
)
|
||||
from langchain_core.messages.v1 import AIMessage as AIMessageV1
|
||||
from langchain_core.messages.v1 import AIMessageChunk as AIMessageChunkV1
|
||||
from langchain_core.messages.v1 import HumanMessage as HumanMessageV1
|
||||
from langchain_core.messages.v1 import MessageV1, add_ai_message_chunks
|
||||
from langchain_core.outputs import (
|
||||
ChatGeneration,
|
||||
ChatGenerationChunk,
|
||||
)
|
||||
from langchain_core.prompt_values import PromptValue
|
||||
from langchain_core.rate_limiters import BaseRateLimiter
|
||||
from langchain_core.runnables import RunnableMap, RunnablePassthrough
|
||||
from langchain_core.runnables.config import ensure_config, run_in_executor
|
||||
from langchain_core.tracers._streaming import _StreamingCallbackHandler
|
||||
from langchain_core.utils.function_calling import (
|
||||
convert_to_json_schema,
|
||||
convert_to_openai_tool,
|
||||
)
|
||||
from langchain_core.utils.pydantic import TypeBaseModel, is_basemodel_subclass
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from langchain_core.output_parsers.base import OutputParserLike
|
||||
from langchain_core.runnables import Runnable, RunnableConfig
|
||||
from langchain_core.tools import BaseTool
|
||||
|
||||
|
||||
def _generate_response_from_error(error: BaseException) -> list[AIMessageV1]:
    if hasattr(error, "response"):
        response = error.response
        metadata: dict = {}
        if hasattr(response, "headers"):
            try:
                metadata["headers"] = dict(response.headers)
            except Exception:
                metadata["headers"] = None
        if hasattr(response, "status_code"):
            metadata["status_code"] = response.status_code
        if hasattr(error, "request_id"):
            metadata["request_id"] = error.request_id
        generations = [AIMessageV1(content=[], response_metadata=metadata)]
    else:
        generations = []

    return generations

def _format_for_tracing(messages: list[MessageV1]) -> list[MessageV1]:
    """Format messages for tracing in on_chat_model_start.

    - Update image content blocks to OpenAI Chat Completions format (backward
      compatibility).
    - Add "type" key to content blocks that have a single key.

    Args:
        messages: List of messages to format.

    Returns:
        List of messages formatted for tracing.
    """
    messages_to_trace = []
    for message in messages:
        message_to_trace = message
        for idx, block in enumerate(message.content):
            # Update image content blocks to OpenAI Chat Completions format.
            if (
                block["type"] == "image"
                and is_data_content_block(block)
                and block.get("source_type") != "id"
            ):
                if message_to_trace is message:
                    # Shallow copy
                    message_to_trace = copy.copy(message)
                    message_to_trace.content = list(message_to_trace.content)

                message_to_trace.content[idx] = convert_to_openai_image_block(block)
            else:
                pass
        messages_to_trace.append(message_to_trace)

    return messages_to_trace


def generate_from_stream(stream: Iterator[AIMessageChunkV1]) -> AIMessageV1:
    """Generate from a stream.

    Args:
        stream: Iterator of AIMessageChunkV1.

    Returns:
        AIMessageV1: aggregated message.
    """
    generation = next(stream, None)
    if generation:
        generation += list(stream)
    if generation is None:
        msg = "No generations found in stream."
        raise ValueError(msg)
    return generation.to_message()

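A hedged usage sketch of the aggregation helper above; ``stream_of_chunks`` is a placeholder for any ``Iterator[AIMessageChunkV1]``, such as a model's low-level streaming hook:

```python
# Collapse a chunk iterator into a single AIMessageV1.
final_message = generate_from_stream(stream_of_chunks)
# An empty iterator raises ValueError("No generations found in stream.").
```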
async def agenerate_from_stream(
    stream: AsyncIterator[AIMessageChunkV1],
) -> AIMessageV1:
    """Async generate from a stream.

    Args:
        stream: Iterator of AIMessageChunkV1.

    Returns:
        AIMessageV1: aggregated message.
    """
    chunks = [chunk async for chunk in stream]
    return await run_in_executor(None, generate_from_stream, iter(chunks))


def _format_ls_structured_output(ls_structured_output_format: Optional[dict]) -> dict:
    if ls_structured_output_format:
        try:
            ls_structured_output_format_dict = {
                "ls_structured_output_format": {
                    "kwargs": ls_structured_output_format.get("kwargs", {}),
                    "schema": convert_to_json_schema(
                        ls_structured_output_format["schema"]
                    ),
                }
            }
        except ValueError:
            ls_structured_output_format_dict = {}
    else:
        ls_structured_output_format_dict = {}

    return ls_structured_output_format_dict

class BaseChatModelV1(BaseLanguageModel[AIMessageV1], ABC):
    """Base class for chat models.

    Key imperative methods:
        Methods that actually call the underlying model.

        +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
        | Method | Input | Output | Description |
        +===========================+================================================================+=====================================================================+==================================================================================================+
        | `invoke` | str | list[dict | tuple | BaseMessage] | PromptValue | BaseMessage | A single chat model call. |
        +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
        | `ainvoke` | ''' | BaseMessage | Defaults to running invoke in an async executor. |
        +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
        | `stream` | ''' | Iterator[BaseMessageChunk] | Defaults to yielding output of invoke. |
        +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
        | `astream` | ''' | AsyncIterator[BaseMessageChunk] | Defaults to yielding output of ainvoke. |
        +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
        | `astream_events` | ''' | AsyncIterator[StreamEvent] | Event types: 'on_chat_model_start', 'on_chat_model_stream', 'on_chat_model_end'. |
        +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
        | `batch` | list['''] | list[BaseMessage] | Defaults to running invoke in concurrent threads. |
        +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
        | `abatch` | list['''] | list[BaseMessage] | Defaults to running ainvoke in concurrent threads. |
        +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
        | `batch_as_completed` | list['''] | Iterator[tuple[int, Union[BaseMessage, Exception]]] | Defaults to running invoke in concurrent threads. |
        +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
        | `abatch_as_completed` | list['''] | AsyncIterator[tuple[int, Union[BaseMessage, Exception]]] | Defaults to running ainvoke in concurrent threads. |
        +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+

        This table provides a brief overview of the main imperative methods. Please see the base Runnable reference for full documentation.

    Key declarative methods:
        Methods for creating another Runnable using the ChatModel.

        +----------------------------------+-----------------------------------------------------------------------------------------------------------+
        | Method | Description |
        +==================================+===========================================================================================================+
        | `bind_tools` | Create ChatModel that can call tools. |
        +----------------------------------+-----------------------------------------------------------------------------------------------------------+
        | `with_structured_output` | Create wrapper that structures model output using schema. |
        +----------------------------------+-----------------------------------------------------------------------------------------------------------+
        | `with_retry` | Create wrapper that retries model calls on failure. |
        +----------------------------------+-----------------------------------------------------------------------------------------------------------+
        | `with_fallbacks` | Create wrapper that falls back to other models on failure. |
        +----------------------------------+-----------------------------------------------------------------------------------------------------------+
        | `configurable_fields` | Specify init args of the model that can be configured at runtime via the RunnableConfig. |
        +----------------------------------+-----------------------------------------------------------------------------------------------------------+
        | `configurable_alternatives` | Specify alternative models which can be swapped in at runtime via the RunnableConfig. |
        +----------------------------------+-----------------------------------------------------------------------------------------------------------+

        This table provides a brief overview of the main declarative methods. Please see the reference for each method for full documentation.

    Creating custom chat model:
        Custom chat model implementations should inherit from this class.
        Please reference the table below for information about which
        methods and properties are required or optional for implementations.

        +----------------------------------+--------------------------------------------------------------------+-------------------+
        | Method/Property | Description | Required/Optional |
        +==================================+====================================================================+===================+
        | `_generate` | Use to generate a chat result from a prompt | Required |
        +----------------------------------+--------------------------------------------------------------------+-------------------+
        | `_llm_type` (property) | Used to uniquely identify the type of the model. Used for logging. | Required |
        +----------------------------------+--------------------------------------------------------------------+-------------------+
        | `_identifying_params` (property) | Represent model parameterization for tracing purposes. | Optional |
        +----------------------------------+--------------------------------------------------------------------+-------------------+
        | `_stream` | Use to implement streaming | Optional |
        +----------------------------------+--------------------------------------------------------------------+-------------------+
        | `_agenerate` | Use to implement a native async method | Optional |
        +----------------------------------+--------------------------------------------------------------------+-------------------+
        | `_astream` | Use to implement async version of `_stream` | Optional |
        +----------------------------------+--------------------------------------------------------------------+-------------------+

        Follow the guide for more information on how to implement a custom Chat Model:
        [Guide](https://python.langchain.com/docs/how_to/custom_chat_model/).

    """  # noqa: E501

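A minimal custom-model sketch, not part of the diff. Note the summary table above still names `_generate`/`_agenerate`, but this v1 base class dispatches through the `_invoke`/`_stream` hooks defined later in this file, so the sketch implements those; `EchoChatModel` and its behavior are hypothetical, and the assumption that message content is a list of content-block dicts follows `_generate_response_from_error` above.

```python
from collections.abc import Iterator
from typing import Any

from langchain_core.language_models.v1.chat_models import BaseChatModelV1
from langchain_core.messages.v1 import AIMessageChunkV1, AIMessageV1, MessageV1


class EchoChatModel(BaseChatModelV1):
    """Toy model that echoes the text of the last input message."""

    @property
    def _llm_type(self) -> str:
        return "echo-chat"

    def _invoke(self, messages: list[MessageV1], **kwargs: Any) -> AIMessageV1:
        # Assumes content is a list of content-block dicts.
        text = "".join(
            block.get("text", "")
            for block in messages[-1].content
            if block.get("type") == "text"
        )
        return AIMessageV1(content=[{"type": "text", "text": text}])

    def _stream(
        self, messages: list[MessageV1], **kwargs: Any
    ) -> Iterator[AIMessageChunkV1]:
        # Optional streaming hook: emit the echo as a single chunk.
        msg = self._invoke(messages, **kwargs)
        yield AIMessageChunkV1(content=msg.content)
```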
    rate_limiter: Optional[BaseRateLimiter] = Field(default=None, exclude=True)
    "An optional rate limiter to use for limiting the number of requests."

    disable_streaming: Union[bool, Literal["tool_calling"]] = False
    """Whether to disable streaming for this model.

    If streaming is bypassed, then ``stream()``/``astream()``/``astream_events()`` will
    defer to ``invoke()``/``ainvoke()``.

    - If True, will always bypass streaming case.
    - If ``'tool_calling'``, will bypass streaming case only when the model is called
      with a ``tools`` keyword argument. In other words, LangChain will automatically
      switch to non-streaming behavior (``invoke()``) only when the tools argument is
      provided. This offers the best of both worlds.
    - If False (default), will always use streaming case if available.

    The main reason for this flag is that code might be written using ``.stream()`` and
    a user may want to swap out a given model for another model whose implementation
    does not properly support streaming.
    """

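A hedged sketch of the ``'tool_calling'`` mode described above; ``ChatModel`` and ``my_tool`` are placeholders:

```python
llm = ChatModel(model="model-name", disable_streaming="tool_calling")
llm_with_tools = llm.bind_tools([my_tool])
for chunk in llm_with_tools.stream("What's the weather?"):
    # Because tools are bound, this is internally a single invoke() call and
    # the result is yielded as one chunk.
    print(chunk)
```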
    model_config = ConfigDict(
        arbitrary_types_allowed=True,
    )

    # --- Runnable methods ---

    @property
    @override
    def OutputType(self) -> Any:
        """Get the output type for this runnable."""
        return AIMessageV1

    def _convert_input(self, model_input: LanguageModelInput) -> list[MessageV1]:
        if isinstance(model_input, PromptValue):
            return model_input.to_messages(output_version="v1")
        if isinstance(model_input, str):
            return [HumanMessageV1(content=model_input)]
        if isinstance(model_input, Sequence):
            return convert_to_messages_v1(model_input)
        msg = (
            f"Invalid input type {type(model_input)}. "
            "Must be a PromptValue, str, or list of BaseMessages."
        )
        raise ValueError(msg)

    def _should_stream(
        self,
        *,
        async_api: bool,
        run_manager: Optional[
            Union[CallbackManagerForLLMRun, AsyncCallbackManagerForLLMRun]
        ] = None,
        **kwargs: Any,
    ) -> bool:
        """Determine if a given model call should hit the streaming API."""
        sync_not_implemented = type(self)._stream == BaseChatModelV1._stream  # noqa: SLF001
        async_not_implemented = type(self)._astream == BaseChatModelV1._astream  # noqa: SLF001

        # Check if streaming is implemented.
        if (not async_api) and sync_not_implemented:
            return False
        # Note, since async falls back to sync we check both here.
        if async_api and async_not_implemented and sync_not_implemented:
            return False

        # Check if streaming has been disabled on this instance.
        if self.disable_streaming is True:
            return False
        # We assume tools are passed in via "tools" kwarg in all models.
        if self.disable_streaming == "tool_calling" and kwargs.get("tools"):
            return False

        # Check if a runtime streaming flag has been passed in.
        if "stream" in kwargs:
            return kwargs["stream"]

        # Check if any streaming callback handlers have been passed in.
        handlers = run_manager.handlers if run_manager else []
        return any(isinstance(h, _StreamingCallbackHandler) for h in handlers)

    @override
    def invoke(
        self,
        input: LanguageModelInput,
        config: Optional[RunnableConfig] = None,
        **kwargs: Any,
    ) -> AIMessageV1:
        config = ensure_config(config)
        messages = self._convert_input(input)
        ls_structured_output_format = kwargs.pop(
            "ls_structured_output_format", None
        ) or kwargs.pop("structured_output_format", None)
        ls_structured_output_format_dict = _format_ls_structured_output(
            ls_structured_output_format
        )

        params = self._get_invocation_params(**kwargs)
        options = {**kwargs, **ls_structured_output_format_dict}
        inheritable_metadata = {
            **(config.get("metadata") or {}),
            **self._get_ls_params(**kwargs),
        }
        callback_manager = CallbackManager.configure(
            config.get("callbacks"),
            self.callbacks,
            self.verbose,
            config.get("tags"),
            self.tags,
            inheritable_metadata,
            self.metadata,
        )
        (run_manager,) = callback_manager.on_chat_model_start(
            {},
            [_format_for_tracing(messages)],
            invocation_params=params,
            options=options,
            name=config.get("run_name"),
            run_id=config.pop("run_id", None),
            batch_size=1,
        )

        if self.rate_limiter:
            self.rate_limiter.acquire(blocking=True)

        input_messages = _normalize_messages(messages)

        if self._should_stream(async_api=False, **kwargs):
            chunks: list[AIMessageChunkV1] = []
            try:
                for msg in self._stream(input_messages, **kwargs):
                    run_manager.on_llm_new_token(msg)
                    chunks.append(msg)
            except BaseException as e:
                run_manager.on_llm_error(e, response=_generate_response_from_error(e))
                raise
            msg = add_ai_message_chunks(chunks[0], *chunks[1:])
        else:
            try:
                msg = self._invoke(input_messages, **kwargs)
            except BaseException as e:
                run_manager.on_llm_error(e)
                raise

        run_manager.on_llm_end(msg)
        return msg

    @override
    async def ainvoke(
        self,
        input: LanguageModelInput,
        config: Optional[RunnableConfig] = None,
        **kwargs: Any,
    ) -> AIMessageV1:
        config = ensure_config(config)
        messages = self._convert_input(input)
        ls_structured_output_format = kwargs.pop(
            "ls_structured_output_format", None
        ) or kwargs.pop("structured_output_format", None)
        ls_structured_output_format_dict = _format_ls_structured_output(
            ls_structured_output_format
        )

        params = self._get_invocation_params(**kwargs)
        options = {**kwargs, **ls_structured_output_format_dict}
        inheritable_metadata = {
            **(config.get("metadata") or {}),
            **self._get_ls_params(**kwargs),
        }
        callback_manager = AsyncCallbackManager.configure(
            config.get("callbacks"),
            self.callbacks,
            self.verbose,
            config.get("tags"),
            self.tags,
            inheritable_metadata,
            self.metadata,
        )
        (run_manager,) = await callback_manager.on_chat_model_start(
            {},
            [_format_for_tracing(messages)],
            invocation_params=params,
            options=options,
            name=config.get("run_name"),
            run_id=config.pop("run_id", None),
            batch_size=1,
        )

        if self.rate_limiter:
            await self.rate_limiter.aacquire(blocking=True)

        # TODO: type openai image, audio, file types and permit in MessageV1
        input_messages = _normalize_messages(messages)  # type: ignore[arg-type]

        if self._should_stream(async_api=True, **kwargs):
            chunks: list[AIMessageChunkV1] = []
            try:
                async for msg in self._astream(input_messages, **kwargs):
                    await run_manager.on_llm_new_token(msg)
                    chunks.append(msg)
            except BaseException as e:
                await run_manager.on_llm_error(
                    e, response=_generate_response_from_error(e)
                )
                raise
            msg = add_ai_message_chunks(chunks[0], *chunks[1:])
        else:
            try:
                msg = await self._ainvoke(input_messages, **kwargs)
            except BaseException as e:
                await run_manager.on_llm_error(
                    e, response=_generate_response_from_error(e)
                )
                raise

        await run_manager.on_llm_end(msg.to_message())
        return msg

    @override
    def stream(
        self,
        input: LanguageModelInput,
        config: Optional[RunnableConfig] = None,
        **kwargs: Any,
    ) -> Iterator[AIMessageChunkV1]:
        if not self._should_stream(async_api=False, **{**kwargs, "stream": True}):
            # model doesn't implement streaming, so use default implementation
            yield cast(
                "AIMessageChunkV1",
                self.invoke(input, config=config, **kwargs),
            )
        else:
            config = ensure_config(config)
            messages = self._convert_input(input)
            ls_structured_output_format = kwargs.pop(
                "ls_structured_output_format", None
            ) or kwargs.pop("structured_output_format", None)
            ls_structured_output_format_dict = _format_ls_structured_output(
                ls_structured_output_format
            )

            params = self._get_invocation_params(**kwargs)
            options = {**kwargs, **ls_structured_output_format_dict}
            inheritable_metadata = {
                **(config.get("metadata") or {}),
                **self._get_ls_params(**kwargs),
            }
            callback_manager = CallbackManager.configure(
                config.get("callbacks"),
                self.callbacks,
                self.verbose,
                config.get("tags"),
                self.tags,
                inheritable_metadata,
                self.metadata,
            )
            (run_manager,) = callback_manager.on_chat_model_start(
                {},
                [_format_for_tracing(messages)],
                invocation_params=params,
                options=options,
                name=config.get("run_name"),
                run_id=config.pop("run_id", None),
                batch_size=1,
            )

            chunks: list[AIMessageChunkV1] = []

            if self.rate_limiter:
                self.rate_limiter.acquire(blocking=True)

            try:
                # TODO: replace this with something for new messages
                input_messages = _normalize_messages(messages)
                for msg in self._stream(input_messages, **kwargs):
                    run_manager.on_llm_new_token(msg)
                    chunks.append(msg)
                    yield msg
            except BaseException as e:
                run_manager.on_llm_error(e, response=_generate_response_from_error(e))
                raise

            msg = add_ai_message_chunks(chunks[0], *chunks[1:])
            run_manager.on_llm_end(msg)

    @override
    async def astream(
        self,
        input: LanguageModelInput,
        config: Optional[RunnableConfig] = None,
        **kwargs: Any,
    ) -> AsyncIterator[AIMessageChunkV1]:
        if not self._should_stream(async_api=True, **{**kwargs, "stream": True}):
            # No async or sync stream is implemented, so fall back to ainvoke
            yield cast(
                "AIMessageChunkV1",
                await self.ainvoke(input, config=config, **kwargs),
            )
            return

        config = ensure_config(config)
        messages = self._convert_input(input)

        ls_structured_output_format = kwargs.pop(
            "ls_structured_output_format", None
        ) or kwargs.pop("structured_output_format", None)
        ls_structured_output_format_dict = _format_ls_structured_output(
            ls_structured_output_format
        )

        params = self._get_invocation_params(**kwargs)
        options = {**kwargs, **ls_structured_output_format_dict}
        inheritable_metadata = {
            **(config.get("metadata") or {}),
            **self._get_ls_params(**kwargs),
        }
        callback_manager = AsyncCallbackManager.configure(
            config.get("callbacks"),
            self.callbacks,
            self.verbose,
            config.get("tags"),
            self.tags,
            inheritable_metadata,
            self.metadata,
        )
        (run_manager,) = await callback_manager.on_chat_model_start(
            {},
            [_format_for_tracing(messages)],
            invocation_params=params,
            options=options,
            name=config.get("run_name"),
            run_id=config.pop("run_id", None),
            batch_size=1,
        )

        if self.rate_limiter:
            await self.rate_limiter.aacquire(blocking=True)

        chunks: list[AIMessageChunkV1] = []

        try:
            input_messages = _normalize_messages(messages)
            async for msg in self._astream(
                input_messages,
                **kwargs,
            ):
                await run_manager.on_llm_new_token(msg)
                chunks.append(msg)
                yield msg
        except BaseException as e:
            await run_manager.on_llm_error(e, response=_generate_response_from_error(e))
            raise

        msg = add_ai_message_chunks(chunks[0], *chunks[1:])
        await run_manager.on_llm_end(msg)

    # --- Custom methods ---

    def _combine_llm_outputs(self, llm_outputs: list[Optional[dict]]) -> dict:  # noqa: ARG002
        return {}

    def _get_invocation_params(
        self,
        stop: Optional[list[str]] = None,
        **kwargs: Any,
    ) -> dict:
        params = self.dict()
        params["stop"] = stop
        return {**params, **kwargs}

    def _get_ls_params(
        self,
        stop: Optional[list[str]] = None,
        **kwargs: Any,
    ) -> LangSmithParams:
        """Get standard params for tracing."""
        # get default provider from class name
        default_provider = self.__class__.__name__
        if default_provider.startswith("Chat"):
            default_provider = default_provider[4:].lower()
        elif default_provider.endswith("Chat"):
            default_provider = default_provider[:-4]
        default_provider = default_provider.lower()

        ls_params = LangSmithParams(ls_provider=default_provider, ls_model_type="chat")
        if stop:
            ls_params["ls_stop"] = stop

        # model
        if hasattr(self, "model") and isinstance(self.model, str):
            ls_params["ls_model_name"] = self.model
        elif hasattr(self, "model_name") and isinstance(self.model_name, str):
            ls_params["ls_model_name"] = self.model_name

        # temperature
        if "temperature" in kwargs and isinstance(kwargs["temperature"], float):
            ls_params["ls_temperature"] = kwargs["temperature"]
        elif hasattr(self, "temperature") and isinstance(self.temperature, float):
            ls_params["ls_temperature"] = self.temperature

        # max_tokens
        if "max_tokens" in kwargs and isinstance(kwargs["max_tokens"], int):
            ls_params["ls_max_tokens"] = kwargs["max_tokens"]
        elif hasattr(self, "max_tokens") and isinstance(self.max_tokens, int):
            ls_params["ls_max_tokens"] = self.max_tokens

        return ls_params

    def _get_llm_string(self, stop: Optional[list[str]] = None, **kwargs: Any) -> str:
        params = self._get_invocation_params(stop=stop, **kwargs)
        params = {**params, **kwargs}
        return str(sorted(params.items()))

    def _invoke(
        self,
        messages: list[MessageV1],
        **kwargs: Any,
    ) -> AIMessageV1:
        raise NotImplementedError

    async def _ainvoke(
        self,
        messages: list[MessageV1],
        **kwargs: Any,
    ) -> AIMessageV1:
        return await run_in_executor(
            None,
            self._invoke,
            messages,
            **kwargs,
        )

    def _stream(
        self,
        messages: list[MessageV1],
        **kwargs: Any,
    ) -> Iterator[AIMessageChunkV1]:
        raise NotImplementedError

    async def _astream(
        self,
        messages: list[MessageV1],
        **kwargs: Any,
    ) -> AsyncIterator[AIMessageChunkV1]:
        iterator = await run_in_executor(
            None,
            self._stream,
            messages,
            **kwargs,
        )
        done = object()
        while True:
            item = await run_in_executor(
                None,
                next,
                iterator,
                done,
            )
            if item is done:
                break
            yield item  # type: ignore[misc]

    @property
    @abstractmethod
    def _llm_type(self) -> str:
        """Return type of chat model."""

    @override
    def dict(self, **kwargs: Any) -> dict:
        """Return a dictionary of the LLM."""
        starter_dict = dict(self._identifying_params)
        starter_dict["_type"] = self._llm_type
        return starter_dict

    def bind_tools(
        self,
        tools: Sequence[
            Union[typing.Dict[str, Any], type, Callable, BaseTool]  # noqa: UP006
        ],
        *,
        tool_choice: Optional[str] = None,
        **kwargs: Any,
    ) -> Runnable[LanguageModelInput, AIMessageV1]:
        """Bind tools to the model.

        Args:
            tools: Sequence of tools to bind to the model.
            tool_choice: The tool to use. If "any" then any tool can be used.

        Returns:
            A Runnable that returns a message.
        """
        raise NotImplementedError

    def with_structured_output(
        self,
        schema: Union[typing.Dict, type],  # noqa: UP006
        *,
        include_raw: bool = False,
        **kwargs: Any,
    ) -> Runnable[LanguageModelInput, Union[typing.Dict, BaseModel]]:  # noqa: UP006
        """Model wrapper that returns outputs formatted to match the given schema.

        Args:
            schema:
                The output schema. Can be passed in as:
                    - an OpenAI function/tool schema,
                    - a JSON Schema,
                    - a TypedDict class,
                    - or a Pydantic class.
                If ``schema`` is a Pydantic class then the model output will be a
                Pydantic instance of that class, and the model-generated fields will be
                validated by the Pydantic class. Otherwise the model output will be a
                dict and will not be validated. See :meth:`langchain_core.utils.function_calling.convert_to_openai_tool`
                for more on how to properly specify types and descriptions of
                schema fields when specifying a Pydantic or TypedDict class.

            include_raw:
                If False then only the parsed structured output is returned. If
                an error occurs during model output parsing it will be raised. If True
                then both the raw model response (a BaseMessage) and the parsed model
                response will be returned. If an error occurs during output parsing it
                will be caught and returned as well. The final output is always a dict
                with keys "raw", "parsed", and "parsing_error".

        Returns:
            A Runnable that takes same inputs as a :class:`langchain_core.language_models.chat.BaseChatModel`.

            If ``include_raw`` is False and ``schema`` is a Pydantic class, Runnable outputs
            an instance of ``schema`` (i.e., a Pydantic object).

            Otherwise, if ``include_raw`` is False then Runnable outputs a dict.

            If ``include_raw`` is True, then Runnable outputs a dict with keys:
                - ``"raw"``: BaseMessage
                - ``"parsed"``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above.
                - ``"parsing_error"``: Optional[BaseException]

        Example: Pydantic schema (include_raw=False):
            .. code-block:: python

                from pydantic import BaseModel


                class AnswerWithJustification(BaseModel):
                    '''An answer to the user question along with justification for the answer.'''

                    answer: str
                    justification: str


                llm = ChatModel(model="model-name", temperature=0)
                structured_llm = llm.with_structured_output(AnswerWithJustification)

                structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers")

                # -> AnswerWithJustification(
                #     answer='They weigh the same',
                #     justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
                # )

        Example: Pydantic schema (include_raw=True):
            .. code-block:: python

                from pydantic import BaseModel


                class AnswerWithJustification(BaseModel):
                    '''An answer to the user question along with justification for the answer.'''

                    answer: str
                    justification: str


                llm = ChatModel(model="model-name", temperature=0)
                structured_llm = llm.with_structured_output(AnswerWithJustification, include_raw=True)

                structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers")
                # -> {
                #     'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}),
                #     'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'),
                #     'parsing_error': None
                # }

        Example: Dict schema (include_raw=False):
            .. code-block:: python

                from pydantic import BaseModel
                from langchain_core.utils.function_calling import convert_to_openai_tool


                class AnswerWithJustification(BaseModel):
                    '''An answer to the user question along with justification for the answer.'''

                    answer: str
                    justification: str


                dict_schema = convert_to_openai_tool(AnswerWithJustification)
                llm = ChatModel(model="model-name", temperature=0)
                structured_llm = llm.with_structured_output(dict_schema)

                structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers")
                # -> {
                #     'answer': 'They weigh the same',
                #     'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
                # }

        .. versionchanged:: 0.2.26

            Added support for TypedDict class.
        """  # noqa: E501
        _ = kwargs.pop("method", None)
        _ = kwargs.pop("strict", None)
        if kwargs:
            msg = f"Received unsupported arguments {kwargs}"
            raise ValueError(msg)

        from langchain_core.output_parsers.openai_tools import (
            JsonOutputKeyToolsParser,
            PydanticToolsParser,
        )

        if type(self).bind_tools is BaseChatModelV1.bind_tools:
            msg = "with_structured_output is not implemented for this model."
            raise NotImplementedError(msg)

        llm = self.bind_tools(
            [schema],
            tool_choice="any",
            ls_structured_output_format={
                "kwargs": {"method": "function_calling"},
                "schema": schema,
            },
        )
        if isinstance(schema, type) and is_basemodel_subclass(schema):
            output_parser: OutputParserLike = PydanticToolsParser(
                tools=[cast("TypeBaseModel", schema)], first_tool_only=True
            )
        else:
            key_name = convert_to_openai_tool(schema)["function"]["name"]
            output_parser = JsonOutputKeyToolsParser(
                key_name=key_name, first_tool_only=True
            )
        if include_raw:
            parser_assign = RunnablePassthrough.assign(
                parsed=itemgetter("raw") | output_parser, parsing_error=lambda _: None
            )
            parser_none = RunnablePassthrough.assign(parsed=lambda _: None)
            parser_with_fallback = parser_assign.with_fallbacks(
                [parser_none], exception_key="parsing_error"
            )
            return RunnableMap(raw=llm) | parser_with_fallback
        return llm | output_parser

    def get_num_tokens_from_messages(
        self,
        messages: list[MessageV1],
        tools: Optional[Sequence] = None,
    ) -> int:
        """Get the number of tokens in the messages.

        Useful for checking if an input fits in a model's context window.

        **Note**: the base implementation of get_num_tokens_from_messages ignores
        tool schemas.

        Args:
            messages: The message inputs to tokenize.
            tools: If provided, sequence of dict, BaseModel, function, or BaseTools
                to be converted to tool schemas.

        Returns:
            The sum of the number of tokens across the messages.
        """
        messages = [_convert_from_v1_message(message) for message in messages]
        if tools is not None:
            warnings.warn(
                "Counting tokens in tool schemas is not yet supported. Ignoring tools.",
                stacklevel=2,
            )
        return sum(self.get_num_tokens(get_buffer_string([m])) for m in messages)

def _gen_info_and_msg_metadata(
    generation: Union[ChatGeneration, ChatGenerationChunk],
) -> dict:
    return {
        **(generation.generation_info or {}),
        **generation.message.response_metadata,
    }

@@ -53,6 +53,7 @@ class BaseMemory(Serializable, ABC):

            def clear(self) -> None:
                pass

    """  # noqa: E501

    model_config = ConfigDict(

@@ -33,6 +33,24 @@ if TYPE_CHECKING:
    )
    from langchain_core.messages.chat import ChatMessage, ChatMessageChunk
    from langchain_core.messages.content_blocks import (
        Annotation,
        AudioContentBlock,
        Citation,
        CodeInterpreterCall,
        CodeInterpreterOutput,
        CodeInterpreterResult,
        ContentBlock,
        DataContentBlock,
        FileContentBlock,
        ImageContentBlock,
        NonStandardAnnotation,
        NonStandardContentBlock,
        PlainTextContentBlock,
        ReasoningContentBlock,
        TextContentBlock,
        VideoContentBlock,
        WebSearchCall,
        WebSearchResult,
        convert_to_openai_data_block,
        convert_to_openai_image_block,
        is_data_content_block,
@@ -65,24 +83,42 @@ if TYPE_CHECKING:
__all__ = (
    "AIMessage",
    "AIMessageChunk",
    "Annotation",
    "AnyMessage",
    "AudioContentBlock",
    "BaseMessage",
    "BaseMessageChunk",
    "ChatMessage",
    "ChatMessageChunk",
    "Citation",
    "CodeInterpreterCall",
    "CodeInterpreterOutput",
    "CodeInterpreterResult",
    "ContentBlock",
    "DataContentBlock",
    "FileContentBlock",
    "FunctionMessage",
    "FunctionMessageChunk",
    "HumanMessage",
    "HumanMessageChunk",
    "ImageContentBlock",
    "InvalidToolCall",
    "MessageLikeRepresentation",
    "NonStandardAnnotation",
    "NonStandardContentBlock",
    "PlainTextContentBlock",
    "ReasoningContentBlock",
    "RemoveMessage",
    "SystemMessage",
    "SystemMessageChunk",
    "TextContentBlock",
    "ToolCall",
    "ToolCallChunk",
    "ToolMessage",
    "ToolMessageChunk",
    "VideoContentBlock",
    "WebSearchCall",
    "WebSearchResult",
    "_message_from_dict",
    "convert_to_messages",
    "convert_to_openai_data_block",
@@ -103,25 +139,43 @@ __all__ = (
_dynamic_imports = {
    "AIMessage": "ai",
    "AIMessageChunk": "ai",
    "Annotation": "content_blocks",
    "AudioContentBlock": "content_blocks",
    "BaseMessage": "base",
    "BaseMessageChunk": "base",
    "merge_content": "base",
    "message_to_dict": "base",
    "messages_to_dict": "base",
    "Citation": "content_blocks",
    "ContentBlock": "content_blocks",
    "ChatMessage": "chat",
    "ChatMessageChunk": "chat",
    "CodeInterpreterCall": "content_blocks",
    "CodeInterpreterOutput": "content_blocks",
    "CodeInterpreterResult": "content_blocks",
    "DataContentBlock": "content_blocks",
    "FileContentBlock": "content_blocks",
    "FunctionMessage": "function",
    "FunctionMessageChunk": "function",
    "HumanMessage": "human",
    "HumanMessageChunk": "human",
    "NonStandardAnnotation": "content_blocks",
    "NonStandardContentBlock": "content_blocks",
    "PlainTextContentBlock": "content_blocks",
    "ReasoningContentBlock": "content_blocks",
    "RemoveMessage": "modifier",
    "SystemMessage": "system",
    "SystemMessageChunk": "system",
    "WebSearchCall": "content_blocks",
    "WebSearchResult": "content_blocks",
    "ImageContentBlock": "content_blocks",
    "InvalidToolCall": "tool",
    "TextContentBlock": "content_blocks",
    "ToolCall": "tool",
    "ToolCallChunk": "tool",
    "ToolMessage": "tool",
    "ToolMessageChunk": "tool",
    "VideoContentBlock": "content_blocks",
    "AnyMessage": "utils",
    "MessageLikeRepresentation": "utils",
    "_message_from_dict": "utils",

@@ -57,6 +57,7 @@ class InputTokenDetails(TypedDict, total=False):
    .. versionadded:: 0.3.9

    May also hold extra provider-specific keys.

    """

    audio: int
@@ -89,6 +90,7 @@ class OutputTokenDetails(TypedDict, total=False):
        }

    .. versionadded:: 0.3.9

    """

    audio: int
@@ -128,6 +130,7 @@ class UsageMetadata(TypedDict):
    .. versionchanged:: 0.3.9

        Added ``input_token_details`` and ``output_token_details``.

    """

    input_tokens: int

@@ -1,110 +1,782 @@
"""Types for content blocks."""
"""Standard, multimodal content blocks for Large Language Model I/O.

.. warning::
    This module is under active development. The API is unstable and subject to
    change in future releases.

This module provides a standardized data structure for representing inputs to and
outputs from Large Language Models. The core abstraction is the **Content Block**, a
``TypedDict`` that can represent a piece of text, an image, a tool call, or other
structured data.

Data **not yet mapped** to a standard block may be represented using the
``NonStandardContentBlock``, which allows for provider-specific data to be included
without losing the benefits of type checking and validation.

Furthermore, provider-specific fields *within* a standard block will be allowed as extra
keys on the TypedDict per `PEP 728 <https://peps.python.org/pep-0728/>`__. This allows
for flexibility in the data structure while maintaining a consistent interface.

**Example using ``extra_items=Any``:**

.. code-block:: python

    from langchain_core.messages.content_blocks import TextContentBlock
    from typing import Any

    my_block: TextContentBlock = {
        "type": "text",
        "text": "Hello, world!",
        "extra_field": "This is allowed",
        "another_field": 42,  # Any type is allowed
    }

    # A type checker that supports PEP 728 would validate the object above.
    # Accessing the provider-specific key is possible, and its type is 'Any'.
    block_extra_field = my_block["extra_field"]

.. warning::
    Type checkers such as MyPy do not yet support `PEP 728 <https://peps.python.org/pep-0728/>`__,
    so you may see type errors when using provider-specific fields. These are safe to
    ignore, as the fields are still validated at runtime.

**Rationale**

Different LLM providers use distinct and incompatible API schemas. This module
introduces a unified, provider-agnostic format to standardize these interactions. A
message to or from a model is simply a `list` of `ContentBlock` objects, allowing for
the natural interleaving of text, images, and other content in a single, ordered
sequence.

An adapter for a specific provider is responsible for translating this standard list of
blocks into the format required by its API.

**Key Block Types**

The module defines several types of content blocks, including:

- **``TextContentBlock``**: Standard text.
- **``ImageContentBlock``**, **``AudioContentBlock``**, **``VideoContentBlock``**: For
  multimodal data.
- **``ToolCallContentBlock``**, **``ToolOutputContentBlock``**: For function calling.
- **``ReasoningContentBlock``**: To capture a model's thought process.
- **``Citation``**: For annotations that link generated text to a source document.

**Example Usage**

.. code-block:: python

    from langchain_core.messages.content_blocks import TextContentBlock, ImageContentBlock

    multimodal_message: AIMessage = [
        TextContentBlock(type="text", text="What is shown in this image?"),
        ImageContentBlock(
            type="image",
            url="https://www.langchain.com/images/brand/langchain_logo_text_w_white.png",
            mime_type="image/png",
        ),
    ]
"""  # noqa: E501

import warnings
from typing import Any, Literal, Union
from typing import Any, Literal, Optional, Union

from pydantic import TypeAdapter, ValidationError
from typing_extensions import NotRequired, TypedDict
from typing_extensions import NotRequired, TypedDict, get_args, get_origin

# --- Text and annotations ---


class BaseDataContentBlock(TypedDict, total=False):
    """Base class for data content blocks."""
class Citation(TypedDict):
    """Annotation for citing data from a document.

    .. note::
        ``start/end`` indices refer to the **response text**,
        not the source text. This means that the indices are relative to the model's
        response, not the original document (as specified in the ``url``).
    """

    type: Literal["citation"]
    """Type of the content block."""

    id: NotRequired[str]
    """Content block identifier. Either:

    - Generated by the provider (e.g., OpenAI's file ID)
    - Generated by LangChain upon creation (as ``UUID4``)
    """

    url: NotRequired[str]
    """URL of the document source."""

    # For future consideration, if needed:
    # provenance: NotRequired[str]
    # """Provenance of the document, e.g., "Wikipedia", "arXiv", etc.

    # Included for future compatibility; not currently implemented.
    # """

    title: NotRequired[str]
    """Source document title.

    For example, the page title for a web page or the title of a paper.
    """

    start_index: NotRequired[int]
    """Start index of the **response text** (``TextContentBlock.text``) for which the
    annotation applies."""

    end_index: NotRequired[int]
    """End index of the **response text** (``TextContentBlock.text``) for which the
    annotation applies."""

    cited_text: NotRequired[str]
    """Excerpt of source text being cited."""

    # NOTE: not including spans for the raw document text (such as `text_start_index`
    # and `text_end_index`) as this is not currently supported by any provider. The
    # thinking is that the `cited_text` should be sufficient for most use cases, and it
    # is difficult to reliably extract spans from the raw document text across file
    # formats or encoding schemes.


class NonStandardAnnotation(TypedDict):
    """Provider-specific annotation format."""

    type: Literal["non_standard_annotation"]
    """Type of the content block."""

    id: NotRequired[str]
    """Content block identifier. Either:

    - Generated by the provider (e.g., OpenAI's file ID)
    - Generated by LangChain upon creation (as ``UUID4``)
    """

    value: dict[str, Any]
    """Provider-specific annotation data."""


Annotation = Union[Citation, NonStandardAnnotation]


class TextContentBlock(TypedDict):
    """Content block for text output.

    This typically represents the main text content of a message, such as the response
    from a language model or the text of a user message.
    """

    type: Literal["text"]
    """Type of the content block."""

    id: NotRequired[str]
    """Content block identifier. Either:

    - Generated by the provider (e.g., OpenAI's file ID)
    - Generated by LangChain upon creation (as ``UUID4``)
    """

    text: str
    """Block text."""

    annotations: NotRequired[list[Annotation]]
    """Citations and other annotations."""

    index: NotRequired[int]
    """Index of block in aggregate response. Used during streaming."""


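A small sketch tying the two block types above together: a text block whose first 25 characters of response text carry a citation. All values are illustrative:

```python
from langchain_core.messages.content_blocks import Citation, TextContentBlock

citation: Citation = {
    "type": "citation",
    "url": "https://example.com/source",
    "title": "Example Source",
    "start_index": 0,  # indices refer to the response text below
    "end_index": 25,
    "cited_text": "LangChain is a framework",
}
block: TextContentBlock = {
    "type": "text",
    "text": "LangChain is a framework for building LLM apps.",
    "annotations": [citation],
}
```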
# --- Tool calls ---
class ToolCall(TypedDict):
    """Represents a request to call a tool.

    Example:

        .. code-block:: python

            {
                "name": "foo",
                "args": {"a": 1},
                "id": "123"
            }

        This represents a request to call the tool named "foo" with arguments {"a": 1}
        and an identifier of "123".
    """

    name: str
    """The name of the tool to be called."""
    args: dict[str, Any]
    """The arguments to the tool call."""
    id: Optional[str]
    """An identifier associated with the tool call.

    An identifier is needed to associate a tool call request with a tool
    call result in events when multiple concurrent tool calls are made.
    """
    index: NotRequired[int]
    """Index of block in aggregate response. Used during streaming."""
    type: Literal["tool_call"]


class InvalidToolCall(TypedDict):
    """Allowance for errors made by LLM.

    Here we add an `error` key to surface errors made during generation
    (e.g., invalid JSON arguments.)
    """

    name: Optional[str]
    """The name of the tool to be called."""
    args: Optional[str]
    """The arguments to the tool call."""
    id: Optional[str]
    """An identifier associated with the tool call."""
    error: Optional[str]
    """An error message associated with the tool call."""
    type: Literal["invalid_tool_call"]


class ToolCallChunk(TypedDict):
    """A chunk of a tool call (e.g., as part of a stream).

    When merging ToolCallChunks (e.g., via AIMessageChunk.__add__),
    all string attributes are concatenated. Chunks are only merged if their
    values of `index` are equal and not None.

    Example:

        .. code-block:: python

            left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)]
            right_chunks = [ToolCallChunk(name=None, args='1}', index=0)]

            (
                AIMessageChunk(content="", tool_call_chunks=left_chunks)
                + AIMessageChunk(content="", tool_call_chunks=right_chunks)
            ).tool_call_chunks == [ToolCallChunk(name='foo', args='{"a":1}', index=0)]
    """

    name: Optional[str]
    """The name of the tool to be called."""
    args: Optional[str]
    """The arguments to the tool call."""
    id: Optional[str]
    """An identifier associated with the tool call."""
    index: Optional[int]
    """The index of the tool call in a sequence."""
    type: NotRequired[Literal["tool_call_chunk"]]


# --- Provider tool calls (built-in tools) ---
|
||||
# Note: These are not standard tool calls, but rather provider-specific built-in tools.
|
||||
|
||||
|
||||
# Web search
|
||||
class WebSearchCall(TypedDict):
|
||||
"""Content block for a built-in web search tool call."""
|
||||
|
||||
type: Literal["web_search_call"]
|
||||
"""Type of the content block."""
|
||||
|
||||
id: NotRequired[str]
|
||||
"""Content block identifier. Either:
|
||||
|
||||
- Generated by the provider (e.g., OpenAI's file ID)
|
||||
- Generated by LangChain upon creation (as ``UUID4``)
|
||||
"""
|
||||
|
||||
query: NotRequired[str]
|
||||
"""The search query used in the web search tool call."""
|
||||
|
||||
index: NotRequired[int]
|
||||
"""Index of block in aggregate response. Used during streaming."""
|
||||
|
||||
|
||||
class WebSearchResult(TypedDict):
|
||||
"""Content block for the result of a built-in web search tool call."""
|
||||
|
||||
type: Literal["web_search_result"]
|
||||
"""Type of the content block."""
|
||||
|
||||
id: NotRequired[str]
|
||||
"""Content block identifier. Either:
|
||||
|
||||
- Generated by the provider (e.g., OpenAI's file ID)
|
||||
- Generated by LangChain upon creation (as ``UUID4``)
|
||||
"""
|
||||
|
||||
urls: NotRequired[list[str]]
|
||||
"""List of URLs returned by the web search tool call."""
|
||||
|
||||
index: NotRequired[int]
|
||||
"""Index of block in aggregate response. Used during streaming."""
|
||||
|
||||
|
||||

# Code interpreter


# Call
class CodeInterpreterCall(TypedDict):
    """Content block for a built-in code interpreter tool call."""

    type: Literal["code_interpreter_call"]
    """Type of the content block."""

    id: NotRequired[str]
    """Content block identifier. Either:

    - Generated by the provider (e.g., OpenAI's file ID)
    - Generated by LangChain upon creation (as ``UUID4``)
    """

    language: NotRequired[str]
    """The programming language used in the code interpreter tool call."""

    code: NotRequired[str]
    """The code to be executed by the code interpreter."""

    index: NotRequired[int]
    """Index of block in aggregate response. Used during streaming."""


# Result block is CodeInterpreterResult
class CodeInterpreterOutput(TypedDict):
    """Content block for the output of a singular code interpreter tool call.

    Full output of a code interpreter tool call is represented by
    ``CodeInterpreterResult`` which is a list of these blocks.
    """

    type: Literal["code_interpreter_output"]
    """Type of the content block."""

    id: NotRequired[str]
    """Content block identifier. Either:

    - Generated by the provider (e.g., OpenAI's file ID)
    - Generated by LangChain upon creation (as ``UUID4``)
    """

    return_code: NotRequired[int]
    """Return code of the executed code.

    Example: 0 for success, non-zero for failure.
    """

    stderr: NotRequired[str]
    """Standard error output of the executed code."""

    stdout: NotRequired[str]
    """Standard output of the executed code."""

    file_ids: NotRequired[list[str]]
    """List of file IDs generated by the code interpreter."""

    index: NotRequired[int]
    """Index of block in aggregate response. Used during streaming."""


class CodeInterpreterResult(TypedDict):
    """Content block for the result of a code interpreter tool call."""

    type: Literal["code_interpreter_result"]
    """Type of the content block."""

    id: NotRequired[str]
    """Content block identifier. Either:

    - Generated by the provider (e.g., OpenAI's file ID)
    - Generated by LangChain upon creation (as ``UUID4``)
    """

    output: list[CodeInterpreterOutput]
    """List of outputs from the code interpreter tool call."""

    index: NotRequired[int]
    """Index of block in aggregate response. Used during streaming."""
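A hypothetical round trip through these blocks (all values invented for illustration), with the aggregate result wrapping a single `CodeInterpreterOutput`:

```python
# Hypothetical built-in code interpreter call and its aggregated result.
call = {
    "type": "code_interpreter_call",
    "language": "python",
    "code": "print(2 + 2)",
}

result = {
    "type": "code_interpreter_result",
    "output": [
        {
            "type": "code_interpreter_output",
            "return_code": 0,  # 0 for success, non-zero for failure
            "stdout": "4\n",
        },
    ],
}
```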

# --- Reasoning ---
class ReasoningContentBlock(TypedDict):
    """Content block for reasoning output."""

    type: Literal["reasoning"]
    """Type of the content block."""

    id: NotRequired[str]
    """Content block identifier. Either:

    - Generated by the provider (e.g., OpenAI's file ID)
    - Generated by LangChain upon creation (as ``UUID4``)
    """

    reasoning: NotRequired[str]
    """Reasoning text.

    Either the thought summary or the raw reasoning text itself. This is often parsed
    from ``<think>`` tags in the model's response.
    """

    thought_signature: NotRequired[str]
    """Opaque state handle representation of the model's internal thought process.

    Maintains the context of the model's thinking across multiple interactions
    (e.g. multi-turn conversations) since many APIs are stateless.

    Not to be used to verify authenticity or integrity of the response (`'signature'`).

    Examples:
    - https://ai.google.dev/gemini-api/docs/thinking#signatures
    """

    signature: NotRequired[str]
    """Signature of the reasoning content block used to verify **authenticity**.

    Prevents modifying or fabricating the model's reasoning process.

    Examples:
    - https://docs.anthropic.com/en/docs/build-with-claude/context-windows#the-context-window-with-extended-thinking-and-tool-use
    """

    index: NotRequired[int]
    """Index of block in aggregate response. Used during streaming."""
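Since the `reasoning` field is often parsed from ``<think>`` tags, here is a minimal sketch of that parsing step (the tag convention and helper logic are illustrative, not part of this diff):

```python
import re

response = "<think>The user asked for 2 + 2, which is 4.</think>The answer is 4."

# Split a raw completion into a reasoning block followed by a text block.
blocks = []
match = re.search(r"<think>(.*?)</think>", response, re.DOTALL)
if match:
    blocks.append({"type": "reasoning", "reasoning": match.group(1)})
remainder = response[match.end() :] if match else response
blocks.append({"type": "text", "text": remainder})
print(blocks)
```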

# --- Multi-modal ---


# Note: `title` and `context` are fields that could be used to provide additional
# information about the file, such as a description or summary of its content.
# E.g. with Claude, you can provide a context for a file which is passed to the model.
class ImageContentBlock(TypedDict):
    """Content block for image data."""

    type: Literal["image"]
    """Type of the content block."""

    id: NotRequired[str]
    """Content block identifier. Either:

    - Generated by the provider (e.g., OpenAI's file ID)
    - Generated by LangChain upon creation (as ``UUID4``)
    """

    file_id: NotRequired[str]
    """ID of the image file, e.g., from a file storage system."""

    mime_type: NotRequired[str]
    """MIME type of the image. Required for base64.

    `Examples from IANA <https://www.iana.org/assignments/media-types/media-types.xhtml#image>`__
    """

    index: NotRequired[int]
    """Index of block in aggregate response. Used during streaming."""

    url: NotRequired[str]
    """URL of the image."""

    base64: NotRequired[str]
    """Data as a base64 string."""

    # title: NotRequired[str]
    # """Title of the image."""

    # context: NotRequired[str]
    # """Context for the image, e.g., a description or summary of the image's content."""  # noqa: E501


# Removed in this diff, superseded by ImageContentBlock above and the
# modality-specific blocks below (the old ``mime_type`` docstring read
# """MIME type of the content block (if needed)."""):
#
#     class URLContentBlock(BaseDataContentBlock):
#         """Content block for data from a URL."""
#
#         type: Literal["image", "audio", "file"]
#         """Type of the content block."""
#         source_type: Literal["url"]
#         """Source type (url)."""
#         url: str
#         """URL for data."""
#
#     class Base64ContentBlock(BaseDataContentBlock):
#         """Content block for inline data from a base64 string."""
#
#         type: Literal["image", "audio", "file"]
#         """Type of the content block."""
#         source_type: Literal["base64"]
#         """Source type (base64)."""
#         data: str
#         """Data as a base64 string."""
#
#     class PlainTextContentBlock(BaseDataContentBlock):
#         """Content block for plain text data (e.g., from a document)."""
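To make the migration concrete, the same image in the legacy and new shapes (payloads are placeholders):

```python
# Legacy shape: the encoding lived in `source_type` and the payload in `data`.
old_image = {
    "type": "image",
    "source_type": "base64",
    "mime_type": "image/png",
    "data": "iVBORw0KGgo...",  # placeholder, not a real image
}

# New shape: the key itself (`base64` or `url`) names the encoding.
new_image = {
    "type": "image",
    "mime_type": "image/png",
    "base64": "iVBORw0KGgo...",
}
```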

class VideoContentBlock(TypedDict):
    """Content block for video data."""

    type: Literal["video"]
    """Type of the content block."""

    id: NotRequired[str]
    """Content block identifier. Either:

    - Generated by the provider (e.g., OpenAI's file ID)
    - Generated by LangChain upon creation (as ``UUID4``)
    """

    file_id: NotRequired[str]
    """ID of the video file, e.g., from a file storage system."""

    mime_type: NotRequired[str]
    """MIME type of the video. Required for base64.

    `Examples from IANA <https://www.iana.org/assignments/media-types/media-types.xhtml#video>`__
    """

    index: NotRequired[int]
    """Index of block in aggregate response. Used during streaming."""

    url: NotRequired[str]
    """URL of the video."""

    base64: NotRequired[str]
    """Data as a base64 string."""

    # title: NotRequired[str]
    # """Title of the video."""

    # context: NotRequired[str]
    # """Context for the video, e.g., a description or summary of the video's content."""  # noqa: E501


class AudioContentBlock(TypedDict):
    """Content block for audio data."""

    type: Literal["audio"]
    """Type of the content block."""

    id: NotRequired[str]
    """Content block identifier. Either:

    - Generated by the provider (e.g., OpenAI's file ID)
    - Generated by LangChain upon creation (as ``UUID4``)
    """

    file_id: NotRequired[str]
    """ID of the audio file, e.g., from a file storage system."""

    mime_type: NotRequired[str]
    """MIME type of the audio. Required for base64.

    `Examples from IANA <https://www.iana.org/assignments/media-types/media-types.xhtml#audio>`__
    """

    index: NotRequired[int]
    """Index of block in aggregate response. Used during streaming."""

    url: NotRequired[str]
    """URL of the audio."""

    base64: NotRequired[str]
    """Data as a base64 string."""

    # title: NotRequired[str]
    # """Title of the audio."""

    # context: NotRequired[str]
    # """Context for the audio, e.g., a description or summary of the audio's content."""  # noqa: E501

class PlainTextContentBlock(TypedDict):
    """Content block for plaintext data (e.g., from a document).

    .. note::
        Title and context are optional fields that may be passed to the model. See
        Anthropic `example <https://docs.anthropic.com/en/docs/build-with-claude/citations#citable-vs-non-citable-content>`__.
    """

    type: Literal["text-plain"]
    """Type of the content block."""

    id: NotRequired[str]
    """Content block identifier. Either:

    - Generated by the provider (e.g., OpenAI's file ID)
    - Generated by LangChain upon creation (as ``UUID4``)
    """

    file_id: NotRequired[str]
    """ID of the plaintext file, e.g., from a file storage system."""

    mime_type: Literal["text/plain"]
    """MIME type of the file. Required for base64."""

    index: NotRequired[int]
    """Index of block in aggregate response. Used during streaming."""

    url: NotRequired[str]
    """URL of the plaintext."""

    base64: NotRequired[str]
    """Data as a base64 string."""

    text: NotRequired[str]
    """Plaintext content. This is optional if the data is provided as base64."""

    title: NotRequired[str]
    """Title of the text data, e.g., the title of a document."""

    context: NotRequired[str]
    """Context for the text, e.g., a description or summary of the text's content."""
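A hypothetical citable document in this shape, using the optional title/context fields the docstring mentions:

```python
plaintext_block = {
    "type": "text-plain",
    "mime_type": "text/plain",
    "text": "The grass is green. The sky is blue.",
    "title": "My Document",  # optional; may be passed to the model
    "context": "A short sample document used for testing citations.",
}
```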

class FileContentBlock(TypedDict):
    """Content block for file data.

    This block is intended for files that are not images, audio, or plaintext. For
    example, it can be used for PDFs, Word documents, etc.

    If the file is an image, audio, or plaintext, you should use the corresponding
    content block type (e.g., ``ImageContentBlock``, ``AudioContentBlock``,
    ``PlainTextContentBlock``).
    """

    type: Literal["file"]
    """Type of the content block."""

    id: NotRequired[str]
    """Content block identifier. Either:

    - Generated by the provider (e.g., OpenAI's file ID)
    - Generated by LangChain upon creation (as ``UUID4``)
    """

    file_id: NotRequired[str]
    """ID of the file, e.g., from a file storage system."""

    mime_type: NotRequired[str]
    """MIME type of the file. Required for base64.

    `Examples from IANA <https://www.iana.org/assignments/media-types/media-types.xhtml>`__
    """

    index: NotRequired[int]
    """Index of block in aggregate response. Used during streaming."""

    url: NotRequired[str]
    """URL of the file."""

    base64: NotRequired[str]
    """Data as a base64 string."""

    # title: NotRequired[str]
    # """Title of the file, e.g., the name of a document or file."""

    # context: NotRequired[str]
    # """Context for the file, e.g., a description or summary of the file's content."""


# Removed in this diff (remaining fields of the legacy PlainTextContentBlock):
#
#     source_type: Literal["text"]
#     """Source type (text)."""
#     text: str
#     """Text data."""

# Removed in this diff:
#
#     class IDContentBlock(TypedDict):
#         """Content block for data specified by an identifier."""
#
#         type: Literal["image", "audio", "file"]
#         source_type: Literal["id"]
#         """Source type (id)."""
#         id: str
#         """Identifier for data source."""


# Future modalities to consider:
# - 3D models
# - Tabular data


# Non-standard
class NonStandardContentBlock(TypedDict):
    """Content block for provider-specific data.

    This block contains data for which there is not yet a standard type.

    The purpose of this block should be to simply hold a provider-specific payload.
    If a provider's non-standard output includes reasoning and tool calls, it should be
    the adapter's job to parse that payload and emit the corresponding standard
    ReasoningContentBlock and ToolCallContentBlocks.
    """

    type: Literal["non_standard"]
    """Type of the content block."""

    id: NotRequired[str]
    """Content block identifier. Either:

    - Generated by the provider (e.g., OpenAI's file ID)
    - Generated by LangChain upon creation (as ``UUID4``)
    """

    value: dict[str, Any]
    """Provider-specific data."""

    index: NotRequired[int]
    """Index of block in aggregate response. Used during streaming."""

# --- Aliases ---
DataContentBlock = Union[
    ImageContentBlock,
    VideoContentBlock,
    AudioContentBlock,
    PlainTextContentBlock,
    FileContentBlock,
]

# Removed in this diff: the URLContentBlock, Base64ContentBlock, and IDContentBlock
# union members, and the pydantic adapter that validated them:
#
#     _DataContentBlockAdapter: TypeAdapter[DataContentBlock] = TypeAdapter(DataContentBlock)

ToolContentBlock = Union[
    ToolCall,
    CodeInterpreterCall,
    CodeInterpreterOutput,
    CodeInterpreterResult,
    WebSearchCall,
    WebSearchResult,
]

ContentBlock = Union[
    TextContentBlock,
    ToolCall,
    ReasoningContentBlock,
    NonStandardContentBlock,
    DataContentBlock,
    ToolContentBlock,
]

def _extract_typedict_type_values(union_type: Any) -> set[str]:
    """Extract the values of the 'type' field from a TypedDict union type."""
    result: set[str] = set()
    for value in get_args(union_type):
        annotation = value.__annotations__["type"]
        if get_origin(annotation) is Literal:
            result.update(get_args(annotation))
        else:
            msg = f"{value} 'type' is not a Literal"
            raise ValueError(msg)
    return result


KNOWN_BLOCK_TYPES = {
    bt for bt in get_args(ContentBlock) for bt in get_args(bt.__annotations__["type"])
}


def is_data_content_block(block: dict) -> bool:
    """Check if the content block is a standard data content block.

    Args:
        block: The content block to check.

    Returns:
        True if the content block is a data content block, False otherwise.
    """
    return block.get("type") in (
        "audio",
        "image",
        "video",
        "file",
        "text-plain",
    ) and any(
        key in block
        for key in (
            "url",
            "base64",
            "file_id",
            "text",
            "source_type",  # backwards compatibility
        )
    )


# Removed in this diff (the previous pydantic-based implementation, which took a
# ``content_block`` argument):
#
#     def is_data_content_block(
#         content_block: dict,
#     ) -> bool:
#         try:
#             _ = _DataContentBlockAdapter.validate_python(content_block)
#         except ValidationError:
#             return False
#         else:
#             return True
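A usage sketch of the new key-based check (assuming the function is imported from `langchain_core.messages.content_blocks`, the module this diff touches):

```python
from langchain_core.messages.content_blocks import is_data_content_block

# New-style block: modality type plus a data key.
assert is_data_content_block({"type": "image", "url": "https://example.com/cat.png"})

# Old-style block still passes via the `source_type` backwards-compatibility key.
assert is_data_content_block({"type": "image", "source_type": "base64", "data": "..."})

# A text block is not a *data* content block.
assert not is_data_content_block({"type": "text", "text": "hello"})
```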

def convert_to_openai_image_block(block: dict[str, Any]) -> dict:
    """Convert image content block to format expected by OpenAI Chat Completions API."""
    if "url" in block:
        return {
            "type": "image_url",
            "image_url": {
                "url": block["url"],
            },
        }
    if "base64" in block or block.get("source_type") == "base64":
        if "mime_type" not in block:
            error_message = "mime_type key is required for base64 data."
            raise ValueError(error_message)
        mime_type = block["mime_type"]
        base64_data = block["data"] if "data" in block else block["base64"]
        return {
            "type": "image_url",
            "image_url": {
                "url": f"data:{mime_type};base64,{base64_data}",
            },
        }
    error_message = "Unsupported source type. Only 'url' and 'base64' are supported."

# Removed in this diff: the previous version dispatched on
# ``content_block["source_type"]`` ("url" / "base64") and read the payload
# from ``content_block["data"]``.
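For example (payload is a placeholder), both the legacy and the new block shapes convert to the same OpenAI `image_url` format:

```python
from langchain_core.messages.content_blocks import convert_to_openai_image_block

new_style = {"type": "image", "mime_type": "image/png", "base64": "iVBORw0KGgo..."}
old_style = {
    "type": "image",
    "source_type": "base64",
    "mime_type": "image/png",
    "data": "iVBORw0KGgo...",
}

assert (
    convert_to_openai_image_block(new_style)
    == convert_to_openai_image_block(old_style)
    == {"type": "image_url", "image_url": {"url": "data:image/png;base64,iVBORw0KGgo..."}}
)
```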
@@ -117,8 +789,9 @@ def convert_to_openai_data_block(block: dict) -> dict:
         formatted_block = convert_to_openai_image_block(block)

     elif block["type"] == "file":
-        if block["source_type"] == "base64":
-            file = {"file_data": f"data:{block['mime_type']};base64,{block['data']}"}
+        if "base64" in block or block.get("source_type") == "base64":
+            base64_data = block["data"] if "source_type" in block else block["base64"]
+            file = {"file_data": f"data:{block['mime_type']};base64,{base64_data}"}
             if filename := block.get("filename"):
                 file["filename"] = filename
             elif (metadata := block.get("metadata")) and ("filename" in metadata):
@@ -126,27 +799,28 @@ def convert_to_openai_data_block(block: dict) -> dict:
             else:
                 warnings.warn(
                     "OpenAI may require a filename for file inputs. Specify a filename "
-                    "in the content block: {'type': 'file', 'source_type': 'base64', "
-                    "'mime_type': 'application/pdf', 'data': '...', "
-                    "'filename': 'my-pdf'}",
+                    "in the content block: {'type': 'file', 'mime_type': "
+                    "'application/pdf', 'base64': '...', 'filename': 'my-pdf'}",
                     stacklevel=1,
                 )
             formatted_block = {"type": "file", "file": file}
-        elif block["source_type"] == "id":
-            formatted_block = {"type": "file", "file": {"file_id": block["id"]}}
+        elif "file_id" in block or block.get("source_type") == "id":
+            file_id = block["id"] if "source_type" in block else block["file_id"]
+            formatted_block = {"type": "file", "file": {"file_id": file_id}}
         else:
-            error_msg = "source_type base64 or id is required for file blocks."
+            error_msg = "Keys base64 or file_id required for file blocks."
             raise ValueError(error_msg)

     elif block["type"] == "audio":
-        if block["source_type"] == "base64":
+        if "base64" in block or block.get("source_type") == "base64":
+            base64_data = block["data"] if "source_type" in block else block["base64"]
             audio_format = block["mime_type"].split("/")[-1]
             formatted_block = {
                 "type": "input_audio",
-                "input_audio": {"data": block["data"], "format": audio_format},
+                "input_audio": {"data": base64_data, "format": audio_format},
             }
         else:
-            error_msg = "source_type base64 is required for audio blocks."
+            error_msg = "Key base64 is required for audio blocks."
             raise ValueError(error_msg)
     else:
         error_msg = f"Block of type {block['type']} is not supported."
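A usage sketch of the new file branch (the import path is inferred from this diff's context; the file ID is invented):

```python
from langchain_core.messages.content_blocks import convert_to_openai_data_block

# New-style file block referencing an already-uploaded file by ID.
block = {"type": "file", "file_id": "file-abc123"}
print(convert_to_openai_data_block(block))
# -> {'type': 'file', 'file': {'file_id': 'file-abc123'}}
```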
@@ -28,6 +28,7 @@ class HumanMessage(BaseMessage):
             # Instantiate a chat model and invoke it with the messages
             model = ...
             print(model.invoke(messages))
+
     """

     example: bool = False
@@ -13,7 +13,7 @@ class RemoveMessage(BaseMessage):

     def __init__(
         self,
-        id: str,  # noqa: A002
+        id: str,
         **kwargs: Any,
     ) -> None:
         """Create a RemoveMessage.
@@ -5,9 +5,12 @@ from typing import Any, Literal, Optional, Union
 from uuid import UUID

 from pydantic import Field, model_validator
-from typing_extensions import NotRequired, TypedDict, override
+from typing_extensions import override

 from langchain_core.messages.base import BaseMessage, BaseMessageChunk, merge_content
+from langchain_core.messages.content_blocks import InvalidToolCall as InvalidToolCall
+from langchain_core.messages.content_blocks import ToolCall as ToolCall
+from langchain_core.messages.content_blocks import ToolCallChunk as ToolCallChunk
 from langchain_core.utils._merge import merge_dicts, merge_obj
@@ -59,6 +62,7 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
     The tool_call_id field is used to associate the tool call request with the
     tool call response. This is useful in situations where a chat model is able
     to request multiple tool calls in parallel.
+
     """  # noqa: E501

     tool_call_id: str
@@ -176,41 +180,11 @@ class ToolMessageChunk(ToolMessage, BaseMessageChunk):
         return super().__add__(other)


-class ToolCall(TypedDict):
-    """Represents a request to call a tool.
-
-    Example:
-
-        .. code-block:: python
-
-            {
-                "name": "foo",
-                "args": {"a": 1},
-                "id": "123"
-            }
-
-    This represents a request to call the tool named "foo" with arguments {"a": 1}
-    and an identifier of "123".
-    """
-
-    name: str
-    """The name of the tool to be called."""
-    args: dict[str, Any]
-    """The arguments to the tool call."""
-    id: Optional[str]
-    """An identifier associated with the tool call.
-
-    An identifier is needed to associate a tool call request with a tool
-    call result in events when multiple concurrent tool calls are made.
-    """
-    type: NotRequired[Literal["tool_call"]]
-
-
 def tool_call(
     *,
     name: str,
     args: dict[str, Any],
-    id: Optional[str],  # noqa: A002
+    id: Optional[str],
 ) -> ToolCall:
     """Create a tool call.

@@ -222,42 +196,11 @@ def tool_call(
     return ToolCall(name=name, args=args, id=id, type="tool_call")


-class ToolCallChunk(TypedDict):
-    """A chunk of a tool call (e.g., as part of a stream).
-
-    When merging ToolCallChunks (e.g., via AIMessageChunk.__add__),
-    all string attributes are concatenated. Chunks are only merged if their
-    values of `index` are equal and not None.
-
-    Example:
-
-    .. code-block:: python
-
-        left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)]
-        right_chunks = [ToolCallChunk(name=None, args='1}', index=0)]
-
-        (
-            AIMessageChunk(content="", tool_call_chunks=left_chunks)
-            + AIMessageChunk(content="", tool_call_chunks=right_chunks)
-        ).tool_call_chunks == [ToolCallChunk(name='foo', args='{"a":1}', index=0)]
-    """
-
-    name: Optional[str]
-    """The name of the tool to be called."""
-    args: Optional[str]
-    """The arguments to the tool call."""
-    id: Optional[str]
-    """An identifier associated with the tool call."""
-    index: Optional[int]
-    """The index of the tool call in a sequence."""
-    type: NotRequired[Literal["tool_call_chunk"]]
-
-
 def tool_call_chunk(
     *,
     name: Optional[str] = None,
     args: Optional[str] = None,
-    id: Optional[str] = None,  # noqa: A002
+    id: Optional[str] = None,
     index: Optional[int] = None,
 ) -> ToolCallChunk:
     """Create a tool call chunk.

@@ -273,29 +216,11 @@ def tool_call_chunk(
     )


-class InvalidToolCall(TypedDict):
-    """Allowance for errors made by LLM.
-
-    Here we add an `error` key to surface errors made during generation
-    (e.g., invalid JSON arguments.)
-    """
-
-    name: Optional[str]
-    """The name of the tool to be called."""
-    args: Optional[str]
-    """The arguments to the tool call."""
-    id: Optional[str]
-    """An identifier associated with the tool call."""
-    error: Optional[str]
-    """An error message associated with the tool call."""
-    type: NotRequired[Literal["invalid_tool_call"]]
-
-
 def invalid_tool_call(
     *,
     name: Optional[str] = None,
     args: Optional[str] = None,
-    id: Optional[str] = None,  # noqa: A002
+    id: Optional[str] = None,
     error: Optional[str] = None,
 ) -> InvalidToolCall:
     """Create an invalid tool call.
@@ -40,6 +40,12 @@ from langchain_core.messages.human import HumanMessage, HumanMessageChunk
 from langchain_core.messages.modifier import RemoveMessage
 from langchain_core.messages.system import SystemMessage, SystemMessageChunk
 from langchain_core.messages.tool import ToolCall, ToolMessage, ToolMessageChunk
+from langchain_core.messages.v1 import AIMessage as AIMessageV1
+from langchain_core.messages.v1 import AIMessageChunk as AIMessageChunkV1
+from langchain_core.messages.v1 import HumanMessage as HumanMessageV1
+from langchain_core.messages.v1 import MessageV1, MessageV1Types
+from langchain_core.messages.v1 import SystemMessage as SystemMessageV1
+from langchain_core.messages.v1 import ToolMessage as ToolMessageV1

 if TYPE_CHECKING:
     from langchain_text_splitters import TextSplitter
@@ -111,6 +117,7 @@ def get_buffer_string(
         ]
         get_buffer_string(messages)
         # -> "Human: Hi, how are you?\nAI: Good, how are you?"
+
     """
     string_messages = []
     for m in messages:
@@ -202,7 +209,7 @@ def message_chunk_to_message(chunk: BaseMessageChunk) -> BaseMessage:


 MessageLikeRepresentation = Union[
-    BaseMessage, list[str], tuple[str, str], str, dict[str, Any]
+    BaseMessage, list[str], tuple[str, str], str, dict[str, Any], MessageV1
 ]
@@ -212,7 +219,7 @@ def _create_message_from_message_type(
     name: Optional[str] = None,
     tool_call_id: Optional[str] = None,
     tool_calls: Optional[list[dict[str, Any]]] = None,
-    id: Optional[str] = None,  # noqa: A002
+    id: Optional[str] = None,
     **additional_kwargs: Any,
 ) -> BaseMessage:
     """Create a message from a message type and content string.

@@ -293,6 +300,126 @@ def _create_message_from_message_type(
     return message

def _create_message_from_message_type_v1(
    message_type: str,
    content: str,
    name: Optional[str] = None,
    tool_call_id: Optional[str] = None,
    tool_calls: Optional[list[dict[str, Any]]] = None,
    id: Optional[str] = None,
    **kwargs: Any,
) -> MessageV1:
    """Create a message from a message type and content string.

    Args:
        message_type: (str) the type of the message (e.g., "human", "ai", etc.).
        content: (str) the content string.
        name: (str) the name of the message. Default is None.
        tool_call_id: (str) the tool call id. Default is None.
        tool_calls: (list[dict[str, Any]]) the tool calls. Default is None.
        id: (str) the id of the message. Default is None.
        kwargs: (dict[str, Any]) additional keyword arguments.

    Returns:
        a message of the appropriate type.

    Raises:
        ValueError: if the message type is not one of "human", "user", "ai",
            "assistant", "tool", "system", or "developer".
    """
    if name is not None:
        kwargs["name"] = name
    if tool_call_id is not None:
        kwargs["tool_call_id"] = tool_call_id
    if kwargs and (response_metadata := kwargs.pop("response_metadata", None)):
        kwargs["response_metadata"] = response_metadata
    if id is not None:
        kwargs["id"] = id
    if tool_calls is not None:
        kwargs["tool_calls"] = []
        for tool_call in tool_calls:
            # Convert OpenAI-format tool call to LangChain format.
            if "function" in tool_call:
                args = tool_call["function"]["arguments"]
                if isinstance(args, str):
                    args = json.loads(args, strict=False)
                kwargs["tool_calls"].append(
                    {
                        "name": tool_call["function"]["name"],
                        "args": args,
                        "id": tool_call["id"],
                        "type": "tool_call",
                    }
                )
            else:
                kwargs["tool_calls"].append(tool_call)
    if message_type in {"human", "user"}:
        message = HumanMessageV1(content=content, **kwargs)
    elif message_type in {"ai", "assistant"}:
        message = AIMessageV1(content=content, **kwargs)
    elif message_type in {"system", "developer"}:
        if message_type == "developer":
            kwargs["custom_role"] = "developer"
        message = SystemMessageV1(content=content, **kwargs)
    elif message_type == "tool":
        artifact = kwargs.pop("artifact", None)
        message = ToolMessageV1(content=content, artifact=artifact, **kwargs)
    else:
        msg = (
            f"Unexpected message type: '{message_type}'. Use one of 'human',"
            f" 'user', 'ai', 'assistant', 'function', 'tool', 'system', or 'developer'."
        )
        msg = create_message(message=msg, error_code=ErrorCode.MESSAGE_COERCION_FAILURE)
        raise ValueError(msg)
    return message
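The tool-call branch above performs the OpenAI-to-LangChain conversion; extracted as a standalone sketch (the input dict is a typical OpenAI-format tool call with invented values):

```python
import json

openai_tool_call = {
    "id": "call_abc",
    "function": {"name": "get_weather", "arguments": '{"city": "Paris"}'},
}

# Mirror the conversion done inside _create_message_from_message_type_v1.
args = openai_tool_call["function"]["arguments"]
if isinstance(args, str):
    args = json.loads(args, strict=False)

lc_tool_call = {
    "name": openai_tool_call["function"]["name"],
    "args": args,
    "id": openai_tool_call["id"],
    "type": "tool_call",
}
print(lc_tool_call)
# -> {'name': 'get_weather', 'args': {'city': 'Paris'}, 'id': 'call_abc', 'type': 'tool_call'}
```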

def _convert_from_v1_message(message: MessageV1) -> BaseMessage:
    """Compatibility layer to convert v1 messages to current messages.

    Args:
        message: MessageV1 instance to convert.

    Returns:
        BaseMessage: Converted message instance.
    """
    content = cast("Union[str, list[str | dict]]", message.content)
    if isinstance(message, AIMessageV1):
        return AIMessage(
            content=content,
            id=message.id,
            name=message.name,
            tool_calls=message.tool_calls,
            response_metadata=cast("dict", message.response_metadata),
        )
    if isinstance(message, AIMessageChunkV1):
        return AIMessageChunk(
            content=content,
            id=message.id,
            name=message.name,
            tool_call_chunks=message.tool_call_chunks,
            response_metadata=cast("dict", message.response_metadata),
        )
    if isinstance(message, HumanMessageV1):
        return HumanMessage(
            content=content,
            id=message.id,
            name=message.name,
        )
    if isinstance(message, SystemMessageV1):
        return SystemMessage(
            content=content,
            id=message.id,
        )
    if isinstance(message, ToolMessageV1):
        return ToolMessage(
            content=content,
            id=message.id,
        )
    message = f"Unsupported message type: {type(message)}"
    raise NotImplementedError(message)

def _convert_to_message(message: MessageLikeRepresentation) -> BaseMessage:
    """Instantiate a message from a variety of message formats.

@@ -340,6 +467,63 @@ def _convert_to_message(message: MessageLikeRepresentation) -> BaseMessage:
         message_ = _create_message_from_message_type(
             msg_type, msg_content, **msg_kwargs
         )
+    elif isinstance(message, MessageV1Types):
+        message_ = _convert_from_v1_message(message)
     else:
         msg = f"Unsupported message type: {type(message)}"
         msg = create_message(message=msg, error_code=ErrorCode.MESSAGE_COERCION_FAILURE)
         raise NotImplementedError(msg)

     return message_

def _convert_to_message_v1(message: MessageLikeRepresentation) -> MessageV1:
    """Instantiate a message from a variety of message formats.

    The message format can be one of the following:

    - BaseMessagePromptTemplate
    - BaseMessage
    - 2-tuple of (role string, template); e.g., ("human", "{user_input}")
    - dict: a message dict with role and content keys
    - string: shorthand for ("human", template); e.g., "{user_input}"

    Args:
        message: a representation of a message in one of the supported formats.

    Returns:
        an instance of a message or a message template.

    Raises:
        NotImplementedError: if the message type is not supported.
        ValueError: if the message dict does not contain the required keys.
    """
    if isinstance(message, MessageV1Types):
        message_ = message
    elif isinstance(message, str):
        message_ = _create_message_from_message_type_v1("human", message)
    elif isinstance(message, Sequence) and len(message) == 2:
        # mypy doesn't realise this can't be a string given the previous branch
        message_type_str, template = message  # type: ignore[misc]
        message_ = _create_message_from_message_type_v1(message_type_str, template)
    elif isinstance(message, dict):
        msg_kwargs = message.copy()
        try:
            try:
                msg_type = msg_kwargs.pop("role")
            except KeyError:
                msg_type = msg_kwargs.pop("type")
            # None msg content is not allowed
            msg_content = msg_kwargs.pop("content") or ""
        except KeyError as e:
            msg = f"Message dict must contain 'role' and 'content' keys, got {message}"
            msg = create_message(
                message=msg, error_code=ErrorCode.MESSAGE_COERCION_FAILURE
            )
            raise ValueError(msg) from e
        message_ = _create_message_from_message_type_v1(
            msg_type, msg_content, **msg_kwargs
        )
    else:
        msg = f"Unsupported message type: {type(message)}"
        msg = create_message(message=msg, error_code=ErrorCode.MESSAGE_COERCION_FAILURE)
@@ -367,6 +551,25 @@ def convert_to_messages(
     return [_convert_to_message(m) for m in messages]


def convert_to_messages_v1(
    messages: Union[Iterable[MessageLikeRepresentation], PromptValue],
) -> list[MessageV1]:
    """Convert a sequence of messages to a list of messages.

    Args:
        messages: Sequence of messages to convert.

    Returns:
        List of messages (MessageV1).
    """
    # Import here to avoid circular imports
    from langchain_core.prompt_values import PromptValue

    if isinstance(messages, PromptValue):
        return messages.to_messages(output_version="v1")
    return [_convert_to_message_v1(m) for m in messages]
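A usage sketch (the export location is assumed, following the pattern of the existing v0 `convert_to_messages`):

```python
from langchain_core.messages import convert_to_messages_v1  # import path assumed

messages = convert_to_messages_v1(
    [
        "hello",                                    # shorthand for ("human", "hello")
        ("system", "you are a helpful assistant"),  # (role, content) tuple
        {"role": "ai", "content": "hi there"},      # OpenAI-style dict
    ]
)
```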

def _runnable_support(func: Callable) -> Callable:
    @overload
    def wrapped(

@@ -463,6 +666,7 @@ def filter_messages(
         SystemMessage("you're a good assistant."),
         HumanMessage("what's your name", id="foo", name="example_user"),
     ]
+
     """  # noqa: E501
     messages = convert_to_messages(messages)
     filtered: list[BaseMessage] = []
@@ -869,6 +1073,7 @@ def trim_messages(
         HumanMessage("This is a 4 token text. The full message is 10 tokens.", id="first"),
         AIMessage([{"type": "text", "text": "This is the FIRST 4 token block."}], id="second"),
     ]
+
     """  # noqa: E501
     # Validate arguments
     if start_on and strategy == "first":
@@ -1004,10 +1209,11 @@ def convert_to_openai_messages(

     oai_messages: list = []

-    if is_single := isinstance(messages, (BaseMessage, dict, str)):
+    if is_single := isinstance(messages, (BaseMessage, dict, str, MessageV1Types)):
         messages = [messages]

-    messages = convert_to_messages(messages)
+    # TODO: resolve type ignore here
+    messages = convert_to_messages(messages)  # type: ignore[arg-type]

     for i, message in enumerate(messages):
         oai_msg: dict = {"role": _get_message_openai_role(message)}
@@ -1176,7 +1382,9 @@ def convert_to_openai_messages(
                         "id": block["id"],
                         "function": {
                             "name": block["name"],
-                            "arguments": json.dumps(block["input"]),
+                            "arguments": json.dumps(
+                                block["input"], ensure_ascii=False
+                            ),
                         },
                     }
                 )
@@ -1550,7 +1758,7 @@ def _convert_to_openai_tool_calls(tool_calls: list[ToolCall]) -> list[dict]:
             "id": tool_call["id"],
             "function": {
                 "name": tool_call["name"],
-                "arguments": json.dumps(tool_call["args"]),
+                "arguments": json.dumps(tool_call["args"], ensure_ascii=False),
             },
         }
         for tool_call in tool_calls
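The `ensure_ascii=False` change matters whenever tool arguments contain non-ASCII text: the default escapes every such character, which bloats payloads and hurts readability, as this standard-library sketch shows:

```python
import json

args = {"city": "Zürich", "greeting": "こんにちは"}

print(json.dumps(args))
# {"city": "Z\u00fcrich", "greeting": "\u3053\u3093\u306b\u3061\u306f"}

print(json.dumps(args, ensure_ascii=False))
# {"city": "Zürich", "greeting": "こんにちは"}
```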
Some files were not shown because too many files have changed in this diff.