Mirror of https://github.com/hwchase17/langchain.git
Synced 2026-02-11 11:40:19 +00:00

Compare commits (1 commit): langchain-... cc/test_o3

| Author | SHA1 | Date |
|---|---|---|
|  | 045ac176d8 |  |
21  .github/actions/uv_setup/action.yml (vendored)

@@ -1,21 +0,0 @@
# TODO: https://docs.astral.sh/uv/guides/integration/github/#caching
name: uv-install
description: Set up Python and uv

inputs:
python-version:
description: Python version, supporting MAJOR.MINOR only
required: true

env:
UV_VERSION: "0.5.25"

runs:
using: composite
steps:
- name: Install uv and set the python version
uses: astral-sh/setup-uv@v5
with:
version: ${{ env.UV_VERSION }}
python-version: ${{ inputs.python-version }}
30  .github/scripts/check_diff.py (vendored)

@@ -7,8 +7,6 @@ from typing import Dict, List, Set
from pathlib import Path
import tomllib

from packaging.requirements import Requirement

from get_min_versions import get_min_version_from_toml

@@ -39,7 +37,6 @@ IGNORED_PARTNERS = [

PY_312_MAX_PACKAGES = [
"libs/partners/huggingface",  # https://github.com/pytorch/pytorch/issues/130249
"libs/partners/voyageai",
]

@@ -64,17 +61,15 @@ def dependents_graph() -> dict:

# load regular and test deps from pyproject.toml
with open(path, "rb") as f:
pyproject = tomllib.load(f)
pyproject = tomllib.load(f)["tool"]["poetry"]

pkg_dir = "libs" + "/".join(path.split("libs")[1].split("/")[:-1])
for dep in [
*pyproject["project"]["dependencies"],
*pyproject["dependency-groups"]["test"],
*pyproject["dependencies"].keys(),
*pyproject["group"]["test"]["dependencies"].keys(),
]:
requirement = Requirement(dep)
package_name = requirement.name
if "langchain" in dep:
dependents[package_name].add(pkg_dir)
dependents[dep].add(pkg_dir)
continue

# load extended deps from extended_testing_deps.txt

@@ -125,7 +120,8 @@ def _get_configs_for_single_dir(job: str, dir_: str) -> List[Dict[str, str]]:
py_versions = ["3.9", "3.10", "3.11", "3.12", "3.13"]
# custom logic for specific directories
elif dir_ == "libs/partners/milvus":
# milvus doesn't allow 3.12 because they declare deps in funny way
# milvus poetry doesn't allow 3.12 because they
# declare deps in funny way
py_versions = ["3.9", "3.11"]

elif dir_ in PY_312_MAX_PACKAGES:

@@ -152,17 +148,17 @@ def _get_configs_for_single_dir(job: str, dir_: str) -> List[Dict[str, str]]:
def _get_pydantic_test_configs(
dir_: str, *, python_version: str = "3.11"
) -> List[Dict[str, str]]:
with open("./libs/core/uv.lock", "rb") as f:
core_uv_lock_data = tomllib.load(f)
for package in core_uv_lock_data["package"]:
with open("./libs/core/poetry.lock", "rb") as f:
core_poetry_lock_data = tomllib.load(f)
for package in core_poetry_lock_data["package"]:
if package["name"] == "pydantic":
core_max_pydantic_minor = package["version"].split(".")[1]
break

with open(f"./{dir_}/uv.lock", "rb") as f:
dir_uv_lock_data = tomllib.load(f)
with open(f"./{dir_}/poetry.lock", "rb") as f:
dir_poetry_lock_data = tomllib.load(f)

for package in dir_uv_lock_data["package"]:
for package in dir_poetry_lock_data["package"]:
if package["name"] == "pydantic":
dir_max_pydantic_minor = package["version"].split(".")[1]
break

@@ -308,7 +304,7 @@ if __name__ == "__main__":
f"Unknown lib: {file}. check_diff.py likely needs "
"an update for this new library!"
)
elif file.startswith("docs/") or file in ["pyproject.toml", "uv.lock"]: # docs or root uv files
elif file.startswith("docs/") or file in ["pyproject.toml", "poetry.lock"]: # docs or root poetry files
docs_edited = True
dirs_to_run["lint"].add(".")
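The check_diff.py hunks above read dependencies from two different pyproject.toml layouts: the PEP 621 `[project]` / `[dependency-groups]` tables used with uv, and the `[tool.poetry]` tables used with Poetry. For reference, a minimal sketch of reading direct and test dependencies under either layout; the file path is illustrative and this is not the repository script itself.

```python
# Illustrative sketch only; the real logic lives in .github/scripts/check_diff.py.
import tomllib  # stdlib on Python 3.11+; older interpreters need the tomli backport

with open("libs/core/pyproject.toml", "rb") as f:  # example path
    pyproject = tomllib.load(f)

if "project" in pyproject:
    # PEP 621 layout (uv): lists of requirement strings.
    deps = list(pyproject["project"].get("dependencies", []))
    deps += pyproject.get("dependency-groups", {}).get("test", [])
else:
    # Poetry layout: mappings of package name -> version constraint.
    poetry = pyproject["tool"]["poetry"]
    deps = list(poetry.get("dependencies", {}))
    deps += list(poetry.get("group", {}).get("test", {}).get("dependencies", {}))

print(deps)
```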
11  .github/scripts/check_prerelease_dependencies.py (vendored)

@@ -10,25 +10,26 @@ if __name__ == "__main__":
toml_data = tomllib.load(file)

# see if we're releasing an rc
version = toml_data["project"]["version"]
version = toml_data["tool"]["poetry"]["version"]
releasing_rc = "rc" in version or "dev" in version

# if not, iterate through dependencies and make sure none allow prereleases
if not releasing_rc:
dependencies = toml_data["project"]["dependencies"]
for dep_version in dependencies:
dependencies = toml_data["tool"]["poetry"]["dependencies"]
for lib in dependencies:
dep_version = dependencies[lib]
dep_version_string = (
dep_version["version"] if isinstance(dep_version, dict) else dep_version
)

if "rc" in dep_version_string:
raise ValueError(
f"Dependency {dep_version} has a prerelease version. Please remove this."
f"Dependency {lib} has a prerelease version. Please remove this."
)

if isinstance(dep_version, dict) and dep_version.get(
"allow-prereleases", False
):
raise ValueError(
f"Dependency {dep_version} has allow-prereleases set to true. Please remove this."
f"Dependency {lib} has allow-prereleases set to true. Please remove this."
)
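Both sides of the check_prerelease_dependencies.py diff implement the same guard: unless the package itself is an rc/dev release, no dependency may pin a prerelease. A minimal sketch of that check for the PEP 621 layout, using `packaging.requirements.Requirement` instead of the raw substring test, for illustration only:

```python
# Illustrative sketch, not the repository script.
import tomllib

from packaging.requirements import Requirement

with open("pyproject.toml", "rb") as f:
    data = tomllib.load(f)

version = data["project"]["version"]
releasing_rc = "rc" in version or "dev" in version

if not releasing_rc:
    for dep in data["project"]["dependencies"]:
        req = Requirement(dep)
        # A specifier such as ">=0.3.0rc1" pins a prerelease and should fail the check.
        if any("rc" in spec.version for spec in req.specifier):
            raise ValueError(f"Dependency {dep} has a prerelease version. Please remove this.")
```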
41  .github/scripts/get_min_versions.py (vendored)

@@ -1,4 +1,3 @@
from collections import defaultdict
import sys
from typing import Optional

@@ -8,7 +7,6 @@ else:
# for python 3.10 and below, which doesnt have stdlib tomllib
import tomli as tomllib

from packaging.requirements import Requirement
from packaging.specifiers import SpecifierSet
from packaging.version import Version

@@ -96,23 +94,6 @@ def get_minimum_version(package_name: str, spec_string: str) -> Optional[str]:
return str(min(valid_versions)) if valid_versions else None


def _check_python_version_from_requirement(
requirement: Requirement, python_version: str
) -> bool:
if not requirement.marker:
return True
else:
marker_str = str(requirement.marker)
if "python_version" or "python_full_version" in marker_str:
python_version_str = "".join(
char
for char in marker_str
if char.isdigit() or char in (".", "<", ">", "=", ",")
)
return check_python_version(python_version, python_version_str)
return True


def get_min_version_from_toml(
toml_path: str,
versions_for: str,

@@ -124,10 +105,8 @@ def get_min_version_from_toml(
with open(toml_path, "rb") as file:
toml_data = tomllib.load(file)

dependencies = defaultdict(list)
for dep in toml_data["project"]["dependencies"]:
requirement = Requirement(dep)
dependencies[requirement.name].append(requirement)
# Get the dependencies from tool.poetry.dependencies
dependencies = toml_data["tool"]["poetry"]["dependencies"]

# Initialize a dictionary to store the minimum versions
min_versions = {}

@@ -142,11 +121,17 @@ def get_min_version_from_toml(
if lib in dependencies:
if include and lib not in include:
continue
requirements = dependencies[lib]
for requirement in requirements:
if _check_python_version_from_requirement(requirement, python_version):
version_string = str(requirement.specifier)
break
# Get the version string
version_string = dependencies[lib]

if isinstance(version_string, dict):
version_string = version_string["version"]
if isinstance(version_string, list):
version_string = [
vs
for vs in version_string
if check_python_version(python_version, vs["python"])
][0]["version"]

# Use parse_version to get the minimum supported version from version_string
min_version = get_minimum_version(lib, version_string)
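The heart of get_min_versions.py is `get_minimum_version()`, visible above only through its final `return str(min(valid_versions)) ...` line: it filters candidate releases through a `SpecifierSet` and keeps the smallest match. A minimal sketch of that idea with a hard-coded candidate list (the real script resolves release lists from PyPI):

```python
# Illustrative sketch of the minimum-version selection used by get_min_versions.py.
from packaging.specifiers import SpecifierSet
from packaging.version import Version


def minimum_version(spec_string: str, candidates: list[str]) -> str | None:
    """Return the lowest candidate release that satisfies the specifier."""
    spec = SpecifierSet(spec_string)
    valid = [Version(v) for v in candidates if Version(v) in spec]
    return str(min(valid)) if valid else None


print(minimum_version(">=0.3.0,<0.4", ["0.2.9", "0.3.0", "0.3.5", "0.4.1"]))  # -> 0.3.0
```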
15  .github/workflows/_compile_integration_test.yml (vendored)

@@ -13,7 +13,7 @@ on:
description: "Python version to use"

env:
UV_FROZEN: "true"
POETRY_VERSION: "1.8.4"

jobs:
build:

@@ -22,22 +22,25 @@ jobs:
working-directory: ${{ inputs.working-directory }}
runs-on: ubuntu-latest
timeout-minutes: 20
name: "uv run pytest -m compile tests/integration_tests #${{ inputs.python-version }}"
name: "poetry run pytest -m compile tests/integration_tests #${{ inputs.python-version }}"
steps:
- uses: actions/checkout@v4

- name: Set up Python ${{ inputs.python-version }} + uv
uses: "./.github/actions/uv_setup"
- name: Set up Python ${{ inputs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
uses: "./.github/actions/poetry_setup"
with:
python-version: ${{ inputs.python-version }}
poetry-version: ${{ env.POETRY_VERSION }}
working-directory: ${{ inputs.working-directory }}
cache-key: compile-integration

- name: Install integration dependencies
shell: bash
run: uv sync --group test --group test_integration
run: poetry install --with=test_integration,test

- name: Check integration tests compile
shell: bash
run: uv run pytest -m compile tests/integration_tests
run: poetry run pytest -m compile tests/integration_tests

- name: Ensure the tests did not create any additional files
shell: bash
13  .github/workflows/_integration_test.yml (vendored)
@@ -12,7 +12,7 @@ on:
|
||||
description: "Python version to use"
|
||||
|
||||
env:
|
||||
UV_FROZEN: "true"
|
||||
POETRY_VERSION: "1.8.4"
|
||||
|
||||
jobs:
|
||||
build:
|
||||
@@ -24,19 +24,22 @@ jobs:
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python ${{ inputs.python-version }} + uv
|
||||
uses: "./.github/actions/uv_setup"
|
||||
- name: Set up Python ${{ inputs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
|
||||
uses: "./.github/actions/poetry_setup"
|
||||
with:
|
||||
python-version: ${{ inputs.python-version }}
|
||||
poetry-version: ${{ env.POETRY_VERSION }}
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
cache-key: core
|
||||
|
||||
- name: Install dependencies
|
||||
shell: bash
|
||||
run: uv sync --group test --group test_integration
|
||||
run: poetry install --with test,test_integration
|
||||
|
||||
- name: Install deps outside pyproject
|
||||
if: ${{ startsWith(inputs.working-directory, 'libs/community/') }}
|
||||
shell: bash
|
||||
run: VIRTUAL_ENV=.venv uv pip install "boto3<2" "google-cloud-aiplatform<2"
|
||||
run: poetry run pip install "boto3<2" "google-cloud-aiplatform<2"
|
||||
|
||||
- name: Run integration tests
|
||||
shell: bash
|
||||
|
||||
47  .github/workflows/_lint.yml (vendored)
@@ -13,13 +13,12 @@ on:
|
||||
description: "Python version to use"
|
||||
|
||||
env:
|
||||
POETRY_VERSION: "1.8.4"
|
||||
WORKDIR: ${{ inputs.working-directory == '' && '.' || inputs.working-directory }}
|
||||
|
||||
# This env var allows us to get inline annotations when ruff has complaints.
|
||||
RUFF_OUTPUT_FORMAT: github
|
||||
|
||||
UV_FROZEN: "true"
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: "make lint #${{ inputs.python-version }}"
|
||||
@@ -28,10 +27,25 @@ jobs:
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python ${{ inputs.python-version }} + uv
|
||||
uses: "./.github/actions/uv_setup"
|
||||
- name: Set up Python ${{ inputs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
|
||||
uses: "./.github/actions/poetry_setup"
|
||||
with:
|
||||
python-version: ${{ inputs.python-version }}
|
||||
poetry-version: ${{ env.POETRY_VERSION }}
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
cache-key: lint-with-extras
|
||||
|
||||
- name: Check Poetry File
|
||||
shell: bash
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
run: |
|
||||
poetry check
|
||||
|
||||
- name: Check lock file
|
||||
shell: bash
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
run: |
|
||||
poetry lock --check
|
||||
|
||||
- name: Install dependencies
|
||||
# Also installs dev/lint/test/typing dependencies, to ensure we have
|
||||
@@ -44,7 +58,17 @@ jobs:
|
||||
# It doesn't matter how you change it, any change will cause a cache-bust.
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
run: |
|
||||
uv sync --group lint --group typing
|
||||
poetry install --with lint,typing
|
||||
|
||||
- name: Get .mypy_cache to speed up mypy
|
||||
uses: actions/cache@v4
|
||||
env:
|
||||
SEGMENT_DOWNLOAD_TIMEOUT_MIN: "2"
|
||||
with:
|
||||
path: |
|
||||
${{ env.WORKDIR }}/.mypy_cache
|
||||
key: mypy-lint-${{ runner.os }}-${{ runner.arch }}-py${{ inputs.python-version }}-${{ inputs.working-directory }}-${{ hashFiles(format('{0}/poetry.lock', inputs.working-directory)) }}
|
||||
|
||||
|
||||
- name: Analysing the code with our lint
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
@@ -63,12 +87,21 @@ jobs:
|
||||
if: ${{ ! startsWith(inputs.working-directory, 'libs/partners/') }}
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
run: |
|
||||
uv sync --inexact --group test
|
||||
poetry install --with test
|
||||
- name: Install unit+integration test dependencies
|
||||
if: ${{ startsWith(inputs.working-directory, 'libs/partners/') }}
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
run: |
|
||||
uv sync --inexact --group test --group test_integration
|
||||
poetry install --with test,test_integration
|
||||
|
||||
- name: Get .mypy_cache_test to speed up mypy
|
||||
uses: actions/cache@v4
|
||||
env:
|
||||
SEGMENT_DOWNLOAD_TIMEOUT_MIN: "2"
|
||||
with:
|
||||
path: |
|
||||
${{ env.WORKDIR }}/.mypy_cache_test
|
||||
key: mypy-test-${{ runner.os }}-${{ runner.arch }}-py${{ inputs.python-version }}-${{ inputs.working-directory }}-${{ hashFiles(format('{0}/poetry.lock', inputs.working-directory)) }}
|
||||
|
||||
- name: Analysing the code with our lint
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
|
||||
67  .github/workflows/_release.yml (vendored)
@@ -21,8 +21,7 @@ on:
|
||||
|
||||
env:
|
||||
PYTHON_VERSION: "3.11"
|
||||
UV_FROZEN: "true"
|
||||
UV_NO_SYNC: "true"
|
||||
POETRY_VERSION: "1.8.4"
|
||||
|
||||
jobs:
|
||||
build:
|
||||
@@ -37,10 +36,13 @@ jobs:
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python + uv
|
||||
uses: "./.github/actions/uv_setup"
|
||||
- name: Set up Python + Poetry ${{ env.POETRY_VERSION }}
|
||||
uses: "./.github/actions/poetry_setup"
|
||||
with:
|
||||
python-version: ${{ env.PYTHON_VERSION }}
|
||||
poetry-version: ${{ env.POETRY_VERSION }}
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
cache-key: release
|
||||
|
||||
# We want to keep this build stage *separate* from the release stage,
|
||||
# so that there's no sharing of permissions between them.
|
||||
@@ -54,7 +56,7 @@ jobs:
|
||||
# > from the publish job.
|
||||
# https://github.com/pypa/gh-action-pypi-publish#non-goals
|
||||
- name: Build project for distribution
|
||||
run: uv build
|
||||
run: poetry build
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
|
||||
- name: Upload build
|
||||
@@ -65,18 +67,11 @@ jobs:
|
||||
|
||||
- name: Check Version
|
||||
id: check-version
|
||||
shell: python
|
||||
shell: bash
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
run: |
|
||||
import os
|
||||
import tomllib
|
||||
with open("pyproject.toml", "rb") as f:
|
||||
data = tomllib.load(f)
|
||||
pkg_name = data["project"]["name"]
|
||||
version = data["project"]["version"]
|
||||
with open(os.environ["GITHUB_OUTPUT"], "a") as f:
|
||||
f.write(f"pkg-name={pkg_name}\n")
|
||||
f.write(f"version={version}\n")
|
||||
echo pkg-name="$(poetry version | cut -d ' ' -f 1)" >> $GITHUB_OUTPUT
|
||||
echo version="$(poetry version --short)" >> $GITHUB_OUTPUT
|
||||
release-notes:
|
||||
needs:
|
||||
- build
|
||||
@@ -189,11 +184,13 @@ jobs:
|
||||
# - The package is published, and it breaks on the missing dependency when
|
||||
# used in the real world.
|
||||
|
||||
- name: Set up Python + uv
|
||||
uses: "./.github/actions/uv_setup"
|
||||
- name: Set up Python + Poetry ${{ env.POETRY_VERSION }}
|
||||
uses: "./.github/actions/poetry_setup"
|
||||
id: setup-python
|
||||
with:
|
||||
python-version: ${{ env.PYTHON_VERSION }}
|
||||
poetry-version: ${{ env.POETRY_VERSION }}
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
|
||||
- uses: actions/download-artifact@v4
|
||||
with:
|
||||
@@ -216,18 +213,17 @@ jobs:
|
||||
# - attempt install again after 5 seconds if it fails because there is
|
||||
# sometimes a delay in availability on test pypi
|
||||
run: |
|
||||
uv venv
|
||||
VIRTUAL_ENV=.venv uv pip install dist/*.whl
|
||||
poetry run pip install dist/*.whl
|
||||
|
||||
# Replace all dashes in the package name with underscores,
|
||||
# since that's how Python imports packages with dashes in the name.
|
||||
# also remove _official suffix
|
||||
IMPORT_NAME="$(echo "$PKG_NAME" | sed s/-/_/g | sed s/_official//g)"
|
||||
|
||||
uv run python -c "import $IMPORT_NAME; print(dir($IMPORT_NAME))"
|
||||
poetry run python -c "import $IMPORT_NAME; print(dir($IMPORT_NAME))"
|
||||
|
||||
- name: Import test dependencies
|
||||
run: uv sync --group test
|
||||
run: poetry install --with test --no-root
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
|
||||
# Overwrite the local version of the package with the built version
|
||||
@@ -238,7 +234,7 @@ jobs:
|
||||
PKG_NAME: ${{ needs.build.outputs.pkg-name }}
|
||||
VERSION: ${{ needs.build.outputs.version }}
|
||||
run: |
|
||||
VIRTUAL_ENV=.venv uv pip install dist/*.whl
|
||||
poetry run pip install dist/*.whl
|
||||
|
||||
- name: Run unit tests
|
||||
run: make tests
|
||||
@@ -247,15 +243,15 @@ jobs:
|
||||
- name: Check for prerelease versions
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
run: |
|
||||
uv run python $GITHUB_WORKSPACE/.github/scripts/check_prerelease_dependencies.py pyproject.toml
|
||||
poetry run python $GITHUB_WORKSPACE/.github/scripts/check_prerelease_dependencies.py pyproject.toml
|
||||
|
||||
- name: Get minimum versions
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
id: min-version
|
||||
run: |
|
||||
VIRTUAL_ENV=.venv uv pip install packaging requests
|
||||
python_version="$(uv run python --version | awk '{print $2}')"
|
||||
min_versions="$(uv run python $GITHUB_WORKSPACE/.github/scripts/get_min_versions.py pyproject.toml release $python_version)"
|
||||
poetry run pip install packaging requests
|
||||
python_version="$(poetry run python --version | awk '{print $2}')"
|
||||
min_versions="$(poetry run python $GITHUB_WORKSPACE/.github/scripts/get_min_versions.py pyproject.toml release $python_version)"
|
||||
echo "min-versions=$min_versions" >> "$GITHUB_OUTPUT"
|
||||
echo "min-versions=$min_versions"
|
||||
|
||||
@@ -264,12 +260,12 @@ jobs:
|
||||
env:
|
||||
MIN_VERSIONS: ${{ steps.min-version.outputs.min-versions }}
|
||||
run: |
|
||||
VIRTUAL_ENV=.venv uv pip install --force-reinstall $MIN_VERSIONS --editable .
|
||||
poetry run pip install --force-reinstall $MIN_VERSIONS --editable .
|
||||
make tests
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
|
||||
- name: Import integration test dependencies
|
||||
run: uv sync --group test --group test_integration
|
||||
run: poetry install --with test,test_integration
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
|
||||
- name: Run integration tests
|
||||
@@ -310,7 +306,6 @@ jobs:
|
||||
UPSTAGE_API_KEY: ${{ secrets.UPSTAGE_API_KEY }}
|
||||
FIREWORKS_API_KEY: ${{ secrets.FIREWORKS_API_KEY }}
|
||||
XAI_API_KEY: ${{ secrets.XAI_API_KEY }}
|
||||
DEEPSEEK_API_KEY: ${{ secrets.DEEPSEEK_API_KEY }}
|
||||
run: make integration_tests
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
|
||||
@@ -336,10 +331,13 @@ jobs:
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python + uv
|
||||
uses: "./.github/actions/uv_setup"
|
||||
- name: Set up Python + Poetry ${{ env.POETRY_VERSION }}
|
||||
uses: "./.github/actions/poetry_setup"
|
||||
with:
|
||||
python-version: ${{ env.PYTHON_VERSION }}
|
||||
poetry-version: ${{ env.POETRY_VERSION }}
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
cache-key: release
|
||||
|
||||
- uses: actions/download-artifact@v4
|
||||
with:
|
||||
@@ -375,10 +373,13 @@ jobs:
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python + uv
|
||||
uses: "./.github/actions/uv_setup"
|
||||
- name: Set up Python + Poetry ${{ env.POETRY_VERSION }}
|
||||
uses: "./.github/actions/poetry_setup"
|
||||
with:
|
||||
python-version: ${{ env.PYTHON_VERSION }}
|
||||
poetry-version: ${{ env.POETRY_VERSION }}
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
cache-key: release
|
||||
|
||||
- uses: actions/download-artifact@v4
|
||||
with:
|
||||
|
||||
21  .github/workflows/_test.yml (vendored)
@@ -13,8 +13,7 @@ on:
|
||||
description: "Python version to use"
|
||||
|
||||
env:
|
||||
UV_FROZEN: "true"
|
||||
UV_NO_SYNC: "true"
|
||||
POETRY_VERSION: "1.8.4"
|
||||
|
||||
jobs:
|
||||
build:
|
||||
@@ -27,14 +26,17 @@ jobs:
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python ${{ inputs.python-version }} + uv
|
||||
uses: "./.github/actions/uv_setup"
|
||||
- name: Set up Python ${{ inputs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
|
||||
uses: "./.github/actions/poetry_setup"
|
||||
id: setup-python
|
||||
with:
|
||||
python-version: ${{ inputs.python-version }}
|
||||
poetry-version: ${{ env.POETRY_VERSION }}
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
cache-key: core
|
||||
- name: Install dependencies
|
||||
shell: bash
|
||||
run: uv sync --group test --dev
|
||||
run: poetry install --with test
|
||||
|
||||
- name: Run core tests
|
||||
shell: bash
|
||||
@@ -46,9 +48,9 @@ jobs:
|
||||
id: min-version
|
||||
shell: bash
|
||||
run: |
|
||||
VIRTUAL_ENV=.venv uv pip install packaging tomli requests
|
||||
python_version="$(uv run python --version | awk '{print $2}')"
|
||||
min_versions="$(uv run python $GITHUB_WORKSPACE/.github/scripts/get_min_versions.py pyproject.toml pull_request $python_version)"
|
||||
poetry run pip install packaging tomli requests
|
||||
python_version="$(poetry run python --version | awk '{print $2}')"
|
||||
min_versions="$(poetry run python $GITHUB_WORKSPACE/.github/scripts/get_min_versions.py pyproject.toml pull_request $python_version)"
|
||||
echo "min-versions=$min_versions" >> "$GITHUB_OUTPUT"
|
||||
echo "min-versions=$min_versions"
|
||||
|
||||
@@ -57,7 +59,8 @@ jobs:
|
||||
env:
|
||||
MIN_VERSIONS: ${{ steps.min-version.outputs.min-versions }}
|
||||
run: |
|
||||
VIRTUAL_ENV=.venv uv pip install $MIN_VERSIONS
|
||||
poetry run pip install uv
|
||||
poetry run uv pip install $MIN_VERSIONS
|
||||
make tests
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
|
||||
|
||||
14  .github/workflows/_test_doc_imports.yml (vendored)
@@ -9,7 +9,7 @@ on:
|
||||
description: "Python version to use"
|
||||
|
||||
env:
|
||||
UV_FROZEN: "true"
|
||||
POETRY_VERSION: "1.8.4"
|
||||
|
||||
jobs:
|
||||
build:
|
||||
@@ -19,23 +19,25 @@ jobs:
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python ${{ inputs.python-version }} + uv
|
||||
uses: "./.github/actions/uv_setup"
|
||||
- name: Set up Python ${{ inputs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
|
||||
uses: "./.github/actions/poetry_setup"
|
||||
with:
|
||||
python-version: ${{ inputs.python-version }}
|
||||
poetry-version: ${{ env.POETRY_VERSION }}
|
||||
cache-key: core
|
||||
|
||||
- name: Install dependencies
|
||||
shell: bash
|
||||
run: uv sync --group test
|
||||
run: poetry install --with test
|
||||
|
||||
- name: Install langchain editable
|
||||
run: |
|
||||
VIRTUAL_ENV=.venv uv pip install langchain-experimental -e libs/core libs/langchain libs/community
|
||||
poetry run pip install langchain-experimental -e libs/core libs/langchain libs/community
|
||||
|
||||
- name: Check doc imports
|
||||
shell: bash
|
||||
run: |
|
||||
uv run python docs/scripts/check_imports.py
|
||||
poetry run python docs/scripts/check_imports.py
|
||||
|
||||
- name: Ensure the test did not create any additional files
|
||||
shell: bash
|
||||
|
||||
14  .github/workflows/_test_pydantic.yml (vendored)
@@ -18,8 +18,7 @@ on:
|
||||
description: "Pydantic version to test."
|
||||
|
||||
env:
|
||||
UV_FROZEN: "true"
|
||||
UV_NO_SYNC: "true"
|
||||
POETRY_VERSION: "1.8.4"
|
||||
|
||||
jobs:
|
||||
build:
|
||||
@@ -32,18 +31,21 @@ jobs:
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python ${{ inputs.python-version }} + uv
|
||||
uses: "./.github/actions/uv_setup"
|
||||
- name: Set up Python ${{ inputs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
|
||||
uses: "./.github/actions/poetry_setup"
|
||||
with:
|
||||
python-version: ${{ inputs.python-version }}
|
||||
poetry-version: ${{ env.POETRY_VERSION }}
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
cache-key: core
|
||||
|
||||
- name: Install dependencies
|
||||
shell: bash
|
||||
run: uv sync --group test
|
||||
run: poetry install --with test
|
||||
|
||||
- name: Overwrite pydantic version
|
||||
shell: bash
|
||||
run: VIRTUAL_ENV=.venv uv pip install pydantic~=${{ inputs.pydantic-version }}
|
||||
run: poetry run pip install pydantic~=${{ inputs.pydantic-version }}
|
||||
|
||||
- name: Run core tests
|
||||
shell: bash
|
||||
|
||||
26  .github/workflows/_test_release.yml (vendored)
@@ -14,8 +14,8 @@ on:
|
||||
description: "Release from a non-master branch (danger!)"
|
||||
|
||||
env:
|
||||
PYTHON_VERSION: "3.11"
|
||||
UV_FROZEN: "true"
|
||||
POETRY_VERSION: "1.8.4"
|
||||
PYTHON_VERSION: "3.10"
|
||||
|
||||
jobs:
|
||||
build:
|
||||
@@ -29,10 +29,13 @@ jobs:
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python + uv
|
||||
uses: "./.github/actions/uv_setup"
|
||||
- name: Set up Python + Poetry ${{ env.POETRY_VERSION }}
|
||||
uses: "./.github/actions/poetry_setup"
|
||||
with:
|
||||
python-version: ${{ env.PYTHON_VERSION }}
|
||||
poetry-version: ${{ env.POETRY_VERSION }}
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
cache-key: release
|
||||
|
||||
# We want to keep this build stage *separate* from the release stage,
|
||||
# so that there's no sharing of permissions between them.
|
||||
@@ -46,7 +49,7 @@ jobs:
|
||||
# > from the publish job.
|
||||
# https://github.com/pypa/gh-action-pypi-publish#non-goals
|
||||
- name: Build project for distribution
|
||||
run: uv build
|
||||
run: poetry build
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
|
||||
- name: Upload build
|
||||
@@ -57,18 +60,11 @@ jobs:
|
||||
|
||||
- name: Check Version
|
||||
id: check-version
|
||||
shell: python
|
||||
shell: bash
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
run: |
|
||||
import os
|
||||
import tomllib
|
||||
with open("pyproject.toml", "rb") as f:
|
||||
data = tomllib.load(f)
|
||||
pkg_name = data["project"]["name"]
|
||||
version = data["project"]["version"]
|
||||
with open(os.environ["GITHUB_OUTPUT"], "a") as f:
|
||||
f.write(f"pkg-name={pkg_name}\n")
|
||||
f.write(f"version={version}\n")
|
||||
echo pkg-name="$(poetry version | cut -d ' ' -f 1)" >> $GITHUB_OUTPUT
|
||||
echo version="$(poetry version --short)" >> $GITHUB_OUTPUT
|
||||
|
||||
publish:
|
||||
needs:
|
||||
|
||||
11  .github/workflows/api_doc_build.yml (vendored)
@@ -5,6 +5,7 @@ on:
|
||||
schedule:
|
||||
- cron: '0 13 * * *'
|
||||
env:
|
||||
POETRY_VERSION: "1.8.4"
|
||||
PYTHON_VERSION: "3.11"
|
||||
|
||||
jobs:
|
||||
@@ -45,18 +46,20 @@ jobs:
|
||||
fi
|
||||
done
|
||||
|
||||
- name: Setup python ${{ env.PYTHON_VERSION }}
|
||||
uses: actions/setup-python@v5
|
||||
id: setup-python
|
||||
- name: Set up Python ${{ env.PYTHON_VERSION }} + Poetry ${{ env.POETRY_VERSION }}
|
||||
uses: "./langchain/.github/actions/poetry_setup"
|
||||
with:
|
||||
python-version: ${{ env.PYTHON_VERSION }}
|
||||
poetry-version: ${{ env.POETRY_VERSION }}
|
||||
cache-key: api-docs
|
||||
working-directory: langchain
|
||||
|
||||
- name: Install initial py deps
|
||||
working-directory: langchain
|
||||
run: |
|
||||
python -m pip install -U uv
|
||||
python -m uv pip install --upgrade --no-cache-dir pip setuptools pyyaml
|
||||
|
||||
|
||||
- name: Move libs with script
|
||||
run: python langchain/.github/scripts/prep_api_docs_build.py
|
||||
env:
|
||||
|
||||
24  .github/workflows/check_diffs.yml (vendored)
@@ -18,8 +18,7 @@ concurrency:
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
UV_FROZEN: "true"
|
||||
UV_NO_SYNC: "true"
|
||||
POETRY_VERSION: "1.8.4"
|
||||
|
||||
jobs:
|
||||
build:
|
||||
@@ -128,19 +127,24 @@ jobs:
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python ${{ matrix.job-configs.python-version }} + uv
|
||||
uses: "./.github/actions/uv_setup"
|
||||
- name: Set up Python ${{ matrix.job-configs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
|
||||
uses: "./.github/actions/poetry_setup"
|
||||
with:
|
||||
python-version: ${{ matrix.job-configs.python-version }}
|
||||
poetry-version: ${{ env.POETRY_VERSION }}
|
||||
working-directory: ${{ matrix.job-configs.working-directory }}
|
||||
cache-key: extended
|
||||
|
||||
- name: Install dependencies and run extended tests
|
||||
- name: Install dependencies
|
||||
shell: bash
|
||||
run: |
|
||||
echo "Running extended tests, installing dependencies with uv..."
|
||||
uv venv
|
||||
uv sync --group test
|
||||
VIRTUAL_ENV=.venv uv pip install -r extended_testing_deps.txt
|
||||
VIRTUAL_ENV=.venv make extended_tests
|
||||
echo "Running extended tests, installing dependencies with poetry..."
|
||||
poetry install --with test
|
||||
poetry run pip install uv
|
||||
poetry run uv pip install -r extended_testing_deps.txt
|
||||
|
||||
- name: Run extended tests
|
||||
run: make extended_tests
|
||||
|
||||
- name: Ensure the tests did not create any additional files
|
||||
shell: bash
|
||||
|
||||
15  .github/workflows/run_notebooks.yml (vendored)
@@ -15,7 +15,7 @@ on:
|
||||
- cron: '0 13 * * *'
|
||||
|
||||
env:
|
||||
UV_FROZEN: "true"
|
||||
POETRY_VERSION: "1.8.4"
|
||||
|
||||
jobs:
|
||||
build:
|
||||
@@ -25,10 +25,13 @@ jobs:
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python + uv
|
||||
uses: "./.github/actions/uv_setup"
|
||||
- name: Set up Python + Poetry ${{ env.POETRY_VERSION }}
|
||||
uses: "./.github/actions/poetry_setup"
|
||||
with:
|
||||
python-version: ${{ github.event.inputs.python_version || '3.11' }}
|
||||
poetry-version: ${{ env.POETRY_VERSION }}
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
cache-key: run-notebooks
|
||||
|
||||
- name: 'Authenticate to Google Cloud'
|
||||
id: 'auth'
|
||||
@@ -45,17 +48,17 @@ jobs:
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
uv sync --group dev --group test
|
||||
poetry install --with dev,test
|
||||
|
||||
- name: Pre-download files
|
||||
run: |
|
||||
uv run python docs/scripts/cache_data.py
|
||||
poetry run python docs/scripts/cache_data.py
|
||||
curl -s https://raw.githubusercontent.com/lerocha/chinook-database/master/ChinookDatabase/DataSources/Chinook_Sqlite.sql | sqlite3 docs/docs/how_to/Chinook.db
|
||||
cp docs/docs/how_to/Chinook.db docs/docs/tutorials/Chinook.db
|
||||
|
||||
- name: Prepare notebooks
|
||||
run: |
|
||||
uv run python docs/scripts/prepare_notebooks_for_ci.py --comment-install-cells --working-directory ${{ github.event.inputs.working-directory || 'all' }}
|
||||
poetry run python docs/scripts/prepare_notebooks_for_ci.py --comment-install-cells --working-directory ${{ github.event.inputs.working-directory || 'all' }}
|
||||
|
||||
- name: Run notebooks
|
||||
env:
|
||||
|
||||
22  .github/workflows/scheduled_test.yml (vendored)
@@ -14,9 +14,7 @@ on:
|
||||
|
||||
env:
|
||||
POETRY_VERSION: "1.8.4"
|
||||
UV_FROZEN: "true"
|
||||
DEFAULT_LIBS: '["libs/partners/openai", "libs/partners/anthropic", "libs/partners/fireworks", "libs/partners/groq", "libs/partners/mistralai", "libs/partners/google-vertexai", "libs/partners/google-genai", "libs/partners/aws"]'
|
||||
POETRY_LIBS: ("libs/partners/google-vertexai" "libs/partners/google-genai" "libs/partners/aws")
|
||||
|
||||
jobs:
|
||||
compute-matrix:
|
||||
@@ -81,8 +79,7 @@ jobs:
|
||||
mv langchain-google/libs/vertexai langchain/libs/partners/google-vertexai
|
||||
mv langchain-aws/libs/aws langchain/libs/partners/aws
|
||||
|
||||
- name: Set up Python ${{ matrix.python-version }} with poetry
|
||||
if: contains(env.POETRY_LIBS, matrix.working-directory)
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: "./langchain/.github/actions/poetry_setup"
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
@@ -90,12 +87,6 @@ jobs:
|
||||
working-directory: langchain/${{ matrix.working-directory }}
|
||||
cache-key: scheduled
|
||||
|
||||
- name: Set up Python ${{ matrix.python-version }} + uv
|
||||
if: "!contains(env.POETRY_LIBS, matrix.working-directory)"
|
||||
uses: "./langchain/.github/actions/uv_setup"
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
|
||||
- name: 'Authenticate to Google Cloud'
|
||||
id: 'auth'
|
||||
uses: google-github-actions/auth@v2
|
||||
@@ -109,20 +100,12 @@ jobs:
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ secrets.AWS_REGION }}
|
||||
|
||||
- name: Install dependencies (poetry)
|
||||
if: contains(env.POETRY_LIBS, matrix.working-directory)
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
echo "Running scheduled tests, installing dependencies with poetry..."
|
||||
cd langchain/${{ matrix.working-directory }}
|
||||
poetry install --with=test_integration,test
|
||||
|
||||
- name: Install dependencies (uv)
|
||||
if: "!contains(env.POETRY_LIBS, matrix.working-directory)"
|
||||
run: |
|
||||
echo "Running scheduled tests, installing dependencies with uv..."
|
||||
cd langchain/${{ matrix.working-directory }}
|
||||
uv sync --group test --group test_integration
|
||||
|
||||
- name: Run integration tests
|
||||
env:
|
||||
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
|
||||
@@ -134,7 +117,6 @@ jobs:
|
||||
AZURE_OPENAI_LEGACY_CHAT_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_LEGACY_CHAT_DEPLOYMENT_NAME }}
|
||||
AZURE_OPENAI_LLM_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_LLM_DEPLOYMENT_NAME }}
|
||||
AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_NAME }}
|
||||
DEEPSEEK_API_KEY: ${{ secrets.DEEPSEEK_API_KEY }}
|
||||
FIREWORKS_API_KEY: ${{ secrets.FIREWORKS_API_KEY }}
|
||||
GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }}
|
||||
HUGGINGFACEHUB_API_TOKEN: ${{ secrets.HUGGINGFACEHUB_API_TOKEN }}
|
||||
|
||||
37  Makefile

@@ -1,9 +1,5 @@
.PHONY: all clean help docs_build docs_clean docs_linkcheck api_docs_build api_docs_clean api_docs_linkcheck spell_check spell_fix lint lint_package lint_tests format format_diff

.EXPORT_ALL_VARIABLES:
UV_FROZEN = true
UV_NO_SYNC = true

## help: Show this help info.
help: Makefile
@printf "\n\033[1mUsage: make <TARGETS> ...\033[0m\n\n\033[1mTargets:\033[0m\n\n"

@@ -29,20 +25,20 @@ docs_clean:

## docs_linkcheck: Run linkchecker on the documentation.
docs_linkcheck:
uv run --no-group test linkchecker _dist/docs/ --ignore-url node_modules
poetry run linkchecker _dist/docs/ --ignore-url node_modules

## api_docs_build: Build the API Reference documentation.
api_docs_build:
uv run --no-group test python docs/api_reference/create_api_rst.py
cd docs/api_reference && uv run --no-group test make html
uv run --no-group test python docs/api_reference/scripts/custom_formatter.py docs/api_reference/_build/html/
poetry run python docs/api_reference/create_api_rst.py
cd docs/api_reference && poetry run make html
poetry run python docs/api_reference/scripts/custom_formatter.py docs/api_reference/_build/html/

API_PKG ?= text-splitters

api_docs_quick_preview:
uv run --no-group test python docs/api_reference/create_api_rst.py $(API_PKG)
cd docs/api_reference && uv run make html
uv run --no-group test python docs/api_reference/scripts/custom_formatter.py docs/api_reference/_build/html/
poetry run python docs/api_reference/create_api_rst.py $(API_PKG)
cd docs/api_reference && poetry run make html
poetry run python docs/api_reference/scripts/custom_formatter.py docs/api_reference/_build/html/
open docs/api_reference/_build/html/reference.html

## api_docs_clean: Clean the API Reference documentation build artifacts.

@@ -54,15 +50,15 @@ api_docs_clean:

## api_docs_linkcheck: Run linkchecker on the API Reference documentation.
api_docs_linkcheck:
uv run --no-group test linkchecker docs/api_reference/_build/html/index.html
poetry run linkchecker docs/api_reference/_build/html/index.html

## spell_check: Run codespell on the project.
spell_check:
uv run --no-group test codespell --toml pyproject.toml
poetry run codespell --toml pyproject.toml

## spell_fix: Run codespell on the project and fix the errors.
spell_fix:
uv run --no-group test codespell --toml pyproject.toml -w
poetry run codespell --toml pyproject.toml -w

######################
# LINTING AND FORMATTING

@@ -70,9 +66,9 @@ spell_fix:

## lint: Run linting on the project.
lint lint_package lint_tests:
uv run --group lint ruff check docs cookbook
uv run --group lint ruff format docs cookbook cookbook --diff
uv run --group lint ruff check --select I docs cookbook
poetry run ruff check docs cookbook
poetry run ruff format docs cookbook cookbook --diff
poetry run ruff check --select I docs cookbook
git --no-pager grep 'from langchain import' docs cookbook | grep -vE 'from langchain import (hub)' && echo "Error: no importing langchain from root in docs, except for hub" && exit 1 || exit 0

git --no-pager grep 'api.python.langchain.com' -- docs/docs ':!docs/docs/additional_resources/arxiv_references.mdx' ':!docs/docs/integrations/document_loaders/sitemap.ipynb' || exit 0 && \

@@ -81,8 +77,5 @@ lint lint_package lint_tests:

## format: Format the project files.
format format_diff:
uv run --group lint ruff format docs cookbook
uv run --group lint ruff check --select I --fix docs cookbook

update-package-downloads:
uv run python docs/scripts/packages_yml_get_downloads.py
poetry run ruff format docs cookbook
poetry run ruff check --select I --fix docs cookbook
@@ -21,6 +21,7 @@ Notebook | Description
[code-analysis-deeplake.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/code-analysis-deeplake.ipynb) | Analyze its own code base with the help of gpt and activeloop's deep lake.
[custom_agent_with_plugin_retri...](https://github.com/langchain-ai/langchain/tree/master/cookbook/custom_agent_with_plugin_retrieval.ipynb) | Build a custom agent that can interact with ai plugins by retrieving tools and creating natural language wrappers around openapi endpoints.
[custom_agent_with_plugin_retri...](https://github.com/langchain-ai/langchain/tree/master/cookbook/custom_agent_with_plugin_retrieval_using_plugnplai.ipynb) | Build a custom agent with plugin retrieval functionality, utilizing ai plugins from the `plugnplai` directory.
[databricks_sql_db.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/databricks_sql_db.ipynb) | Connect to databricks runtimes and databricks sql.
[deeplake_semantic_search_over_...](https://github.com/langchain-ai/langchain/tree/master/cookbook/deeplake_semantic_search_over_chat.ipynb) | Perform semantic search and question-answering over a group chat using activeloop's deep lake with gpt4.
[elasticsearch_db_qa.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/elasticsearch_db_qa.ipynb) | Interact with elasticsearch analytics databases in natural language and build search queries via the elasticsearch dsl API.
[extraction_openai_tools.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/extraction_openai_tools.ipynb) | Structured Data Extraction with OpenAI Tools
273  cookbook/databricks_sql_db.ipynb (new file)
@@ -0,0 +1,273 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "707d13a7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Databricks\n",
|
||||
"\n",
|
||||
"This notebook covers how to connect to the [Databricks runtimes](https://docs.databricks.com/runtime/index.html) and [Databricks SQL](https://www.databricks.com/product/databricks-sql) using the SQLDatabase wrapper of LangChain.\n",
|
||||
"It is broken into 3 parts: installation and setup, connecting to Databricks, and examples."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0076d072",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Installation and Setup"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "739b489b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install databricks-sql-connector"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "73113163",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Connecting to Databricks\n",
|
||||
"\n",
|
||||
"You can connect to [Databricks runtimes](https://docs.databricks.com/runtime/index.html) and [Databricks SQL](https://www.databricks.com/product/databricks-sql) using the `SQLDatabase.from_databricks()` method.\n",
|
||||
"\n",
|
||||
"### Syntax\n",
|
||||
"```python\n",
|
||||
"SQLDatabase.from_databricks(\n",
|
||||
" catalog: str,\n",
|
||||
" schema: str,\n",
|
||||
" host: Optional[str] = None,\n",
|
||||
" api_token: Optional[str] = None,\n",
|
||||
" warehouse_id: Optional[str] = None,\n",
|
||||
" cluster_id: Optional[str] = None,\n",
|
||||
" engine_args: Optional[dict] = None,\n",
|
||||
" **kwargs: Any)\n",
|
||||
"```\n",
|
||||
"### Required Parameters\n",
|
||||
"* `catalog`: The catalog name in the Databricks database.\n",
|
||||
"* `schema`: The schema name in the catalog.\n",
|
||||
"\n",
|
||||
"### Optional Parameters\n",
|
||||
"There following parameters are optional. When executing the method in a Databricks notebook, you don't need to provide them in most of the cases.\n",
|
||||
"* `host`: The Databricks workspace hostname, excluding 'https://' part. Defaults to 'DATABRICKS_HOST' environment variable or current workspace if in a Databricks notebook.\n",
|
||||
"* `api_token`: The Databricks personal access token for accessing the Databricks SQL warehouse or the cluster. Defaults to 'DATABRICKS_TOKEN' environment variable or a temporary one is generated if in a Databricks notebook.\n",
|
||||
"* `warehouse_id`: The warehouse ID in the Databricks SQL.\n",
|
||||
"* `cluster_id`: The cluster ID in the Databricks Runtime. If running in a Databricks notebook and both 'warehouse_id' and 'cluster_id' are None, it uses the ID of the cluster the notebook is attached to.\n",
|
||||
"* `engine_args`: The arguments to be used when connecting Databricks.\n",
|
||||
"* `**kwargs`: Additional keyword arguments for the `SQLDatabase.from_uri` method."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b11c7e48",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Examples"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "8102bca0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Connecting to Databricks with SQLDatabase wrapper\n",
|
||||
"from langchain_community.utilities import SQLDatabase\n",
|
||||
"\n",
|
||||
"db = SQLDatabase.from_databricks(catalog=\"samples\", schema=\"nyctaxi\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "9dd36f58",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Creating a OpenAI Chat LLM wrapper\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(temperature=0, model_name=\"gpt-4\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5b5c5f1a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### SQL Chain example\n",
|
||||
"\n",
|
||||
"This example demonstrates the use of the [SQL Chain](https://python.langchain.com/en/latest/modules/chains/examples/sqlite.html) for answering a question over a Databricks database."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "36f2270b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.utilities import SQLDatabaseChain\n",
|
||||
"\n",
|
||||
"db_chain = SQLDatabaseChain.from_llm(llm, db, verbose=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "4e2b5f25",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new SQLDatabaseChain chain...\u001b[0m\n",
|
||||
"What is the average duration of taxi rides that start between midnight and 6am?\n",
|
||||
"SQLQuery:\u001b[32;1m\u001b[1;3mSELECT AVG(UNIX_TIMESTAMP(tpep_dropoff_datetime) - UNIX_TIMESTAMP(tpep_pickup_datetime)) as avg_duration\n",
|
||||
"FROM trips\n",
|
||||
"WHERE HOUR(tpep_pickup_datetime) >= 0 AND HOUR(tpep_pickup_datetime) < 6\u001b[0m\n",
|
||||
"SQLResult: \u001b[33;1m\u001b[1;3m[(987.8122786304605,)]\u001b[0m\n",
|
||||
"Answer:\u001b[32;1m\u001b[1;3mThe average duration of taxi rides that start between midnight and 6am is 987.81 seconds.\u001b[0m\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'The average duration of taxi rides that start between midnight and 6am is 987.81 seconds.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"db_chain.run(\n",
|
||||
" \"What is the average duration of taxi rides that start between midnight and 6am?\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e496d5e5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### SQL Database Agent example\n",
|
||||
"\n",
|
||||
"This example demonstrates the use of the [SQL Database Agent](/docs/integrations/tools/sql_database) for answering questions over a Databricks database."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "9918e86a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents import create_sql_agent\n",
|
||||
"from langchain_community.agent_toolkits import SQLDatabaseToolkit\n",
|
||||
"\n",
|
||||
"toolkit = SQLDatabaseToolkit(db=db, llm=llm)\n",
|
||||
"agent = create_sql_agent(llm=llm, toolkit=toolkit, verbose=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "c484a76e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3mAction: list_tables_sql_db\n",
|
||||
"Action Input: \u001b[0m\n",
|
||||
"Observation: \u001b[38;5;200m\u001b[1;3mtrips\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3mI should check the schema of the trips table to see if it has the necessary columns for trip distance and duration.\n",
|
||||
"Action: schema_sql_db\n",
|
||||
"Action Input: trips\u001b[0m\n",
|
||||
"Observation: \u001b[33;1m\u001b[1;3m\n",
|
||||
"CREATE TABLE trips (\n",
|
||||
"\ttpep_pickup_datetime TIMESTAMP, \n",
|
||||
"\ttpep_dropoff_datetime TIMESTAMP, \n",
|
||||
"\ttrip_distance FLOAT, \n",
|
||||
"\tfare_amount FLOAT, \n",
|
||||
"\tpickup_zip INT, \n",
|
||||
"\tdropoff_zip INT\n",
|
||||
") USING DELTA\n",
|
||||
"\n",
|
||||
"/*\n",
|
||||
"3 rows from trips table:\n",
|
||||
"tpep_pickup_datetime\ttpep_dropoff_datetime\ttrip_distance\tfare_amount\tpickup_zip\tdropoff_zip\n",
|
||||
"2016-02-14 16:52:13+00:00\t2016-02-14 17:16:04+00:00\t4.94\t19.0\t10282\t10171\n",
|
||||
"2016-02-04 18:44:19+00:00\t2016-02-04 18:46:00+00:00\t0.28\t3.5\t10110\t10110\n",
|
||||
"2016-02-17 17:13:57+00:00\t2016-02-17 17:17:55+00:00\t0.7\t5.0\t10103\t10023\n",
|
||||
"*/\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3mThe trips table has the necessary columns for trip distance and duration. I will write a query to find the longest trip distance and its duration.\n",
|
||||
"Action: query_checker_sql_db\n",
|
||||
"Action Input: SELECT trip_distance, tpep_dropoff_datetime - tpep_pickup_datetime as duration FROM trips ORDER BY trip_distance DESC LIMIT 1\u001b[0m\n",
|
||||
"Observation: \u001b[31;1m\u001b[1;3mSELECT trip_distance, tpep_dropoff_datetime - tpep_pickup_datetime as duration FROM trips ORDER BY trip_distance DESC LIMIT 1\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3mThe query is correct. I will now execute it to find the longest trip distance and its duration.\n",
|
||||
"Action: query_sql_db\n",
|
||||
"Action Input: SELECT trip_distance, tpep_dropoff_datetime - tpep_pickup_datetime as duration FROM trips ORDER BY trip_distance DESC LIMIT 1\u001b[0m\n",
|
||||
"Observation: \u001b[36;1m\u001b[1;3m[(30.6, '0 00:43:31.000000000')]\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3mI now know the final answer.\n",
|
||||
"Final Answer: The longest trip distance is 30.6 miles and it took 43 minutes and 31 seconds.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'The longest trip distance is 30.6 miles and it took 43 minutes and 31 seconds.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent.run(\"What is the longest trip distance and how long did it take?\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -528,12 +528,7 @@ def _get_package_version(package_dir: Path) -> str:
"Aborting the build."
)
exit(1)
try:
# uses uv
return pyproject["project"]["version"]
except KeyError:
# uses poetry
return pyproject["tool"]["poetry"]["version"]
return pyproject["tool"]["poetry"]["version"]


def _out_file_path(package_name: str) -> Path:
|
||||
@@ -1 +1 @@
|
||||
eNrtWktz28Ydb5qbTz31jKDtpUNA4Puh4WQoyZYlWaJiypHdxMNZLhbESgAWxi74kEaHuv0C6PQLNFbEjkZxknGmjdO45x76BeRDP0v/C5AiFbmNkZmcAh8o7uL//O3/BXOfTgYk4JR571xQT5AAYQELHj2dBORJSLj445lLhM3M0912Z+9ZGNDL39pC+LyxtIR8qjOfeIjqmLlLg/wStpFYgu++Q2Ixpz1mjl///DfHqks4R33C1cZHxypmoMoTakPdI46juERBygE7JGpODZhDYD/kJFBPHudUl5nEgY2+L7QS01zqUUlFuA/iSddigYtA0LEqxr5kPODM63JsExcB3eIKaObfTMJxQH1pJDBtgm5FMEVIa6RqHXj9AHwLBJU2AysRoX+TM4YIvkpuIFFCf+6KoCL2pROz5mYWchFQr6+egIbQw7ZDPXJTLvL4kARSKrjKnAGYZ5PvCt694r8h/ERi9CSkATEB8an1ixofz8VsTqUmEljvgGABa2SaVFqDnN0FJCzkcALSPeQu8Eq1wNQQQUikblgT5Cbr2arLYudiNKmHndAk3VDGxIxtYhNkQjD+52e/OLUZF9Hz6wH2OcKYQBgQDzMTvIw+6x9RP6eYxHKQIOcQVR6Jwzc6PyTE15BDB+Qs4Yq+QL7vUIzk8yUZFxfTKNSk4zcfn8s40MA8T0QvW3zs4TZY0tpY2h1DOnhKXq/k9fwXI40LRD0HwltzEBh15sfP/7H4wEf4ECRp01SLzhLm54s0jEefbiPc7lwTiQJsR5+iwK2UXizuB6EnqEuiyeruTXXTh3N1RT1f0GtfXhMsPYo+i/804k/KXi4S2MSBQ9eS5I++7RGBdJne+kJ668nJ/v2aciKCsYYZ2BD9xXg+A9khXl/Y0bNyqf7XWfb+4QzYRMifnsKBkn//azItEp+0t+ax8MvTNTjc6NU+MXOKUVbukJ5SMApl+GiUK41SVVnf3rtYnarZk2d5CYk8EktkIHcSE5cVsD2ANGiGwtJqX+4FkGEW+Hd7FkwTbIfeITHPV98YRq9kGIF70h+oURoZ+YwTbWpmdPFQu5/US21j7UUSsxoL+sijR3FMRa/ieBoejYYmDk3THgxdo35UKtIeCbH11ZQFio5UAwZpLo+eVY368+mT2WGeg/OGljc0I//NSAsAG4e6FACOP6dFm0enZcMwvr5JICBhobxPSkb875+LFAEUR6ivoHsuplSv1799M9FMVBFI6tX6N9epAOsFMfmCy7++STAV8YnBL0Yzao2a0eWvYdEtVyrVWr1XqALG1ULP6lWtnkVwr0CKFaNoVV8mZUcT8jB9FsBhEwwdSoyjy5yLRjJ7m8V8GagNY1mZFp1O2Ftj0ge+rPgBcRgyP8eWhhE0By0JyGiy9mintb2xeg61W1tl7JCSP71+591uF1vdntu07naQvrJl7m1VyuPdD+/dGRyMxmzYfRTuj93tQs0/qFr7dw9b1Y0PtHy1WAMvDKOq5XVDh7TXcP1JpT00vNtVZ9UYhit0xNuP+vbDjYebv/PWibU58jbcJw/WwnIhWHf0tRX04LBTDYatUT3o3tt3xoPtVn1lrby9Pqp96G95R8MPSm4LvEHCbi4tKxCbUPh5c5oyGqSMJhMm3yjMEmZZMWMMmvr1Grus3IXm3vac8bLSkWAS+AulvkMFae4wj1z+GTAIB9Rsruq2l39QWa1zr1cbuX5xZ2d/H9+l44PyPUP3Udja2lm5XdDXj7YXQMgXy5oxxaFilGpxFM5N/4FW/e2htlgBtHbSaqKJx7hHLeusQwJIoOgcOyw0oV0E5Gz1jna/9Sj6qm6YZWzl62WzYJXMPNZW2p0JciCYBjh6YRebaqNUKqrLiouatQrkTTzU/P4sabWvf9U1kUANBZoatFpVlkgMBVJrjfoH4cpoY8Me992Ng53d+9YQ+/jI3XyACtAyp3024Vgoqnpch4AAQ90Ssn1fgVd54zikQUEoaTK8arITg6MUky506wDooDei0JHtnI+5IG7XAptJ4IPpUrfld6sFYlZRr1zCUqfNgDkZ0qhnkpHaMHIgxBHx1DQdzhAUKEgUT4qdj3LxWGZBRwf7vNBxYAxwWB8KWo8nGzkVlFNud8ExLqecmAoGkekUEC9v3frpwDnH7vhjVc3wenu8knk2Q+ztEftYbWQxlgqxfXuc4ZUCL8UEbzPAUgAGr/QZYGkA4xgGtwyyFJDhgA0zwNLE2JB6GWBpAEMZXunwGqIga5RpIHs/QyvVpJ/LJv1UiGVvkmnQkj8hZYClACz+vS0DLPu/ih8PsRWCUcizKEs1iGVv3ynfjBDPAEsDGAuF/JnEnF73yIB7W+CyV/CUlYxmmZkKMIsSJ3sHTwPZe9lElgqvkwyt70Pr+wFSuWC++hOBaO7lsbx76vqim1yPUhu1mrxoMbP3artQzamCCeRc7eTz5dx15q5JBKJOfOsyvuJkXhHDUaDQpGy+cfIGNYsCknMBf/6PDNiIL1yCIj8gJsXXTDbkJRF5DP/j8cnJ1fF+tNbeuf341q3/ArgMR1M=
|
||||
eNrtWk1z28YZbtqbTz31jGJ66hAUQIIUSQ2nQ1ORbDkSJVGJJTsezmqxENYEsDB2wQ9pdKjbU2/o9A80VsSORnGScaaN07jnHvoH7EN/S94FQYks0TaTYwY6kNzdd9+PZ9+vFfB8MiAhp8x/75r6goQICxjw+PkkJM8iwsXvLz0iHGZd7Ha6By+ikL79tSNEwBsrKyigRRYQH9EiZt7KwFjBDhIr8DtwScLm4phZ43c/7Z6pHuEcnRCuNpTHZypmIMsXMFAPiOsqHlGQ8pT1iVpQ1JC5RK5EnITq+ROY8ZhFXDl1EgjNZJpHfZpQEh6AFNKzWeghye5MFeMg2f2UM7/HsUM8JEnnh5Js7qdFOA5pIBWWG7dADUUwRUjFpA5FuT8IwdJQ0MQC2E5EFGTtTjCD35IDEClRMGeZoGJqWjfZLmdSbbkIqX+inktJkY8dl/okiz3y+ZCEkjnYztwBaOqQZf67NzwyZJwnyD2LaEgseRypMQuin8xz25qxTxmx46cECzmDLItK1ZC7O4+PjVxOpBgfeQscpApYnpMII5IoAjMEeenMzbjHEounWFMfu5FFepF0oJu9E4cgC1z33z/5+YXDuIhfLrrj5whjAu5CfMwssDv+7OSUBgXFIraLBLkCF/RJ4uzxVZ+QQEMuHZDL6a74CxQELsVIrq9I37lOXVaTICwvX0lH0UA/X8SvW3zs4w5o0rq/sjuG4PEVo1ipFytfjDQuEPVdCAbNRaDUZZCs/31+IUC4D5y0NDDjy+nml/M0jMefbiPc6S6wRCF24k9R6FXNV/PzYeQL6pF40t5dFpcu3oorFw2juPrlAmNpUfxZ8tVIPil7PU/gEBeOX5umivjbYyJQUSaD4lwyKE7P9m8LwokIxxpmoEP8Z/3lDGSX+CfCiV9U6pW/zIL8d5ewTUT8+QUcKPnXPydpSvmk8+DWF35xsQ6HG785cKKCoteVLeQrJb1UgY9GebWh15TN7YPrdirmQJ7lW4j0kVghAzkzVXFNAd1DCItmJGyt9uVBCHFng33vz5xpgp3I7xPrqp3pRm+kG4F50h5IZxoZBYwTLVUzvj7U9qfZVbu//mrqsxoLT5BPTxOfit8k/jQ8HQ0tHFmWMxh6ev3ULNNjEmH7q3QLJCUpBhTSPB6/KJWNl+nK7DCvwHhdM3RNN74ZaSFg41KPAsDJZ5rieXxR0XX962UCAXELxWBi6snfP+YpQsifkIdB9i0bs16vf5tNNGNVBpL6av2bRSrAeo6NUfL418sEKYtPdH49mlFr1Irf/goGvZJhmTrGJWwghEplU1/Vy6vExAgZq2ULV19Pk48m5GEGLITDJhjqmRjHbwseGsnobZaNSrkKlq4padbpRsfrTNrA15QgJC5D1uftDa2NoH5o3cQh48n60U5r+377CtK61masT8kf3733s14P271jr/mMbT3ceWDX9PYWOr1LVksbe6xu7o3Mjeqwfuh27f2SMToqD1vhngaqVs1KRS/VNKOoF42ioT2q7fHRPUT65PTho3VkfETK9vr+Q4vaQ7HR2beKg5aoeofHoh+iUr10tHnYLYUP9gy7dHRsDLro3iNjR3/aPt3uf3BYRJsla5d99GwI1iDhNFfWFPBNqAS8mYaMBiGjyYAxGvosYNYUK8GgWVzMsWvKPWgFOr47XlO6EkwC35Dyu1SQ5g7zyds/AQbRgFpNapFKtCFabm209YG56zJ7v91iRx92quG2LsJwf8fZbFlbm50DNgeCUa1reopDVTdriRfeqv4DtfrroTafAbTOtNjEE59xn9r2ZZeEEEDxFXZZZEG5CMklnPl+6yj+qmbbq+V6uWzbVrmKcEW72+lOkAvONMDxK6fcVBumWVbXFA81a1WIm6QF+u3ltPi+U/5gIYGSqga1V5UpEkOC1Fq+eFQ97Rs1kwUty6ivf2iQ+xVW75+Ot3fVwqzkTnfMJdVikoeAAEPeErKe34JXmPVN822TBgnB1PRVzajBLj7mgng9G1QjYQAaShF20LNL2CrVqnUTSdYOo1jW9ceyFltkpDb0AjQmroD+6Sxt11QEeQgSNHAo3LZ3KgxCYkPlBjX8yHXPC6rLTiBvHfPpREEF4ZQ7PdCfyx4noXpSUNNqnwzv3PnRoXYL0dnHqprDsgTLtCnNgVkC5mO1kXtMFjAPnXEOyzIsigVG5bgs4wI35xyXDFw4hoYnR2YZGRyyYY5LhscMqZ/jkoELymHJhGWIwrwkZSDzmxyUrH63kPe7WcDkt6MMUOQTjByXZVySpzo5Lvlt+vsCc5dgFPHcZ7IamPzimH0NQDzHJQMXFgn5n3Fr+oQ/x+c/8clvj9lZhubhlIWLTYmbXx8zkPll3slkwXKeg5KC8v9xULlggfrjQuLWmDP5Gp8XiN70TRK1UavJh9UztW6mS9WCKphA7s2MYZiFxc09iwhEXZ74mnwbxLohBsRRZFF2O3GeIWaewRR+sOd/8ICJ5N00EBSExKJ4QWVdPmiXaP+X5fPzm1N8vN7Zef/JnTvfAT6lHnY=
|
||||
@@ -1 +1 @@
|
||||
eNrtVU9v3EQUV4/c+AiutxKXzNpjr+3dlRBKWrUpTUkKgfCnaPU887Lrrj1jZsZpNiEHAhJctx+hiRIUVdALR245cOALhE/DzGY3CALiwKUSPdjWvHnz5r3fe7+fD093UOlCihsvCmFQATN2oZ8dnir8okFtvjmp0IwkP95Y/2DzqFHFxcbImFr3gyBHriQbE9UIU1TYbjRB0IbQNlSwJwU81W0mq6CSHMsAhBkpWReszUpoOJKdKCjEjhzjcS755OLLff/KZTBPyu97/uKWKIxiEiYkpv6S51eoNQxRW4/P9n0lS3S+jUbldpm0tQjjTJtYll6FHnhP7FX+wefuMOwOjF0Jd5yGUefgdITA7Z3fniwzhrWZnr8JdV0WDBwcwRMtxYvbl0HJ5qTGv9k/+9BeTpaH1mV6/mxFGhkHtB2nbRp7FQ+cgUmFrYWtgSBqh57UQQVM6lYUt1O7tq6g2KgFqko7XgliGNQT2wDRits0anedQz0pqrps3d6YbXhsexgoNGpCHNKtEofAJt7iwkUSZx+T5WqP3AHjsrdoJmEUJpv2SZPepz+B3dN8TFxLLqsiBZ+e30qjvMtitk26OaWkk3YY6fXyjAANgQIkEKfbPy4Oz2dmev4GGINVbd6mPyxgW0MxNKPpEY063yvUtZ0y/PpEGzCNPjxmNvNffzmdt/X5+oNFR767uDlLXJCV+SDc/yPDNVuMYJPpUS+lx7PKft5CvuSFiXcXc88VaV/9JOuHoXfv4ea1WOuNqRvbUjcM5La0gzx9HmXXco7D9MzaBM7YMT0bI9YEymIHL7xr2V0LSOOXuwSc0/uX+Nzn04tbYZpF0LVIctpNSCfKYtIDYATCBLpJTHudlP955l7+deRmxPnKYqgKMfztRrTvF9zv+5UeDnKuxoOQpqvvPVp9KuTK+JO1ZDt/1L3X7O1aAvnGBnSul3hbwyWFfNC6sD2x3FnyZ7y1tjlf7bRa4xW1LO3mQQzuOvfZp++vosK3tOWbLspyMmNd33ssHoutERiPS28iG4+BZSXY1dCrYFiwAsQ73rId99wqEHD34VLddHT1tZH1QFllcXrgo+AD0yjhzze0g1Qwm4doynLJSoCrp2+hcI24ojmNl3w5a/aVKcoODl7L3P9H5rIw/GeZyyFCyhIgeZZHpJOnnEDUoyRmSZhsdzLOu/RVkLn4X2SOvmoyB0jRYhESlqH9f8S8Q/JeRkkCFDEOwxCz6D/L3GoPa3V3vDfZqfDBR+8+3E3k1p3ktcwtZO535liL9g==
|
||||
eNrtVMtuHEUUVZbs+IRKTyQ2U9PV7+mRUOQYCCEOdmBQIhM0qqm+/ch0V7W7qm13zCwwSLCdfEJs2ciKIBuW7LxgwQ+Yr6F6PGOEJkEIKdmETbfq3ltVt8655xye7kIlM8GvPc+4gooypRfy6eFpBTs1SPXdSQEqFdHx1ubnw6O6yi62UqVKOTDNMUSVYBNc1VxlBfRqiYFKha0eLegTweme7DFRmIWIIDcpV2klyoz1WE7rCPCubWZ8V0zgeCyi5uLrA+OqZLRoyhggY3mLTWwHEw87ltFFRgFS0gSkrvjywKhEDm1tLaFqs0zot3DVhoaQ56gARNFjfZUx/ardTPdHSq94u90itjs9TYFG+s7vT9YYg1LNzt+lZZlnjLZwmI+l4M/XLw/Fw6aEl+TPvtCX47VEl8zOn94SSjim1XO8nmujIjLbABMVdJaxmpp2jyAhzYIyITu20/P1WpfSiqUdWhW+i3LKE7NsNAG84/Qsqxe0BWWTFWXeWd+aJxCLE7MCVTW4RbqTQ0JZg5YXLps4e4jXiif4A6ra7jWaHrFIOLSJE1j97V+ozslogltKLl+Fs2h2fsMH4lq0H2M7cmLsuhHBNLYiHDDL9hjzfeZYPy83L2Zmdv4OVQqKUr1v/bSEbQN4otLZkWW7P1YgSz1l8O2JVFTV8vCY6c5//+10QeuzzbtLRn5YOcAh/sX1+WM4vrUYjs1albWmpiUVrws9kLNndvB3yl6sMKbTHOYDPzubAJSY5tkurBx+5y9INjR6nDWzo75NLtBK3UoPlvNiH9O26LNLaO5Es4sbtuv6fgw+jkPX05jaFIc+AzwmfY9SxwVmu8dzon4dpnUXkRB9QjlqOdOfgRMMrBDdvjecC+cbjWGV8eSPa/aBkUXGwChkMhpH1WRELNfasR5s3+/TDz9q1lL+6c7kfrpdGl1DaUTa0ku8deBSQgaVMtOcaO10jbludWyhVz2tOnglLS27xSEK9tvy+W9gfAwVvCe13mSW581cdQP0iD/iD1KqUCRQI2rEqFYl1asEFTTJWEb5TbSmx32sHYhG7S8S1fVWroZUohxV2llaPzCARyNVV9xYJGSLK2e6D17neVdbQPuegYaiZeNK5pbTNcR8SK5CdjCd/m9zb5PNha+2OS+I7D71CWZ9om1uTD08ZiTAzBuHACFxA4jftM05/87mvNdpcyT87zbngQ9uTFwMgcM0pi7DWtweDpyQeTEJ+v3Y+Webs8lLbM5ctTnbHW49bBI74+HYKne3NyaJ21dv3OZe4XJM7KG9TKWIC6SnVt5EtyuNW4TGAPFr9jhvOv0TTIuLpA==
|
||||
File diff suppressed because one or more lines are too long
@@ -1 +1 @@
|
||||
eNrtVs1u20YQbq9GD730rhI9FVqJlKhfwyhs2a4Vx5b/EP8UgbBaDkVaJJfmLmXJhg9N+wJ8hCaOFBiukyBBm/6k5x76Au6hD9En6FCSYRk24geodCDE3dmZb775ZodP+m0IhM29j89tT0JAmcQXET3pB3AQgpDf91yQFjdO12qbW8/CwL780pLSF+V0mvp2ivvgUTvFuJtua2lmUZnG/74DAzenDW50L7eOFReEoE0QSvmbY4VxjORJpaxY4DhcSSoBdwBfQwGBcvI4qbjcAAcXmr4kOieu7dloJWQA1FXKMgjhpG8BNRD6Px99empxIaOLm3BeUsYAj4PHuGF7zejH5pHtJxMGmA6VcIYgPBgkG521AHxCHbsNveGp6BX1fcdmNN5P7wvunY9AE9n14fb2WQydYIaejN7WEMRsNb3WRd68hJbKayntVYcISW3PQSKIQxFPzx/s/za+4VPWQidkVJOoNzx8MW7DRfR8hbLa5g2XNGBW9JwGbl5/M74ehJ60XYj6lbXb4Uab1+GyKS2TKr6+4Vh0PRY9N6kj4Ocbh0EGXcI4+oh+UHuM85YN0eW/9Toz6w13puvsa3tblWJ7Z2W5UM0H+jbsbNuFvWo1oLUDr24KvhOEO35GLxKtkC0WiiVVzREtpaYwZbKw6GSr+oJYWs3l1J39xeVmUKvq9czq+oZaouxR0z1wtdoD3exkH1new1nuWAtyOdzP+I2Vh53m6txqsHa4sl3l2/5cJb+8m8m2Vluz0wlEF7ZtY2ajJpa0owUpm61dZ2HP26Dr+7V1f5Gmdh/Mdfhhu5XbfQCHJcrXx+CppQxRRwjzql5U49/FlTYc8JrSip4W9RcBCB8bAL7rIWMyFE9OUYbw15/9USc8rS1fK/iz03mUZPR+G4xkQs0lFqGRyKiZHD7KuXxZzyW+Xtk6r4yibMUKvExI6Mg0tOOVYWNMJ7D9AgFyJpQmKb7eCqgnTJTlwlUL9JkVei0wzip3iv99LH6sbJwOdiSBjs8FkBHM6HyHbAzvBFKdfzPsNMKDJvXso0EnRO8HXXB41Dk0WGgYVvvQVUtHetZuQMjMt6MjfsDjMAiIuCJ6lteyF6OdKx2eYfIq0VSiar92SIDcOLZrI7+D5+hiEtFpDsl/d9tA8hbgFdbXB9VR/xi3CMBFAcexr93opVLp97uNrlxl0aRUvIkGSwzjaLSMK97dNhi5eKqK886VNbGN6PILfKmbmp4paEUwMoWCqheYWco3IKfpJWpmsmYRfsHa2gy9xMX0eYDFBoa3sOxGl0mXduI7Zyar5bJ5zHQ6YXvMCQ3YDBvzPM5BTCf8ABxOjZfMJIwyC8hQkFF/fnd1dqVa+WmHjCuL1PzhBOh7XHi2afY2IcDCRGfM4aGBl2cAvcoi2Zjdjd6WVCPHTC2DCtGzar5B5mqbfeogyDaL3ljZGaWs61llOuHSmWIe6zEYCN/24qS85t+fvDCopOXEsWIbeNvH04Ph7CCznea+PJBNL6ev+0eudrTr8xKbLxZae9VNHAS8sY/qHZ1IXc+b1EDfaMCwHySgz+vOvXOoEBSaTtQC0YrxfMFEbQZ1aeMYKis4KWjoyHijKyS4dRMxQ+Aj9Di26dcLGTAKtJHTWRzT4nh4OOFsz4COUlaT6MSRVCkfX404isLHAnix2+s5GI9AMENBEZ8XOs5JUnF4ExulIYYLSQWD28KqY2I4bkZWj0+mpv4/DF7TtTT4bJiQ9EGSPp8Q9GGCEkv8cMLRPRwx6k04uoej6oShexgaTr0JTffQ1OXhhKN7OJLcoN0JSx9m6asJQXcQdD8nipDcV8ZY+Wa+trrweGrqP/aerWg=
|
||||
eNrtVs1u20YQbq/upZfeWaKnQiuTEvVDGUZhS05tq7Ic24ktp4GwWi5F2uQuy13KkgwfmvYFeOgDNHGkwnCdBAna9Cc999AXcA99iD5Bh5IMy3CA3lvpIGh3Z2a/+eabHT0admgoXM7ePXeZpCEmEhYifjQM6RcRFfLrgU+lw63Tzfr2zpModC8/dqQMRGl+HgdumgeUYTdNuD/f0eeJg+U8/A48Ogpz2uJW7/LzY9WnQuA2FWpJeXCsEg5XMQkL1aGex9WUoobco8lGJGionjyEHZ9b1Eu22oFEBke+y9zEUsiQYh8OZBjRk6FDsQUp/PXO+6cOFzK+uAnrGSaEQgDKCLdc1o6/b/fdIKVY1PawpGeAhdFR0vHZIaUBwp7boYOxV/wcB4HnEpyczx8Izs4n2JHsBfT28VkCH0GmTMav6gBiaW1+swf8MUVP58x07nkXCYld5gEhyMOAZxCMzn+ZPggwOYQgaFKbeDB2vpi24SJ+WsOkvn0jJA6JEz/FoZ83Xk7vhxGTrk/jYXnz9nWTw+vrsmldTxde3AgseozET23sCfrjDWcqwx4iHGLE32oDwvmhS+PLv5tNYjdb/uIG7XsFPXdvt2Pd2ZP3e1V/db0ic3ecsNHfzNeX0mk3rC17W16aI72QzRu5nJYpID2tpfW0jhp7vaDXuMs7ph3lauV62S9knIO7+/l2I6DLGRG6qxteq91pyX3GsFVn62JzN1ir9rXGyqahV1faRwapGfvbPJ8j7K5ZrTb3WksLCqCLOq61SCqieg/7wiOVejmoNrr3e59t7JuZTju9m9nLH24dVt192twKd2tT8LRCDmkThHnNKGrJ5+JKGx5lbenEj83MdyEVATQC/WoAjMlIPDoFGdI/fh9OOuJxvXqt4A9OKyDJ+M2OE6UUzVTWMVMyWiYHX6VsoaQVlE9rO+flyS07iQIvFUm7cp52kp1xZywo0IahoHIxkjYqvtgJMRM2yHLlqgWGxInYIbXOym8V/5tE/FDZJB1oS0S7ARcUTWDG53toa/w2oLXKy3GnIR62MXP7o06I34y64KjfPbJIZFlO58jXzL6RdVs0IvariUsQ8uQaAIR8ET/RC4WLycmVDs8geQ3pGtL0n7soBG4813eB39H35IES8WkOyH9920DyQwpP2dAYVUf7bdoipD4IOLn7Ooxhmuavbze6CpUFE7N4Ew2UmE6j0TO+eH3bYBLisSbOu1fWyLXiy49g0TSLhZZhmPmMlSsYtJhtFfMYG5m83crrOingn6C2LoEoSTEDHkKxKYHXWPbiy5SPu8mbs5jVc9k8ZLqguIx4kUW3o1aFJzmIBSUIqcex9ax8B5UxcSjaHgkyHlYaG0u1tfIPe2haWagejCfBkHHBXNsebNMQChOfEY9HFjyeIR1ArK2lRvyqaNuFrJnVzKJlmlouj5br20PsAcgOiV862UW1ZBhZdUHx8WIxD/UYDYYvB0lSrP3ne99YWOKScqy6llpSkylCYIagJSb3c5ly8YDZtGHUBPP31o+427eMGl5WUypvHYB6Jx7p67mTHukbDAj0g6QQ87pzU1dzZXqsIBCagTR4dYrgJXpCUr9pAzQaBoAwucIOmnaGWJli3jRwEtrhLkkGGswzl1m0q5a0lAqRJVZLx5NxpmLQNzQ+REhdTz0VFiG1I4EBBos87ySlerwN/dAS442UCpe7wmkCfpgqE6uHJ3Nz/zmirllZHf0VmHGROH4442HkqKzyoxkVYyoIZjMqxlSszYgYEzGeLzM2xmz0eDSjYkyF5BbuzcgYOX7yf+bh31NXheSBOpX8g0p9Y+Xh3Nw/AhlHvQ==
|
||||
@@ -1 +1 @@
|
||||
eNptU39oG1Ucz5g/NqeuTFFEZPGcyrQvubRJf4QJdklrZ9cfayNbJ7O8vHvJ3XJ573r3LjbtZm2nICvIHgiDwQbaNKlZ3VLdrNP4g3VCtWX/+IuhiOCcSnX+IaIrjvqaJrWlO7jj3ff35/v5vMFMApuWRsmaMY0wbELExI/FBzMm7raxxV5KxzFTqZJqa+0IDdumdulhlTHD8rvd0NBckDDVpIaGXIjG3QmPO44tC0axlQpTJXlJ6ZPisKeL0RgmluT3yBXecqkUIvmf7ZNMqmPJL9kWNqVyCVExBGHCoGJdp9LBfSKcKlgXFqRDW8GgEqhQi9mgQtSSK+Vq6WBGxVARKL53lKVUajE+vmqy0xAhbDCACaKKRqL8rWivZpQ7FRzRIcNZ0ZfgAnSejWFsAKhrCZxezOI5aBi6huCC373fomSsOCdgSQOvdmcX0AABkTA+UVeaw92WFIskTtnl9bkqcj3AYlAjulgG0KEYKW0U/B8sdxgQxUQdUCSJpxeTTy2PoRYfaYaotWNFSWgilY9AM17lfWe53bQJ0+KYZwJtq9sVnf+3q3R5Klw14ysKW0mC+EgE6hYeX1ryUkpW8FIJ5CogeyZWlMbMTAJERQf+unyqtEAdkyhT+bBHlkdNbBlCe/hQWqQx2xpMCbLwzFSmKJg3WptKVA+lgoI2/uFurJQ7ZZ+zAYedorFPfPy+Kr/X63yqOTQWKDYJ3ZCl8ZAJiRURTNWXVJFBqk1iWMkGbqiHbPFOAE3heXHukj3V1buNnb5wc4tqqI17O3e172nE6vPDCQ3yrMflcUYpjer4NIoABJGKwSIyngl2ttQ17wiM7QHtNEyZBUIwylOEEpzuwKZYJs8indqKkKeJ04EG0F7Xyc/UyooPRWQIw0ptTQTVgu2C9RLKJRSpBW0X7t+A2KQpTBf+2Ty0zlF41op3fl55ta5p8smyl+c/9/98R+rNL9++l549MfIox989cOzI7zuPHa9vmuo/GtpwOHPrH30H8uvfX3dny7VfZr+abri29erc3+9u/HbyyoFXPMGbN/Tep3buutizJSc/vfe2mXPTh/N6y6atA4eO5ILZr58Z2teVTZOrs/Z1xz2DuU2Pv/fNr5HQXH7uqJHLvHb54m8/Td6k/Kl/dP2K9OloML/t/L+RB0/e/sKWmfM/vFjTCx6a3DjFXBcGPune8dn2c88l7mLb7v7rsSe6b5k4WyafuTzZOHo8Nz89e//m9T/2P1LfX30ycuLjAry1ji/M0P6mNQ7Hf3JxBh0=
|
||||
eNptU39oG1UczxyDFRHmHE7UaYjzdy+5a9okjbNY06lb1zVNg7jKOl7v3uXOXN47770rzdoKdp3+Edj6/hjFMQRtenEh6xYrVNjmP60gstI5pG4yREE6EHQIQ9bhiC9ZUlu6++O4+37e98fn8/m+kVw/tIiO0YaCjii0gEz5D2EjOQu+b0NCR50UpBpWstHO7viEbenXntUoNUnY5wOm7gWIahY2ddkr45SvX/KlICEgAUm2Dyvpa8agJwUGDlGchIh4wm5JbGisd3tqh3jk3UGPhQ3Ivzw2gZaHozLmkyBaDmnQMLBn+GA5ByvQKMdkA9gKFPyCBvSkLTTwkqJfDHqGcxoECmfzi2tLVsOEsuK6Cc8CWYYmFSCSsaKjBDuTOKyb9W4FqgagMM9bI1iRgOWTEJoCMPR+6NzLYueAaRq6DMq47z2CUaE6qkDTJlwP58uMBE4UUTbTWpvDF01zQZFb9DaKXvHcgEAo0JHBJREMwEdyzAp+fjVgAjnJ6whVs5hzL3lq9RlM2GQHkDu715QElqyxSWClAo3Tq+OWjaiegiwXia5vVwX/b+f3SpI3WFxTmKSRzCZVYBBYXBF5JSXPffELYkAQpZk1pSG10oKMeQf2mThVE9CAKEE1NiGJoS8sSEy+g/CIw9OoTUay3Cx46btcdW0+72yvWZ3JtnHb2MW4Zte7xWb3XoDcvHETf4X9wbAYcL/ZES9Eqk3i93WpGLcAIip3andtK3KyZqMkVPKR++5Dvno3BF1hF/j3IVHq6oklgiFlX0iPEtK+FxC1SwWxiX4dsLzkldwJjBMGPBt5Q4gAWYNCd4UZy7Ud2N/asSdSeEeI4T5MiRAHCZZFGEGnG1pcTJaXDWwrfD0t6PD0WOsB9lVIVYP+5gbQBGCgUVZDwuvc9RrLFRbZ8m5X7uGHXEmLh+ZuP53Z7Ko8G5VjP7TPio/NlU4MLLxIz4jzGd/L+zb93bWlTrq7eELd89MQWLxSOhje8ULLjus3//q6LvXQ0YhBv/mZnv/j1qeL87PO0J2hQu+xhfnUprqnBnti2yb0zVsjZgb0Gc88sTAWGj355LaC/7RTuPSP82fs7d+/tXuXkm9d/fWTXZGL+8d3O8cnBx89KjRvn5oLnBpt6r2euTksfbQz9KN6Yfbf5x5uvnXY/ciRjx9Y7iGP7/rgtazswXXfP39j55XlL09evtNSXLrMjkeDWxevvqq/MvZbYODu+I3tLWOnS9Njt2emX1paftDlKpU2ukjb0nj7BpfrP/yBBQM=
|
||||
@@ -1 +1 @@
|
||||
eNqdVXtsU9cZT5qtDRVqR1WVtgxxZ1HRh8/1vX5fZ2ZKYhKiYJzaDiRBNDq+99i+8X3lPhI7lD3CJDatr1sQLYVCWzt2m6RAQqCUNqjvBoa6jU1rXa2ok0bY1oKGEF27bmHHjjMSwV+7f9zX+c73/b7v9/2+M1joQ6rGy1L1KC/pSIWsjj80c7Cgol4DafrP8yLSkzKXawtFollD5YsPJnVd0Xw2G1R4UlaQBHmSlUVbH21jk1C34XdFQGU3uZjMZYrKFouINA0mkGbxbdpiYWUcSdItPksUCQIhIgISPXIKWawWVRYQ/m9oSLVs3Wy1iDKHBPwjoejAKQORl3hspekqgqLFF4eChqwWHYkKRq4bKt5LkdTWQhJBDqf1ZC4pa7p5YCHQg5BlEfaHJFbmeClhvpoY4BUrwaG4AHU0jOFJqFwGcziFkAKgwPeh/Owu8xBUFIFnYWnd1qPJ0mglHaBnFHT98nApF4Bzl3RzIoRB1LfY2jK4ohJBk26apA+lgaZDXhJwiYAAMZ68Ul5/Y/6CAtkUdgIqbJn52c0H5tvImjkUhGwossAlVNmkOQRV0e08PP+/akg6LyKz0Nh2fbjK4rVwDpK2k96xBY61jMSaQ2USXluwGelqBrAy9mG+SB2Yq4+ApISeNLM0bX9ZRZqC+wNty+NtuqEN5jAX6PRUodIoL4Va50g8W7U0F8C8mJMbEWclKBfRhGKEnbK78M3ncvucXqI5GB1trISJ3pCGsagKJS2OqVgzR3uBTRpSCnHDjTckfLJEOM6mBB+3JUBpRdYQqKAyRztAeFYhoCVweLa7gKwmoMQPlMOak2Xm+wfS/RxrcFyyr1+kmAGng48hg41PVLYoqlwKgwEBUTOzDOM9UFmZq/0wzpUCNAUo+nga4D5HAi/yuJ7le0WmmplzURR17HoDHSsLC7rgpMrXifkWKhIxaaXY19w4GYZ588ZGc64c2ITxMMcXWmloPhraLmrHrjeouHiJ0kbTc9aA58ziSvzRDTnG4XAih9PjgN64K+amXSwd57ysnXNDxsm+jpXPs9hLiUxFVnWgIRbPJD1jFq0iTJd05nfQLocbZ1pH8BIrGByKGLGAXMpBqyMUFQky5A6yccBCNonAbP+ZhUDn+vpgS+NwBINslOUUj57+tLqmu5uNd8dE/7o1VHOID7X3hqk1XeudKY3NeGLd4ZBmGC1xJRBq6TEeHpC0tSovA9rj8Hq8DEV5AU1SJFYpcDEpb5ILrW00WtJNVKC1IRTs9darQW8kJnUE6Hi/t51R1DUBt0RFpai7K6FCUW/qc3Kcm2v3BrsMkmwWtCZBbpd7Okl7Z1v3OjvVj7OBetJvqyNwb/K4vv6KQgBWCCjpg/bZ5/RRR3DlGvjJhdOwjliLx3lIEjJ1RKRUTISfUEQRXkf+9bKEijtxDYw+nvNT9UxnbzoQbWXCSTfk0qlA2Ej2t1MsGWxIe5L1altTfVci3GNXEvOK4KY9gKrUwU05veUuvAb9/0R1tAPMFzwIKbPnVkGSNYmPx/MRpGIBmcOsIBscHuwqyjc2gXB9pznBUJyLjdspDrkYbxzGQQMemXPe/jcecqVToQAF3GN9rHk46fBbfE6nw1JHiNDvdWM5lU+3n+VLPSkl3q9+Z8WvaqvKV81j4bef+D31vcnzD928f9nOXeTfvhr8fPA7Ny0ia8dGpo6sOml9Qizu0DeOzNSpZ8TxmHns63+cPX15i+Pys9W1Z2K3Nxx5uv18cGbmxOvLVs8Iz01v+f7Z20Z/8aM9D927evmJrd88uOGVd5dfbLsU+Th1zic/dbL4yw3V97Hhzc/IfecuHr//lPnXvU07Pr33N9/88dzyzlOe9+N7lqITZ59/Y1PzrdlP3j24uOrz3kd3hEYuPfLlix3+Hyz/4P5ld37buuSntR+tDEzd/cD4hHWD45W9aCJ7ZcWFmuSZt0BD1P7CrRdrm8cJMJVd/K/PHp8cW9F2crVlorb5nrev7A24ijdPNbQu6WnIdoOfLOr458P53FHP9GP7yaOPpx5Vtqenbengrtim7+78XZGjxy/HuNA9b13w7pvpjP152xc/3Pjlj7PBwS+urFw1PWSd+mw39+GZ019PD1wZ7Boy79t97O6DbeEjL5//KLN0/OO/3BU4OvXv13Y/M1H4E7fnqrl/0dBvHX8/dcsfcvtu2ffrm/I7/rOxiD7IwiX04tyh2DtPffXC2K7J3be1vLd96pN2jHLb7c7NkQsj6rbjO7ePvHpH8YFva6qqrl6tqbq0btXWTfj9v/DeoiM=
|
||||
eNqdVX1sG+UZTxaJFnWoSFSDsqLezCRGydl39p3PjmVYcJI2TRwnsfM1VqLX7722r7mv3EccG0JZAG2ihepKNVGto6xxbMjSpPmAprRdB6JSK5Ao6YcwaKu0TlNBKfso65BAdO85zpqo/Wsn+c73Ps/7fPx+z++94cIA0nRBkSvHBdlAGoAGftGt4YKG+k2kG8/nJWSkFD7XGonGRkxNKG5KGYaq17hcQBWciopkIDihIrkGaBdMAcOF/6siKoXJxRU+U8w+5ZCQroMk0h01xBNPOaCCU8kGfnHEkCgSEiIAsV3pQ45qwqEpIrItpo40x9A2vCIpPBLtpaRqkIxCSoIs2J66oSEgYUMCiDrCCwaSVNyCYWp2BMpJDRVSCPC4wd25lKIb1sTKkicBhAjHRDJUeEFOWoeSWUGtJniUEIGBxnCdMioBYo31IaSSQBQGUH5xl3UYqKooQGDbXdt1RR4v90UaGRXdah6zOyIxCrJhzUZwEbWNrtYMxlYmaCfrd7KHB0ndAIIsYrBIEeB68mrJfmy5QQWwDwchy7xZ+cXNE8t9FN0aDQMYia4ICTSYskaBJnmZmeXrmikbgoSsQqj11nRl4810HidNO7mpFYH1jAyt0RINR1ZsRoaWIaGCY1i/oyaW8BGRnDRS1gjtdr+hIV3Fk4Key+NthqkP5zAX6MPThfLIHIw0LZH454p7c3WYF+tELGVWE5Sf2Apkwk25WXyr8XA1eGVzODYeKqeJ3ZaGqZgGZD2Bqahfor0AU6bch/ix0G0JP2ETjruxy8fTSaJBVdERWa7KGu8m2xe1QjbWzSxOF6loSSAL2VJa60SJ+XR2MM1Dk+dTA2mJ8mcZjxBHJkzMlreommKnwQWRkm6NeFhmomxZwn4M90qRNEVS9DuDJB50JAqSgPEs3cuC1a0cS1HU3K0OBlYYlnaBoUrXH5Z7aEjCpNm5b4Zh/H7/8ds7LYXyYBc/539npZeOlldDuyV97laHcoiDlD4+uORNCrxV/DF+6U1wHOulaR5RPOemkZv1+aCbA3EK8hSKe6mjWPwCxFFsMlVFM0gdQXw6GRmrWC2BQVtnQQ/Nery40wAhyFA0eRQ143WK3YMeIFQNiQrgJ0MNZAjAFCKjpfmzCnU9LbXhxtBYFBcZUpQ+Ae35tLKqtxcmeuNS0BfNMqlMZ0fWyekN3nTP5u62uuZEUghzBhVi0jFZYZuiaj1lhsMkzXm8DMtSbj9JOykn7aTJ3tbWDp7a0mlmerIUq5st8Vj91sZaprktokFuC+evZ/Vw93bJ7G9iBKqluSEd06RwJ+S7I4JW10b1iamtzva2x4HQtcU9YIKWdEe9O427AUYq6AoQeDYFjG+wrBASK4S09UHXUEv6CBB8CYOgc+VpGCC24IM9IouZABG1wUT4CSQUFQwUbFFkVNyLMTAHBD7Y0+13N7B9mxv6QXNXe0dHA6r3DPp+5mY0M8Elm5uMNo71d0i17u6eZSBQLJ7cMg5eivGVpvBm6f9nVW93k8sFT0bUxS9YQVZ0WUgk8lGkYQFZY1BUTB4f7BrKY87ba3usWV8iwXn8Hp8X0hwDeUQ+jo/MpWj/Ox5y9lehAEQ8YwPQmkl5go4ahvE4AoQEgj4vllPpO/eLvD2TcvJU5bGNO1dXlK4q/LtxY1f7uy9+Rq07/s0jT5ovN/0lPF38+dY1ZLWYfXh29k/xdZ8XnzlUfefn+xam/3pp5wZl4axzof6X+7iRfQ8FUcXw7CfDrS/xF7KvnDzyLRzCXT129cu/ScfWXDrpPRD6Z/9bn+3I3D2TU7+59MbXPf/uPHrxh7x2ve73z+6hjzonJi9+cGH7k2L2oHl/eM+F166NfvDE9N57r8X3fTR57VSilXt5XcPUCwfWV/wxuiO/v/Pyc5+89/T1XbP3jYy3iR+FKhy+V+7+Ecydnp9+9aWc9uuPr398kt39emMVt7rtzLOb5qo2nB6eJda6r6RV5e9XV8Wmd66+673aBvWBh7rCb98xfbb1+RfPXuHO9X2vc9veN8eu7lr/j8v+I/1vrZU3rvpuVxf/sBf9dP1vw+sfDLiy11+t/PaL/5zbFvvBo3MX9TNfrv2Un+vev/D+15vu+VeCKE4xDz7DLnw1f+j86T3N+xe6fvLm/edHtKe/XxS0exy/Mb7jPrxxPHLizMXJqY13jp71fDG/6nzOuWY3rDx3YEhYOH750cO/2m1NkY9Ms3dsvPIV3NTb+cL8qcTQzF5HILGj0maoqmJ+/L4Nj2G6/gt1fZuE
|
||||
@@ -1 +1 @@
|
||||
eNqdVWtsHNUVdhpoU2FSUihUahGrVUod8F3P7Mw+zbZarxO/Ym/s3djYKKzuztzZGe/M3Mk89uGIhrokkZIINFFTIqgMdTa7rWOchAQCAUe8qhoRMGobVSZpaEgLgiY84qCUVm16d71ubCW/OtI+Zu6553zn+853Z7iUQbohYXXJuKSaSIecSW4Me7iko40WMsxHigoyRcwX1kVj8b2WLs3cI5qmZgQbGqAmubCGVCi5OKw0ZOgGToRmA/mvyaiSppDEfH5m/SanggwDppDhDD6wyclhUkk1nUGniGQZO+udOpYRubUMpDsf2lDvVDCPZPIgpZmAxUCRVIlEGaaOoOIMClA20EMlEUGeYH+sIGLDtCcWozkAOQ6R3UjlMC+pKfuZ1JCk1Tt4JMjQRGMEg4oqvdpjaYQ0AGUpg4pzu+yDUNNkiYPl9YZBA6vjVczAzGvo2uWxMnJAGlRN+0iUgAi3NazLE9pUB+3y0i76YA4YJpRUmfAAZEjwFLXK+ksLFzTIpUkSUJXELs5tnlgYgw17XyfkorFFKaHOifY+qCte9vDC57qlmpKC7FJk3bXlqotXyzEu2u3yH1qU2MirnL2vQvnRRZuRqecBh0kO+1fUxDw/MlJTpmiP+j2/1pGhkRlAPyuSXaZlDBeIFOjEVKk6DKPRjnkNz9TcUWgmstiTfYivd1AexxqUdLgpt4d8BT3eIMs4Wjrj45Fqlfh1VTgU16FqCESJ1fOqlzjRUtOIH4tcV+/Jst6kmTJ6MoMA5TRsIFBFZY/fD3rmXADamg/PDRfAegqq0lClrD1ZET47lMvynMXzYiarUIEhlpGSyOKEI9Utmo7LZQggoBj2Xh8bmKiuzFM/RnqlAE0Bij6WAzqhQpYUidBZ+a5a0bALHoqiXrg2wMRpRExbYqnKdXxhhI4Uolm59tU0bCAQePn6QfOpGBIS8C9GQxRFC9HQbsV44dqAaopRyhjPzUcDibdnVpKbBAVplnUzEAr+pJvxeb1ulmME5Pb7hCTjY+gXic0ljmQpi6lh3QQG4si5Y+btmXoF5so2CzG0h/GSThsdksrJFo9iVrIZl3swGh2ajmQM+QOcADjIiQjMzZ9dau7vCne2RcZiBGQE47SEdr23ZGkiwQmJpBIa6G9xZ1NCJBHvGWznk7pgxo127FtvxTw8k1VFRWDD3uxQphUTpXyM3+cPUBQDaBflIiYFjNjd0pfLr3Fb/lhfot9Mu30GN9gd7fatH8hBSxc3BuQ1HUJ3humQI76Bge6wIeDuDrNTNtlmvre/i17XHkvyeSiv1RK9am6wI632ZEk30BRDDY0OMpsS4TdUdQggDgFlf9BB97w/Gh18hYOQa/Fh2OhoJUd2VJXzjY5YmUxEfqGCYpKJQl1YRTM/JxxYGYkPyYkmd75zYC3l8WCTNSjezHSqOEp0s1oja1t9vWxTqqd9MN5ihheQ4KM9gKry4KVYf2UKr0L/P1E9fz9YaHgQ1ebeTSUVG6okCMUY0omB7DFOxhZPznUdFSNrQE+43z4SoHgPJ1B0QAhAvxCgQBM5Meez/e94KJRfCiUokxnLcPZhkQk5gyzLOBsdCgz5vcROlTfYT4vlmVRTv11SvGvHsprKtZR8rlzZ2fPqo3+gbpn86N6tAV/dzRu+PRRetvyt3d9rqtMvtH2y4XecnDqKTm5u6Xrv7u/seOMH2y5/OPmyb+Tx2prnXgNPn95Rtyl1Mbrt07N94EenLieWv38cD2aPbgo+8dHFoycfu3Ni13D2WyvPrf6yV/+sdvXbmdF9P1yx6sWJ/Vlr/ea67frKoV5+17hBv6+dSe+ffbbOl+/+29//OX7m8C9u0276sbvm4ce/eFIfWP2N002HjjlCWxzB1y5Yy2pCTz7y9LLwtjr5jZNr25+3z3/33yM7z/q06WnHli0HqG/+ZWppc+8H/9i46tIfl4cLn2eoD3c+NdFWm31n1vXLr+49M7j/9eNfrPzaqUt/umXm7NSI++GnuHNnizcEpyd3dP615dk3D46c1h6cPfHM9FuvTHXvufEnK/68x7N99N2762/f2jMV+k1/APfVts8OT92++8ry2Scuxx849enm86mmdt2On7tvFcA3TXMjv1/xn/T5Yx/Q1NcvRgd6bvV8dal2+wC952OW+n6kcFx67sJU44HSv8QNsd2Few69dKr1RNfMXRUxltZ8tnXjXkCU+S+tood0
|
||||
eNqdVX1sE+cZTxapoKJJ1YrUKuqG65VqWvLad77z2RfXa12bAPlyiPPFgGWv7177jtzde7kPx06F2rLuowFKj34ghJoKEmzkZdBA2PjeAKWb0KZprbY2dGVbUbV+bIWKbG23MPbacUYi+Gv3x9n3Ps/7PL/n93ue991ayCDDlLFWPS5rFjKgYJEP09laMNCAjUzrmbyKLAmLY+3xROeobcjT35QsSzcbvF6oyx6sIw3KHgGr3gztFSRoecl/XUHlMGNJLOamNz3hVpFpwjQy3Q2uDU+4BUxSaRb5cEtIUbC73uU2sIJKC7aJDPeWTWRFxSJSSktp3QIsBqqsySVP0zIQVIkhBRUTbSlICIqkhp1jEjYt59BiVIehICCyH2kCFmUt7fwkPSTr9S4RpRRooSKBoqFyzU6xHyEdQEXOoPzcLuc1qOuKLMCS3bvZxNp4BTqwcjq63VwsoQekUM1yJuMERGSttz1H6NNctMfPe/yvZYFpQVlTCB9AgQRPXi/bTy006FDoJ0FARRonP7f50EIfbDoHWqEQTywKCQ1Bcg5AQ+XYowvXDVuzZBU5hWj77ekqxlvpGA9NewITiwKbOU1wDpQp/9mizcgyckDAJIazjzo0z4+CtLQlOft55qCBTJ30AvpenuyybHPrGJEC/fpXhUpT7I83z2t4ueq+sRiRxTnTKdn1Lop3NUHN5aN8fvJqYAINFOda3do5Hq1k6byjChOdBtTMFFFi1bzqBUGytX4kFqN31PtMSW9STAk9aUSAsjo2EaigcsZ7QcfcNIC1saNzzQWwkYaaPFRO65wpCz84lB0UBVsUpcygSvFDLCMnkS2kJitbdAOX0hBAQDWdUcYXPFSxzFNfJLVSgKYARZ/MAoNQociqTOgsvysjaTpjfoqijt/uYOF+RIa3wFLl5+xCDwOpRLNS7lthWJ7nT9/ZaT4UQ1z44GI0RFG0EA3tU83jtztUQuynzPHsvDeQRWf6IfLRB5mACAXex/j4QBAmRZ+PRwzHpAIslxJTgnCCzLkskCglMXVsWMBEAjl/rJwzXa/CbGnMwgztZzhSacgla4JiiyhhJ2O4VIMZcukGUjAUD0cbQRQKEgKJcv85hdj6tkjr2mgxQUBGMe6X0a5L1TV9fUKqL6mGWbuno7utqzmX7KBWb8br+mJJKt1p9+iJYAS2mP4Bri2JIuyaoJEGdIDhWL+f8nGA9lAe2kODQKRLTvfEsusYoynSRfvWs2vszdkhke7tZJt7u+wB2GrHcfeQKlJDEh5oirc1qauDba1NXXp0TbSbi6uretqTvDAY6Mlls6KtsVJvLkKqgZYU9oZcpDdlwm+4MiGATAgozQfdQM3PR8glljkIexYfhiHXGnJ0xzUlF3IlSmQi8gtVlJAtFG7DGpp+kXBgZ2Qx7FezJh9L+KW1nuaBboM2jP5MB90bibCbGwPxjl4qJa0aWN3S1DowuIAE2scDqsIDR7HBchfegv5/ovppL1g48CCuz91RBQ2bmpxK5RPIIAPkFAUF2yI51w2UJ5p3RNY7k8FUKsDwPo5LUSxPUX7wODkx56P973gYK10KBaiQHssIzlGJCbsbWJZxh1wqDAc5Mk7lm+zpfKkntfRU9Ysrti2tKj812zvODb9DLT/977rv2M81v9fKyZ6VPwQbXh65e9PbiB6e+dtB5x/PzLTs9Xzx2arMV0LKsjOfU9dTjzAf+GvuadzmX3rshfFafH3HlcLeq7Pvwzdnrz159sSWv6z4bOrdd41TW6jkruWTN54d/m1x53Atdr58YqS7eqVgvAKw9tdtU5O9Vixaq+76/ch1/v0dAw9781OPvOq5Pv3R2GVn3f1TtSNM1VNvfbLXaNjx86na/AvMG9/dfqRx4uwD1T96jHvw8dcld/Mvk77nhx8Y/Xj/n0aPzVR/8uF93/jwz3UzXwq2L2v59te+tfP71T/mLi6/fOwPI9eurHz2wKUll16/98lzdVda/vhSNXpo939q95z+4G3hrmtv+LXDTUtXHPx0o+/vv1t5oevqwz/Ys0TL1Eapicmvz9af9NX5j7Dcc3fd21T86kcRfrxugyrFxNGLVy/M/ubUYXXjjUf/WbNv+VDjpn89dengx+cvrP/FzNKbdmbP8WX17z3o4xDauOPckjcfffri7q591sTJG+xudyiFCf03b9ZUnb8xu/2emqqq/wK0XX0A
|
||||
@@ -1 +1 @@
|
||||
eNrtWctu20YUbbdZdVOgS5boqtDIpEQ9DaOwLTsxEkeOH0icthBGM0NxbJJDzwwtyYEXTfsDXHXd1JEKw01bJOg7XXfRH3AX/Yh+QS8lObKR9KF1qYUgzty5j3MfhyIfDg+ZVFyEr5/xUDOJiYYLlTwcSnYQM6U/GQRMe4KebDS3tj+PJT9/19M6UvW5ORzxvIhYiHmeiGDu0J4jHtZz8Dvy2UjNSVvQ/vmnD8yAKYU7TJn19x+YRIClUJt1c5v5vhEwAxt7Yp+ZOVMKn8F6rJg0jz/MmYGgzIeFTqSRI1DAQw5SSkuGA7OuZcwurloiGpk06w9MHhI/pqwVpzbHYsc5U7MgggB1LGHNylvHQ49hCtH/8dobJ55QOnlyNaKvMCEMDLOQCMrDTvJl54hHOYMy18eanUIcIRvhlZzuMxYh7PNDNhifSr7GUeRzgtP9uT0lwrNJ3Ej3I/by9mkaNAKHQ508a4ITi2tzG32APjTsfNnO21/3kNKYhz5giXwM/gyi0f5PlzciTPZBCZqkNRmMDz+5LCNU8ngdk+bWFZVYEi95jGVQdp5eXpdxqHnAkuHyxsvmJptTc8W8XchXv7miWPVDkjx2sa/Yd1cOMy37iAjQkXxmDYgQ+5wl53+2WsRttYOFWyvW9SZv7hxsWiv3bzv7ivQr7dZmU8Xxmhs1mmt78Z2jUN2QXCC7UqxWqjXLqiI7b+UhZFSq7Vc92ryxHK/1Vq3GzaXm+kF1Ua5Xt9rhvYbtdqs7tUiuNMqhtR1ul+93JA706qFDaZnuVNfvx/n8dV+t+mJH7O3mC7sbrVsFqztvgHfxIacL1mJt96DX2L5Z2/TKmPb2G5ux192xSH59qVfxFuXG6uL9zuZeIepccq9sV5A18bBsOVUr/Ty5qA2fhR3tJZ/bJfsLyVQEFc0+HgBkOlYPT6AO2W+/Difd9Kh5c1rCb540oCaT53cZzRlWyVhlbaNgFUrwVS+V6yXbuL6+fbY8MbOdluC5oVlPz7HDdGXcRfMGtLBUTC/E2kXVb7YlDpULdbly0QND4sXhPqOny6+s/udp9UNq03igmRHrRUIxNHEzObuHNsdzBa01no5bDQnZwSE/GrVC8nzUBt2jXpeSmFLvsBtYtSOnyNssJu6zyZFIitQMOIQClZzYVs1+Mtm6qMRTiN5CtoUs+8cegr5nPg84IDz6nkw3OFsC+L9/WUDDQII5OHRG+bF+uSwhWQAlnBqfqnFqtdrPrxa6UFUEkVql9uNVKQD7khq7EKjvXxaYqHhkqbPehTTiNDl/By5aNsYVWnZsWrHLAAUp1+y2XXAYcZyS7VScHyC5nICWNJuRkJBtRmCU635yngtwL506C0W7VCxDpPPGZHxuxe2GSGNQ80YkmS8w/Yq4iGDiMTSuyGTY2L29uL62/O09dLm0UHM8jJNhKFTIXXewxSQkJjklvogpjE/JBsuraHNxN3lWs2iJuEWnVGtXqy520VJza4h9cPKQJE+94oJZd5yiOW8EeKFahnyMWOWjQRpU2Pn9rVOKNa4bMPYpMEVKQQQICC32Onv9u/3Npe7OjTtyxbWPogOvqA5uNSv9AEhEtPegfCcn8lPSyo8KHAQINIRmoPOid23rlYSEoNAcZFWQXU25CQLlhLU0Bwqrm8AVOPZ1utFXwEAtF3xmMgLXU9tu1KoUGK3gdskhqU1PwOExTfKQsh4wVQ6U+BqnxDahRwyVDwkIU7VTMk3pk7nAeeBfGPs+UJ4vOtApbTVeyJlgnCuvBYEB4UykgGUnPDm6vHbt/wPnFLu7Xt/M8PrveBkUos0AmwEw7bEMsFkAUwRIIoNsBsiIFN0MsFlqrMvDDLBZAMMZXrPh1cUyI8pZIHvvg/CDrMhmQWyJEQx/mjPMZmnM7G5sRqbEKgNsFsBErNO/6Okzswy4WYDLbslmnGQ868yZAHM587N7slkgeztD69/Q+neATKVFZP5PIJpG+cAEUIJIt8avMMAfO31ofeHvdLmaM7XQ2H+xUqjlrp5tUaYx90eveEdvIegLWYgNx5SL6cLxK6xcVjBOC4TzDzpgYfQuFwxFklFOrnhspc/b0yz8zfbx8Yvsvt9o3l758Nq1vwCNNtHn
|
||||
eNrtWVtv28gV7r7mqS+LvrJEnwpRJiVSEm0YheNLLr7IFzmxkw2E0XAoTkxymJmhLjb80LR/gD+hG0daGN7sLhK0223T5wLtH3Af+iP6C3oo0ZEMc9v3Bf0geM6cOZdvvnMORb0e9wgXlIWfXdFQEo6whIVIXo85eRUTIX8/Coj0mHOx2zxovYk5vf61J2UkFhcWUETLLCIhomXMgoWesYA9JBfg/8gnEzMXHeYMry/O1IAIgbpEqIvK8zMVM3AVSlioLeL7SkAUpLxkJ0QtKSpnPkl3YkG4ev4CJAFziJ+KupHUTKYFNKSpppCcoAA2JI/Jp3WbRRPfID9TaYj92CHtOPWeaZ6DqiRBBMnKmKdSvayfjz2CHIDi3z/7+YXHhEze3U7vG4QxAf8kxMyhYTf5untKo5LiENdHklxCTiGZgJdcnhASacinPTKankq+RVHkU4zS/YWXgoVXGQaaHEbk7vZlmr0GMYcy+dCEIFYeLewO4R5CxShbdtn6dqAJiWjoA7CajyCeUTTZ/8v8RoTwCRjRsjtORtPD7+Z1mEjebiPcPLhlEnHsJW8RD2rm+3k5j0NJA5KMV3fvuss2Z+6qZcMo17+7ZVgMQ5y8dZEvyJ9uHSaSDzXMwEbyB32EGTuhJLn+T7uN3XYnWG4cnJre8MnhabkuNmr94wdHe2tbbpdu16W+avZbIbM2D6J1Pd7e1ox6tWZall6xNaOsl42yobV3dw8d/eGTeHh8qlsi3um01h8/WjG39poc1x/W7XVLbB+9DOJXmybVd7Y2+i0ebD/BzlGT8rU9/cT3Hpf39+4j+vRhpRejnf7heqW/pEB0cY86y8dHdmXDOnmw8QptPd0/PNwg69VB41nF5LFb725tyr26ZR8GK5Wj47nwdEvX9CzCmm429PTv3Q03fBJ2pZe8MWrmV5yICFhNfjcCyGQsXl8AD8k//z7OSuvL5uaMwp9frAEnk48tLy4puq08RqFS0SsWfCxW64uGrjzYbl2tZm5aKQWvFUkGcoH0Usm0kpYUqGcuiFyOpas1vmtxFAoXeLl+UwNj7MXhCXEuV3PZ/zFlP1xtmg+UtUYGERNEy8JMro60/WmT0R6tvZ+WmsZ4F4X0dFIKycdJGfRPB30Hx47j9fqBbp+aVdohMXY/ZEcizlI3EJAWiORNpdJ4l+3cEPESktc1A7A2fhhoUPfEpwEFgCefWacTyYUF6H9/V0FCa4KeODYn16P/bV6DkwAYnPqemTFt2/5rvtKNqSqo2HX7h9tagPWcGaMSiO/vKmQmvtTF1eBGW6NOcv0rWLSrSLfqtY7V6diogg3TMWqGU23oVaNh6VbD/jPcLcVgJb3MiHG4bIKhrcthcl0K0CBtOstVw6rWINMlJWugB3FnjaU5iCUl4sRnyPlmdUNbRdgj2sGEkMl47XhnZfvR6h+PtHlmac1pP07GIRMhdd3RAeFwMckl9lnsQPfkZAS29leOkw8N161XbdPsdKodEztEu988GCMfguzh5L1XXVYXTbOqLikBWm7U4D4mE+a3ozSpsPuvX/zDQRJNGr+jLqrpOMIwjLSVUD5r7HNhDPq7veiZd/+YbQah6a1EtLaullTWeQnszU6UZwOsPOE3KGCoB0nA5k3pVvXSzVyaH0saEM3U9LpmNOCUGAoYNG0XQiM8gghTF27UdivYqTRqtolS0x6jOJ2Mz9Nx5ZCBugi2wbJE6uJZNg5VBPyGwgcLpdn4VGHBiQvDDcIIY98/L6k+60I9dMRUUFLBORVeG+KHsZJpvSip2UCcLO/d+8mhNoPoqTdUC1juwKI4kFSBy11cpEcKXHJwERj6dIHMXWQwZ/0ClxzG9GlY4JKDCypgyYWlj3gxknKQ+U0BSg5dvgi/KOooB5j7BCP41lxAk8OZ4tEuf1AjUeCSgwuLZfqVO33TVeCTg0/xfJffZWhRTnm4uJT4xQNeDjK/LEDJQPn/OKhCskj9aSExS+ZMhdyDSLan7/jBrZG+7r0JayZulFTJJPI/SSp26fbZtkMkor6YUC19Te980oUUUOxQNhOc53iZNzBFH9L5HzZAMPmtExxFnDgU34pYT99Up2D/yPb5+adLfL7W3Fl/ce/efwHj3HJ7
|
||||
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -1 +1 @@
|
||||
eNp9Vg1sE9cdh9CIbZoKdB2FrixXtxtblbPPn7GdpltiJ8FhTpw4IQkpzZ7vnn0X31fu3cV2sqwt0HXqWNtjVTe129pBsKsQAgg6KEmAjFQqtBtrx5CCtApNaM1EVxWKVlE02LuzDc4S7WSf7t3/6/f/vq25AaggThKXjnGiChVAq/iA9K05BfZrEKnbswJUWYkZibRE23drCjf7CKuqMvLbbEDmrJIMRcBZaUmwDdhtNAtUG36WeWiqGYlJTObCsm1DFgEiBBIQWfw9QxZawqZE1eK3dGIBok7hgLgeESzkEqz6vcdFAl/5e1TSFBr6CXv+GBLjkiIAQ7efiGqDGYJDhGe943HLYkKORYSaoAIFDrCGYIyXRAYuJulcRNJEaYg5CU6kWYgIxEoKDhmhsphgoLFUWhSJh9gvDUHFMryl0iJIDOTxi4Sski6JFDiRM7ggknF4YG9ev8U/ZFEzsiHYhySxF2H1AsB8pSfMc+eJgYhWONlAhoVqRZQycUDCMEyYicMkIgYQZAhJ5DP4ZtIT3AAUCWT6iSoJIDIEzanQpBXeGjoYK7YuKzi7isoZWRuyANPIQuPtWBIUAEgLMVQSKZaj8/H+/2gMkyqnmvHLe2S8yIcFqQonJizDlRaM1kwIWhyJUcQJbD4URIQUN41EI/WBUEMocNu/PKA+DeOLZ0yWPH4DAI6FgEryUdBnWC5iC9xGcBseUBSQsQwPG5nt1zgFMrjOixErxbxlnhrI/K+fUqwP0io+A4bhDBHAR0qyEAc8gtiKCISFKowYYVm/qmjQgILPEAhFoRwLAYNb/cMlq0ZYCan6+Pz23Q9oGuIihSItMTjW+r7EICdXEgyM80CFo7hnRWgOB300CaFMAh4nL5uX0g8AWeY52vTSZlTtWKHHScOxheRRo0ZIPBBEVT/cgkHUhmyRDJ4zImG3euxW+4E0iVTAiTyeGyQPMJ6sbNInSgkyoJNYCVmYYXo2LzxeyiMhfU8Y0C3ReSqBQrP6HqAIHteh0veKJqqcAPVcILLQXIF4x5zTandYvQfnKUYZkdb3mEF/q5TAQh7nkcxPU30iBlVgNealtWReWmWgIHhknk2oKhmSlrBp/XdUlpakJAf12au9vXS8NybUdDs76+0+r+COdgxGumJMCrbI0YBHSfY7+EiHS+sOJUErnY4FUgnSXuX0VvmoKqebtFspK44UaVXbEl3ugbpw7SYqCJrrI43xwRiAbdoAG2psSFDhDbW8VAU43ueNuewQdDUGqbiXjjWk3GHkVtLWzeIPaiWqzdvPB5iOViWsdGRSrdUERqcNcEyNK+iKapvSkXZ3fZKFDg6m6xpDstCJegN9nU2ovmHD4EYPH0kpqXAJPIfLTlIFhB7K5aWMa7xYUjwUEyqr7/b6fG8UJ+m2LA6ZqqGtI7h84Xvv5AoLZ1fLxjuVv3okiEtZn+qETCVBuYkGGCMclMNNOOx+h9vv9BCN4faxQMFM+6KVe7BdwX0dx5msL3ZKjmY1MQmZ0cCiPTJl9AjOpAEfrwcSpmUJQbKASh/rItvyq5YMBQ/lG5KUlAQQuUHTrD5lNktqMJ1iaI1h2IGUQPkGXU4uBjU6frgggqe1YQYDIgWk767yeccLlGK5jmJfKdJOkZT9WJpUcCh4TuBwPM17Yd8jfcSNg310IYMqJSH+Msi5zGxQx0s58FLFBWvYvqPG5fP5JhdnKqpyYhaf03VsPheCpWjsDgEdXchQULGLQmPpIjfJMfrsw/jQC6qq3LgrPJCCTqeT9lH45/ABr8cbhx6Xx/NWfliSqpFMGa9xEkEaf9yoGX22UgBpYzTVOO1upwd7Wm0sfF5jYFSLBSXDB1RNyArkJcDsp+MkDfBeJvP1p+eC3c214VDg911kaSGRLXL+wyonSkjk4vFsFCo4MfoozUsag2esArOBBrKttls/7KMYd9znise8HtpJebxkHZ5eRW23y27EGNA5wGPsA7R+iHXWWPwul9NSTQigxuvBaTI/v57O5pfn20tPVPzsS0vMaxn+37q1o21a/DV17+RnXz8/cans/bKJjX/Y16o8d66n5cG6J24stSX5dz3C6F9vDpXzq3dl22yPTWacr6w9EZ1Z8+L2nU/PpVdOdZw+8vkntZ99fOXK0JGzTz56+vylB177/JN/sS+sm3no1AMfRravOL+PmupY887c2tV3Z062usmrE3//cfTSxJfnfh50f7D5Gzv3hQY/eIl5lX+Vf+7hqYkD1YGympc+/lZsx8G/XHv+sTdnUt+Z/fbNX12bq5FWnbl8D31xx/3rT06trzh690rb65T/9d+OvfbIxQu36jpru6e3PTQuPXr5xQtW6iqf+kW2o2NaKO8v31pW8bUvvnLml64/9Z5Ndj/7x4vX14RXNG35x0d7W+Dmy+9N9356efLB8PG96xTp2tzp5TSoO7uK3h+tOJX76Q9/80/hjW2+Xf+Ovn2je8XLUtefK6eemr5rfNPYDcvNuv0vr/F+90TfvXfdt+7MM8fOnV0eipcH9557cuJ601Bz87H/VFwvX1kmfL+759mdMx/NvPvKqStNt94cEDKOr37zby33dC2vq7q5feYnK/qOr/v0xPDJ4+KGI3Nju68/dX/PfWvPbK0Y6Fn6vu+LZUaWli0hn/jRZDN+/i9JH4XO
|
||||
eNp9VmtsFNcV5lVBUlTVFaRJq8J4i00CnvU8dtdeI5eYtY0XsL1g1w8K3dydubMz9ryYO+PdtbEiIJUquT86aUSjVCQh2LvIGAewRQ0uRFUT59WSkLSJjExTwQ+qFIggoQ1Ugt6ZXZt17XalHc3c8/rO+c499+7PdEEDSZq6cEhSTWgAzsQfyN6fMeAeCyLzubQCTVHj+yONTc1HLEOaXCeapo4qSkuBLnk1HapA8nKaUtpFl3IiMEvxuy5D101/TONTlxaf7PEoECEQh8hTQfykx8NpOJZq4g9PKzYhNhkSUNciQoRSXDQ37lIJ/Ms+mzTL4GAFQWc/w6qgGQpwvFcQTVZ3ipAQEVjL7PLMZ8TMY7QFGlCRgOgYxmRN5eF8luw8li5Kx4wlJJUTISKQqBm4aIQpYoGDxlNCeAxNhk5mFoKGp3c3XlE0HsrOUlw3SZ9GKpIquZoQ6bhKMJoNgjV6PGZKd607kKZGEY6iAEc1/9NRy3vlIeIMSXcwOoZVKkq4kCDhICBcFrGMiAEEeUJT5RR+uPK41AVVArkpoxICqDzBSSZ0ZblVxwfvdRDoBubaMCWXwx4PcMPMB6AZW4McCG0ujhIiIUpctvz/H5Eb1pTMbDmzeblLuRIh05DUuKcXL2HYLknofwFymjuOUYSrEaEJbqymSE0oXBsOzaSaxdVhYZhCylXJpuHiwHVR0CyGci7d+DMoQzNA8oACwwApT2+vS/keSzIg7+yD6RrOwr/7v7xBfm7iWqwDcqazAnhecuyAHMmnRwAygk44FSjzOXIqxzn9ZhoWdHHhFQiUGdOMCAGPB8NfFxT0ixoy7eHZm/11wHEQNzNUOY3HJNjH492SXkLwUJCBCQfxBlehO0rswU4IdRLImNl01so+AXRdljg341Kns4dyA4F0MpwrHnQaiMTjQzXt0UYMoipcGknhqaQStNcf9PpPJElkAkmV8ZQhZYDxpHVXPp4v0AHXiZ2QuYlnp7PGw/k6GrIH6gHX2DTLJTA40R4AhhLwjeSvG5ZqSgq0M6HI3HA54cNwrJemvWUnZzlGKZWzB9yin8kXiFDGdJLZ2WuPx6AJvM509eZNV68ODAR/OysmNI0UyWk4tH2YSnOa1ilBe/J2NMoJ0ZhSabG8Gd0mhONt4Y4Ohm5goqEWth5GNm+uE7tam6JaVO+uboHeKp0i6TI24PP7GRa/eikv7aVJDSBBqKna1hII8F6l29fQyYFYCtS30+3xiKTxfG1bNayFlhBSpGqG3hRvSWBgotKhCfFkZHu54vXHuxEjJzq3b02KrfFWocanb99AYHRWl8RX/phNQBElqsq2dDFiOxXepjE7+IDFbe8oDwXrYQ1VTdV5YUphO9vz4NG+IEnlEAYoXznl/IanW0qGatwU7SNBn//o9MA9kMYlMy20vx+3L/zjO5nc8fRa49aHnf9YfzVuZftcs2iVEFSQ2IJnPEMxfvyo8FEVLE1srm8eCuXCNM/buSebDbzJBcxkzfROyXCipXZCfjA07x455+wRzKQDH58kJEzqGoJkDpU91EbuyB7MZLh6JLshSc2IA1XqdsPa59zNkuhOJnjO4nmxK6FQwW4fK8WgxQmjORM8zZ0wGBCpIPtIoLxsOCeZbtdBnCtmniIp+mySNHApZEmRcD3dZ+52gOx+Py722FwFU+uE+B6R8blsUOfzNfABjBvWif3QjS8YDP5ufqVpVyxWCbK+s7O1EMxHQzMKGpurkHPxGoWGktPapMTbk2vwR1SgWY71lZcFIFPO4I7meMEP+CAb5HmGCVLCmey8JE2HTB0f+SSCHL4KmSl7skQBSWc0VbK0nw3gTDc4lwPZ4mGTFavWnBzQBkI3oKwB/vVQLRkC+Ogmm9z+szPV7Q1V9eHQ6TYyv5HIRj17DcuoGlIlQUg3QQMTYw9ysmbxeMYaMI197ahqt0fLBaGMhYyfjnEB/E6Tm/D0mvY203b9zoDOABlj7+LsEZGt9FT4fKxnA6GAyvIApsm9rO1LZ0/VtxYeX923bIH7W4z/Dx78Yse4epX69vU7K98MfNq2r2jjlaPjZmt/MNO3aF9h6+3iFe/+cv2u5X94sKquq+SJM4uHwckR9sUnbhYWLKoLFT59Y82xT37zVQbeOXHvTsPlA/eTU+eVZ99QP6v0/1v/7Ab4hJ16f/26+7HLT37afWDbhZefWRlh/jk4eSN2btfx22/tXbAzc/Gr0Ql01pS2Hvxo78ulH99VXlIuXD/0w58VPHrrhWWFb4JkzystV5euiH34vWJy4snvP3fl1JolS+L3Hnnn8adOjS4fKWi+wFz6/b3DS6Srq5cXrSiaWvp8S2TFsdXtLQeLlh2cmHqBWl9V8P6PdpccTr4n9F272sw8c7lm8u1Th+hrd29duTg2sfRf3/nga/jTXw2MHVv03t/lL9ae+pJ69e3qn1873Hdz3aqv/7bzg5XjZaN7jiZ6Pzpw65u1vo3f/cb1tpce+cfNPmugVeLDxQ2XlhT95Vt36n5w7NeHissWPh/+/Av1frp3S8Ppu6vvFkQerTwyvPfp+s8rbo09duKND589vnvnquWhqc0TpwffDTy+99WPe/98fhANfPmne+pThcHiV1xOFi8Ye7H0IokJ+g+b7YS9
|
||||
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -1 +1 @@
|
||||
eNrtVn1wVNUVTxREQSrDRxUZ4RGa1cK+3beb7G6y6wPSkA80X23YEkdjfPv27u5z3777ePfuV5BWIjINhQ5PPlIm1AoJCQQCIpSxok1Q0xJrDDJlmC0j0trWtiPDlPpRinR63m4WJINTp+O0OpLZP/LuPffc3/md3zn3tHTHkEYkrOTukRSKNEGk8EE2tnRraFkUEbqqK4JoCPs7KsqWdEQ1KbUuRKnqtlqRYolLYUlFfkmwYC1ojVsFVbKoIXWBqmGVl5QAzneVqkIQGd/EJCnp9agmm1Q1/a9fIkLEJwWjgnGpSQNXGhIp4U1UojIifJWgBEtDgqSYAliLCJR/lIBdBiMP8LRkpw/7k/3dIST4IY5VPV6CNLYkiBSqpwou42PuMUATQB2UaCjqs4g4Yg1i2U8i8G1dmrWzfnNfiSgilbJlioj9khLUe4PNkmpm/CggCxR1Zbb1jrnWuT2lWFFQGoveE0ZIZQVZiqGdGiIqUIie6CJUoFHS0gmu0FAu2x1BhAAd+6txDPmZOgQhKYBUTmbxr+4Rr/jcIcqYoJ0yFtP06KkfZYP4YlC/F7BSgM/KSAnSkL6N606woiCGkN4jqjabjWNAUl2QEJCY3l1ZUqfhRHLviA2bIUfvBhuWyiSdyJVAmQasP/Pp8vuCkfCl0d/rR7Py2157fxbv6ZypPzcYFyEFmqDAOY2yBIlANE3qKTYiJFg4wds4Z0FRocvGeSClohz1o/qobxGOADPEw6gakrHg36Wh9HmK9dSvlzN5QQ1H1Tw3kxePNClIzjMzeeCvCfzBopMrLOI4WEKKX8WgAQKLD8IpSI9xJssX7AhhxMo4GAQ20hmPXM54zGZFMaCaLIAgkBDh4wWiJYMCjA01KIjGsRZuQpqGNRMB3UWEJoiOt4Kp9ZOm1qtMrTYLZ+HymBWNzIqsglOmSJwFSxYlqAUtkwS/xSCAdTpcxT6/zxVwsHGtOaYcAoWPVAZNqojFarql6t0KJooUCIyugE5VIKQzJmjA+OJR6TcvgbgoKxDWq1AtSijymxvYcqzFBc2P/CyUFMXmUozDEjKXRKFSNKk5reYDWQwgchUTKdNCIH2ypCAPE5CgaoUI4o2AQDFRmVoMce8GcLJknJNUfU9xkcVmKwIqbDZnbwa04VbDsp6ao2pSDCRpZiKAC3zEQH7+zMKIbLjeDHUslSKGllP5aR8ePyIin2eEDaIIYUJHVoyuwRXldcBZ6CbPZiNA2VroNGqh07hDf2kpMMFwDqYc+Rg7Z3cwtmJ3QYHbbmMqqpdcbkTPZ1wyEYkQMzPyYVy855Mp0vsEVZWlTJ+1GjR4GDEkaARRPkoDbFGvkEkL1EgQEX2HL0kRGd3/OuxOZwcIXU89DDrOaKuJ4v9YAQFBkqMaagqMPL6wB9pzwA6JwrWEjNpZsS/BwkrkirI6F5XVPNADaCE7hhI2phqWVpezVQLkpSTtguccLDDFGkx56gQa4q2eSiixWkVOetIFjzxlCRXaH+HrowowW8xUC9oIs3Y3x8HPYDYVHeWZrZCxT5CvdUGmQfBXt+n/8tqqCoQX1/Heend1ibsKJUAOFCvuQrul0OlmXTaL3e6OFXqYkauZEefMNTGkFtRkih3Kx4eqQJyUB2I5WxZ5feZ0PZBcL1EE70DCUw2ihsbOFzg57qrHaiB3/Kwf3pyT/rtx0/ra2leZSQN9E8bVvrX5X6e++9S6cftm8slS99efowvdJxwz+g4/MXinWLX2ocffbk/e+yfzTMf08MuT+Uk35y+a/bSrEn/rJ2W/KdhzgTt4iXuv/dLYFxbvnDBvjLbOPKb4fOp3Ne88Fv5Z6SF77i3rN3Mrg385cdddR1obBp1Pz3OdfiD34yNm9rR6Cv/2D5PF2tbiJ4cazty7Xa/u2zBUE/so2vtCyeIL/WMf3rfxTH7F3nZFRqfirNi/xXxg/+GZE/52MNm3Y+ubkwe83mmPbHmn/HvVbbOHvL/wvr3gV+Mvnagv3npmSfkMPP9d8g/788dXP9RfGhvccPyOY9MvDU899+L8nCk5E9cO5ObkfPrDjT/j3Ajd1ZA7MaGEKgvGUJqgvMnIOPSd65PhNSdD9bMORZ8Xuf+f2e9/HuaXfrrbnx7rAoD68iPaLYaiShj5rw9+n+PgF/FFipqvD35fjcHv+pD3VRzyZlwZ8rzrj9S8wt26umngtbHrX161UD6uTDl5/qbDiV3t5Z01rY33bUnEj9fsbvia/sG7w+eG57dP6Qk3/vWflfcF8FrHT2/84+B7w10VtfbbD7edfEu5+OJLa7Y5L+4OTLx9eIPjzsI3bpn23PfDNrV1Ox768/t940tySr7TMSfQYhIrxNdmPcPOPTh1+cqbGn9Q8u1dO8es3PRY+cTW2Y5JtuVzXRe3Pbuw7dE1N3zk/cBy2wFX7ocrblUuMDOKXp+Iz7r0eS1Pbjpf88ikZnrPN9pWL2t5v6zYd5t47NDj+dsmvMFWTn9zSv0vQ/ef/X3duA/r7nbRV8cfvePuLW39jqPHoj9+ZfgG35FN07Y+taZxZuPgg2fb58xKfbz57+NOLj/nPQhT378BkGvf4Q==
|
||||
eNrtVt1uG0UUpu/AA4wWYdrKY69/4ri2VlWU/oqkjZRULVCIxrvH9jSzO9uZWcdu1QtKxR1IK/UBgKRNCf0BgRBcILhBQgIewFzwLJzZtUMStWqBgoqUKIqyZ87MfOc735xzbm4NQGkuo0P3eWRAMd/gh759c0vB1QS0uXU3BNOXwcbpkysbieLjD/rGxK1yGaLSOl/jMQSclaTqldfLLOaluB8fj5WMPR515auz8zHrgf3WBR5l9kSJQhxn/wZcs7DDewmzlxYUHqXAN9orGG4EaG+BRb35PuNRoStVyIx3RaNfjtFDeGq02ZHB6IetPrAA47i1fUGDonM9iEw6ru3gI4ctaI2oe9z0k07Jl2G5J0WgQ/wuX5z6lY88mvN9iA09Gfky4FEvfdC7xuMiCaArmIG7+XK6cbR8dHteRhFkWNLtNYCYMsEHcE+BjpFCeO+uNswk+uYmHgW/HqJbIWiNdHyxKAcQkCXAkCJEKkZT/O/fRfiYkHTrzNySksPRwyH1md8Hmh+VbmGSqBF62//z7ju+kBruCelnNKbjD6fBvhgp2prEkG77caVScQnG8BDxGwydCoh6pp9+7GaJfBcpU8j6R0+W3wsW3P9Gf7/8NJXfJ+dfn+L9/aWXNwM8Nv3uIgRFUq2R874hVbdaJ5VGy51t1avk9OLKA5aDUkgH6PROZ2RA35+m0IxiSL9ncSx4rsCyZalN/D5TGoyXmC5tfmsT66MznoHwlKEafMynGaVjGrIhRWBexW3UmvXZittGjfgiCWA56ZyQISZAt0msQEgWfDm9F7MXS81zzeMGwSNoky5HSbEQPBQDRSoSYUoWzwZegDLbRkTUl3KNw+3xpYuLp+gC04ZaWrX2qjWKBFBLQHuJmb5Xbp/BxJ2PxKid4YX2yWGMItHechIhYXVyTg4mhFVbrou/lrBxsu9kelrIDhOPuyCPz9sr5r957cJpkGeXvAvLrcW51gIMUUBGRpjFUr3RorOVUrXaGtTbZHI1mRxOHothfPwcmHWp1rASdWCBo1Q9t+S6lSny5Xz3MrK9zA3gaxm2FzGVKH+v1nDdnYf/jX34bpOEXOsimXzETOsHebmjBg9HrY9fzfzbAWjfc6yDUyR9qc3Eku90pkVyXAjXqU0zDE0JrnIWlGwYdKZbP9aodzrdDu1fOTYTbA6YQpmd3fe0iisKmKFM0wuRUYk2+AQu0VNSrTMVQEAxbCOL85lWinMJViHFr2UC31+VNy3UjQhEOn7nOnEUWIGvGum0iLMeruICxuGgyldRhGhsuPWm66Kpy7hACle7k76La8jwDK7oJNPNvpUbD/J77RNQEq97JVZ8gA+4SEIMAPU+wBoQ5IbJo3IfDSmeEgKVcdbb080TJ8+98RnGILh9RzxO7x9rliqVZsnFv41Pc/zUyHT8M4bTUzKJnxoKREEssbJrNL6Fu7DM2j3TuocrbA2okL0eUp8JLdyp3INKGQYIRR/HKgEs9NZrfilHgc62qke5EldBKakKGjkI2SqWD6+MruXdruU9ruUKxuQ65Mbb5MbXQ7q7ZO3QsRVJHfFu9/PpKkxr76atvftb1Ua10djTrH479ON1p8OM38eqHgswyIvjFJ2sPzit645tQdr+M1ubmZ1tVBtTIw+c1tRWdCL0cYtO1nrwhJ3eg0dNIIRYx5FVx7JnUPS4Yo8R6Jlk6cAs7LP1TShs+X/cGnY7NAuj0G5kgqQiHsdWFVpxaaW5gtWiXmm5tTfRQTCbm4GFXKnWmzW3NlOvoTkjBW3N+rFm0ekmQmSZd57cpvGrvDs4XDFP24P6CWCYNfeMn12dedKN7SmWKRbJCPvQX0RxA3+ePHPIZxx5MSX2ueoCDGPB7Dw9NF7BlmF8rQdD7T8aauNnnfueVxKe79j6n8M/GEwPBtODwfSFHUwDMxtVDwbTg8H0eQ6mzdq+wfSrf3swnXSr3VbCNWFEy65BuQLJMmzjJqbPDOmDiDXpMp8LjhoFtILtrNBTmYKJ7BLBVA/IdEol2dSryeGFhUV9xPpKsqum6xKZsxfudd9z5A6EItlB+ZomiUa9Mg06v1CMCE4wSrCYrGPvQ2A4gORw9gLBELGNgmKiOOkGyDUJpJ+EmA3CIiZG2pIQBUQnYcimr7NoO4/pSIO1xi7a7rfjXrocXc7n0D8AkSIW2A==
|
||||
@@ -1 +1 @@
|
||||
eNrtWAt0E2UWbuX9EhaoAl1gKEtUyCSTtGnahAGypS2PvpZSqQulTmb+JEMnM8PMpE2oXaUCR14uAcrjFF1LSwuF8l6OCG5bKCtFscjKcrKsyMKCLiuyiyIignsnaUAQV84eD6yH5uScZP7H/e/97nfv/50prSlEkswKfOQmlleQRNEKPMgrSmskNNODZGVOtRspLoGpTE2eXOmR2IDsUhTRotcjXlfEFrAiYlhKJ0hOfZGeElmd6BJHc6yskDKiJNqlkSVREkQSfjnWzSqkQRP+laXQEpKjeCftolhe4xAkN6WQM2SB14Q8IcEJyVdlFxhfQ40LUQx4O6c2R0YSbnMiXvEHYm96gT2puiaDb05WcXnsOlpw650Cx8hwoEs/JbxO/9QWG00jUcGTeVpgWN7pr3POYkUtxiAHRymoOjTtrxyuH16bJPA8Cvriry1ASMQpji1E6yUkiwAUerFaVijFI5dWgSl0OBKvcSNZppxoW7pQiBgsC0FIPHjK+cL+z6ulb9lcR3OCjNZzAk0FnwNKOIj7CfBm8EgBJ3EO8U7F5a8garw4TdEu5K+lRYPBQGBAj2qAHejirxlny5IEr29z6xo8BIG/BtbgCicH0zUbgJEA29e+n0oPJNSfDJfeORim0trMiWF/T0ZE7VZxpQFoieJhn6TgMqIBTsXnD+BuyovDDtJAxMcmxJkNhBUSR3MeBmV77GMFNyAjWzFRQpxAMRskFNyvCP7A28VYjFMSPGKMBYspcufziIvRYjFgLx/swWA8EZdAEDCEeEYUINMyDE6FXR6JU/eE8YIZqgDhnOB0AhrBvLpv5rXQoEeFALU8GoJAlJssiqV1IS9gsZpzHilFglSQjyRJkDQysMtN5UN0pB6W6r+9VH/bUr1BR+iIGKwkDysJ8zSgcRfhsBJHXkWHZrIUo1MBwONN5kQ7Yzc7THiByxhn3AU8buW/4hMRLojBJuiv4QWZZx2OO3leJVKyXFVISYD4+DvSr50McSk4JeM5vCJ5ZAUx2lw8RZCKKIlBDA6FowjaJEEoYJHW5oF6kNhZwdLfEfaBYSGtMhtqB5A+juWRFXOwUJuUG5FqQMAYD6foVHJvBOc4Vt3Hiv5NiQk6gyEBoDAY4utCTqtmJYHzB4aKElsIlNRibvALbBQC/ZjQQCttiLoQdLgChQVcDgwL2rAySKbJGDVsIIVLkJXWEbU3EAkxlbAXesbWcAQoXAtVai1UqWf435wCSGCECUtBdsxIGE2YIdESG2sxGrDU9Mk3283rIZOYm5VlLdb6oB686dsp8tdTosixoZ6pV2GwYlD20AUU0qM48IQ6KpQWqBEnkv3r7D4FyQBVqE3gLOPfbRY4Nm6Wj3d7EoymOIFws2wi5Za86M5mWGkkEiqhHvyB6UD3EAXzFeEHC8VBsZxHQvmO1lsV5oCiJpiRPeCdLN8xU7LFi8OI+xYBq8YmZzxTC0FBElXCLA/kTklPwdMoSJ8taIIkTDgAiquAWrMoxUXqreOgEjN5zmcN9gVkTfaKLBCGzPbwkIBELJ2SWhNgtBAEfNUEBDx3WMZTOcFOcXc7INRHyNt79v94bFoqEsZnkTnZlnSbJQ15gTWKwFvijLq4eAtuNuiMRkthnBVrPRprNY7d1YfA6IxQT4Aqs6O04OUAwBKGsOfZod3ZAHI2qyAyjfJa04H70P/J2HiCuO3majoyeGHniOCnXfrL6Zn7ie7zvplwdujwzvHu5N2fHir7U1Pj2399f9L5OU1RpbpvSrqIHzdvP7nFOVeftu+0cVWUg2GvRHbfOzEmes2/eubteamLc9KE6s9G7hhle4qclnWB2f/zkrIZ7WdttS1HFX9emt3vrbSu59CIhjMXOpqu78766sbln2ms2xZVlAzoe73p17OnFx9cKC8vSzr7yG9PW6ymg3VL59Jc7NSkgV/+s4M83TbjhNzz0PnoxqmXX7lRHGM5PWIV9Xy/eWc+zR01YXe7pxc+eu6xyIiI77+KF/+wqgvevSzvEIaZk0Sguvosa1g+OA6XgEYUg3+hfVFuO+v0BItTI4EpCS49mdQorMIBIdKgJpPa9F6r3nv5HkTQfYT+wSjB/zMQfuoasW+bRvwRNSI9Q3QybRrx4dCI3xV/8fFt4u/hE38HIrveUn9lSzIzm7BeB+q7dcr8YMWNE08vXdxpyyDSl2R5bLsyxnLMFF2/58XmAXTaomkvfFjuG3lOO8jUv2Bfb7JX52Fjh7xqHif88pXk92M3XSV2Xic+Kb/e4Y3x67uNaC8t1rZPvBT4W8aZ5wp+n7TLGNllyQpitvMfxwYObJyf2xz/6gjzyWciv27U4ifFE8Jf/t6bzpyfOPdw7qmRa/3p9csOZxRe8dS9YRt/taHD9C3LTw1L3VzOc+hEEU43rNbu2LZnULd/7/TVr1vzXu8DOTlRz64+k/Kb9JVDDuf8IefD0W91vX4sO3HNqckp0cKoj+Qvja8fnTetIamwednRfkf6X2/pe3HvqIg+ET0WHfivulG4R90I3VWlu6xBXpGj1FeNXoXUqBmHvtOmDO+qDMV7FUU/FrgPRvvd9zDb1F2burul7hLNRndcm7p7WNVdQmybunsY1V30LXWXs6QxQ323l3/gUIcl++aM4Y7yfY5f6rjHu6E8pSpjft6E1d6ioxkbcx/1X/6o5WLLqPI+tQV5578aN8EhLDL9rt3Z5k9aqlMzjY/vWXn8A/7a3jcXVMRf2+jo8XjLMtOAuHe7RG1/vsAgzl8rHP748/qutgjbpMqhjlINnUofGvwaPnxn3+LZHfNesv1qw/r2s8ueS+kxf4ipl6F4uPlaxdYxK2cseORKzmVdzx3myC9KuvNXseiEd3oIF8z+EaVzyy5lPNtrlvLkL1bOm1n6eXKivSd9ZNcLwyq6vYuP6/9en+w/uiZeOJ3V6YusJ8xKU9eD/Z5YvbLBdPCIZ9X+lkfsjWVRa5YuyBuU1zz1QvnQwYGvV3zW6XjxxZydIPf+A38YqDU=
|
||||
eNrtWF1vG8cVrf9D2+fBFmETg0PukhRJk1gYguM4QSXbgGw4nxWGu7PLqWZn1jOz/LDChzhB3lKAQNHntlKsRLGTFi2K5qFoXwoUaPsD2If8ltzZJVlKsGOnVZ04ICCI3Jm7d86998y9B7x7NKBKMynO3WfCUEUCAw/6l3ePFL2dUW3eu5dQ05fhwZXLNw4yxWa6b0zaqVapqAzZHktpyEhFqrg6rJKUVdJ+epEzbXxNiQr6Ja1SJVMfPjlLmPG90uJTq8LE50TEQZ8wUYqkSojxf6alKBVIfAChxoc9GY7/etSnJAS07x3f1FThzZgKM53VlyjQ8xaaBmwxM/2sVwlkUo0lDzUc2K/eWthVX/hsMwhoavBlEciQiXj6IL7D0jIKacSJofeK7enB+er540tSCJpjmR7vUZpiwtmAfqSoTiFR9N172hCT6buH4Ir+6xw+SqjWJKa/25YDGqLrFEISgJSPF/jfvwfwIe3To5c3rys5Gn86wgEJ+hQXrqZHUApsuD4O/nP2hwGXmn7EZUDy55lZBPs0C3E0Rzo9DlLP81wESD8FlAYCxJyK2PSnv3bzcr0DiVGQ2189mkrfSAjPDJf++fcFlX5z7ScLvF987weHIbid/vkWDcuoVkfXAoNqbq2BvGbHbXUaHrqyfeMBKUApSAfV0w97Y0P1/UWhzDil07+QNOWsYFPVZqmLIHGQR+NnJsLtz235AjAGHwBPGaxpAFUz4+kMJ2SEAZjvuc16u9Hy3C4wIeBZSHey3osygQLoLkoV5ZKEv1+cGzJwpFnBX3iBM0G7KGJAHJJQH0qOIRUZNxWL5wAOADIdAyIcSLnH6C9mr97afglvEW2wTavWfq2OIQHYJqB7nZi+X+2+DIW7Jvi4m+Ol3cujlIFbfycTkLAGuioH84TVOq4LfzZhs+yUZ3yFyx7hDzugiM8/Sdn/8titK1S+ct2/udPZ3uxs0REQyEjRadQqjWYHt7xKrdYZNLpofjSaO0cPxTC7eJWaoVR70FV6dCu/G27Fdb0F8p3i7R3I9g4z1N8io+42lBLo79ebrru83n+y19tto4RpXUbzh5Ro/aBoXdiAc+D67LncvhtSHfiONXDKqC+1ma8UbzqLhjcrJUNsy0xHpkJvMxJWbBh4I2pcaDZ6vaiH9y7UAnU4IApo9sqpq1W+oSgxmGh8UxiVaQNX4FX8klRDokIaYgjbyPKlnCvlzQx6jWJ3coKf7rCHFuqBoHw6++k+chS1BN810ukgZ5jswgbE4QDLd4GEsNh0G23XhaWIMA4p3I3mkxL2IMMbsKOznDendiYPinPtFVASjvtRqtgALnAZJRAA8H0APSAsFuaXyv1shMFLQrFM82k8PXzx8tXXPoEYOLP3iKXT+xfaFc9rV1z43/y4wI+NnM7+AeHESmbpY0OhIkwl9G8Ni2/AW5ni9p1F34Mdskcxl3EMqc+Jliz788Cr0gFA0RehS1CS+MN6UClQgLHt3aJg4i5VSqqShhwkZBfah18F0+qqafWEadWDmFwHTd5Ckz+O8GrLWqbjSEgtWBT9drFLF7330Pbe0wPpoOa2IXnFhMAsnH7eqpFICjYexnovaInBbeHWSUsxcmJy/fvcD/cdoJaA75Ckfcdy1X6edzrOTaFoIGPB7sCQT4ktF8inDsonUsWZTMpOj5igD3Mj5bAFrzhlxyJjIqPWi1YyiqC5OR1vdcPBb7/twNv5sMrtctxMRNI+GWkI7zNbtY0ykC6G/l7QzbGzD+XDz1ndgVSlqT1m1QAOKPw6nTf2HQHe3LJjmOEWwRbYXZr7SYE1LHQ6rfpGq9WsNSdvTSaPnugfPF4c5iPcRvNc65J1bp91iYl8HThYStP8KwwLkvRYnOUXuKTAFWTcaL+Uw9T+EuVaNn4N2fjzJ9BcT7FEZysov2XBPetS8/trqbmWmt9dqZnuNYcba6m5lppnKTWbzVMa8m/7DxOCS3VnR1AuLhcCa7G4KrrKX6XQ5hAS6OOQVcdmzwDp59rNar4sLwdU4dRa3yTctv+H7cG0s4rRKFg3MoOkAh7HdhXsudhr34Bu0fA6bv11MODE1mZgIXu1Rrvu1jcadVjOkwJr7caFdtmJMs7zyjuPHtPwVF0NDnbM494B/oR0lA/3PD8rk3k+ja0XmykiQPUH5GuimEy+SvLKJ5S8UBJ7XXWJjlJO7I+tI+OXbBuG27oWtf+TqE2fVPedVRHOVrY+dfjfdWFaWwvTtTB9doXp4E5jY7AWpmthepbCtF0/JUz/8P8WpvNptbqKmEYEaRkZoCtFeYVt3Mj0iUF9ylONIhIwzoCjFFapnaw0VjmDkYwQJyqmaKFSUa56NXp+a2tbv2BtJVrp6bqCNu2BJ81PuFxCKKMlyh9rlGngK9FUFwfyMQIFozhJ0RBmHwADAVLAOQkEQoQxShXh5fk0gFyjUAZZAtVARBA+1jYJIkQ6SxKyuJ1lO3lMTxroNXbTTr+leeVN8WahQ78EN9bjsA==
|
||||
@@ -3,11 +3,16 @@
This guide walks through how to run the repository locally and check in your first code.
For a [development container](https://containers.dev/), see the [.devcontainer folder](https://github.com/langchain-ai/langchain/tree/master/.devcontainer).

## Dependency Management: `uv` and other env/dependency managers
## Dependency Management: Poetry and other env/dependency managers

This project utilizes [uv](https://docs.astral.sh/uv/) v0.5+ as a dependency manager.
This project utilizes [Poetry](https://python-poetry.org/) v1.7.1+ as a dependency manager.

Install `uv`: **[documentation on how to install it](https://docs.astral.sh/uv/getting-started/installation/)**.
❗Note: *Before installing Poetry*, if you use `Conda`, create and activate a new Conda env (e.g. `conda create -n langchain python=3.9`)

Install Poetry: **[documentation on how to install it](https://python-poetry.org/docs/#installation)**.

❗Note: If you use `Conda` or `Pyenv` as your environment/package manager, after installing Poetry,
tell Poetry to use the virtualenv python environment (`poetry config virtualenvs.prefer-active-python true`)
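Put together, a minimal sketch of that Conda + Poetry bootstrap looks like the following (the env name `langchain` and Python 3.9 mirror the example above and are illustrative):

```bash
# Create and activate a fresh Conda env before installing Poetry (names are illustrative)
conda create -n langchain python=3.9
conda activate langchain
# Point Poetry at the active interpreter instead of creating its own virtualenv
poetry config virtualenvs.prefer-active-python true
```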
## Different packages

@@ -32,7 +37,7 @@ cd libs/community
Install langchain-community development requirements (for running langchain, running examples, linting, formatting, tests, and coverage):

```bash
uv sync
poetry install --with lint,typing,test,test_integration
```

Then verify dependency installation:
@@ -41,6 +46,12 @@ Then verify dependency installation:
make test
```

If during installation you receive a `WheelFileValidationError` for `debugpy`, please make sure you are running
Poetry v1.6.1+. This bug was present in older versions of Poetry (e.g. 1.4.1) and has been resolved in newer releases.
If you are still seeing this bug on v1.6.1+, you may also try disabling "modern installation"
(`poetry config installer.modern-installation false`) and re-installing requirements.
See [this `debugpy` issue](https://github.com/microsoft/debugpy/issues/1246) for more details.
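If you do hit that `debugpy` error, the full workaround from the note above is just the two commands below (the dependency groups mirror the install command earlier in this guide):

```bash
# Disable Poetry's "modern installation" and reinstall the dev requirements
poetry config installer.modern-installation false
poetry install --with lint,typing,test,test_integration
```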
## Testing

**Note:** In `langchain`, `langchain-community`, and `langchain-experimental`, some test dependencies are optional. See the following section about optional dependencies.
@@ -68,6 +79,7 @@ If you are only developing `langchain_core` or `langchain_community`, you can si

```bash
cd libs/core
poetry install --with test
make test
```

@@ -75,6 +87,7 @@ Or:

```bash
cd libs/community
poetry install --with test
make test
```

@@ -166,7 +179,7 @@ ignore-words-list = 'momento,collison,ned,foor,reworkd,parth,whats,aapply,mysogy

`langchain-core` and partner packages **do not use** optional dependencies in this way.

You'll notice that `pyproject.toml` and `uv.lock` are **not** touched when you add optional dependencies below.
You'll notice that `pyproject.toml` and `poetry.lock` are **not** touched when you add optional dependencies below.

If you're adding a new dependency to Langchain, assume that it will be an optional dependency, and
that most users won't have it installed.
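A common way to keep such a dependency optional is to import it lazily inside the code path that needs it, so users without the package only see an error when they actually use the feature; a minimal sketch (the `faiss` package name is purely illustrative):

```python
def _import_faiss():
    """Import faiss lazily so it stays an optional dependency."""
    try:
        import faiss  # heavy optional dependency; only needed for this feature
    except ImportError as e:
        raise ImportError(
            "Could not import faiss. Please install it with `pip install faiss-cpu`."
        ) from e
    return faiss
```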
@@ -183,10 +196,18 @@ test makes use of lightweight fixtures to test the logic of the code.

## Adding a Jupyter Notebook

If you are adding a Jupyter Notebook example, you'll want to run with `test` dependencies:
If you are adding a Jupyter Notebook example, you'll want to install the optional `dev` dependencies.

To install dev dependencies:

```bash
uv run --group test jupyter notebook
poetry install --with dev
```

When you run `uv sync`, the `langchain` package is installed as editable in the virtualenv, so your new logic can be imported into the notebook.
Launch a notebook:

```bash
poetry run jupyter notebook
```

When you run `poetry install`, the `langchain` package is installed as editable in the virtualenv, so your new logic can be imported into the notebook.
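As a small illustration of the editable install, a notebook cell can import the package and pick up local edits without reinstalling (restart the kernel after changing code):

```python
# Local edits under libs/langchain are picked up on the next import/restart,
# because the package is installed in editable mode.
import langchain

print(langchain.__version__)
```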
@@ -270,7 +270,7 @@
    "\n",
    "import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
    "\n",
    "<ChatModelTabs overrideParams={{openai: {model: \"gpt-4\"}}} />\n"
    "<ChatModelTabs openaiParams={`model=\"gpt-4\"`} />\n"
   ]
  },
  {

@@ -354,7 +354,7 @@
    "\n",
    "<ChatModelTabs\n",
    "  customVarName=\"llm\"\n",
    "  overrideParams={{openai: {model: \"gpt-4-0125-preview\", kwargs: \"temperature=0\"}}}\n",
    "  openaiParams={`model=\"gpt-4-0125-preview\", temperature=0`}\n",
    "/>\n"
   ]
  },

@@ -179,7 +179,7 @@
    "\n",
    "<ChatModelTabs\n",
    "  customVarName=\"llm\"\n",
    "  overrideParams={{openai: {model: \"gpt-4o\", kwargs: \"temperature=0\"}}}\n",
    "  openaiParams={`model=\"gpt-4o\", temperature=0`}\n",
    "/>\n"
   ]
  },

@@ -167,7 +167,7 @@
    "\n",
    "<ChatModelTabs\n",
    "  customVarName=\"llm\"\n",
    "  overrideParams={{fireworks: {model: \"accounts/fireworks/models/firefunction-v1\", kwargs: \"temperature=0\"}}}\n",
    "  fireworksParams={`model=\"accounts/fireworks/models/firefunction-v1\", temperature=0`}\n",
    "/>\n",
    "\n",
    "We can use the `bind_tools()` method to handle converting\n",

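The `bind_tools()` call referenced in that cell is the generic way to attach tool schemas to a chat model before invoking it; a minimal sketch of the pattern (the Fireworks model class and the `add` tool are illustrative):

```python
from langchain_core.tools import tool
from langchain_fireworks import ChatFireworks


@tool
def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b


llm = ChatFireworks(model="accounts/fireworks/models/firefunction-v1", temperature=0)
llm_with_tools = llm.bind_tools([add])

# The model now emits tool calls for inputs that match the tool's schema.
ai_msg = llm_with_tools.invoke("What is 2 + 3?")
print(ai_msg.tool_calls)
```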
@@ -99,6 +99,8 @@
    "\n",
    "prompt = ChatPromptTemplate.from_template(\"what is {a} + {b}\")\n",
    "\n",
    "chain1 = prompt | model\n",
    "\n",
    "chain = (\n",
    "    {\n",
    "        \"a\": itemgetter(\"foo\") | RunnableLambda(length_function),\n",

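That hunk only shows the head of the chain being built; a self-contained sketch of the overall pattern it illustrates (the helper function and model choice are illustrative, not the exact notebook code):

```python
from operator import itemgetter

from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableLambda
from langchain_openai import ChatOpenAI


def length_function(text: str) -> int:
    # Plain Python function, wrapped as a Runnable below.
    return len(text)


model = ChatOpenAI(model="gpt-4o", temperature=0)
prompt = ChatPromptTemplate.from_template("what is {a} + {b}")

# The dict maps each prompt variable to a runnable computed from the input.
chain = (
    {
        "a": itemgetter("foo") | RunnableLambda(length_function),
        "b": itemgetter("bar") | RunnableLambda(length_function),
    }
    | prompt
    | model
)

# chain.invoke({"foo": "bar", "bar": "gah"}) asks the model "what is 3 + 3".
```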
@@ -200,12 +200,7 @@
    "\n",
    "<ChatModelTabs\n",
    "  customVarName=\"llm\"\n",
    "  overrideParams={{\n",
    "    fireworks: {\n",
    "      model: \"accounts/fireworks/models/firefunction-v1\",\n",
    "      kwargs: \"temperature=0\",\n",
    "    }\n",
    "  }}\n",
    "  fireworksParams={`model=\"accounts/fireworks/models/firefunction-v1\", temperature=0`}\n",
    "/>\n"
   ]
  },

@@ -33,7 +33,7 @@
    "\n",
    "<ChatModelTabs\n",
    "  customVarName=\"llm\"\n",
    "  overrideParams={{fireworks: {model: \"accounts/fireworks/models/firefunction-v1\", kwargs: \"temperature=0\"}}}\n",
    "  fireworksParams={`model=\"accounts/fireworks/models/firefunction-v1\", temperature=0`}\n",
    "/>\n"
   ]
  },

@@ -46,7 +46,7 @@
    "\n",
    "<ChatModelTabs\n",
    "  customVarName=\"llm\"\n",
    "  overrideParams={{fireworks: {model: \"accounts/fireworks/models/firefunction-v1\", kwargs: \"temperature=0\"}}}\n",
    "  fireworksParams={`model=\"accounts/fireworks/models/firefunction-v1\", temperature=0`}\n",
    "/>\n"
   ]
  },

@@ -91,7 +91,7 @@
    "\n",
    "import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
    "\n",
    "<ChatModelTabs overrideParams={{openai: {model: \"gpt-4\"}}} />\n",
    "<ChatModelTabs openaiParams={`model=\"gpt-4\"`} />\n",
    "\n",
    "To illustrate the idea, we'll use `phi3` via Ollama, which does **NOT** have native support for tool calling. If you'd like to use `Ollama` as well follow [these instructions](/docs/integrations/chat/ollama/)."
   ]

@@ -1,206 +0,0 @@
{
 "cells": [
  {
   "cell_type": "raw",
   "id": "afaf8039",
   "metadata": {},
   "source": [
    "---\n",
    "sidebar_label: Abso\n",
    "---"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e49f1e0d",
   "metadata": {},
   "source": [
    "# ChatAbso\n",
    "\n",
    "This will help you getting started with ChatAbso [chat models](https://python.langchain.com/docs/concepts/chat_models/). For detailed documentation of all ChatAbso features and configurations head to the [API reference](https://python.langchain.com/api_reference/en/latest/chat_models/langchain_abso.chat_models.ChatAbso.html).\n",
    "\n",
    "- You can find the full documentation for the Abso router [here] (https://abso.ai)\n",
    "\n",
    "## Overview\n",
    "### Integration details\n",
    "\n",
    "| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/abso) | Package downloads | Package latest |\n",
    "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
    "| [ChatAbso](https://python.langchain.com/api_reference/en/latest/chat_models/langchain_abso.chat_models.ChatAbso.html) | [langchain-abso](https://python.langchain.com/api_reference/en/latest/abso_api_reference.html) | ❌ | ❌ | ❌ |  |  |\n",
    "\n",
    "## Setup\n",
    "To access ChatAbso models you'll need to create an OpenAI account, get an API key, and install the `langchain-abso` integration package.\n",
    "\n",
    "### Credentials\n",
    "\n",
    "- TODO: Update with relevant info.\n",
    "\n",
    "Head to (TODO: link) to sign up to ChatAbso and generate an API key. Once you've done this set the ABSO_API_KEY environment variable:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "433e8d2b-9519-4b49-b2c4-7ab65b046c94",
   "metadata": {},
   "outputs": [],
   "source": [
    "import getpass\n",
    "import os\n",
    "\n",
    "if not os.getenv(\"OPENAI_API_KEY\"):\n",
    "    os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"Enter your OpenAI API key: \")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0730d6a1-c893-4840-9817-5e5251676d5d",
   "metadata": {},
   "source": [
    "### Installation\n",
    "\n",
    "The LangChain ChatAbso integration lives in the `langchain-abso` package:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "652d6238-1f87-422a-b135-f5abbb8652fc",
   "metadata": {},
   "outputs": [],
   "source": [
    "%pip install -qU langchain-abso"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a38cde65-254d-4219-a441-068766c0d4b5",
   "metadata": {},
   "source": [
    "## Instantiation\n",
    "\n",
    "Now we can instantiate our model object and generate chat completions:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_abso import ChatAbso\n",
    "\n",
    "llm = ChatAbso(fast_model=\"gpt-4o\", slow_model=\"o3-mini\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2b4f3e15",
   "metadata": {},
   "source": [
    "## Invocation\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "62e0dbc3",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "messages = [\n",
    "    (\n",
    "        \"system\",\n",
    "        \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
    "    ),\n",
    "    (\"human\", \"I love programming.\"),\n",
    "]\n",
    "ai_msg = llm.invoke(messages)\n",
    "ai_msg"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
   "metadata": {},
   "outputs": [],
   "source": [
    "print(ai_msg.content)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | llm\n",
|
||||
"chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"input_language\": \"English\",\n",
|
||||
" \"output_language\": \"German\",\n",
|
||||
" \"input\": \"I love programming.\",\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all ChatAbso features and configurations head to the API reference: https://python.langchain.com/api_reference/en/latest/chat_models/langchain_abso.chat_models.ChatAbso.html"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -509,7 +509,7 @@
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all ChatDatabricks features and configurations head to the API reference: https://api-docs.databricks.com/python/databricks-ai-bridge/latest/databricks_langchain.html#databricks_langchain.ChatDatabricks"
|
||||
"For detailed documentation of all ChatDatabricks features and configurations head to the API reference: https://python.langchain.com/api_reference/databricks/chat_models/langchain_databricks.chat_models.ChatDatabricks.html"
|
||||
]
|
||||
}
|
||||
],
|
||||
|
||||
@@ -31,7 +31,7 @@
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/deepseek) | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| [ChatDeepSeek](https://python.langchain.com/api_reference/deepseek/chat_models/langchain_deepseek.chat_models.ChatDeepSeek.html) | [langchain-deepseek](https://python.langchain.com/api_reference/deepseek/) | ❌ | beta | ✅ |  |  |\n",
|
||||
"| [ChatDeepSeek](https://python.langchain.com/api_reference/deepseek/chat_models/langchain_deepseek.chat_models.ChatDeepSeek.html) | [langchain-deepseek-official](https://python.langchain.com/api_reference/deepseek/) | ❌ | beta | ✅ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
@@ -40,7 +40,7 @@
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To access DeepSeek models you'll need to create a/an DeepSeek account, get an API key, and install the `langchain-deepseek` integration package.\n",
|
||||
"To access DeepSeek models you'll need to create a/an DeepSeek account, get an API key, and install the `langchain-deepseek-official` integration package.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
@@ -87,7 +87,7 @@
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain DeepSeek integration lives in the `langchain-deepseek` package:"
|
||||
"The LangChain DeepSeek integration lives in the `langchain-deepseek-official` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -97,7 +97,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-deepseek"
|
||||
"%pip install -qU langchain-deepseek-official"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -1,354 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Goodfire\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e49f1e0d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# ChatGoodfire\n",
|
||||
"\n",
|
||||
"This will help you getting started with Goodfire [chat models](/docs/concepts/chat_models). For detailed documentation of all ChatGoodfire features and configurations head to the [PyPI project page](https://pypi.org/project/langchain-goodfire/), or go directly to the [Goodfire SDK docs](https://docs.goodfire.ai/sdk-reference/example). All of the Goodfire-specific functionality (e.g. SAE features, variants, etc.) is available via the main `goodfire` package. This integration is a wrapper around the Goodfire SDK.\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | JS support | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| [ChatGoodfire](https://python.langchain.com/api_reference/goodfire/chat_models/langchain_goodfire.chat_models.ChatGoodfire.html) | [langchain-goodfire](https://python.langchain.com/api_reference/goodfire/) | ❌ | ❌ | ❌ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ✅ | ✅ | ✅ | ❌ | \n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To access Goodfire models you'll need to create a/an Goodfire account, get an API key, and install the `langchain-goodfire` integration package.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"Head to [Goodfire Settings](https://platform.goodfire.ai/organization/settings/api-keys) to sign up to Goodfire and generate an API key. Once you've done this set the GOODFIRE_API_KEY environment variable."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "433e8d2b-9519-4b49-b2c4-7ab65b046c94",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"if not os.getenv(\"GOODFIRE_API_KEY\"):\n",
|
||||
" os.environ[\"GOODFIRE_API_KEY\"] = getpass.getpass(\"Enter your Goodfire API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "72ee0c4b-9764-423a-9dbf-95129e185210",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain Goodfire integration lives in the `langchain-goodfire` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install -qU langchain-goodfire"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate chat completions:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"None of PyTorch, TensorFlow >= 2.0, or Flax have been found. Models won't be available and only tokenizers, configuration and file/data utilities can be used.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import goodfire\n",
|
||||
"from langchain_goodfire import ChatGoodfire\n",
|
||||
"\n",
|
||||
"base_variant = goodfire.Variant(\"meta-llama/Llama-3.3-70B-Instruct\")\n",
|
||||
"\n",
|
||||
"llm = ChatGoodfire(\n",
|
||||
" model=base_variant,\n",
|
||||
" temperature=0,\n",
|
||||
" max_completion_tokens=1000,\n",
|
||||
" seed=42,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2b4f3e15",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invocation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "62e0dbc3",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"J'adore la programmation.\", additional_kwargs={}, response_metadata={}, id='run-8d43cf35-bce8-4827-8935-c64f8fb78cd0-0', usage_metadata={'input_tokens': 51, 'output_tokens': 39, 'total_tokens': 90})"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"messages = [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"I love programming.\"),\n",
|
||||
"]\n",
|
||||
"ai_msg = await llm.ainvoke(messages)\n",
|
||||
"ai_msg"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"J'adore la programmation.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(ai_msg.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Ich liebe das Programmieren. How can I help you with programming today?', additional_kwargs={}, response_metadata={}, id='run-03d1a585-8234-46f1-a8df-bf9143fe3309-0', usage_metadata={'input_tokens': 46, 'output_tokens': 46, 'total_tokens': 92})"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | llm\n",
|
||||
"await chain.ainvoke(\n",
|
||||
" {\n",
|
||||
" \"input_language\": \"English\",\n",
|
||||
" \"output_language\": \"German\",\n",
|
||||
" \"input\": \"I love programming.\",\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Goodfire-specific functionality\n",
|
||||
"\n",
|
||||
"To use Goodfire-specific functionality such as SAE features and variants, you can use the `goodfire` package directly."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "3aef9e0a",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"FeatureGroup([\n",
|
||||
" 0: \"The assistant should adopt the persona of a pirate\",\n",
|
||||
" 1: \"The assistant should roleplay as a pirate\",\n",
|
||||
" 2: \"The assistant should engage with pirate-themed content or roleplay as a pirate\",\n",
|
||||
" 3: \"The assistant should roleplay as a character\",\n",
|
||||
" 4: \"The assistant should roleplay as a specific character\",\n",
|
||||
" 5: \"The assistant should roleplay as a game character or NPC\",\n",
|
||||
" 6: \"The assistant should roleplay as a human character\",\n",
|
||||
" 7: \"Requests for the assistant to roleplay or pretend to be something else\",\n",
|
||||
" 8: \"Requests for the assistant to roleplay or pretend to be something\",\n",
|
||||
" 9: \"The assistant is being assigned a role or persona to roleplay\"\n",
|
||||
"])"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"client = goodfire.Client(api_key=os.environ[\"GOODFIRE_API_KEY\"])\n",
|
||||
"\n",
|
||||
"pirate_features = client.features.search(\n",
|
||||
" \"assistant should roleplay as a pirate\", base_variant\n",
|
||||
")\n",
|
||||
"pirate_features"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "52f03a00",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Why did the scarecrow win an award? Because he was outstanding in his field! Arrr! Hope that made ye laugh, matey!', additional_kwargs={}, response_metadata={}, id='run-7d8bd30f-7f80-41cb-bdb6-25c29c22a7ce-0', usage_metadata={'input_tokens': 35, 'output_tokens': 60, 'total_tokens': 95})"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"pirate_variant = goodfire.Variant(\"meta-llama/Llama-3.3-70B-Instruct\")\n",
|
||||
"\n",
|
||||
"pirate_variant.set(pirate_features[0], 0.4)\n",
|
||||
"pirate_variant.set(pirate_features[1], 0.3)\n",
|
||||
"\n",
|
||||
"await llm.ainvoke(\"Tell me a joke\", model=pirate_variant)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all ChatGoodfire features and configurations head to the [API reference](https://python.langchain.com/api_reference/goodfire/chat_models/langchain_goodfire.chat_models.ChatGoodfire.html)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.8"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -210,7 +210,7 @@
|
||||
"id": "96ed13d4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Instead of `model_id`, you can also pass the `deployment_id` of the previously [deployed model with reference to a Prompt Template](https://cloud.ibm.com/apidocs/watsonx-ai#deployments-text-chat)."
|
||||
"Instead of `model_id`, you can also pass the `deployment_id` of the previously tuned model. The entire model tuning workflow is described in [Working with TuneExperiment and PromptTuner](https://ibm.github.io/watsonx-ai-python-sdk/pt_working_with_class_and_prompt_tuner.html)."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -228,31 +228,6 @@
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3d29767c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For certain requirements, there is an option to pass the IBM's [`APIClient`](https://ibm.github.io/watsonx-ai-python-sdk/base.html#apiclient) object into the `ChatWatsonx` class."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "0ae9531e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from ibm_watsonx_ai import APIClient\n",
|
||||
"\n",
|
||||
"api_client = APIClient(...)\n",
|
||||
"\n",
|
||||
"chat = ChatWatsonx(\n",
|
||||
" model_id=\"ibm/granite-34b-code-instruct\",\n",
|
||||
" watsonx_client=api_client,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f571001d",
|
||||
@@ -473,7 +448,9 @@
|
||||
"source": [
|
||||
"## Tool calling\n",
|
||||
"\n",
|
||||
"### ChatWatsonx.bind_tools()"
|
||||
"### ChatWatsonx.bind_tools()\n",
|
||||
"\n",
|
||||
"Please note that `ChatWatsonx.bind_tools` is on beta state, so we recommend using `mistralai/mistral-large` model."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -586,7 +563,7 @@
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "langchain_ibm",
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
|
||||
@@ -17,7 +17,7 @@ If you'd like to contribute an integration, see [Contributing integrations](/doc
|
||||
|
||||
import ChatModelTabs from "@theme/ChatModelTabs";
|
||||
|
||||
<ChatModelTabs overrideParams={{openai: {model: "gpt-4o-mini"}}} />
|
||||
<ChatModelTabs openaiParams={`model="gpt-4o-mini"`} />
|
||||
|
||||
```python
|
||||
model.invoke("Hello, world!")
|
||||
|
||||
@@ -19,7 +19,7 @@
|
||||
"source": [
|
||||
"# ChatSambaNovaCloud\n",
|
||||
"\n",
|
||||
"This will help you getting started with SambaNovaCloud [chat models](/docs/concepts/chat_models/). For detailed documentation of all ChatSambaNovaCloud features and configurations head to the [API reference](https://docs.sambanova.ai/cloud/docs/get-started/overview).\n",
|
||||
"This will help you getting started with SambaNovaCloud [chat models](/docs/concepts/chat_models/). For detailed documentation of all ChatSambaNovaCloud features and configurations head to the [API reference](https://python.langchain.com/api_reference/sambanova/chat_models/langchain_sambanova.ChatSambaNovaCloud.html).\n",
|
||||
"\n",
|
||||
"**[SambaNova](https://sambanova.ai/)'s** [SambaNova Cloud](https://cloud.sambanova.ai/) is a platform for performing inference with open-source models\n",
|
||||
"\n",
|
||||
@@ -28,7 +28,7 @@
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | JS support | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| [ChatSambaNovaCloud](https://docs.sambanova.ai/cloud/docs/get-started/overview) | [langchain-sambanova](https://python.langchain.com/docs/integrations/providers/sambanova/) | ❌ | ❌ | ❌ |  |  |\n",
|
||||
"| [ChatSambaNovaCloud](https://python.langchain.com/api_reference/sambanova/chat_models/langchain_sambanova.ChatSambaNovaCloud.html) | [langchain-community](https://python.langchain.com/api_reference/community/index.html) | ❌ | ❌ | ❌ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"\n",
|
||||
@@ -545,7 +545,7 @@
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all SambaNovaCloud features and configurations head to the API reference: https://docs.sambanova.ai/cloud/docs/get-started/overview"
|
||||
"For detailed documentation of all ChatSambaNovaCloud features and configurations head to the API reference: https://python.langchain.com/api_reference/sambanova/chat_models/langchain_sambanova.ChatSambaNovaCloud.html"
|
||||
]
|
||||
}
|
||||
],
|
||||
|
||||
@@ -19,7 +19,7 @@
|
||||
"source": [
|
||||
"# ChatSambaStudio\n",
|
||||
"\n",
|
||||
"This will help you getting started with SambaStudio [chat models](/docs/concepts/chat_models). For detailed documentation of all ChatStudio features and configurations head to the [API reference](https://docs.sambanova.ai/sambastudio/latest/index.html).\n",
|
||||
"This will help you getting started with SambaStudio [chat models](/docs/concepts/chat_models). For detailed documentation of all ChatStudio features and configurations head to the [API reference](https://python.langchain.com/api_reference/sambanova/chat_models/langchain_sambanova.chat_models.sambanova.ChatSambaStudio.html).\n",
|
||||
"\n",
|
||||
"**[SambaNova](https://sambanova.ai/)'s** [SambaStudio](https://docs.sambanova.ai/sambastudio/latest/sambastudio-intro.html) SambaStudio is a rich, GUI-based platform that provides the functionality to train, deploy, and manage models in SambaNova [DataScale](https://sambanova.ai/products/datascale) systems.\n",
|
||||
"\n",
|
||||
@@ -28,7 +28,7 @@
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | JS support | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| [ChatSambaStudio](https://docs.sambanova.ai/sambastudio/latest/index.html) | [langchain-sambanova](https://python.langchain.com/docs/integrations/providers/sambanova/) | ❌ | ❌ | ❌ |  |  |\n",
|
||||
"| [ChatSambaStudio](https://python.langchain.com/api_reference/sambanova/chat_models/langchain_sambanova.chat_models.sambanova.ChatSambaStudio.html) | [langchain-community](https://python.langchain.com/api_reference/community/index.html) | ❌ | ❌ | ❌ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"\n",
|
||||
@@ -483,7 +483,7 @@
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all SambaStudio features and configurations head to the API reference: https://docs.sambanova.ai/sambastudio/latest/api-ref-landing.html"
|
||||
"For detailed documentation of all ChatSambaStudio features and configurations head to the API reference: https://python.langchain.com/api_reference/sambanova/chat_models/langchain_sambanova.sambanova.chat_models.ChatSambaStudio.html"
|
||||
]
|
||||
}
|
||||
],
|
||||
|
||||
@@ -2,9 +2,7 @@
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "xwiDq5fOuoRn"
|
||||
},
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Apify Dataset\n",
|
||||
"\n",
|
||||
@@ -22,63 +20,33 @@
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "qRW2-mokuoRp",
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet langchain langchain-apify langchain-openai"
|
||||
"%pip install --upgrade --quiet apify-client"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "8jRVq16LuoRq"
|
||||
},
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"First, import `ApifyDatasetLoader` into your source code:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {
|
||||
"id": "umXQHqIJuoRq"
|
||||
},
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_apify import ApifyDatasetLoader\n",
|
||||
"from langchain_community.document_loaders import ApifyDatasetLoader\n",
|
||||
"from langchain_core.documents import Document"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "NjGwKy59vz1X"
|
||||
},
|
||||
"source": [
|
||||
"Find your [Apify API token](https://console.apify.com/account/integrations) and [OpenAI API key](https://platform.openai.com/account/api-keys) and initialize these into environment variable:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {
|
||||
"id": "AvzNtyCxwDdr"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"APIFY_API_TOKEN\"] = \"your-apify-api-token\"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = \"your-openai-api-key\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "d1O-KL48uoRr"
|
||||
},
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Then provide a function that maps Apify dataset record fields to LangChain `Document` format.\n",
|
||||
"\n",
|
||||
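As a rough sketch of such a mapping function (the `text` and `url` field names are assumptions; use whatever fields your Actor actually writes to the dataset), a simple lambda that builds a `Document` from each dataset item is enough:

```python
from langchain_core.documents import Document

# Hypothetical dataset fields; adjust "text" and "url" to your Actor's output schema.
dataset_mapping_function = lambda dataset_item: Document(
    page_content=dataset_item["text"],
    metadata={"source": dataset_item["url"]},
)
```

This function is then passed to `ApifyDatasetLoader`, as shown in the cells below.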
@@ -96,10 +64,8 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {
|
||||
"id": "m1SpA7XZuoRr"
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = ApifyDatasetLoader(\n",
|
||||
@@ -112,10 +78,8 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {
|
||||
"id": "0hWX7ABsuoRs"
|
||||
},
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"data = loader.load()"
|
||||
@@ -123,9 +87,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "EJCVFVKNuoRs"
|
||||
},
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## An example with question answering\n",
|
||||
"\n",
|
||||
@@ -134,26 +96,21 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"metadata": {
|
||||
"id": "sNisJKzZuoRt"
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.indexes import VectorstoreIndexCreator\n",
|
||||
"from langchain_apify import ApifyWrapper\n",
|
||||
"from langchain_community.utilities import ApifyWrapper\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"from langchain_core.vectorstores import InMemoryVectorStore\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"from langchain_openai.embeddings import OpenAIEmbeddings"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"metadata": {
|
||||
"id": "qcfmnbdDuoRu"
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = ApifyDatasetLoader(\n",
|
||||
@@ -166,47 +123,27 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"metadata": {
|
||||
"id": "8b0xzKJxuoRv"
|
||||
},
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"index = VectorstoreIndexCreator(\n",
|
||||
" vectorstore_cls=InMemoryVectorStore, embedding=OpenAIEmbeddings()\n",
|
||||
").from_loaders([loader])"
|
||||
"index = VectorstoreIndexCreator(embedding=OpenAIEmbeddings()).from_loaders([loader])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"metadata": {
|
||||
"id": "7zPXGsVFwUGA"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = ChatOpenAI(model=\"gpt-4o-mini\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 23,
|
||||
"metadata": {
|
||||
"id": "ecWrdM4guoRv"
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query = \"What is Apify?\"\n",
|
||||
"result = index.query_with_sources(query, llm=llm)"
|
||||
"result = index.query_with_sources(query, llm=OpenAI())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "QH8r44e9uoRv",
|
||||
"outputId": "361fe050-f75d-4d5a-c327-5e7bd190fba5"
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
@@ -225,9 +162,6 @@
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": []
|
||||
},
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
@@ -247,5 +181,5 @@
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0
|
||||
}
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
|
||||
@@ -443,7 +443,6 @@
|
||||
"llm = HuggingFaceEndpoint(\n",
|
||||
" repo_id=GEN_MODEL_ID,\n",
|
||||
" huggingfacehub_api_token=HF_TOKEN,\n",
|
||||
" task=\"text-generation\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
|
||||
File diff suppressed because one or more lines are too long
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -55,7 +55,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = ReadTheDocsLoader(\"rtdocs\")"
|
||||
"loader = ReadTheDocsLoader(\"rtdocs\", features=\"html.parser\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -195,7 +195,7 @@
|
||||
"id": "96ed13d4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Instead of `model_id`, you can also pass the `deployment_id` of the previously tuned model. The entire model tuning workflow is described in [Working with TuneExperiment and PromptTuner](https://ibm.github.io/watsonx-ai-python-sdk/pt_tune_experiment_run.html)."
|
||||
"Instead of `model_id`, you can also pass the `deployment_id` of the previously tuned model. The entire model tuning workflow is described [here](https://ibm.github.io/watsonx-ai-python-sdk/pt_working_with_class_and_prompt_tuner.html)."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -420,7 +420,7 @@
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "langchain_ibm",
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
|
||||
@@ -65,7 +65,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!CMAKE_ARGS=\"-DGGML_CUDA=on\" FORCE_CMAKE=1 pip install llama-cpp-python"
|
||||
"!CMAKE_ARGS=\"-DLLAMA_CUBLAS=on\" FORCE_CMAKE=1 pip install llama-cpp-python"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -81,7 +81,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!CMAKE_ARGS=\"-DGGML_CUDA=on\" FORCE_CMAKE=1 pip install --upgrade --force-reinstall llama-cpp-python --no-cache-dir"
|
||||
"!CMAKE_ARGS=\"-DLLAMA_CUBLAS=on\" FORCE_CMAKE=1 pip install --upgrade --force-reinstall llama-cpp-python --no-cache-dir"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -149,9 +149,9 @@
|
||||
"\n",
|
||||
"```\n",
|
||||
"set FORCE_CMAKE=1\n",
|
||||
"set CMAKE_ARGS=-DGGML_CUDA=OFF\n",
|
||||
"set CMAKE_ARGS=-DLLAMA_CUBLAS=OFF\n",
|
||||
"```\n",
|
||||
"If you have an NVIDIA GPU make sure `DGGML_CUDA` is set to `ON`\n",
|
||||
"If you have an NVIDIA GPU make sure `DLLAMA_CUBLAS` is set to `ON`\n",
|
||||
"\n",
|
||||
"#### Compiling and installing\n",
|
||||
"\n",
|
||||
|
||||
@@ -135,7 +135,6 @@
|
||||
" compartment_id=\"MY_OCID\",\n",
|
||||
" auth_type=\"SECURITY_TOKEN\",\n",
|
||||
" auth_profile=\"MY_PROFILE\", # replace with your profile name\n",
|
||||
" auth_file_location=\"MY_CONFIG_FILE_LOCATION\", # replace with file location where profile name configs present\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@@ -160,7 +159,6 @@
|
||||
" service_endpoint=\"https://inference.generativeai.us-chicago-1.oci.oraclecloud.com\",\n",
|
||||
" compartment_id=\"DEDICATED_COMPARTMENT_OCID\",\n",
|
||||
" auth_profile=\"MY_PROFILE\", # replace with your profile name,\n",
|
||||
" auth_file_location=\"MY_CONFIG_FILE_LOCATION\", # replace with file location where profile name configs present\n",
|
||||
" provider=\"MODEL_PROVIDER\", # e.g., \"cohere\" or \"meta\"\n",
|
||||
" context_size=\"MODEL_CONTEXT_SIZE\", # e.g., 128000\n",
|
||||
")"
|
||||
|
||||
@@ -1,14 +0,0 @@
|
||||
# Abso
|
||||
|
||||
[Abso](https://abso.ai/#router) is an open-source LLM proxy that automatically routes requests between fast and slow models based on prompt complexity. It uses various heuristics to choose the proper model. It's very fast and has low latency.
|
||||
|
||||
|
||||
## Installation and setup
|
||||
|
||||
```bash
|
||||
pip install langchain-abso
|
||||
```
|
||||
|
||||
## Chat Model
|
||||
|
||||
See usage details [here](/docs/integrations/chat/abso)
|
||||
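A minimal usage sketch, assuming the `ChatAbso` class from the package above (the fast/slow model names are illustrative; the proxy decides per request which one to use):

```python
from langchain_abso import ChatAbso

# Example model pair; pick any fast/slow combination your Abso router supports.
llm = ChatAbso(fast_model="gpt-4o", slow_model="o3-mini")
print(llm.invoke("Summarize what an LLM proxy does in one sentence.").content)
```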
@@ -14,34 +14,20 @@ blogs, or knowledge bases.
|
||||
|
||||
## Installation and Setup
|
||||
|
||||
- Install the LangChain Apify package for Python with:
|
||||
```bash
|
||||
pip install langchain-apify
|
||||
```
|
||||
- Install the Apify API client for Python with `pip install apify-client`
|
||||
- Get your [Apify API token](https://console.apify.com/account/integrations) and either set it as
|
||||
an environment variable (`APIFY_API_TOKEN`) or pass it as `apify_api_token` in the constructor.
|
||||
an environment variable (`APIFY_API_TOKEN`) or pass it to the `ApifyWrapper` as `apify_api_token` in the constructor.
|
||||
|
||||
## Tool
|
||||
|
||||
You can use the `ApifyActorsTool` to run Apify Actors with agents.
|
||||
|
||||
```python
|
||||
from langchain_apify import ApifyActorsTool
|
||||
```
|
||||
|
||||
See [this notebook](/docs/integrations/tools/apify_actors) for example usage.
|
||||
|
||||
For more information on how to use this tool, visit [the Apify integration documentation](https://docs.apify.com/platform/integrations/langgraph).
|
||||
|
||||
## Wrapper
|
||||
## Utility
|
||||
|
||||
You can use the `ApifyWrapper` to run Actors on the Apify platform.
|
||||
|
||||
```python
|
||||
from langchain_apify import ApifyWrapper
|
||||
from langchain_community.utilities import ApifyWrapper
|
||||
```
|
||||
|
||||
For more information on how to use this wrapper, see [the Apify integration documentation](https://docs.apify.com/platform/integrations/langchain).
|
||||
For more information on this wrapper, see [the API reference](https://python.langchain.com/api_reference/community/utilities/langchain_community.utilities.apify.ApifyWrapper.html).
|
||||
|
||||
|
||||
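A rough sketch of running an Actor through the wrapper (the Actor ID, its `run_input`, and the dataset field names are placeholders; check the Actor's documentation for its actual input and output schema):

```python
from langchain_community.utilities import ApifyWrapper
from langchain_core.documents import Document

apify = ApifyWrapper()  # reads APIFY_API_TOKEN from the environment

# Run a crawler Actor and map each dataset item to a LangChain Document.
loader = apify.call_actor(
    actor_id="apify/website-content-crawler",
    run_input={"startUrls": [{"url": "https://python.langchain.com"}]},
    dataset_mapping_function=lambda item: Document(
        page_content=item.get("text", ""), metadata={"source": item.get("url", "")}
    ),
)
docs = loader.load()
```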
## Document loader
|
||||
@@ -49,10 +35,7 @@ For more information on how to use this wrapper, see [the Apify integration docu
|
||||
You can also use our `ApifyDatasetLoader` to get data from an Apify dataset.
|
||||
|
||||
```python
|
||||
from langchain_apify import ApifyDatasetLoader
|
||||
from langchain_community.document_loaders import ApifyDatasetLoader
|
||||
```
|
||||
|
||||
For a more detailed walkthrough of this loader, see [this notebook](/docs/integrations/document_loaders/apify_dataset).
|
||||
|
||||
|
||||
Source code for this integration can be found in the [LangChain Apify repository](https://github.com/apify/langchain-apify).
|
||||
|
||||
@@ -103,7 +103,14 @@ See [MLflow LangChain Integration](/docs/integrations/providers/mlflow_tracking)
|
||||
|
||||
SQLDatabase
|
||||
-----------
|
||||
To connect to Databricks SQL or query structured data, see the [Databricks structured retriever tool documentation](https://docs.databricks.com/en/generative-ai/agent-framework/structured-retrieval-tools.html#table-query-tool). To create an agent using the SQL UDF created above, see the [Databricks UC Integration](https://docs.unitycatalog.io/ai/integrations/langchain/).
|
||||
You can connect to Databricks SQL using the SQLDatabase wrapper of LangChain.
|
||||
```
|
||||
from langchain.sql_database import SQLDatabase
|
||||
|
||||
db = SQLDatabase.from_databricks(catalog="samples", schema="nyctaxi")
|
||||
```
|
||||
|
||||
See [Databricks SQL Agent](https://docs.databricks.com/en/large-language-models/langchain.html#databricks-sql-agent) for how to connect Databricks SQL with your LangChain Agent as a powerful querying tool.
|
||||
|
||||
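A minimal sketch of pairing that `SQLDatabase` with a SQL agent (the chat model is a placeholder, and `from_databricks` assumes you are running inside a Databricks notebook or have the connection environment variables set):

```python
from langchain_community.agent_toolkits import create_sql_agent
from langchain_community.utilities import SQLDatabase
from langchain_openai import ChatOpenAI  # any tool-calling chat model works here

db = SQLDatabase.from_databricks(catalog="samples", schema="nyctaxi")
llm = ChatOpenAI(model="gpt-4o-mini")

agent = create_sql_agent(llm=llm, db=db, agent_type="tool-calling", verbose=True)
agent.invoke({"input": "What is the longest trip distance recorded?"})
```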
Open Models
|
||||
-----------
|
||||
|
||||
34
docs/docs/integrations/providers/falkordb.mdx
Normal file
@@ -0,0 +1,34 @@
|
||||
# FalkorDB
|
||||
|
||||
>[FalkorDB](https://www.falkordb.com/) is the creator of [FalkorDB](https://docs.falkordb.com/),
|
||||
> a low-latency Graph Database that delivers knowledge to GenAI.
|
||||
|
||||
|
||||
## Installation and Setup
|
||||
|
||||
See [installation instructions here](/docs/integrations/graphs/falkordb/).
|
||||
|
||||
|
||||
## Graphs
|
||||
|
||||
See a [usage example](/docs/integrations/graphs/falkordb).
|
||||
|
||||
```python
|
||||
from langchain_community.graphs import FalkorDBGraph
|
||||
```
|
||||
|
||||
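As a rough sketch (assumes a FalkorDB server running locally on the default port; the database name is an example):

```python
from langchain_community.graphs import FalkorDBGraph

# Connects to a local FalkorDB instance; adjust host, port, and credentials as needed.
graph = FalkorDBGraph(database="movies", host="localhost", port=6379)
print(graph.query("MATCH (n) RETURN count(n)"))
```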
## Chains
|
||||
|
||||
See a [usage example](/docs/integrations/graphs/falkordb).
|
||||
|
||||
```python
|
||||
from langchain_community.chains.graph_qa.falkordb import FalkorDBQAChain
|
||||
```
|
||||
|
||||
## Memory
|
||||
|
||||
See a [usage example](/docs/integrations/memory/falkordb_chat_message_history).
|
||||
|
||||
```python
|
||||
from langchain_falkordb import FalkorDBChatMessageHistory
|
||||
```
|
||||
@@ -1,14 +0,0 @@
|
||||
# Goodfire
|
||||
|
||||
[Goodfire](https://www.goodfire.ai/) is a research lab focused on AI safety and
|
||||
interpretability.
|
||||
|
||||
## Installation and Setup
|
||||
|
||||
```bash
|
||||
pip install langchain-goodfire
|
||||
```
|
||||
|
||||
## Chat models
|
||||
|
||||
See detail on available chat models [here](/docs/integrations/chat/goodfire).
|
||||
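A minimal instantiation sketch, assuming the async usage shown in the integration notebook (the model variant is one of Goodfire's hosted Llama variants):

```python
import goodfire
from langchain_goodfire import ChatGoodfire

base_variant = goodfire.Variant("meta-llama/Llama-3.3-70B-Instruct")
llm = ChatGoodfire(model=base_variant)

# Run inside an async context (e.g. a notebook cell); the examples use the async API.
response = await llm.ainvoke("Say hello in French.")
print(response.content)
```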
@@ -1,22 +0,0 @@
|
||||
# Graph RAG
|
||||
|
||||
## Overview
|
||||
|
||||
[Graph RAG](https://datastax.github.io/graph-rag/) provides a retriever interface
|
||||
that combines **unstructured** similarity search on vectors with **structured**
|
||||
traversal of metadata properties. This enables graph-based retrieval over **existing**
|
||||
vector stores.
|
||||
|
||||
## Installation and setup
|
||||
|
||||
```bash
|
||||
pip install langchain-graph-retriever
|
||||
```
|
||||
|
||||
## Retrievers
|
||||
|
||||
```python
|
||||
from langchain_graph_retriever import GraphRetriever
|
||||
```
|
||||
|
||||
For more information, see the [Graph RAG Integration Guide](/docs/integrations/retrievers/graph_rag).
|
||||
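A minimal sketch of wiring the retriever over an existing vector store (the metadata field names, toy documents, and strategy settings are illustrative; see the project page for the full option set):

```python
from graph_retriever.strategies import Eager
from langchain_core.documents import Document
from langchain_core.vectorstores import InMemoryVectorStore
from langchain_graph_retriever import GraphRetriever
from langchain_openai import OpenAIEmbeddings

# Tiny toy store; any supported vector store whose documents carry metadata works the same way.
store = InMemoryVectorStore.from_documents(
    [
        Document(page_content="Capybaras are large rodents.", metadata={"habitat": "wetlands"}),
        Document(page_content="Herons hunt fish in shallow water.", metadata={"habitat": "wetlands"}),
    ],
    OpenAIEmbeddings(),
)

retriever = GraphRetriever(
    store=store,
    edges=[("habitat", "habitat")],          # traverse documents sharing the same habitat
    strategy=Eager(k=5, start_k=1, max_depth=2),
)
docs = retriever.invoke("what animals live in water?")
```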
@@ -1,129 +0,0 @@
|
||||
# LangFair: Use-Case Level LLM Bias and Fairness Assessments
|
||||
|
||||
LangFair is a comprehensive Python library designed for conducting bias and fairness assessments of large language model (LLM) use cases. The LangFair [repository](https://github.com/cvs-health/langfair) includes a comprehensive framework for [choosing bias and fairness metrics](https://github.com/cvs-health/langfair/tree/main#-choosing-bias-and-fairness-metrics-for-an-llm-use-case), along with [demo notebooks](https://github.com/cvs-health/langfair/tree/main/examples) and a [technical playbook](https://arxiv.org/abs/2407.10853) that discusses LLM bias and fairness risks, evaluation metrics, and best practices.
|
||||
|
||||
Explore our [documentation site](https://cvs-health.github.io/langfair/) for detailed instructions on using LangFair.
|
||||
|
||||
## ⚡ Quickstart Guide
|
||||
### (Optional) Create a virtual environment for using LangFair
|
||||
We recommend creating a new virtual environment using venv before installing LangFair. To do so, please follow instructions [here](https://docs.python.org/3/library/venv.html).
|
||||
|
||||
### Installing LangFair
|
||||
The latest version can be installed from PyPI:
|
||||
|
||||
```bash
|
||||
pip install langfair
|
||||
```
|
||||
|
||||
### Usage Examples
|
||||
Below are code samples illustrating how to use LangFair to assess bias and fairness risks in text generation and summarization use cases. The below examples assume the user has already defined a list of prompts from their use case, `prompts`.
|
||||
|
||||
##### Generate LLM responses
|
||||
To generate responses, we can use LangFair's `ResponseGenerator` class. First, we must create a `langchain` LLM object. Below we use `ChatVertexAI`, but **any of [LangChain’s LLM classes](https://js.langchain.com/docs/integrations/chat/) may be used instead**. Note that `InMemoryRateLimiter` is used to avoid rate limit errors.
|
||||
```python
|
||||
from langchain_google_vertexai import ChatVertexAI
|
||||
from langchain_core.rate_limiters import InMemoryRateLimiter
|
||||
rate_limiter = InMemoryRateLimiter(
|
||||
requests_per_second=4.5, check_every_n_seconds=0.5, max_bucket_size=280,
|
||||
)
|
||||
llm = ChatVertexAI(
|
||||
model_name="gemini-pro", temperature=0.3, rate_limiter=rate_limiter
|
||||
)
|
||||
```
|
||||
We can use `ResponseGenerator.generate_responses` to generate 25 responses for each prompt, as is convention for toxicity evaluation.
|
||||
```python
|
||||
from langfair.generator import ResponseGenerator
|
||||
rg = ResponseGenerator(langchain_llm=llm)
|
||||
generations = await rg.generate_responses(prompts=prompts, count=25)
|
||||
responses = generations["data"]["response"]
|
||||
duplicated_prompts = generations["data"]["prompt"] # so prompts correspond to responses
|
||||
```
|
||||
|
||||
##### Compute toxicity metrics
|
||||
Toxicity metrics can be computed with `ToxicityMetrics`. Note that use of `torch.device` is optional and should be used if a GPU is available to speed up toxicity computation.
|
||||
```python
|
||||
# import torch # uncomment if GPU is available
|
||||
# device = torch.device("cuda") # uncomment if GPU is available
|
||||
from langfair.metrics.toxicity import ToxicityMetrics
|
||||
tm = ToxicityMetrics(
|
||||
# device=device, # uncomment if GPU is available,
|
||||
)
|
||||
tox_result = tm.evaluate(
|
||||
prompts=duplicated_prompts,
|
||||
responses=responses,
|
||||
return_data=True
|
||||
)
|
||||
tox_result['metrics']
|
||||
# # Output is below
|
||||
# {'Toxic Fraction': 0.0004,
|
||||
# 'Expected Maximum Toxicity': 0.013845130120171235,
|
||||
# 'Toxicity Probability': 0.01}
|
||||
```
|
||||
|
||||
##### Compute stereotype metrics
|
||||
Stereotype metrics can be computed with `StereotypeMetrics`.
|
||||
```python
|
||||
from langfair.metrics.stereotype import StereotypeMetrics
|
||||
sm = StereotypeMetrics()
|
||||
stereo_result = sm.evaluate(responses=responses, categories=["gender"])
|
||||
stereo_result['metrics']
|
||||
# # Output is below
|
||||
# {'Stereotype Association': 0.3172750176745329,
|
||||
# 'Cooccurrence Bias': 0.44766333654278373,
|
||||
# 'Stereotype Fraction - gender': 0.08}
|
||||
```
|
||||
|
||||
##### Generate counterfactual responses and compute metrics
|
||||
We can generate counterfactual responses with `CounterfactualGenerator`.
|
||||
```python
|
||||
from langfair.generator.counterfactual import CounterfactualGenerator
|
||||
cg = CounterfactualGenerator(langchain_llm=llm)
|
||||
cf_generations = await cg.generate_responses(
|
||||
prompts=prompts, attribute='gender', count=25
|
||||
)
|
||||
male_responses = cf_generations['data']['male_response']
|
||||
female_responses = cf_generations['data']['female_response']
|
||||
```
|
||||
|
||||
Counterfactual metrics can be easily computed with `CounterfactualMetrics`.
|
||||
```python
|
||||
from langfair.metrics.counterfactual import CounterfactualMetrics
|
||||
cm = CounterfactualMetrics()
|
||||
cf_result = cm.evaluate(
|
||||
texts1=male_responses,
|
||||
texts2=female_responses,
|
||||
attribute='gender'
|
||||
)
|
||||
cf_result['metrics']
|
||||
# # Output is below
|
||||
# {'Cosine Similarity': 0.8318708,
|
||||
# 'RougeL Similarity': 0.5195852482361165,
|
||||
# 'Bleu Similarity': 0.3278433712872481,
|
||||
# 'Sentiment Bias': 0.0009947145187601957}
|
||||
```
|
||||
|
||||
##### Alternative approach: Semi-automated evaluation with `AutoEval`
|
||||
To streamline assessments for text generation and summarization use cases, the `AutoEval` class conducts a multi-step process that completes all of the aforementioned steps with two lines of code.
|
||||
```python
|
||||
from langfair.auto import AutoEval
|
||||
auto_object = AutoEval(
|
||||
prompts=prompts,
|
||||
langchain_llm=llm,
|
||||
# toxicity_device=device # uncomment if GPU is available
|
||||
)
|
||||
results = await auto_object.evaluate()
|
||||
results['metrics']
|
||||
# # Output is below
|
||||
# {'Toxicity': {'Toxic Fraction': 0.0004,
|
||||
# 'Expected Maximum Toxicity': 0.013845130120171235,
|
||||
# 'Toxicity Probability': 0.01},
|
||||
# 'Stereotype': {'Stereotype Association': 0.3172750176745329,
|
||||
# 'Cooccurrence Bias': 0.44766333654278373,
|
||||
# 'Stereotype Fraction - gender': 0.08,
|
||||
# 'Expected Maximum Stereotype - gender': 0.60355167388916,
|
||||
# 'Stereotype Probability - gender': 0.27036},
|
||||
# 'Counterfactual': {'male-female': {'Cosine Similarity': 0.8318708,
|
||||
# 'RougeL Similarity': 0.5195852482361165,
|
||||
# 'Bleu Similarity': 0.3278433712872481,
|
||||
# 'Sentiment Bias': 0.0009947145187601957}}}
|
||||
```
|
||||
@@ -1,110 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"metadata": {
|
||||
"id": "afaf8039"
|
||||
},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Nimble\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "72ee0c4b-9764-423a-9dbf-95129e185210",
|
||||
"metadata": {
|
||||
"id": "72ee0c4b-9764-423a-9dbf-95129e185210"
|
||||
},
|
||||
"source": [
|
||||
"# Nimble\n",
|
||||
"\n",
|
||||
" [Nimble](https://www.linkedin.com/company/nimbledata) is the first business external data platform, making data decision-making easier than ever, with our award-winning AI-powered data structuring technology Nimble connects business users with the public web knowledge.\n",
|
||||
"We empower businesses with mission-critical real-time external data to unlock advanced business intelligence, price comparison, and other public data for sales and marketing. We translate data into immediate business value.\n",
|
||||
"\n",
|
||||
"If you'd like to learn more about Nimble, visit us at [nimbleway.com](https://www.nimbleway.com/).\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"## Retrievers:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": "### NimbleSearchRetriever",
|
||||
"metadata": {
|
||||
"id": "AuMFgVFrKbNH"
|
||||
},
|
||||
"id": "AuMFgVFrKbNH"
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Enables developers to build RAG applications and AI Agents that can search, access, and retrieve online information from anywhere on the web.\n",
|
||||
"\n",
|
||||
"We need to install the `langchain-nimble` python package."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "sFlPjZX9KdK6"
|
||||
},
|
||||
"id": "sFlPjZX9KdK6"
|
||||
},
|
||||
{
|
||||
"metadata": {},
|
||||
"cell_type": "code",
|
||||
"outputs": [],
|
||||
"execution_count": null,
|
||||
"source": "%pip install -U langchain-nimble",
|
||||
"id": "65f237c852aa3885"
|
||||
},
|
||||
{
|
||||
"metadata": {},
|
||||
"cell_type": "markdown",
|
||||
"source": "See a [usage example](/docs/integrations/retrievers/nimble/).",
|
||||
"id": "77bd7b9a6a8e381b"
|
||||
},
|
||||
{
|
||||
"metadata": {},
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"```python\n",
|
||||
"from langchain_nimble import NimbeSearchRetriever\n",
|
||||
"```"
|
||||
],
|
||||
"id": "511f9d569c21a5d2"
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": "Note that authentication is required, please refer to the [Setup section in the documentation](/docs/integrations/retrievers/nimble/#setup).",
|
||||
"metadata": {
|
||||
"id": "WfwnI_RS8PO5"
|
||||
},
|
||||
"id": "WfwnI_RS8PO5"
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.4"
|
||||
},
|
||||
"colab": {
|
||||
"provenance": []
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -1,3 +1,4 @@
|
||||
|
||||
# PaymanAI
|
||||
|
||||
PaymanAI provides functionality to send and receive payments (fiat and crypto) on behalf of an AI Agent. To get started:
|
||||
@@ -23,16 +24,16 @@ These can be wrapped as **LangChain Tools** for an LLM-based agent to call them
|
||||
|
||||
| Class | Package | Serializable | JS support | Package latest |
|
||||
| :--- | :--- | :---: | :---: | :--- |
|
||||
| PaymanAI | `langchain-payman-tool` | ❌ | ❌ | [PyPI Version] |
|
||||
| PaymanAI | `langchain_community` | ❌ | ❌ | [PyPI Version] |
|
||||
|
||||
If you're simply calling the PaymanAI SDK, you can do it directly or via the **Tool** interface in LangChain.
|
||||
|
||||
## Setup
|
||||
|
||||
1. **Install** the PaymanAI tool package:
|
||||
1. **Install** the `langchain-community` (or equivalent) package:
|
||||
|
||||
```bash
|
||||
pip install langchain-payman-tool
|
||||
pip install --quiet -U langchain-community
|
||||
```
|
||||
|
||||
2. **Install** the PaymanAI SDK:
|
||||
@@ -53,7 +54,7 @@ Your `PAYMAN_API_SECRET` should be the secret key from app.paymanai.com. The `PA
|
||||
Here is an example of instantiating a PaymanAI tool. If you have multiple Payman methods, you can create multiple tools.
|
||||
|
||||
```python
|
||||
from langchain_payman_tool.tool import PaymanAI
|
||||
from langchain_community.tools.langchain_payman_tool.tool import PaymanAI
|
||||
|
||||
# Instantiate the PaymanAI tool (example)
|
||||
tool = PaymanAI(
|
||||
@@ -103,7 +104,7 @@ You can bind a PaymanAI tool to a LangChain agent or chain that supports tool-ca
|
||||
1. **Sign up** at app.paymanai.com to get your **API Key**.
|
||||
2. **Install** dependencies:
|
||||
```bash
|
||||
pip install paymanai langchain-payman-tool
|
||||
pip install paymanai langchain-community
|
||||
```
|
||||
3. **Export** environment variables:
|
||||
```bash
|
||||
@@ -111,4 +112,4 @@ You can bind a PaymanAI tool to a LangChain agent or chain that supports tool-ca
|
||||
export PAYMAN_ENVIRONMENT="sandbox"
|
||||
```
|
||||
4. **Instantiate** a PaymanAI tool, passing your desired name/description.
|
||||
5. **Call** the tool with `.invoke(...)` or integrate it into a chain or agent.
|
||||
5. **Call** the tool with `.invoke(...)` or integrate it into a chain or agent.
|
||||
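A minimal sketch of binding the tool to a tool-calling chat model (the model choice and the request text are placeholders; `tool` is the PaymanAI tool instantiated earlier in this guide):

```python
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4o-mini")

# The model decides when to call the PaymanAI tool and with what arguments.
llm_with_tools = llm.bind_tools([tool])
ai_msg = llm_with_tools.invoke("Send a 5 USD test payment to my sandbox payee")
print(ai_msg.tool_calls)
```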
@@ -81,13 +81,6 @@
|
||||
"llm.invoke(\"Tell me a joke about artificial intelligence.\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For a more detailed walkthrough of the ChatSambaNovaCloud component, see [this notebook](https://python.langchain.com/docs/integrations/chat/sambanova/)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
@@ -100,13 +93,6 @@
|
||||
"llm.invoke(\"Tell me a joke about artificial intelligence.\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For a more detailed walkthrough of the ChatSambaStudio component, see [this notebook](https://python.langchain.com/docs/integrations/chat/sambastudio/)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -130,14 +116,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For a more detailed walkthrough of the SambaStudioEmbeddings component, see [this notebook](https://python.langchain.com/docs/integrations/text_embedding/sambanova/)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"API Reference [langchain-sambanova](https://docs.sambanova.ai/cloud/api-reference)"
|
||||
"API Reference [langchain-sambanova](https://python.langchain.com/api_reference/sambanova/index.html)"
|
||||
]
|
||||
}
|
||||
],
|
||||
|
||||
@@ -1,379 +0,0 @@
---
sidebar_label: Graph RAG
description: Graph traversal over any Vector Store using document metadata.
---

import ChatModelTabs from "@theme/ChatModelTabs";
import EmbeddingTabs from "@theme/EmbeddingTabs";
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';

# Graph RAG

This guide provides an introduction to Graph RAG. For detailed documentation of all
supported features and configurations, refer to the
[Graph RAG Project Page](https://datastax.github.io/graph-rag/).

## Overview

The `GraphRetriever` from the `langchain-graph-retriever` package provides a LangChain
[retriever](/docs/concepts/retrievers/) that combines **unstructured** similarity search
on vectors with **structured** traversal of metadata properties. This enables graph-based
retrieval over an **existing** vector store.

### Integration details

| Retriever | Source | PyPI Package | Latest | Project Page |
| :--- | :--- | :---: | :---: | :---: |
| GraphRetriever | [github.com/datastax/graph-rag](https://github.com/datastax/graph-rag/tree/main/packages/langchain-graph-retriever) | [langchain-graph-retriever](https://pypi.org/project/langchain-graph-retriever/) |  | [Graph RAG](https://datastax.github.io/graph-rag/) |

## Benefits

* [**Link based on existing metadata:**](https://datastax.github.io/graph-rag/get-started/)
  Use existing metadata fields without additional processing. Retrieve more from an
  existing vector store!

* [**Change links on demand:**](https://datastax.github.io/graph-rag/get-started/edges/)
  Edges can be specified on-the-fly, allowing different relationships to be traversed
  based on the question.

* [**Pluggable Traversal Strategies:**](https://datastax.github.io/graph-rag/get-started/strategies/)
  Use built-in traversal strategies like Eager or MMR, or define custom logic to select
  which nodes to explore.

* [**Broad compatibility:**](https://datastax.github.io/graph-rag/get-started/adapters/)
  Adapters are available for a variety of vector stores with support for additional
  stores easily added.

## Setup

### Installation

This retriever lives in the `langchain-graph-retriever` package.

```bash
pip install -qU langchain-graph-retriever
```

## Instantiation

The following examples will show how to perform graph traversal over some sample
Documents about animals.

### Prerequisites

<details>
<summary>Toggle for Details</summary>
<div>
1. Ensure you have Python 3.10+ installed

1. Install the following package that provides sample data.
   ```bash
   pip install -qU graph_rag_example_helpers
   ```

1. Download the test documents:
   ```python
   from graph_rag_example_helpers.datasets.animals import fetch_documents
   animals = fetch_documents()
   ```

1. <EmbeddingTabs/>
</div>
</details>

### Populating the Vector store

This section shows how to populate a variety of vector stores with the sample data.

For help on choosing one of the vector stores below, or to add support for your
vector store, consult the documentation about
[Adapters and Supported Stores](https://datastax.github.io/graph-rag/guide/adapters/).

<Tabs groupId="vector-store" queryString>
<TabItem value="astra-db" label="AstraDB" default>
<div style={{ paddingLeft: '30px' }}>
Install the `langchain-graph-retriever` package with the `astra` extra:

```bash
pip install "langchain-graph-retriever[astra]"
```

Then create a vector store and load the test documents:

```python
from langchain_astradb import AstraDBVectorStore

vector_store = AstraDBVectorStore.from_documents(
    documents=animals,
    embedding=embeddings,
    collection_name="animals",
    api_endpoint=ASTRA_DB_API_ENDPOINT,
    token=ASTRA_DB_APPLICATION_TOKEN,
)
```
For the `ASTRA_DB_API_ENDPOINT` and `ASTRA_DB_APPLICATION_TOKEN` credentials,
consult the [AstraDB Vector Store Guide](/docs/integrations/vectorstores/astradb).

:::note
For faster initial testing, consider using the **InMemory** Vector Store.
:::
</div>
</TabItem>
<TabItem value="cassandra" label="Apache Cassandra">
<div style={{ paddingLeft: '30px' }}>
Install the `langchain-graph-retriever` package with the `cassandra` extra:

```bash
pip install "langchain-graph-retriever[cassandra]"
```

Then create a vector store and load the test documents:

```python
from langchain_community.vectorstores.cassandra import Cassandra
from langchain_graph_retriever.transformers import ShreddingTransformer

vector_store = Cassandra.from_documents(
    documents=list(ShreddingTransformer().transform_documents(animals)),
    embedding=embeddings,
    table_name="animals",
)
```

For help creating a Cassandra connection, consult the
[Apache Cassandra Vector Store Guide](/docs/integrations/vectorstores/cassandra#connection-parameters)

:::note
Apache Cassandra doesn't support searching in nested metadata. Because of this,
it is necessary to use the [`ShreddingTransformer`](https://datastax.github.io/graph-rag/reference/langchain_graph_retriever/transformers/#langchain_graph_retriever.transformers.shredding.ShreddingTransformer)
when inserting documents.
:::
</div>
</TabItem>
<TabItem value="opensearch" label="OpenSearch">
<div style={{ paddingLeft: '30px' }}>
Install the `langchain-graph-retriever` package with the `opensearch` extra:

```bash
pip install "langchain-graph-retriever[opensearch]"
```

Then create a vector store and load the test documents:

```python
from langchain_community.vectorstores import OpenSearchVectorSearch

vector_store = OpenSearchVectorSearch.from_documents(
    documents=animals,
    embedding=embeddings,
    engine="faiss",
    index_name="animals",
    opensearch_url=OPEN_SEARCH_URL,
    bulk_size=500,
)
```

For help creating an OpenSearch connection, consult the
[OpenSearch Vector Store Guide](/docs/integrations/vectorstores/opensearch).
</div>
</TabItem>
<TabItem value="chroma" label="Chroma">
<div style={{ paddingLeft: '30px' }}>
Install the `langchain-graph-retriever` package with the `chroma` extra:

```bash
pip install "langchain-graph-retriever[chroma]"
```

Then create a vector store and load the test documents:

```python
from langchain_chroma.vectorstores import Chroma
from langchain_graph_retriever.transformers import ShreddingTransformer

vector_store = Chroma.from_documents(
    documents=list(ShreddingTransformer().transform_documents(animals)),
    embedding=embeddings,
    collection_name="animals",
)
```

For help creating a Chroma connection, consult the
[Chroma Vector Store Guide](/docs/integrations/vectorstores/chroma).

:::note
Chroma doesn't support searching in nested metadata. Because of this,
it is necessary to use the [`ShreddingTransformer`](https://datastax.github.io/graph-rag/reference/langchain_graph_retriever/transformers/#langchain_graph_retriever.transformers.shredding.ShreddingTransformer)
when inserting documents.
:::
</div>
</TabItem>
<TabItem value="in-memory" label="InMemory" default>
<div style={{ paddingLeft: '30px' }}>
Install the `langchain-graph-retriever` package:

```bash
pip install "langchain-graph-retriever"
```

Then create a vector store and load the test documents:

```python
from langchain_core.vectorstores import InMemoryVectorStore

vector_store = InMemoryVectorStore.from_documents(
    documents=animals,
    embedding=embeddings,
)
```

:::tip
Using the `InMemoryVectorStore` is the fastest way to get started with Graph RAG
but it isn't recommended for production use. Instead it is recommended to use
**AstraDB** or **OpenSearch**.
:::
</div>
</TabItem>
</Tabs>

### Graph Traversal

This graph retriever starts with a single animal that best matches the query, then
traverses to other animals sharing the same `habitat` and/or `origin`.

```python
from graph_retriever.strategies import Eager
from langchain_graph_retriever import GraphRetriever

traversal_retriever = GraphRetriever(
    store = vector_store,
    edges = [("habitat", "habitat"), ("origin", "origin")],
    strategy = Eager(k=5, start_k=1, max_depth=2),
)
```

The above creates a graph traversing retriever that starts with the nearest
animal (`start_k=1`), retrieves 5 documents (`k=5`) and limits the search to documents
that are at most 2 steps away from the first animal (`max_depth=2`).

The `edges` define how metadata values can be used for traversal. In this case, every
animal is connected to other animals with the same `habitat` and/or `origin`.

```python
results = traversal_retriever.invoke("what animals could be found near a capybara?")

for doc in results:
    print(f"{doc.id}: {doc.page_content}")
```

```output
capybara: capybaras are the largest rodents in the world and are highly social animals.
heron: herons are wading birds known for their long legs and necks, often seen near water.
crocodile: crocodiles are large reptiles with powerful jaws and a long lifespan, often living over 70 years.
frog: frogs are amphibians known for their jumping ability and croaking sounds.
duck: ducks are waterfowl birds known for their webbed feet and quacking sounds.
```

Graph traversal improves retrieval quality by leveraging structured relationships in
the data. Unlike standard similarity search (see below), it provides a clear,
explainable rationale for why documents are selected.

In this case, the documents `capybara`, `heron`, `frog`, `crocodile`, and `duck` all
share the same `habitat=wetlands`, as defined by their metadata. This should increase
Document Relevance and the quality of the answer from the LLM.

### Comparison to Standard Retrieval

When `max_depth=0`, the graph traversing retriever behaves like a standard retriever:

```python
standard_retriever = GraphRetriever(
    store = vector_store,
    edges = [("habitat", "habitat"), ("origin", "origin")],
    strategy = Eager(k=5, start_k=5, max_depth=0),
)
```

This creates a retriever that starts with the nearest 5 animals (`start_k=5`),
and returns them without any traversal (`max_depth=0`). The edge definitions
are ignored in this case.

This is essentially the same as:

```python
standard_retriever = vector_store.as_retriever(search_kwargs={"k":5})
```

For either case, invoking the retriever returns:

```python
results = standard_retriever.invoke("what animals could be found near a capybara?")

for doc in results:
    print(f"{doc.id}: {doc.page_content}")
```

```output
capybara: capybaras are the largest rodents in the world and are highly social animals.
iguana: iguanas are large herbivorous lizards often found basking in trees and near water.
guinea pig: guinea pigs are small rodents often kept as pets due to their gentle and social nature.
hippopotamus: hippopotamuses are large semi-aquatic mammals known for their massive size and territorial behavior.
boar: boars are wild relatives of pigs, known for their tough hides and tusks.
```

These documents are joined based on similarity alone. Any structural data that existed
in the store is ignored. As compared to graph retrieval, this can decrease Document
Relevance because the returned results have a lower chance of being helpful to answer
the query.

## Usage

Following the examples above, `.invoke` is used to initiate retrieval on a query.

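For example, reusing the `traversal_retriever` built above (a minimal sketch; the results are standard LangChain `Document`s, so `page_content` and `metadata` are available on each one):

```python
docs = traversal_retriever.invoke("what animals could be found near a capybara?")

for doc in docs:
    # Each result carries the metadata used for traversal, e.g. habitat/origin.
    print(doc.id, doc.metadata.get("habitat"))
```
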
## Use within a chain

Like other retrievers, `GraphRetriever` can be incorporated into LLM applications
via [chains](/docs/how_to/sequence/).

<ChatModelTabs customVarName="llm" />

```python
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough

prompt = ChatPromptTemplate.from_template(
    """Answer the question based only on the context provided.

Context: {context}

Question: {question}"""
)

def format_docs(docs):
    return "\n\n".join(f"text: {doc.page_content} metadata: {doc.metadata}" for doc in docs)

chain = (
    {"context": traversal_retriever | format_docs, "question": RunnablePassthrough()}
    | prompt
    | llm
    | StrOutputParser()
)
```

```python
chain.invoke("what animals could be found near a capybara?")
```

```output
Animals that could be found near a capybara include herons, crocodiles, frogs,
and ducks, as they all inhabit wetlands.
```

## API reference

To explore all available parameters and advanced configurations, refer to the
[Graph RAG API reference](https://datastax.github.io/graph-rag/reference/).
File diff suppressed because one or more lines are too long
@@ -103,7 +103,6 @@
" compartment_id=\"MY_OCID\",\n",
" auth_type=\"SECURITY_TOKEN\",\n",
" auth_profile=\"MY_PROFILE\", # replace with your profile name\n",
" auth_file_location=\"MY_CONFIG_FILE_LOCATION\", # replace with file location where profile name configs present\n",
")\n",
"\n",
"\n",

@@ -21,16 +21,16 @@
"source": [
"# SambaStudioEmbeddings\n",
"\n",
"This will help you get started with SambaNova's SambaStudio embedding models using LangChain. For detailed documentation on `SambaStudioEmbeddings` features and configuration options, please refer to the [API reference](https://docs.sambanova.ai/sambastudio/latest/index.html).\n",
"This will help you get started with SambaNova's SambaStudio embedding models using LangChain. For detailed documentation on `SambaStudioEmbeddings` features and configuration options, please refer to the [API reference](https://python.langchain.com/api_reference/sambanova/embeddings/langchain_sambanova.embeddingsSambaStudioEmbeddings.html).\n",
"\n",
"**[SambaNova](https://sambanova.ai/)'s** [SambaStudio](https://sambanova.ai/technology/full-stack-ai-platform) is a platform for running your own open-source models\n",
"**[SambaNova](https://sambanova.ai/)'s** [Sambastudio](https://sambanova.ai/technology/full-stack-ai-platform) is a platform for running your own open-source models\n",
"\n",
"## Overview\n",
"### Integration details\n",
"\n",
"| Provider | Package |\n",
"|:--------:|:-------:|\n",
"| [SambaNova](/docs/integrations/providers/sambanova/) | [langchain-sambanova](https://python.langchain.com/docs/integrations/providers/sambanova/) |\n",
"| [SambaNova](/docs/integrations/providers/sambanova/) | [langchain-sambanova](https://python.langchain.com/api_reference/langchain_sambanova/embeddings/langchain_sambanova.embeddings.SambaStudioEmbeddings.html) |\n",
"\n",
"## Setup\n",
"\n",
@@ -227,7 +227,7 @@
"source": [
"## API Reference\n",
"\n",
"For detailed documentation on `SambaStudio` features and configuration options, please refer to the [API reference](https://docs.sambanova.ai/sambastudio/latest/api-ref-landing.html).\n"
"For detailed documentation on `SambaNovaEmbeddings` features and configuration options, please refer to the [API reference](https://python.langchain.com/api_reference/langchain_sambanova/embeddings/langchain_sambanova.embeddings.SambaStudioEmbeddings.html).\n"
]
}
],

@@ -1,256 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "_9MNj58sIkGN"
},
"source": [
"# Apify Actor\n",
"\n",
"## Overview\n",
"\n",
">[Apify Actors](https://docs.apify.com/platform/actors) are cloud programs designed for a wide range of web scraping, crawling, and data extraction tasks. These actors facilitate automated data gathering from the web, enabling users to extract, process, and store information efficiently. Actors can be used to perform tasks like scraping e-commerce sites for product details, monitoring price changes, or gathering search engine results. They integrate seamlessly with [Apify Datasets](https://docs.apify.com/platform/storage/dataset), allowing the structured data collected by actors to be stored, managed, and exported in formats like JSON, CSV, or Excel for further analysis or use.\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "OHLF9t9v9HCb"
},
"source": [
"## Setup\n",
"\n",
"This integration lives in the [langchain-apify](https://pypi.org/project/langchain-apify/) package. The package can be installed using pip.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "4DdGmBn5IbXz"
},
"outputs": [],
"source": [
"%pip install langchain-apify"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "rEAwonXqwggR"
},
"source": [
"### Prerequisites\n",
"\n",
"- **Apify account**: Register your free Apify account [here](https://console.apify.com/sign-up).\n",
"- **Apify API token**: Learn how to get your API token in the [Apify documentation](https://docs.apify.com/platform/integrations/api)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "9nJOl4MBMkcR"
},
"outputs": [],
"source": [
"import os\n",
"\n",
"os.environ[\"APIFY_API_TOKEN\"] = \"your-apify-api-token\"\n",
"os.environ[\"OPENAI_API_KEY\"] = \"your-openai-api-key\""
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "UfoQxAlCxR9q"
},
"source": [
"## Instantiation"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "qG9KtXtLM8i7"
},
"source": [
"Here we instantiate the `ApifyActorsTool` to be able to call [RAG Web Browser](https://apify.com/apify/rag-web-browser) Apify Actor. This Actor provides web browsing functionality for AI and LLM applications, similar to the web browsing feature in ChatGPT. Any Actor from the [Apify Store](https://apify.com/store) can be used in this way."
]
},
{
"cell_type": "code",
"execution_count": 43,
"metadata": {
"id": "cyxeTlPnM4Ya"
},
"outputs": [],
"source": [
"from langchain_apify import ApifyActorsTool\n",
"\n",
"tool = ApifyActorsTool(\"apify/rag-web-browser\")"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "fGDLvDCqyKWO"
},
"source": [
"## Invocation\n",
"\n",
"The `ApifyActorsTool` takes a single argument, which is `run_input` - a dictionary that is passed as a run input to the Actor. Run input schema documentation can be found in the input section of the Actor details page. See [RAG Web Browser input schema](https://apify.com/apify/rag-web-browser/input-schema).\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "nTWy6Hx1yk04"
},
"outputs": [],
"source": [
"tool.invoke({\"run_input\": {\"query\": \"what is apify?\", \"maxResults\": 2}})"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "kQsa27hoO58S"
},
"source": [
"## Chaining\n",
"\n",
"We can provide the created tool to an [agent](https://python.langchain.com/docs/tutorials/agents/). When asked to search for information, the agent will call the Apify Actor, which will search the web, and then retrieve the search results.\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "YySvLskW72Y8"
},
"outputs": [],
"source": [
"%pip install langgraph langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 44,
"metadata": {
"id": "QEDz07btO5Gi"
},
"outputs": [],
"source": [
"from langchain_core.messages import ToolMessage\n",
"from langchain_openai import ChatOpenAI\n",
"from langgraph.prebuilt import create_react_agent\n",
"\n",
"model = ChatOpenAI(model=\"gpt-4o\")\n",
"tools = [tool]\n",
"graph = create_react_agent(model, tools=tools)"
]
},
{
"cell_type": "code",
"execution_count": 45,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "XS1GEyNkQxGu",
"outputId": "195273d7-034c-425b-f3f9-95c0a9fb0c9e"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"================================\u001b[1m Human Message \u001b[0m=================================\n",
"\n",
"search for what is Apify\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"Tool Calls:\n",
" apify_actor_apify_rag-web-browser (call_27mjHLzDzwa5ZaHWCMH510lm)\n",
" Call ID: call_27mjHLzDzwa5ZaHWCMH510lm\n",
" Args:\n",
" run_input: {\"run_input\":{\"query\":\"Apify\",\"maxResults\":3,\"outputFormats\":[\"markdown\"]}}\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"\n",
"Apify is a comprehensive platform for web scraping, browser automation, and data extraction. It offers a wide array of tools and services that cater to developers and businesses looking to extract data from websites efficiently and effectively. Here's an overview of Apify:\n",
"\n",
"1. **Ecosystem and Tools**:\n",
" - Apify provides an ecosystem where developers can build, deploy, and publish data extraction and web automation tools called Actors.\n",
" - The platform supports various use cases such as extracting data from social media platforms, conducting automated browser-based tasks, and more.\n",
"\n",
"2. **Offerings**:\n",
" - Apify offers over 3,000 ready-made scraping tools and code templates.\n",
" - Users can also build custom solutions or hire Apify's professional services for more tailored data extraction needs.\n",
"\n",
"3. **Technology and Integration**:\n",
" - The platform supports integration with popular tools and services like Zapier, GitHub, Google Sheets, Pinecone, and more.\n",
" - Apify supports open-source tools and technologies such as JavaScript, Python, Puppeteer, Playwright, Selenium, and its own Crawlee library for web crawling and browser automation.\n",
"\n",
"4. **Community and Learning**:\n",
" - Apify hosts a community on Discord where developers can get help and share expertise.\n",
" - It offers educational resources through the Web Scraping Academy to help users become proficient in data scraping and automation.\n",
"\n",
"5. **Enterprise Solutions**:\n",
" - Apify provides enterprise-grade web data extraction solutions with high reliability, 99.95% uptime, and compliance with SOC2, GDPR, and CCPA standards.\n",
"\n",
"For more information, you can visit [Apify's official website](https://apify.com/) or their [GitHub page](https://github.com/apify) which contains their code repositories and further details about their projects.\n"
]
}
],
"source": [
"inputs = {\"messages\": [(\"user\", \"search for what is Apify\")]}\n",
"for s in graph.stream(inputs, stream_mode=\"values\"):\n",
" message = s[\"messages\"][-1]\n",
" # skip tool messages\n",
" if isinstance(message, ToolMessage):\n",
" continue\n",
" message.pretty_print()"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "WYXuQIQx8AvG"
},
"source": [
"## API reference\n",
"\n",
"For more information on how to use this integration, see the [git repository](https://github.com/apify/langchain-apify) or the [Apify integration documentation](https://docs.apify.com/platform/integrations/langgraph)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "f1NnMik78oib"
},
"outputs": [],
"source": []
}
],
"metadata": {
"colab": {
"provenance": [],
"toc_visible": true
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 0
}

@@ -66,20 +66,21 @@
"metadata": {},
"outputs": [],
"source": [
"from databricks_langchain.uc_ai import (\n",
" DatabricksFunctionClient,\n",
" UCFunctionToolkit,\n",
" set_uc_function_client,\n",
")\n",
"from databricks.sdk import WorkspaceClient\n",
"from langchain_community.tools.databricks import UCFunctionToolkit\n",
"\n",
"client = DatabricksFunctionClient()\n",
"set_uc_function_client(client)\n",
"\n",
"tools = UCFunctionToolkit(\n",
" # Include functions as tools using their qualified names.\n",
" # You can use \"{catalog_name}.{schema_name}.*\" to get all functions in a schema.\n",
" function_names=[\"main.tools.python_exec\"]\n",
").tools"
"tools = (\n",
" UCFunctionToolkit(\n",
" # You can find the SQL warehouse ID in its UI after creation.\n",
" warehouse_id=\"xxxx123456789\"\n",
" )\n",
" .include(\n",
" # Include functions as tools using their qualified names.\n",
" # You can use \"{catalog_name}.{schema_name}.*\" to get all functions in a schema.\n",
" \"main.tools.python_exec\",\n",
" )\n",
" .get_tools()\n",
")"
]
},
{

@@ -5,7 +5,7 @@
"id": "a991a6f8-1897-4f49-a191-ae3bdaeda856",
"metadata": {},
"source": [
"# ElevenLabs Text2Speech\n",
"# Eleven Labs Text2Speech\n",
"\n",
"This notebook shows how to interact with the `ElevenLabs API` to achieve text-to-speech capabilities."
]
@@ -37,7 +37,7 @@
"source": [
"import os\n",
"\n",
"os.environ[\"ELEVENLABS_API_KEY\"] = \"\""
"os.environ[\"ELEVEN_API_KEY\"] = \"\""
]
},
{

@@ -64,10 +64,7 @@
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"if not os.environ.get(\"JINA_API_KEY\"):\n",
" os.environ[\"JINA_API_KEY\"] = getpass.getpass(\"Jina API key:\\n\")"
"import os"
]
},
{

@@ -506,7 +506,7 @@
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all DatabricksVectorSearch features and configurations head to the API reference: https://api-docs.databricks.com/python/databricks-ai-bridge/latest/databricks_langchain.html#databricks_langchain.DatabricksVectorSearch"
"For detailed documentation of all DatabricksVectorSearch features and configurations head to the API reference: https://python.langchain.com/api_reference/databricks/vectorstores/langchain_databricks.vectorstores.DatabricksVectorSearch.html"
]
}
],

@@ -331,7 +331,7 @@
"- Dictionary-based Filters\n",
" - You can pass a dictionary (dict) where the keys represent metadata fields and the values specify the filter condition. This method applies an equality filter between the key and the corresponding value. When multiple key-value pairs are provided, they are combined using a logical AND operation.\n",
"- SQL-based Filters\n",
" - Alternatively, you can provide a string representing an SQL WHERE clause to define more complex filtering conditions. This allows for greater flexibility, supporting SQL expressions such as comparison operators and logical operators. Learn more about [BigQuery operators](https://cloud.google.com/bigquery/docs/reference/standard-sql/operators)."
" - Alternatively, you can provide a string representing an SQL WHERE clause to define more complex filtering conditions. This allows for greater flexibility, supporting SQL expressions such as comparison operators and logical operators."
]
},
{
@@ -356,7 +356,7 @@
"source": [
"# SQL-based Filters\n",
"# This should return \"Banana\", \"Apples and oranges\" and \"Cars and airplanes\" documents.\n",
"docs = store.similarity_search_by_vector(query_vector, filter=\"len = 6 AND len > 17\")\n",
"docs = store.similarity_search_by_vector(query_vector, filter={\"len = 6 AND len > 17\"})\n",
"print(docs)"
]
},

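As a side note to the filter discussion in the hunk above, a dictionary-based filter would look roughly like this (a hypothetical sketch reusing the same `store` and `query_vector` from the diffed notebook; the metadata key and value are illustrative only):

```python
# Dictionary-based filter: equality match on a metadata field.
# Multiple key/value pairs would be combined with a logical AND.
docs = store.similarity_search_by_vector(query_vector, filter={"len": 6})
print(docs)
```
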
@@ -156,15 +156,6 @@
" db_name=\"vearch_cluster_langchian\",\n",
" table_name=\"tobenumone\",\n",
" flag=1,\n",
")\n",
"\n",
"# The vector data is usually already initialized, so we don’t need the document parameter and can directly create the object.\n",
"vearch_cluster_b = Vearch(\n",
" embeddings,\n",
" path_or_url=\"http://test-vearch-langchain-router.vectorbase.svc.ht1.n.jd.local\",\n",
" db_name=\"vearch_cluster_langchian\",\n",
" table_name=\"tobenumone\",\n",
" flag=1,\n",
")"
]
},
@@ -253,7 +244,6 @@
],
"source": [
"query = \"你知道凌波微步吗,你知道都有谁会凌波微步?\"\n",
"# The second parameter is the top-n to retrieve, and its default value is 4.\n",
"vearch_standalone_res = vearch_standalone.similarity_search(query, 3)\n",
"for idx, tmp in enumerate(vearch_standalone_res):\n",
" print(f\"{'#'*20}第{idx+1}段相关文档{'#'*20}\\n\\n{tmp.page_content}\\n\")\n",
@@ -271,11 +261,6 @@
"for idx, tmp in enumerate(cluster_res):\n",
" print(f\"{'#'*20}第{idx+1}段相关文档{'#'*20}\\n\\n{tmp.page_content}\\n\")\n",
"\n",
"# In practical applications, we usually limit the boundary value of similarity. The following method can set this value.\n",
"cluster_res_with_bound = vearch_cluster.similarity_search_with_score(\n",
" query=query_c, k=3, min_score=0.5\n",
")\n",
"\n",
"# combine your local knowleadge and query\n",
"context_c = \"\".join([tmp.page_content for tmp in cluster_res])\n",
"new_query_c = f\"基于以下信息,尽可能准确的来回答用户的问题。背景信息:\\n {context_c} \\n 回答用户这个问题:{query_c}\\n\\n\"\n",

@@ -215,7 +215,7 @@
"\n",
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
"\n",
"<ChatModelTabs overrideParams={{openai: {model: \"gpt-4\"}}} />\n"
"<ChatModelTabs openaiParams={`model=\"gpt-4\"`} />\n"
]
},
{

@@ -108,7 +108,7 @@
"\n",
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
"\n",
"<ChatModelTabs overrideParams={{openai: {model: \"gpt-4o-mini\"}}} />\n"
"<ChatModelTabs openaiParams={`model=\"gpt-4o-mini\"`} />\n"
]
},
{
@@ -935,7 +935,7 @@
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
@@ -949,7 +949,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
"version": "3.10.4"
}
},
"nbformat": 4,

Some files were not shown because too many files have changed in this diff.