Mirror of https://github.com/hwchase17/langchain.git
Synced 2026-02-05 16:50:03 +00:00

Compare commits: v0.1.6 ... erick/rele (1 commit)

| Author | SHA1 | Date |
|---|---|---|
|  | 7aec5970ee |  |

.github/actions/poetry_setup/action.yml (vendored) — 4 changed lines

@@ -32,7 +32,7 @@ runs:
       with:
         python-version: ${{ inputs.python-version }}

-    - uses: actions/cache@v4
+    - uses: actions/cache@v3
       id: cache-bin-poetry
       name: Cache Poetry binary - Python ${{ inputs.python-version }}
       env:
@@ -79,7 +79,7 @@ runs:
       run: pipx install "poetry==$POETRY_VERSION" --python '${{ steps.setup-python.outputs.python-path }}' --verbose

     - name: Restore pip and poetry cached dependencies
-      uses: actions/cache@v4
+      uses: actions/cache@v3
       env:
         SEGMENT_DOWNLOAD_TIMEOUT_MIN: "4"
         WORKDIR: ${{ inputs.working-directory == '' && '.' || inputs.working-directory }}

.github/scripts/check_diff.py (vendored) — 8 changed lines

@@ -36,7 +36,13 @@ if __name__ == "__main__":
         elif "libs/partners" in file:
             partner_dir = file.split("/")[2]
             if os.path.isdir(f"libs/partners/{partner_dir}"):
-                dirs_to_run.add(f"libs/partners/{partner_dir}")
+                dirs_to_run.update(
+                    (
+                        f"libs/partners/{partner_dir}",
+                        "libs/langchain",
+                        "libs/experimental",
+                    )
+                )
             # Skip if the directory was deleted
         elif "libs/langchain" in file:
             dirs_to_run.update(("libs/langchain", "libs/experimental"))

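The behavioral difference between the two sides of this hunk comes down to `set.add` versus `set.update`. A minimal illustration (hypothetical paths, not taken from the script):

```python
# set.add inserts its argument as ONE element; set.update iterates its
# argument and inserts EACH element. The tuple passed to update above
# therefore queues the partner dir plus langchain and experimental.
dirs_to_run = set()
dirs_to_run.add("libs/partners/openai")
dirs_to_run.update(
    (
        "libs/partners/openai",
        "libs/langchain",
        "libs/experimental",
    )
)
print(sorted(dirs_to_run))
# ['libs/experimental', 'libs/langchain', 'libs/partners/openai']
```
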
.github/scripts/get_min_versions.py (vendored) — 65 changed lines

@@ -1,65 +0,0 @@ (entire file removed on one side of this diff)

import sys

import tomllib
from packaging.version import parse as parse_version
import re

MIN_VERSION_LIBS = ["langchain-core", "langchain-community", "langchain"]


def get_min_version(version: str) -> str:
    # case ^x.x.x
    _match = re.match(r"^\^(\d+(?:\.\d+){0,2})$", version)
    if _match:
        return _match.group(1)

    # case >=x.x.x,<y.y.y
    _match = re.match(r"^>=(\d+(?:\.\d+){0,2}),<(\d+(?:\.\d+){0,2})$", version)
    if _match:
        _min = _match.group(1)
        _max = _match.group(2)
        assert parse_version(_min) < parse_version(_max)
        return _min

    # case x.x.x
    _match = re.match(r"^(\d+(?:\.\d+){0,2})$", version)
    if _match:
        return _match.group(1)

    raise ValueError(f"Unrecognized version format: {version}")


def get_min_version_from_toml(toml_path: str):
    # Parse the TOML file
    with open(toml_path, "rb") as file:
        toml_data = tomllib.load(file)

    # Get the dependencies from tool.poetry.dependencies
    dependencies = toml_data["tool"]["poetry"]["dependencies"]

    # Initialize a dictionary to store the minimum versions
    min_versions = {}

    # Iterate over the libs in MIN_VERSION_LIBS
    for lib in MIN_VERSION_LIBS:
        # Check if the lib is present in the dependencies
        if lib in dependencies:
            # Get the version string
            version_string = dependencies[lib]

            # Use parse_version to get the minimum supported version from version_string
            min_version = get_min_version(version_string)

            # Store the minimum version in the min_versions dictionary
            min_versions[lib] = min_version

    return min_versions


# Get the TOML file path from the command line argument
toml_file = sys.argv[1]

# Call the function to get the minimum versions
min_versions = get_min_version_from_toml(toml_file)

print(" ".join([f"{lib}=={version}" for lib, version in min_versions.items()]))

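For reference, the script is invoked as `python get_min_versions.py pyproject.toml` (exactly as the release workflow below does) and prints pip-style pins on one line. A quick sanity check of the three version formats `get_min_version` accepts, runnable alongside the script:

```python
# Sanity check (not part of the repo) for the three formats handled above.
assert get_min_version("^0.1.4") == "0.1.4"        # caret constraint
assert get_min_version(">=0.1.4,<0.2") == "0.1.4"  # bounded range
assert get_min_version("0.1.4") == "0.1.4"         # exact pin

# Anything else (e.g. "~0.1.4") raises ValueError.
```
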
.github/workflows/_all_ci.yml (vendored) — 6 changed lines

@@ -36,35 +36,30 @@ env:

 jobs:
   lint:
-    name: "-"
     uses: ./.github/workflows/_lint.yml
     with:
       working-directory: ${{ inputs.working-directory }}
     secrets: inherit

   test:
-    name: "-"
     uses: ./.github/workflows/_test.yml
     with:
       working-directory: ${{ inputs.working-directory }}
     secrets: inherit

   compile-integration-tests:
-    name: "-"
     uses: ./.github/workflows/_compile_integration_test.yml
     with:
       working-directory: ${{ inputs.working-directory }}
     secrets: inherit

   dependencies:
-    name: "-"
     uses: ./.github/workflows/_dependencies.yml
     with:
       working-directory: ${{ inputs.working-directory }}
     secrets: inherit

   extended-tests:
-    name: "make extended_tests #${{ matrix.python-version }}"
     runs-on: ubuntu-latest
     strategy:
       matrix:
@@ -73,6 +68,7 @@ jobs:
         - "3.9"
         - "3.10"
         - "3.11"
+    name: Python ${{ matrix.python-version }} extended tests
     defaults:
       run:
         working-directory: ${{ inputs.working-directory }}

[file header missing in the mirror — given the job name, this hunk appears to belong to .github/workflows/_compile_integration_test.yml]

@@ -24,7 +24,7 @@ jobs:
         - "3.9"
         - "3.10"
         - "3.11"
-    name: "poetry run pytest -m compile tests/integration_tests #${{ matrix.python-version }}"
+    name: Python ${{ matrix.python-version }}
     steps:
       - uses: actions/checkout@v4

.github/workflows/_dependencies.yml (vendored) — 2 changed lines

@@ -28,7 +28,7 @@ jobs:
         - "3.9"
         - "3.10"
         - "3.11"
-    name: dependency checks ${{ matrix.python-version }}
+    name: dependencies - Python ${{ matrix.python-version }}
     steps:
       - uses: actions/checkout@v4

.github/workflows/_integration_test.yml (vendored) — 8 changed lines

@@ -38,11 +38,6 @@ jobs:
         shell: bash
         run: poetry install --with test,test_integration

-      - name: Install deps outside pyproject
-        if: ${{ startsWith(inputs.working-directory, 'libs/community/') }}
-        shell: bash
-        run: poetry run pip install "boto3<2" "google-cloud-aiplatform<2"
-
       - name: 'Authenticate to Google Cloud'
         id: 'auth'
         uses: google-github-actions/auth@v2
@@ -61,9 +56,6 @@ jobs:
           GOOGLE_SEARCH_API_KEY: ${{ secrets.GOOGLE_SEARCH_API_KEY }}
           GOOGLE_CSE_ID: ${{ secrets.GOOGLE_CSE_ID }}
-          EXA_API_KEY: ${{ secrets.EXA_API_KEY }}
-          NOMIC_API_KEY: ${{ secrets.NOMIC_API_KEY }}
           PINECONE_API_KEY: ${{ secrets.PINECONE_API_KEY }}
           PINECONE_ENVIRONMENT: ${{ secrets.PINECONE_ENVIRONMENT }}
         run: |
           make integration_tests

.github/workflows/_lint.yml (vendored) — 17 changed lines

@@ -21,7 +21,6 @@ env:

 jobs:
   build:
-    name: "make lint #${{ matrix.python-version }}"
     runs-on: ubuntu-latest
     strategy:
       matrix:
@@ -80,13 +79,13 @@ jobs:
           poetry run pip install -e "$LANGCHAIN_LOCATION"

       - name: Get .mypy_cache to speed up mypy
-        uses: actions/cache@v4
+        uses: actions/cache@v3
         env:
           SEGMENT_DOWNLOAD_TIMEOUT_MIN: "2"
         with:
           path: |
             ${{ env.WORKDIR }}/.mypy_cache
-          key: mypy-lint-${{ runner.os }}-${{ runner.arch }}-py${{ matrix.python-version }}-${{ inputs.working-directory }}-${{ hashFiles(format('{0}/poetry.lock', inputs.working-directory)) }}
+          key: mypy-lint-${{ runner.os }}-${{ runner.arch }}-py${{ matrix.python-version }}-${{ inputs.working-directory }}-${{ hashFiles(format('{0}/poetry.lock', env.WORKDIR)) }}

       - name: Analysing the code with our lint
@@ -94,7 +93,7 @@ jobs:
         run: |
           make lint_package

-      - name: Install unit test dependencies
+      - name: Install test dependencies
         # Also installs dev/lint/test/typing dependencies, to ensure we have
         # type hints for as many of our libraries as possible.
         # This helps catch errors that require dependencies to be spotted, for example:
@@ -103,24 +102,18 @@ jobs:
         # If you change this configuration, make sure to change the `cache-key`
         # in the `poetry_setup` action above to stop using the old cache.
         # It doesn't matter how you change it, any change will cause a cache-bust.
-        if: ${{ ! startsWith(inputs.working-directory, 'libs/partners/') }}
-        working-directory: ${{ inputs.working-directory }}
-        run: |
-          poetry install --with test
-      - name: Install unit+integration test dependencies
-        if: ${{ startsWith(inputs.working-directory, 'libs/partners/') }}
         working-directory: ${{ inputs.working-directory }}
         run: |
           poetry install --with test,test_integration

       - name: Get .mypy_cache_test to speed up mypy
-        uses: actions/cache@v4
+        uses: actions/cache@v3
         env:
           SEGMENT_DOWNLOAD_TIMEOUT_MIN: "2"
         with:
           path: |
             ${{ env.WORKDIR }}/.mypy_cache_test
-          key: mypy-test-${{ runner.os }}-${{ runner.arch }}-py${{ matrix.python-version }}-${{ inputs.working-directory }}-${{ hashFiles(format('{0}/poetry.lock', inputs.working-directory)) }}
+          key: mypy-test-${{ runner.os }}-${{ runner.arch }}-py${{ matrix.python-version }}-${{ inputs.working-directory }}-${{ hashFiles(format('{0}/poetry.lock', env.WORKDIR)) }}

       - name: Analysing the code with our lint
         working-directory: ${{ inputs.working-directory }}

.github/workflows/_release.yml (vendored) — 20 changed lines

@@ -15,7 +15,7 @@ on:
         default: 'libs/langchain'

 env:
-  PYTHON_VERSION: "3.11"
+  PYTHON_VERSION: "3.10"
   POETRY_VERSION: "1.7.1"

 jobs:
@@ -175,27 +175,13 @@ jobs:
           GOOGLE_SEARCH_API_KEY: ${{ secrets.GOOGLE_SEARCH_API_KEY }}
           GOOGLE_CSE_ID: ${{ secrets.GOOGLE_CSE_ID }}
-          EXA_API_KEY: ${{ secrets.EXA_API_KEY }}
-          NOMIC_API_KEY: ${{ secrets.NOMIC_API_KEY }}
           PINECONE_API_KEY: ${{ secrets.PINECONE_API_KEY }}
           PINECONE_ENVIRONMENT: ${{ secrets.PINECONE_ENVIRONMENT }}
         run: make integration_tests
         working-directory: ${{ inputs.working-directory }}

-      - name: Get minimum versions
-        working-directory: ${{ inputs.working-directory }}
-        id: min-version
-        run: |
-          poetry run pip install packaging
-          min_versions="$(poetry run python $GITHUB_WORKSPACE/.github/scripts/get_min_versions.py pyproject.toml)"
-          echo "min-versions=$min_versions" >> "$GITHUB_OUTPUT"
-          echo "min-versions=$min_versions"
-
       - name: Run unit tests with minimum dependency versions
-        if: ${{ steps.min-version.outputs.min-versions != '' }}
-        env:
-          MIN_VERSIONS: ${{ steps.min-version.outputs.min-versions }}
+        if: ${{ (inputs.working-directory == 'libs/langchain') || (inputs.working-directory == 'libs/community') || (inputs.working-directory == 'libs/experimental') }}
         run: |
-          poetry run pip install $MIN_VERSIONS
+          poetry run pip install -r _test_minimum_requirements.txt
           make tests
         working-directory: ${{ inputs.working-directory }}

.github/workflows/_test.yml (vendored) — 2 changed lines

@@ -28,7 +28,7 @@ jobs:
         - "3.9"
         - "3.10"
         - "3.11"
-    name: "make test #${{ matrix.python-version }}"
+    name: Python ${{ matrix.python-version }}
     steps:
       - uses: actions/checkout@v4

.github/workflows/check_diffs.yml (vendored) — 3 changed lines

@@ -1,5 +1,5 @@
 ---
-name: CI
+name: Check library diffs

 on:
   push:
@@ -32,7 +32,6 @@ jobs:
     outputs:
       dirs-to-run: ${{ steps.set-matrix.outputs.dirs-to-run }}
   ci:
-    name: cd ${{ matrix.working-directory }}
     needs: [ build ]
     strategy:
       matrix:

.github/workflows/codespell.yml (vendored) — 4 changed lines

@@ -1,5 +1,5 @@
 ---
-name: CI / cd . / make spell_check
+name: Codespell

 on:
   push:
@@ -12,7 +12,7 @@ permissions:

 jobs:
   codespell:
-    name: (Check for spelling errors)
+    name: Check for spelling errors
     runs-on: ubuntu-latest

     steps:

.github/workflows/doc_lint.yml (vendored) — 4 changed lines

@@ -1,5 +1,5 @@
 ---
-name: CI / cd .
+name: Docs, templates, cookbook lint

 on:
   push:
@@ -15,7 +15,6 @@ on:

 jobs:
   check:
-    name: Check for "from langchain import x" imports
     runs-on: ubuntu-latest

     steps:
@@ -29,7 +28,6 @@ jobs:
           git grep 'from langchain import' {docs/docs,templates,cookbook} | grep -vE 'from langchain import (hub)' && exit 1 || exit 0

   lint:
-    name: "-"
     uses:
       ./.github/workflows/_lint.yml
     with:

.github/workflows/scheduled_test.yml (vendored) — 5 changed lines

@@ -54,11 +54,6 @@ jobs:
           echo "Running scheduled tests, installing dependencies with poetry..."
           poetry install --with=test_integration,test

-      - name: Install deps outside pyproject
-        if: ${{ startsWith(inputs.working-directory, 'libs/community/') }}
-        shell: bash
-        run: poetry run pip install "boto3<2" "google-cloud-aiplatform<2"
-
       - name: Run tests
         shell: bash
         env:

.github/workflows/templates_ci.yml (vendored, new file) — 36 changed lines

@@ -0,0 +1,36 @@
+---
+name: templates CI
+
+on:
+  push:
+    branches: [ master ]
+  pull_request:
+    paths:
+      - '.github/actions/poetry_setup/action.yml'
+      - '.github/tools/**'
+      - '.github/workflows/_lint.yml'
+      - '.github/workflows/templates_ci.yml'
+      - 'templates/**'
+  workflow_dispatch:  # Allows to trigger the workflow manually in GitHub UI
+
+# If another push to the same PR or branch happens while this workflow is still running,
+# cancel the earlier run in favor of the next run.
+#
+# There's no point in testing an outdated version of the code. GitHub only allows
+# a limited number of job runners to be active at the same time, so it's better to cancel
+# pointless jobs early so that more useful jobs can run sooner.
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
+env:
+  POETRY_VERSION: "1.7.1"
+  WORKDIR: "templates"
+
+jobs:
+  lint:
+    uses:
+      ./.github/workflows/_lint.yml
+    with:
+      working-directory: templates
+    secrets: inherit

.release-please-manifest.json (new file) — 19 changed lines

@@ -0,0 +1,19 @@
+{
+  "libs/core": "0.1.17",
+  "libs/community": "0.0.16",
+  "libs/langchain": "0.1.4",
+  "libs/experimental": "0.0.49",
+  "libs/cli": "0.0.21",
+  "libs/partners/anthropic": "0.0.1.post1",
+  "libs/partners/exa": "0.0.1",
+  "libs/partners/google-genai": "0.0.6",
+  "libs/partners/google-vertexai": "0.0.3",
+  "libs/partners/mistralai": "0.0.3",
+  "libs/partners/nomic": "0.0.1",
+  "libs/partners/nvidia-ai-endpoints": "0.0.1",
+  "libs/partners/nvidia-trt": "0.0.1rc0",
+  "libs/partners/openai": "0.0.5",
+  "libs/partners/pinecone": "0.0.1",
+  "libs/partners/robocorp": "0.0.2",
+  "libs/partners/together": "0.0.2.post1"
+}

[file header missing in the mirror — this hunk is from the repository README]

@@ -1,6 +1,6 @@
 # 🦜️🔗 LangChain

-⚡ Build context-aware reasoning applications ⚡
+⚡ Building applications with LLMs through composability ⚡

 [](https://github.com/langchain-ai/langchain/releases)
 [](https://github.com/langchain-ai/langchain/actions/workflows/check_diffs.yml)

File diff suppressed because one or more lines are too long
[file header missing in the mirror — a docker-compose file for integration tests, removed on one side]

@@ -1,17 +0,0 @@
-# docker-compose to make it easier to spin up integration tests.
-# Services should use NON standard ports to avoid collision with
-version: "3"
-name: langchain-tests
-
-services:
-  redis:
-    image: redis/redis-stack-server:latest
-    # We use non standard ports since
-    # these instances are used for testing
-    # and users may already have existing
-    # redis instances set up locally
-    # for other projects
-    ports:
-      - "6020:6379"
-    volumes:
-      - ./redis-volume:/data

[file header missing in the mirror — a docs build script]

@@ -16,8 +16,7 @@ cp ../cookbook/README.md src/pages/cookbook.mdx
 mkdir -p docs/templates
 cp ../templates/docs/INDEX.md docs/templates/index.md
 poetry run python scripts/copy_templates.py
-wget -q https://raw.githubusercontent.com/langchain-ai/langserve/main/README.md -O docs/langserve.md
-wget -q https://raw.githubusercontent.com/langchain-ai/langgraph/main/README.md -O docs/langgraph.md
+wget https://raw.githubusercontent.com/langchain-ai/langserve/main/README.md -O docs/langserve.md

 yarn

[file header missing in the mirror — the API reference Sphinx configuration]

@@ -146,7 +146,6 @@ partners = [
     (p.name, p.name.replace("-", "_") + "_api_reference")
     for p in partners_dir.iterdir()
 ]
-partners = sorted(partners)

 html_context = {
     "display_github": True,  # Integrate GitHub

File diff suppressed because one or more lines are too long
docs/docs/_templates/integration.mdx (vendored) — 4 changed lines

@@ -37,7 +37,7 @@ from langchain_community.llms import integration_class_REPLACE_ME

 ## Text Embedding Models

-See a [usage example](/docs/integrations/text_embedding/INCLUDE_REAL_NAME).
+See a [usage example](/docs/integrations/text_embedding/INCLUDE_REAL_NAME)

 ```python
 from langchain_community.embeddings import integration_class_REPLACE_ME
@@ -45,7 +45,7 @@ from langchain_community.embeddings import integration_class_REPLACE_ME

 ## Chat models

-See a [usage example](/docs/integrations/chat/INCLUDE_REAL_NAME).
+See a [usage example](/docs/integrations/chat/INCLUDE_REAL_NAME)

 ```python
 from langchain_community.chat_models import integration_class_REPLACE_ME

[file headers missing in the mirror — the next hunks are from the tutorials and YouTube pages in docs]

@@ -2,7 +2,7 @@

 Below are links to tutorials and courses on LangChain. For written guides on common use cases for LangChain, check out the [use cases guides](/docs/use_cases).

-⛓ icon marks a new addition [last update 2024-02-06]
+⛓ icon marks a new addition [last update 2023-09-21]

 ---------------------

@@ -10,20 +10,18 @@ Below are links to tutorials and courses on LangChain. For written guides on com

 ### Books

-#### [Generative AI with LangChain](https://www.amazon.com/Generative-AI-LangChain-language-ChatGPT/dp/1835083463/ref=sr_1_1?crid=1GMOMH0G7GLR&keywords=generative+ai+with+langchain&qid=1703247181&sprefix=%2Caps%2C298&sr=8-1) by [Ben Auffrath](https://www.amazon.com/stores/Ben-Auffarth/author/B08JQKSZ7D?ref=ap_rdr&store_ref=ap_rdr&isDramIntegrated=true&shoppingPortalEnabled=true), ©️ 2023 Packt Publishing
+#### ⛓[Generative AI with LangChain](https://www.amazon.com/Generative-AI-LangChain-language-ChatGPT/dp/1835083463/ref=sr_1_1?crid=1GMOMH0G7GLR&keywords=generative+ai+with+langchain&qid=1703247181&sprefix=%2Caps%2C298&sr=8-1) by [Ben Auffrath](https://www.amazon.com/stores/Ben-Auffarth/author/B08JQKSZ7D?ref=ap_rdr&store_ref=ap_rdr&isDramIntegrated=true&shoppingPortalEnabled=true), ©️ 2023 Packt Publishing

 ### DeepLearning.AI courses
 by [Harrison Chase](https://en.wikipedia.org/wiki/LangChain) and [Andrew Ng](https://en.wikipedia.org/wiki/Andrew_Ng)
 - [LangChain for LLM Application Development](https://learn.deeplearning.ai/langchain)
 - [LangChain Chat with Your Data](https://learn.deeplearning.ai/langchain-chat-with-your-data)
-- [Functions, Tools and Agents with LangChain](https://learn.deeplearning.ai/functions-tools-agents-langchain)
+- ⛓ [Functions, Tools and Agents with LangChain](https://learn.deeplearning.ai/functions-tools-agents-langchain)

 ### Handbook
 [LangChain AI Handbook](https://www.pinecone.io/learn/langchain/) By **James Briggs** and **Francisco Ingham**

 ⛓ [LangChain Cheatsheet](https://pub.towardsai.net/langchain-cheatsheet-all-secrets-on-a-single-page-8be26b721cde) by **Ivan Reznikov**

 ### Short Tutorials
 [LangChain Explained in 13 Minutes | QuickStart Tutorial for Beginners](https://youtu.be/aywZrzNaKjs) by [Rabbitmetrics](https://www.youtube.com/@rabbitmetrics)

@@ -31,8 +29,6 @@ Below are links to tutorials and courses on LangChain. For written guides on com

 [LangChain Crash Course - Build apps with language models](https://youtu.be/LbT1yp6quS8) by [Patrick Loeber](https://www.youtube.com/@patloeber)

-⛓ [LangChain 101 Course](https://medium.com/@ivanreznikov/langchain-101-course-updated-668f7b41d6cb) by **Ivan Reznikov**
-
 ## Tutorials

 ### [LangChain for Gen AI and LLMs](https://www.youtube.com/playlist?list=PLIUOU7oqGTLieV9uTIFMm6_4PXg-hlN6F) by [James Briggs](https://www.youtube.com/@jamesbriggs)
@@ -48,8 +44,8 @@ Below are links to tutorials and courses on LangChain. For written guides on com
 - #9 [Build Conversational Agents with Vector DBs](https://youtu.be/H6bCqqw9xyI)
 - [Using NEW `MPT-7B` in Hugging Face and LangChain](https://youtu.be/DXpk9K7DgMo)
 - [`MPT-30B` Chatbot with LangChain](https://youtu.be/pnem-EhT6VI)
-- [Fine-tuning OpenAI's `GPT 3.5` for LangChain Agents](https://youtu.be/boHXgQ5eQic?si=OOOfK-GhsgZGBqSr)
-- [Chatbots with `RAG`: LangChain Full Walkthrough](https://youtu.be/LhnCsygAvzY?si=N7k6xy4RQksbWwsQ)
+- ⛓ [Fine-tuning OpenAI's `GPT 3.5` for LangChain Agents](https://youtu.be/boHXgQ5eQic?si=OOOfK-GhsgZGBqSr)
+- ⛓ [Chatbots with `RAG`: LangChain Full Walkthrough](https://youtu.be/LhnCsygAvzY?si=N7k6xy4RQksbWwsQ)

 ### [LangChain 101](https://www.youtube.com/playlist?list=PLqZXAkvF1bPNQER9mLmDbntNfSpzdDIU5) by [Greg Kamradt (Data Indy)](https://www.youtube.com/@DataIndependent)
@@ -113,16 +109,16 @@ Below are links to tutorials and courses on LangChain. For written guides on com
 - [What can you do with 16K tokens in LangChain?](https://youtu.be/z2aCZBAtWXs)
 - [Tagging and Extraction - Classification using `OpenAI Functions`](https://youtu.be/a8hMgIcUEnE)
 - [HOW to Make Conversational Form with LangChain](https://youtu.be/IT93On2LB5k)
-- [`Claude-2` meets LangChain!](https://youtu.be/Hb_D3p0bK2U?si=j96Kc7oJoeRI5-iC)
-- [`PaLM 2` Meets LangChain](https://youtu.be/orPwLibLqm4?si=KgJjpEbAD9YBPqT4)
-- [`LLaMA2` with LangChain - Basics | LangChain TUTORIAL](https://youtu.be/cIRzwSXB4Rc?si=v3Hwxk1m3fksBIHN)
-- [Serving `LLaMA2` with `Replicate`](https://youtu.be/JIF4nNi26DE?si=dSazFyC4UQmaR-rJ)
-- [NEW LangChain Expression Language](https://youtu.be/ud7HJ2p3gp0?si=8pJ9O6hGbXrCX5G9)
-- [Building a RCI Chain for Agents with LangChain Expression Language](https://youtu.be/QaKM5s0TnsY?si=0miEj-o17AHcGfLG)
-- [How to Run `LLaMA-2-70B` on the `Together AI`](https://youtu.be/Tc2DHfzHeYE?si=Xku3S9dlBxWQukpe)
-- [`RetrievalQA` with `LLaMA 2 70b` & `Chroma` DB](https://youtu.be/93yueQQnqpM?si=ZMwj-eS_CGLnNMXZ)
-- [How to use `BGE Embeddings` for LangChain](https://youtu.be/sWRvSG7vL4g?si=85jnvnmTCF9YIWXI)
-- [How to use Custom Prompts for `RetrievalQA` on `LLaMA-2 7B`](https://youtu.be/PDwUKves9GY?si=sMF99TWU0p4eiK80)
+- ⛓ [`Claude-2` meets LangChain!](https://youtu.be/Hb_D3p0bK2U?si=j96Kc7oJoeRI5-iC)
+- ⛓ [`PaLM 2` Meets LangChain](https://youtu.be/orPwLibLqm4?si=KgJjpEbAD9YBPqT4)
+- ⛓ [`LLaMA2` with LangChain - Basics | LangChain TUTORIAL](https://youtu.be/cIRzwSXB4Rc?si=v3Hwxk1m3fksBIHN)
+- ⛓ [Serving `LLaMA2` with `Replicate`](https://youtu.be/JIF4nNi26DE?si=dSazFyC4UQmaR-rJ)
+- ⛓ [NEW LangChain Expression Language](https://youtu.be/ud7HJ2p3gp0?si=8pJ9O6hGbXrCX5G9)
+- ⛓ [Building a RCI Chain for Agents with LangChain Expression Language](https://youtu.be/QaKM5s0TnsY?si=0miEj-o17AHcGfLG)
+- ⛓ [How to Run `LLaMA-2-70B` on the `Together AI`](https://youtu.be/Tc2DHfzHeYE?si=Xku3S9dlBxWQukpe)
+- ⛓ [`RetrievalQA` with `LLaMA 2 70b` & `Chroma` DB](https://youtu.be/93yueQQnqpM?si=ZMwj-eS_CGLnNMXZ)
+- ⛓ [How to use `BGE Embeddings` for LangChain](https://youtu.be/sWRvSG7vL4g?si=85jnvnmTCF9YIWXI)
+- ⛓ [How to use Custom Prompts for `RetrievalQA` on `LLaMA-2 7B`](https://youtu.be/PDwUKves9GY?si=sMF99TWU0p4eiK80)

 ### [LangChain](https://www.youtube.com/playlist?list=PLVEEucA9MYhOu89CX8H3MBZqayTbcCTMr) by [Prompt Engineering](https://www.youtube.com/@engineerprompt)
@@ -135,8 +131,8 @@ Below are links to tutorials and courses on LangChain. For written guides on com
 - [LangChain: Giving Memory to LLMs](https://youtu.be/dxO6pzlgJiY)
 - [BEST OPEN Alternative to `OPENAI's EMBEDDINGs` for Retrieval QA: LangChain](https://youtu.be/ogEalPMUCSY)
 - [LangChain: Run Language Models Locally - `Hugging Face Models`](https://youtu.be/Xxxuw4_iCzw)
-- [Slash API Costs: Mastering Caching for LLM Applications](https://youtu.be/EQOznhaJWR0?si=AXoI7f3-SVFRvQUl)
-- [Avoid PROMPT INJECTION with `Constitutional AI` - LangChain](https://youtu.be/tyKSkPFHVX8?si=9mgcB5Y1kkotkBGB)
+- ⛓ [Slash API Costs: Mastering Caching for LLM Applications](https://youtu.be/EQOznhaJWR0?si=AXoI7f3-SVFRvQUl)
+- ⛓ [Avoid PROMPT INJECTION with `Constitutional AI` - LangChain](https://youtu.be/tyKSkPFHVX8?si=9mgcB5Y1kkotkBGB)

 ### LangChain by [Chat with data](https://www.youtube.com/@chatwithdata)
@@ -152,4 +148,4 @@ Below are links to tutorials and courses on LangChain. For written guides on com

 ---------------------
-⛓ icon marks a new addition [last update 2024-02-061]
+⛓ icon marks a new addition [last update 2023-09-21]

@@ -120,8 +120,6 @@
 - ⛓ [Use ANY language in `LangSmith` with REST](https://youtu.be/7BL0GEdMmgY?si=iXfOEdBLqXF6hqRM) by [Nerding I/O](https://www.youtube.com/@nerding_io)
 - ⛓ [How to Leverage the Full Potential of LLMs for Your Business with Langchain - Leon Ruddat](https://youtu.be/vZmoEa7oWMg?si=ZhMmydq7RtkZd56Q) by [PyData](https://www.youtube.com/@PyDataTV)
 - ⛓ [`ChatCSV` App: Chat with CSV files using LangChain and `Llama 2`](https://youtu.be/PvsMg6jFs8E?si=Qzg5u5gijxj933Ya) by [Muhammad Moin](https://www.youtube.com/@muhammadmoinfaisal)
-- ⛓ [Build Chat PDF app in Python with LangChain, OpenAI, Streamlit | Full project | Learn Coding](https://www.youtube.com/watch?v=WYzFzZg4YZI) by [Jutsupoint](https://www.youtube.com/@JutsuPoint)
-- ⛓ [Build Eminem Bot App with LangChain, Streamlit, OpenAI | Full Python Project | Tutorial | AI ChatBot](https://www.youtube.com/watch?v=a2shHB4MRZ4) by [Jutsupoint](https://www.youtube.com/@JutsuPoint)

 ### [Prompt Engineering and LangChain](https://www.youtube.com/watch?v=muXbPpG_ys4&list=PLEJK-H61Xlwzm5FYLDdKt_6yibO33zoMW) by [Venelin Valkov](https://www.youtube.com/@venelin_valkov)
@@ -134,4 +132,4 @@

 ---------------------
-⛓ icon marks a new addition [last update 2024-02-04]
+⛓ icon marks a new addition [last update 2023-09-21]

[file headers missing in the mirror — the remaining hunks are notebook diffs from the docs]

@@ -7,7 +7,7 @@
    "source": [
     "# Add message history (memory)\n",
     "\n",
-    "The `RunnableWithMessageHistory` let us add message history to certain types of chains.\n",
+    "The `RunnableWithMessageHistory` let's us add message history to certain types of chains.\n",
     "\n",
     "Specifically, it can be used for any Runnable that takes as input one of\n",
     "\n",
@@ -66,8 +66,6 @@
    }
   ],
   "source": [
-    "# Showing the example using anthropic, but you can use\n",
-    "# your favorite chat model!\n",
    "from langchain.chat_models import ChatAnthropic\n",
    "\n",
    "model = ChatAnthropic()\n",
@@ -166,9 +164,9 @@
    "name": "stdout",
    "output_type": "stream",
    "text": [
-    " Here|'s| a| silly| joke| about| a| par|rot|:|\n",
+    " Sure|,| here|'s| a| funny| joke| about| a| par|rot|:|\n",
     "\n",
-    "What| kind| of| teacher| gives| good| advice|?| An| ap|-|parent| (|app|arent|)| one|!||"
+    "Why| doesn|'t| a| par|rot| ever| get| hungry| at| night|?| Because| it| has| a| light| snack| before| bed|!||"
    ]
   }
  ],
@@ -230,34 +228,40 @@
    "{'countries': [{}]}\n",
    "{'countries': [{'name': ''}]}\n",
    "{'countries': [{'name': 'France'}]}\n",
-    "{'countries': [{'name': 'France', 'population': 67}]}\n",
-    "{'countries': [{'name': 'France', 'population': 6739}]}\n",
-    "{'countries': [{'name': 'France', 'population': 673915}]}\n",
-    "{'countries': [{'name': 'France', 'population': 67391582}]}\n",
-    "{'countries': [{'name': 'France', 'population': 67391582}, {}]}\n",
-    "{'countries': [{'name': 'France', 'population': 67391582}, {'name': ''}]}\n",
-    "{'countries': [{'name': 'France', 'population': 67391582}, {'name': 'Sp'}]}\n",
-    "{'countries': [{'name': 'France', 'population': 67391582}, {'name': 'Spain'}]}\n",
-    "{'countries': [{'name': 'France', 'population': 67391582}, {'name': 'Spain', 'population': 46}]}\n",
-    "{'countries': [{'name': 'France', 'population': 67391582}, {'name': 'Spain', 'population': 4675}]}\n",
-    "{'countries': [{'name': 'France', 'population': 67391582}, {'name': 'Spain', 'population': 467547}]}\n",
-    "{'countries': [{'name': 'France', 'population': 67391582}, {'name': 'Spain', 'population': 46754778}]}\n",
-    "{'countries': [{'name': 'France', 'population': 67391582}, {'name': 'Spain', 'population': 46754778}, {}]}\n",
-    "{'countries': [{'name': 'France', 'population': 67391582}, {'name': 'Spain', 'population': 46754778}, {'name': ''}]}\n",
-    "{'countries': [{'name': 'France', 'population': 67391582}, {'name': 'Spain', 'population': 46754778}, {'name': 'Japan'}]}\n",
-    "{'countries': [{'name': 'France', 'population': 67391582}, {'name': 'Spain', 'population': 46754778}, {'name': 'Japan', 'population': 12}]}\n",
-    "{'countries': [{'name': 'France', 'population': 67391582}, {'name': 'Spain', 'population': 46754778}, {'name': 'Japan', 'population': 12647}]}\n",
-    "{'countries': [{'name': 'France', 'population': 67391582}, {'name': 'Spain', 'population': 46754778}, {'name': 'Japan', 'population': 1264764}]}\n",
-    "{'countries': [{'name': 'France', 'population': 67391582}, {'name': 'Spain', 'population': 46754778}, {'name': 'Japan', 'population': 126476461}]}\n"
+    "{'countries': [{'name': 'France', 'population': ''}]}\n",
+    "{'countries': [{'name': 'France', 'population': '67'}]}\n",
+    "{'countries': [{'name': 'France', 'population': '67,'}]}\n",
+    "{'countries': [{'name': 'France', 'population': '67,022'}]}\n",
+    "{'countries': [{'name': 'France', 'population': '67,022,'}]}\n",
+    "{'countries': [{'name': 'France', 'population': '67,022,000'}]}\n",
+    "{'countries': [{'name': 'France', 'population': '67,022,000'}, {}]}\n",
+    "{'countries': [{'name': 'France', 'population': '67,022,000'}, {'name': ''}]}\n",
+    "{'countries': [{'name': 'France', 'population': '67,022,000'}, {'name': 'Spain'}]}\n",
+    "{'countries': [{'name': 'France', 'population': '67,022,000'}, {'name': 'Spain', 'population': ''}]}\n",
+    "{'countries': [{'name': 'France', 'population': '67,022,000'}, {'name': 'Spain', 'population': '46'}]}\n",
+    "{'countries': [{'name': 'France', 'population': '67,022,000'}, {'name': 'Spain', 'population': '46,'}]}\n",
+    "{'countries': [{'name': 'France', 'population': '67,022,000'}, {'name': 'Spain', 'population': '46,754'}]}\n",
+    "{'countries': [{'name': 'France', 'population': '67,022,000'}, {'name': 'Spain', 'population': '46,754,'}]}\n",
+    "{'countries': [{'name': 'France', 'population': '67,022,000'}, {'name': 'Spain', 'population': '46,754,784'}]}\n",
+    "{'countries': [{'name': 'France', 'population': '67,022,000'}, {'name': 'Spain', 'population': '46,754,784'}, {}]}\n",
+    "{'countries': [{'name': 'France', 'population': '67,022,000'}, {'name': 'Spain', 'population': '46,754,784'}, {'name': ''}]}\n",
+    "{'countries': [{'name': 'France', 'population': '67,022,000'}, {'name': 'Spain', 'population': '46,754,784'}, {'name': 'Japan'}]}\n",
+    "{'countries': [{'name': 'France', 'population': '67,022,000'}, {'name': 'Spain', 'population': '46,754,784'}, {'name': 'Japan', 'population': ''}]}\n",
+    "{'countries': [{'name': 'France', 'population': '67,022,000'}, {'name': 'Spain', 'population': '46,754,784'}, {'name': 'Japan', 'population': '126'}]}\n",
+    "{'countries': [{'name': 'France', 'population': '67,022,000'}, {'name': 'Spain', 'population': '46,754,784'}, {'name': 'Japan', 'population': '126,'}]}\n",
+    "{'countries': [{'name': 'France', 'population': '67,022,000'}, {'name': 'Spain', 'population': '46,754,784'}, {'name': 'Japan', 'population': '126,860'}]}\n",
+    "{'countries': [{'name': 'France', 'population': '67,022,000'}, {'name': 'Spain', 'population': '46,754,784'}, {'name': 'Japan', 'population': '126,860,'}]}\n",
+    "{'countries': [{'name': 'France', 'population': '67,022,000'}, {'name': 'Spain', 'population': '46,754,784'}, {'name': 'Japan', 'population': '126,860,301'}]}\n"
    ]
   }
  ],
  "source": [
    "from langchain_core.output_parsers import JsonOutputParser\n",
+    "from langchain_openai.chat_models import ChatOpenAI\n",
    "\n",
-    "chain = (\n",
-    "    model | JsonOutputParser()\n",
-    ") # Due to a bug in older versions of Langchain, JsonOutputParser did not stream results from some models\n",
+    "model = ChatOpenAI()\n",
+    "\n",
+    "chain = model | JsonOutputParser() # This parser only works with OpenAI right now\n",
    "async for text in chain.astream(\n",
    "    'output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key `name` and `population`'\n",
    "):\n",
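On both sides of this hunk, the output shows `JsonOutputParser` re-parsing the partial JSON after every token and yielding a best-effort object each time. A dependency-free sketch of that idea — auto-complete the growing prefix, then re-parse (an illustration only, not LangChain's actual implementation):

```python
import json

def parse_partial(buffer: str):
    """Best-effort parse of a JSON prefix by trying a few closing suffixes."""
    for suffix in ("", '"', '"}', '"}]', "}", "}]", "]}", '"}]}', "}]}"):
        try:
            return json.loads(buffer + suffix)
        except json.JSONDecodeError:
            continue
    return None  # prefix not yet completable (e.g. mid-key)

# Chunks as a model might emit them (made-up split points):
chunks = ['{"countries": [{"na', 'me": "Fra', 'nce", "population": 67}', ']}']
buffer = ""
for chunk in chunks:
    buffer += chunk
    state = parse_partial(buffer)
    if state is not None:
        print(state)
# {'countries': [{'name': 'Fra'}]}   <- partial name, like 'Sp' in the output above
# {'countries': [{'name': 'France', 'population': 67}]}
# {'countries': [{'name': 'France', 'population': 67}]}
```

The auto-completion is also why partial values such as `Sp` or `67,` surface mid-stream — exactly the caveat made by the callout cell removed further down.
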
@@ -290,14 +294,12 @@
    "name": "stdout",
    "output_type": "stream",
    "text": [
-    "['France', 'Spain', 'Japan']|"
+    "[None, None, None]|"
    ]
   }
  ],
  "source": [
-    "from langchain_core.output_parsers import (\n",
-    "    JsonOutputParser,\n",
-    ")\n",
+    "from langchain_core.output_parsers import JsonOutputParser\n",
    "\n",
    "\n",
    "# A function that operates on finalized inputs\n",
@@ -324,7 +326,7 @@
    "chain = model | JsonOutputParser() | _extract_country_names\n",
    "\n",
    "async for text in chain.astream(\n",
-    "    'output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key `name` and `population`'\n",
+    "    'output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\"'\n",
    "):\n",
    "    print(text, end=\"|\", flush=True)"
   ]
@@ -346,7 +348,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 17,
+   "execution_count": 7,
    "id": "15984b2b-315a-4119-945b-2a3dabea3082",
    "metadata": {},
    "outputs": [
@@ -354,7 +356,7 @@
    "name": "stdout",
    "output_type": "stream",
    "text": [
-    "France|Sp|Spain|Japan|"
+    "France|Spain|Japan|"
    ]
   }
  ],
@@ -390,23 +392,11 @@
    "chain = model | JsonOutputParser() | _extract_country_names_streaming\n",
    "\n",
    "async for text in chain.astream(\n",
-    "    'output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key `name` and `population`'\n",
+    "    'output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\"'\n",
    "):\n",
    "    print(text, end=\"|\", flush=True)"
   ]
  },
- {
-  "cell_type": "markdown",
-  "id": "d59823f5-9b9a-43c5-a213-34644e2f1d3d",
-  "metadata": {},
-  "source": [
-   ":::{.callout-note}\n",
-   "Because the code above is relying on JSON auto-completion, you may see partial names of countries (e.g., `Sp` and `Spain`), which is not what one would want for an extraction result!\n",
-   "\n",
-   "We're focusing on streaming concepts, not necessarily the results of the chains.\n",
-   ":::"
-  ]
- },
 {
  "cell_type": "markdown",
  "id": "6adf65b7-aa47-4321-98c7-a0abe43b833a",
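The `_extract_country_names` / `_extract_country_names_streaming` contrast in these cells is about where a step consumes its input. A pure-Python sketch of the distinction (stand-in data, no LangChain required):

```python
# A step that needs the FINAL value (like _extract_country_names) can only
# emit once the stream is done; a generator-style step (like
# _extract_country_names_streaming) transforms partial states as they arrive.
import asyncio

async def source():
    for state in ({"countries": []},
                  {"countries": [{"name": "Fra"}]},
                  {"countries": [{"name": "France"}, {"name": "Spain"}]}):
        yield state

async def finalized(stream):
    last = None
    async for state in stream:   # must drain the whole stream first
        last = state
    yield [c.get("name") for c in last["countries"]]

async def streaming(stream):
    async for state in stream:   # can emit on every partial state
        yield [c.get("name") for c in state["countries"]]

async def main():
    async for out in finalized(source()):
        print("finalized:", out)   # one result, at the very end
    async for out in streaming(source()):
        print("streaming:", out)   # a result per partial state

asyncio.run(main())
```
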
@@ -419,7 +409,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 7,
+   "execution_count": 8,
    "id": "b9b1c00d-8b44-40d0-9e2b-8a70d238f82b",
    "metadata": {},
    "outputs": [
@@ -430,7 +420,7 @@
    " Document(page_content='harrison likes spicy food')]]"
    ]
   },
-   "execution_count": 7,
+   "execution_count": 8,
    "metadata": {},
    "output_type": "execute_result"
   }
@@ -475,7 +465,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 8,
+   "execution_count": 9,
    "id": "957447e6-1e60-41ef-8c10-2654bd9e738d",
    "metadata": {},
    "outputs": [],
@@ -493,7 +483,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 9,
+   "execution_count": 10,
    "id": "94e50b5d-bf51-4eee-9da0-ee40dd9ce42b",
    "metadata": {},
    "outputs": [
@@ -501,7 +491,9 @@
    "name": "stdout",
    "output_type": "stream",
    "text": [
-    " Based| on| the| given| context|,| the| only| information| provided| about| where| Harrison| worked| is| that| he| worked| at| Ken|sh|o|.| Since| there| are| no| other| details| provided| about| Ken|sh|o|,| I| do| not| have| enough| information| to| write| 3| additional| made| up| sentences| about| this| place|.| I| can| only| state| that| Harrison| worked| at| Ken|sh|o|.||"
+    "|H|arrison| worked| at| Kens|ho|,| a| renowned| technology| company| known| for| revolution|izing| the| artificial| intelligence| industry|.\n",
+    "|K|ens|ho|,| located| in| the| heart| of| Silicon| Valley|,| is| famous| for| its| cutting|-edge| research| and| development| in| machine| learning|.\n",
+    "|With| its| state|-of|-the|-art| facilities| and| talented| team|,| Kens|ho| has| become| a| hub| for| innovation| and| a| sought|-after| workplace| for| tech| enthusiasts| like| Harrison|.||"
    ]
   }
  ],
@@ -536,17 +528,17 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 10,
+   "execution_count": 11,
    "id": "61348df9-ec58-401e-be89-68a70042f88e",
    "metadata": {},
    "outputs": [
    {
     "data": {
      "text/plain": [
-      "'0.1.18'"
+      "'0.1.14'"
      ]
     },
-    "execution_count": 10,
+    "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
@@ -612,7 +604,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 11,
+   "execution_count": 12,
    "id": "c00df46e-7f6b-4e06-8abf-801898c8d57f",
    "metadata": {},
    "outputs": [
@@ -620,7 +612,7 @@
    "name": "stderr",
    "output_type": "stream",
    "text": [
-    "/home/eugene/src/langchain/libs/core/langchain_core/_api/beta_decorator.py:86: LangChainBetaWarning: This API is in beta and may change in the future.\n",
+    "/home/eugene/.pyenv/versions/3.11.4/envs/langchain_3_11_4/lib/python3.11/site-packages/langchain_core/_api/beta_decorator.py:86: LangChainBetaWarning: This API is in beta and may change in the future.\n",
    "  warn_beta(\n"
    ]
   }
@@ -658,7 +650,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 12,
+   "execution_count": 13,
    "id": "ce31b525-f47d-4828-85a7-912ce9f2e79b",
    "metadata": {},
    "outputs": [
@@ -666,26 +658,26 @@
    "data": {
     "text/plain": [
      "[{'event': 'on_chat_model_start',\n",
-      " 'run_id': '555843ed-3d24-4774-af25-fbf030d5e8c4',\n",
-      " 'name': 'ChatAnthropic',\n",
+      " 'run_id': 'd78b4ffb-0eb1-499c-8a90-8e4a4aa2edae',\n",
+      " 'name': 'ChatOpenAI',\n",
      " 'tags': [],\n",
      " 'metadata': {},\n",
      " 'data': {'input': 'hello'}},\n",
      " {'event': 'on_chat_model_stream',\n",
-      " 'run_id': '555843ed-3d24-4774-af25-fbf030d5e8c4',\n",
+      " 'run_id': 'd78b4ffb-0eb1-499c-8a90-8e4a4aa2edae',\n",
      " 'tags': [],\n",
      " 'metadata': {},\n",
-      " 'name': 'ChatAnthropic',\n",
-      " 'data': {'chunk': AIMessageChunk(content=' Hello')}},\n",
+      " 'name': 'ChatOpenAI',\n",
+      " 'data': {'chunk': AIMessageChunk(content='')}},\n",
      " {'event': 'on_chat_model_stream',\n",
-      " 'run_id': '555843ed-3d24-4774-af25-fbf030d5e8c4',\n",
+      " 'run_id': 'd78b4ffb-0eb1-499c-8a90-8e4a4aa2edae',\n",
      " 'tags': [],\n",
      " 'metadata': {},\n",
-      " 'name': 'ChatAnthropic',\n",
-      " 'data': {'chunk': AIMessageChunk(content='!')}}]"
+      " 'name': 'ChatOpenAI',\n",
+      " 'data': {'chunk': AIMessageChunk(content='Hello')}}]"
     ]
    },
-   "execution_count": 12,
+   "execution_count": 13,
    "metadata": {},
    "output_type": "execute_result"
   }
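The event dumps in these hunks come from the beta `astream_events` API (hence the `LangChainBetaWarning` above). A minimal local sketch, assuming a `langchain-core` with this API installed; a `RunnableLambda` stands in for the chat model so no API key is needed:

```python
import asyncio

from langchain_core.runnables import RunnableLambda


async def main() -> None:
    chain = RunnableLambda(lambda x: x + 1).with_config(run_name="add_one")
    # Each event is a dict with 'event', 'name', 'run_id', 'tags', 'data', ...
    async for event in chain.astream_events(1, version="v1"):
        print(event["event"], event["name"], event["data"])


asyncio.run(main())
# Expected shape: on_chain_start / on_chain_stream / on_chain_end for "add_one".
```
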
@@ -696,7 +688,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 13,
+   "execution_count": 14,
    "id": "76cfe826-ee63-4310-ad48-55a95eb3b9d6",
    "metadata": {},
    "outputs": [
@@ -704,20 +696,20 @@
    "data": {
     "text/plain": [
      "[{'event': 'on_chat_model_stream',\n",
-      " 'run_id': '555843ed-3d24-4774-af25-fbf030d5e8c4',\n",
+      " 'run_id': 'd78b4ffb-0eb1-499c-8a90-8e4a4aa2edae',\n",
      " 'tags': [],\n",
      " 'metadata': {},\n",
-      " 'name': 'ChatAnthropic',\n",
+      " 'name': 'ChatOpenAI',\n",
      " 'data': {'chunk': AIMessageChunk(content='')}},\n",
      " {'event': 'on_chat_model_end',\n",
-      " 'name': 'ChatAnthropic',\n",
-      " 'run_id': '555843ed-3d24-4774-af25-fbf030d5e8c4',\n",
+      " 'name': 'ChatOpenAI',\n",
+      " 'run_id': 'd78b4ffb-0eb1-499c-8a90-8e4a4aa2edae',\n",
      " 'tags': [],\n",
      " 'metadata': {},\n",
-      " 'data': {'output': AIMessageChunk(content=' Hello!')}}]"
+      " 'data': {'output': AIMessageChunk(content='Hello! How can I assist you today?')}}]"
     ]
    },
-   "execution_count": 13,
+   "execution_count": 14,
    "metadata": {},
    "output_type": "execute_result"
   }
@@ -738,14 +730,12 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 27,
+   "execution_count": 15,
    "id": "4328c56c-a303-427b-b1f2-f354e9af555c",
    "metadata": {},
    "outputs": [],
    "source": [
-    "chain = (\n",
-    "    model | JsonOutputParser()\n",
-    ") # Due to a bug in older versions of Langchain, JsonOutputParser did not stream results from some models\n",
+    "chain = model | JsonOutputParser() # This parser only works with OpenAI right now\n",
    "\n",
    "events = [\n",
    "    event\n",
@@ -772,7 +762,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 15,
+   "execution_count": 16,
    "id": "8e66ea3d-a450-436a-aaac-d9478abc6c28",
    "metadata": {},
    "outputs": [
@@ -780,26 +770,26 @@
    "data": {
     "text/plain": [
      "[{'event': 'on_chain_start',\n",
-      " 'run_id': 'b1074bff-2a17-458b-9e7b-625211710df4',\n",
+      " 'run_id': 'aa992fb9-d79f-46f3-a857-ae4acad841c4',\n",
      " 'name': 'RunnableSequence',\n",
      " 'tags': [],\n",
      " 'metadata': {},\n",
      " 'data': {'input': 'output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key `name` and `population`'}},\n",
      " {'event': 'on_chat_model_start',\n",
-      " 'name': 'ChatAnthropic',\n",
-      " 'run_id': '6072be59-1f43-4f1c-9470-3b92e8406a99',\n",
+      " 'name': 'ChatOpenAI',\n",
+      " 'run_id': 'c5406de5-0880-4829-ae26-bb565b404e27',\n",
      " 'tags': ['seq:step:1'],\n",
      " 'metadata': {},\n",
      " 'data': {'input': {'messages': [[HumanMessage(content='output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key `name` and `population`')]]}}},\n",
      " {'event': 'on_parser_start',\n",
      " 'name': 'JsonOutputParser',\n",
-      " 'run_id': 'bf978194-0eda-4494-ad15-3a5bfe69cd59',\n",
+      " 'run_id': '32b47794-8fb6-4ef4-8800-23ed6c3f4519',\n",
      " 'tags': ['seq:step:2'],\n",
      " 'metadata': {},\n",
      " 'data': {}}]"
     ]
    },
-   "execution_count": 15,
+   "execution_count": 16,
    "metadata": {},
    "output_type": "execute_result"
   }
@@ -826,7 +816,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 16,
+   "execution_count": 17,
    "id": "630c71d6-8d94-4ce0-a78a-f20e90f628df",
    "metadata": {},
    "outputs": [
@@ -834,31 +824,29 @@ (old/new chunk sequences interleaved in the mirror; -/+ not recoverable line by line)
    "name": "stdout",
    "output_type": "stream",
    "text": [
    "Chat model chunk: ' Here'\n",
    "Chat model chunk: ' is'\n",
    "Chat model chunk: ' the'\n",
    "Chat model chunk: ' JSON'\n",
    "Chat model chunk: ' with'\n",
    "Chat model chunk: ' the'\n",
    "Chat model chunk: ' requested'\n",
    "Chat model chunk: ' countries'\n",
    "Chat model chunk: ' and'\n",
    "Chat model chunk: ' their'\n",
    "Chat model chunk: ' populations'\n",
    "Chat model chunk: ':'\n",
    "Chat model chunk: '\\n\\n```'\n",
    "Chat model chunk: 'json'\n",
    "Chat model chunk: ''\n",
    "Parser chunk: {}\n",
    "Chat model chunk: '\\n{'\n",
    "Chat model chunk: '\\n '\n",
    "Chat model chunk: '{\\n'\n",
    "Chat model chunk: ' '\n",
    "Chat model chunk: ' \"'\n",
    "Chat model chunk: 'countries'\n",
    "Chat model chunk: '\":'\n",
    "Parser chunk: {'countries': []}\n",
    "Chat model chunk: ' ['\n",
    "Chat model chunk: '\\n '\n",
    "Chat model chunk: ' [\\n'\n",
    "Chat model chunk: ' '\n",
    "Parser chunk: {'countries': [{}]}\n",
    "Chat model chunk: ' {'\n",
    "Chat model chunk: ' {\\n'\n",
    "Chat model chunk: ' '\n",
    "Chat model chunk: ' \"'\n",
    "Chat model chunk: 'name'\n",
    "Chat model chunk: '\":'\n",
    "Parser chunk: {'countries': [{'name': ''}]}\n",
    "Chat model chunk: ' \"'\n",
    "Parser chunk: {'countries': [{'name': 'France'}]}\n",
    "Chat model chunk: 'France'\n",
    "Chat model chunk: '\",\\n'\n",
    "Chat model chunk: ' '\n",
    "Chat model chunk: ' \"'\n",
    "...\n"
    ]
   }
@@ -909,7 +897,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 17,
+   "execution_count": 18,
    "id": "4f0b581b-be63-4663-baba-c6d2b625cdf9",
    "metadata": {},
    "outputs": [
@@ -917,17 +905,17 @@
    "name": "stdout",
    "output_type": "stream",
    "text": [
-    "{'event': 'on_parser_start', 'name': 'my_parser', 'run_id': 'f2ac1d1c-e14a-45fc-8990-e5c24e707299', 'tags': ['seq:step:2'], 'metadata': {}, 'data': {}}\n",
-    "{'event': 'on_parser_stream', 'name': 'my_parser', 'run_id': 'f2ac1d1c-e14a-45fc-8990-e5c24e707299', 'tags': ['seq:step:2'], 'metadata': {}, 'data': {'chunk': {}}}\n",
-    "{'event': 'on_parser_stream', 'name': 'my_parser', 'run_id': 'f2ac1d1c-e14a-45fc-8990-e5c24e707299', 'tags': ['seq:step:2'], 'metadata': {}, 'data': {'chunk': {'countries': []}}}\n",
-    "{'event': 'on_parser_stream', 'name': 'my_parser', 'run_id': 'f2ac1d1c-e14a-45fc-8990-e5c24e707299', 'tags': ['seq:step:2'], 'metadata': {}, 'data': {'chunk': {'countries': [{}]}}}\n",
-    "{'event': 'on_parser_stream', 'name': 'my_parser', 'run_id': 'f2ac1d1c-e14a-45fc-8990-e5c24e707299', 'tags': ['seq:step:2'], 'metadata': {}, 'data': {'chunk': {'countries': [{'name': ''}]}}}\n",
-    "{'event': 'on_parser_stream', 'name': 'my_parser', 'run_id': 'f2ac1d1c-e14a-45fc-8990-e5c24e707299', 'tags': ['seq:step:2'], 'metadata': {}, 'data': {'chunk': {'countries': [{'name': 'France'}]}}}\n",
-    "{'event': 'on_parser_stream', 'name': 'my_parser', 'run_id': 'f2ac1d1c-e14a-45fc-8990-e5c24e707299', 'tags': ['seq:step:2'], 'metadata': {}, 'data': {'chunk': {'countries': [{'name': 'France', 'population': 67}]}}}\n",
-    "{'event': 'on_parser_stream', 'name': 'my_parser', 'run_id': 'f2ac1d1c-e14a-45fc-8990-e5c24e707299', 'tags': ['seq:step:2'], 'metadata': {}, 'data': {'chunk': {'countries': [{'name': 'France', 'population': 6739}]}}}\n",
-    "{'event': 'on_parser_stream', 'name': 'my_parser', 'run_id': 'f2ac1d1c-e14a-45fc-8990-e5c24e707299', 'tags': ['seq:step:2'], 'metadata': {}, 'data': {'chunk': {'countries': [{'name': 'France', 'population': 673915}]}}}\n",
-    "{'event': 'on_parser_stream', 'name': 'my_parser', 'run_id': 'f2ac1d1c-e14a-45fc-8990-e5c24e707299', 'tags': ['seq:step:2'], 'metadata': {}, 'data': {'chunk': {'countries': [{'name': 'France', 'population': 67391582}]}}}\n",
-    "{'event': 'on_parser_stream', 'name': 'my_parser', 'run_id': 'f2ac1d1c-e14a-45fc-8990-e5c24e707299', 'tags': ['seq:step:2'], 'metadata': {}, 'data': {'chunk': {'countries': [{'name': 'France', 'population': 67391582}, {}]}}}\n",
+    "{'event': 'on_parser_start', 'name': 'my_parser', 'run_id': '450011c0-6f3b-4ec8-92d4-6603d9d1d603', 'tags': ['seq:step:2'], 'metadata': {}, 'data': {}}\n",
+    "{'event': 'on_parser_stream', 'name': 'my_parser', 'run_id': '450011c0-6f3b-4ec8-92d4-6603d9d1d603', 'tags': ['seq:step:2'], 'metadata': {}, 'data': {'chunk': {}}}\n",
+    "{'event': 'on_parser_stream', 'name': 'my_parser', 'run_id': '450011c0-6f3b-4ec8-92d4-6603d9d1d603', 'tags': ['seq:step:2'], 'metadata': {}, 'data': {'chunk': {'countries': []}}}\n",
+    "{'event': 'on_parser_stream', 'name': 'my_parser', 'run_id': '450011c0-6f3b-4ec8-92d4-6603d9d1d603', 'tags': ['seq:step:2'], 'metadata': {}, 'data': {'chunk': {'countries': [{}]}}}\n",
+    "{'event': 'on_parser_stream', 'name': 'my_parser', 'run_id': '450011c0-6f3b-4ec8-92d4-6603d9d1d603', 'tags': ['seq:step:2'], 'metadata': {}, 'data': {'chunk': {'countries': [{'name': ''}]}}}\n",
+    "{'event': 'on_parser_stream', 'name': 'my_parser', 'run_id': '450011c0-6f3b-4ec8-92d4-6603d9d1d603', 'tags': ['seq:step:2'], 'metadata': {}, 'data': {'chunk': {'countries': [{'name': 'France'}]}}}\n",
+    "{'event': 'on_parser_stream', 'name': 'my_parser', 'run_id': '450011c0-6f3b-4ec8-92d4-6603d9d1d603', 'tags': ['seq:step:2'], 'metadata': {}, 'data': {'chunk': {'countries': [{'name': 'France', 'population': 670}]}}}\n",
+    "{'event': 'on_parser_stream', 'name': 'my_parser', 'run_id': '450011c0-6f3b-4ec8-92d4-6603d9d1d603', 'tags': ['seq:step:2'], 'metadata': {}, 'data': {'chunk': {'countries': [{'name': 'France', 'population': 670600}]}}}\n",
+    "{'event': 'on_parser_stream', 'name': 'my_parser', 'run_id': '450011c0-6f3b-4ec8-92d4-6603d9d1d603', 'tags': ['seq:step:2'], 'metadata': {}, 'data': {'chunk': {'countries': [{'name': 'France', 'population': 67060000}]}}}\n",
+    "{'event': 'on_parser_stream', 'name': 'my_parser', 'run_id': '450011c0-6f3b-4ec8-92d4-6603d9d1d603', 'tags': ['seq:step:2'], 'metadata': {}, 'data': {'chunk': {'countries': [{'name': 'France', 'population': 67060000}, {}]}}}\n",
+    "{'event': 'on_parser_stream', 'name': 'my_parser', 'run_id': '450011c0-6f3b-4ec8-92d4-6603d9d1d603', 'tags': ['seq:step:2'], 'metadata': {}, 'data': {'chunk': {'countries': [{'name': 'France', 'population': 67060000}, {'name': ''}]}}}\n",
    "...\n"
    ]
   }
@@ -961,7 +949,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 18,
+   "execution_count": 19,
    "id": "096cd904-72f0-4ebe-a8b7-d0e730faea7f",
    "metadata": {},
    "outputs": [
@@ -969,17 +957,17 @@
    "name": "stdout",
    "output_type": "stream",
    "text": [
-    "{'event': 'on_chat_model_start', 'name': 'model', 'run_id': '98a6e192-8159-460c-ba73-6dfc921e3777', 'tags': ['seq:step:1'], 'metadata': {}, 'data': {'input': {'messages': [[HumanMessage(content='output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key `name` and `population`')]]}}}\n",
-    "{'event': 'on_chat_model_stream', 'name': 'model', 'run_id': '98a6e192-8159-460c-ba73-6dfc921e3777', 'tags': ['seq:step:1'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content=' Here')}}\n",
-    "{'event': 'on_chat_model_stream', 'name': 'model', 'run_id': '98a6e192-8159-460c-ba73-6dfc921e3777', 'tags': ['seq:step:1'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content=' is')}}\n",
-    "{'event': 'on_chat_model_stream', 'name': 'model', 'run_id': '98a6e192-8159-460c-ba73-6dfc921e3777', 'tags': ['seq:step:1'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content=' the')}}\n",
-    "{'event': 'on_chat_model_stream', 'name': 'model', 'run_id': '98a6e192-8159-460c-ba73-6dfc921e3777', 'tags': ['seq:step:1'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content=' JSON')}}\n",
-    "{'event': 'on_chat_model_stream', 'name': 'model', 'run_id': '98a6e192-8159-460c-ba73-6dfc921e3777', 'tags': ['seq:step:1'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content=' with')}}\n",
-    "{'event': 'on_chat_model_stream', 'name': 'model', 'run_id': '98a6e192-8159-460c-ba73-6dfc921e3777', 'tags': ['seq:step:1'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content=' the')}}\n",
-    "{'event': 'on_chat_model_stream', 'name': 'model', 'run_id': '98a6e192-8159-460c-ba73-6dfc921e3777', 'tags': ['seq:step:1'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content=' requested')}}\n",
-    "{'event': 'on_chat_model_stream', 'name': 'model', 'run_id': '98a6e192-8159-460c-ba73-6dfc921e3777', 'tags': ['seq:step:1'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content=' countries')}}\n",
-    "{'event': 'on_chat_model_stream', 'name': 'model', 'run_id': '98a6e192-8159-460c-ba73-6dfc921e3777', 'tags': ['seq:step:1'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content=' and')}}\n",
-    "{'event': 'on_chat_model_stream', 'name': 'model', 'run_id': '98a6e192-8159-460c-ba73-6dfc921e3777', 'tags': ['seq:step:1'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content=' their')}}\n",
+    "{'event': 'on_chat_model_start', 'name': 'model', 'run_id': '9ba1ef9f-5954-4649-b3da-1171b6abb000', 'tags': ['seq:step:1'], 'metadata': {}, 'data': {'input': {'messages': [[HumanMessage(content='output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key `name` and `population`')]]}}}\n",
+    "{'event': 'on_chat_model_stream', 'name': 'model', 'run_id': '9ba1ef9f-5954-4649-b3da-1171b6abb000', 'tags': ['seq:step:1'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content='')}}\n",
+    "{'event': 'on_chat_model_stream', 'name': 'model', 'run_id': '9ba1ef9f-5954-4649-b3da-1171b6abb000', 'tags': ['seq:step:1'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content='{\\n')}}\n",
+    "{'event': 'on_chat_model_stream', 'name': 'model', 'run_id': '9ba1ef9f-5954-4649-b3da-1171b6abb000', 'tags': ['seq:step:1'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content=' ')}}\n",
+    "{'event': 'on_chat_model_stream', 'name': 'model', 'run_id': '9ba1ef9f-5954-4649-b3da-1171b6abb000', 'tags': ['seq:step:1'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content=' \"')}}\n",
+    "{'event': 'on_chat_model_stream', 'name': 'model', 'run_id': '9ba1ef9f-5954-4649-b3da-1171b6abb000', 'tags': ['seq:step:1'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content='countries')}}\n",
+    "{'event': 'on_chat_model_stream', 'name': 'model', 'run_id': '9ba1ef9f-5954-4649-b3da-1171b6abb000', 'tags': ['seq:step:1'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content='\":')}}\n",
+    "{'event': 'on_chat_model_stream', 'name': 'model', 'run_id': '9ba1ef9f-5954-4649-b3da-1171b6abb000', 'tags': ['seq:step:1'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content=' [\\n')}}\n",
+    "{'event': 'on_chat_model_stream', 'name': 'model', 'run_id': '9ba1ef9f-5954-4649-b3da-1171b6abb000', 'tags': ['seq:step:1'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content=' ')}}\n",
+    "{'event': 'on_chat_model_stream', 'name': 'model', 'run_id': '9ba1ef9f-5954-4649-b3da-1171b6abb000', 'tags': ['seq:step:1'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content=' {\\n')}}\n",
+    "{'event': 'on_chat_model_stream', 'name': 'model', 'run_id': '9ba1ef9f-5954-4649-b3da-1171b6abb000', 'tags': ['seq:step:1'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content=' ')}}\n",
    "...\n"
    ]
   }
@@ -1020,7 +1008,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 19,
+   "execution_count": 20,
    "id": "26bac0d2-76d9-446e-b346-82790236b88d",
    "metadata": {},
    "outputs": [
@@ -1028,17 +1016,17 @@ (mirror truncated mid-hunk below)
    "name": "stdout",
    "output_type": "stream",
    "text": [
    "{'event': 'on_chain_start', 'run_id': '190875f3-3fb7-49ad-9b6e-f49da22f3e49', 'name': 'RunnableSequence', 'tags': ['my_chain'], 'metadata': {}, 'data': {'input': 'output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key `name` and `population`'}}\n",
    "{'event': 'on_chat_model_start', 'name': 'ChatAnthropic', 'run_id': 'ff58f732-b494-4ff9-852a-783d42f4455d', 'tags': ['seq:step:1', 'my_chain'], 'metadata': {}, 'data': {'input': {'messages': [[HumanMessage(content='output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key `name` and `population`')]]}}}\n",
    "{'event': 'on_parser_start', 'name': 'JsonOutputParser', 'run_id': '3b5e4ca1-40fe-4a02-9a19-ba2a43a6115c', 'tags': ['seq:step:2', 'my_chain'], 'metadata': {}, 'data': {}}\n",
    "{'event': 'on_chat_model_stream', 'name': 'ChatAnthropic', 'run_id': 'ff58f732-b494-4ff9-852a-783d42f4455d', 'tags': ['seq:step:1', 'my_chain'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content=' Here')}}\n",
    "{'event': 'on_chat_model_stream', 'name': 'ChatAnthropic', 'run_id': 'ff58f732-b494-4ff9-852a-783d42f4455d', 'tags': ['seq:step:1', 'my_chain'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content=' is')}}\n",
    "{'event': 'on_chat_model_stream', 'name': 'ChatAnthropic', 'run_id': 'ff58f732-b494-4ff9-852a-783d42f4455d', 'tags': ['seq:step:1', 'my_chain'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content=' the')}}\n",
|
||||
"{'event': 'on_chat_model_stream', 'name': 'ChatAnthropic', 'run_id': 'ff58f732-b494-4ff9-852a-783d42f4455d', 'tags': ['seq:step:1', 'my_chain'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content=' JSON')}}\n",
|
||||
"{'event': 'on_chat_model_stream', 'name': 'ChatAnthropic', 'run_id': 'ff58f732-b494-4ff9-852a-783d42f4455d', 'tags': ['seq:step:1', 'my_chain'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content=' with')}}\n",
|
||||
"{'event': 'on_chat_model_stream', 'name': 'ChatAnthropic', 'run_id': 'ff58f732-b494-4ff9-852a-783d42f4455d', 'tags': ['seq:step:1', 'my_chain'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content=' the')}}\n",
|
||||
"{'event': 'on_chat_model_stream', 'name': 'ChatAnthropic', 'run_id': 'ff58f732-b494-4ff9-852a-783d42f4455d', 'tags': ['seq:step:1', 'my_chain'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content=' requested')}}\n",
|
||||
"{'event': 'on_chat_model_stream', 'name': 'ChatAnthropic', 'run_id': 'ff58f732-b494-4ff9-852a-783d42f4455d', 'tags': ['seq:step:1', 'my_chain'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content=' countries')}}\n",
|
||||
"{'event': 'on_chain_start', 'run_id': 'd4c78db8-be20-4fa0-87d6-cb317822967a', 'name': 'RunnableSequence', 'tags': ['my_chain'], 'metadata': {}, 'data': {'input': 'output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key `name` and `population`'}}\n",
|
||||
"{'event': 'on_chat_model_start', 'name': 'ChatOpenAI', 'run_id': '15e46d9f-ccf5-4da2-b9e3-b2a85873ba4c', 'tags': ['seq:step:1', 'my_chain'], 'metadata': {}, 'data': {'input': {'messages': [[HumanMessage(content='output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key `name` and `population`')]]}}}\n",
|
||||
"{'event': 'on_parser_start', 'name': 'JsonOutputParser', 'run_id': '91945f4f-0deb-4999-acf0-f6d191c89b34', 'tags': ['seq:step:2', 'my_chain'], 'metadata': {}, 'data': {}}\n",
|
||||
"{'event': 'on_chat_model_stream', 'name': 'ChatOpenAI', 'run_id': '15e46d9f-ccf5-4da2-b9e3-b2a85873ba4c', 'tags': ['seq:step:1', 'my_chain'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content='')}}\n",
|
||||
"{'event': 'on_parser_stream', 'name': 'JsonOutputParser', 'run_id': '91945f4f-0deb-4999-acf0-f6d191c89b34', 'tags': ['seq:step:2', 'my_chain'], 'metadata': {}, 'data': {'chunk': {}}}\n",
|
||||
"{'event': 'on_chain_stream', 'run_id': 'd4c78db8-be20-4fa0-87d6-cb317822967a', 'tags': ['my_chain'], 'metadata': {}, 'name': 'RunnableSequence', 'data': {'chunk': {}}}\n",
|
||||
"{'event': 'on_chat_model_stream', 'name': 'ChatOpenAI', 'run_id': '15e46d9f-ccf5-4da2-b9e3-b2a85873ba4c', 'tags': ['seq:step:1', 'my_chain'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content='{\"')}}\n",
|
||||
"{'event': 'on_chat_model_stream', 'name': 'ChatOpenAI', 'run_id': '15e46d9f-ccf5-4da2-b9e3-b2a85873ba4c', 'tags': ['seq:step:1', 'my_chain'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content='countries')}}\n",
|
||||
"{'event': 'on_chat_model_stream', 'name': 'ChatOpenAI', 'run_id': '15e46d9f-ccf5-4da2-b9e3-b2a85873ba4c', 'tags': ['seq:step:1', 'my_chain'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content='\":')}}\n",
|
||||
"{'event': 'on_parser_stream', 'name': 'JsonOutputParser', 'run_id': '91945f4f-0deb-4999-acf0-f6d191c89b34', 'tags': ['seq:step:2', 'my_chain'], 'metadata': {}, 'data': {'chunk': {'countries': []}}}\n",
|
||||
"{'event': 'on_chain_stream', 'run_id': 'd4c78db8-be20-4fa0-87d6-cb317822967a', 'tags': ['my_chain'], 'metadata': {}, 'name': 'RunnableSequence', 'data': {'chunk': {'countries': []}}}\n",
|
||||
"...\n"
|
||||
]
|
||||
}
|
||||
@@ -1074,7 +1062,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
"execution_count": 21,
|
||||
"id": "0e6451d3-3b11-4a71-ae19-998f4c10180f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -1116,7 +1104,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 21,
|
||||
"execution_count": 22,
|
||||
"id": "f9a8fe35-faab-4970-b8c0-5c780845d98a",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -1124,7 +1112,7 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"['France', 'Spain', 'Japan']\n"
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -1145,7 +1133,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"execution_count": 23,
|
||||
"id": "b08215cd-bffa-4e76-aaf3-c52ee34f152c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -1153,33 +1141,33 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Chat model chunk: ' Here'\n",
|
||||
"Chat model chunk: ' is'\n",
|
||||
"Chat model chunk: ' the'\n",
|
||||
"Chat model chunk: ' JSON'\n",
|
||||
"Chat model chunk: ' with'\n",
|
||||
"Chat model chunk: ' the'\n",
|
||||
"Chat model chunk: ' requested'\n",
|
||||
"Chat model chunk: ' countries'\n",
|
||||
"Chat model chunk: ' and'\n",
|
||||
"Chat model chunk: ' their'\n",
|
||||
"Chat model chunk: ' populations'\n",
|
||||
"Chat model chunk: ':'\n",
|
||||
"Chat model chunk: '\\n\\n```'\n",
|
||||
"Chat model chunk: 'json'\n",
|
||||
"Chat model chunk: ''\n",
|
||||
"Parser chunk: {}\n",
|
||||
"Chat model chunk: '\\n{'\n",
|
||||
"Chat model chunk: '\\n '\n",
|
||||
"Chat model chunk: ' \"'\n",
|
||||
"Chat model chunk: '{\"'\n",
|
||||
"Chat model chunk: 'countries'\n",
|
||||
"Chat model chunk: '\":'\n",
|
||||
"Parser chunk: {'countries': []}\n",
|
||||
"Chat model chunk: ' ['\n",
|
||||
"Chat model chunk: '\\n '\n",
|
||||
"Chat model chunk: ' [\\n'\n",
|
||||
"Chat model chunk: ' '\n",
|
||||
"Parser chunk: {'countries': [{}]}\n",
|
||||
"Chat model chunk: ' {'\n",
|
||||
"Chat model chunk: '\\n '\n",
|
||||
"Chat model chunk: ' {\"'\n",
|
||||
"Chat model chunk: 'name'\n",
|
||||
"Chat model chunk: '\":'\n",
|
||||
"Parser chunk: {'countries': [{'name': ''}]}\n",
|
||||
"Chat model chunk: ' \"'\n",
|
||||
"Parser chunk: {'countries': [{'name': 'France'}]}\n",
|
||||
"Chat model chunk: 'France'\n",
|
||||
"Chat model chunk: '\",'\n",
|
||||
"Chat model chunk: ' \"'\n",
|
||||
"Chat model chunk: 'population'\n",
|
||||
"Chat model chunk: '\":'\n",
|
||||
"Parser chunk: {'countries': [{'name': 'France', 'population': ''}]}\n",
|
||||
"Chat model chunk: ' \"'\n",
|
||||
"Parser chunk: {'countries': [{'name': 'France', 'population': '67'}]}\n",
|
||||
"Chat model chunk: '67'\n",
|
||||
"Parser chunk: {'countries': [{'name': 'France', 'population': '67 million'}]}\n",
|
||||
"Chat model chunk: ' million'\n",
|
||||
"Chat model chunk: '\"},\\n'\n",
|
||||
"...\n"
|
||||
]
|
||||
}
|
||||
@@ -1224,7 +1212,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 23,
|
||||
"execution_count": 24,
|
||||
"id": "1854206d-b3a5-4f91-9e00-bccbaebac61f",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -1232,9 +1220,9 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'event': 'on_tool_start', 'run_id': 'ae7690f8-ebc9-4886-9bbe-cb336ff274f2', 'name': 'bad_tool', 'tags': [], 'metadata': {}, 'data': {'input': 'hello'}}\n",
|
||||
"{'event': 'on_tool_stream', 'run_id': 'ae7690f8-ebc9-4886-9bbe-cb336ff274f2', 'tags': [], 'metadata': {}, 'name': 'bad_tool', 'data': {'chunk': 'olleh'}}\n",
|
||||
"{'event': 'on_tool_end', 'name': 'bad_tool', 'run_id': 'ae7690f8-ebc9-4886-9bbe-cb336ff274f2', 'tags': [], 'metadata': {}, 'data': {'output': 'olleh'}}\n"
|
||||
"{'event': 'on_tool_start', 'run_id': '39e4a7eb-c13d-46f0-99e7-75c2fa4aa6a6', 'name': 'bad_tool', 'tags': [], 'metadata': {}, 'data': {'input': 'hello'}}\n",
|
||||
"{'event': 'on_tool_stream', 'run_id': '39e4a7eb-c13d-46f0-99e7-75c2fa4aa6a6', 'tags': [], 'metadata': {}, 'name': 'bad_tool', 'data': {'chunk': 'olleh'}}\n",
|
||||
"{'event': 'on_tool_end', 'name': 'bad_tool', 'run_id': '39e4a7eb-c13d-46f0-99e7-75c2fa4aa6a6', 'tags': [], 'metadata': {}, 'data': {'output': 'olleh'}}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -1270,7 +1258,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 24,
|
||||
"execution_count": 25,
|
||||
"id": "a20a6cb3-bb43-465c-8cfc-0a7349d70968",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -1278,11 +1266,11 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'event': 'on_tool_start', 'run_id': '384f1710-612e-4022-a6d4-8a7bb0cc757e', 'name': 'correct_tool', 'tags': [], 'metadata': {}, 'data': {'input': 'hello'}}\n",
|
||||
"{'event': 'on_chain_start', 'name': 'reverse_word', 'run_id': 'c4882303-8867-4dff-b031-7d9499b39dda', 'tags': [], 'metadata': {}, 'data': {'input': 'hello'}}\n",
|
||||
"{'event': 'on_chain_end', 'name': 'reverse_word', 'run_id': 'c4882303-8867-4dff-b031-7d9499b39dda', 'tags': [], 'metadata': {}, 'data': {'input': 'hello', 'output': 'olleh'}}\n",
|
||||
"{'event': 'on_tool_stream', 'run_id': '384f1710-612e-4022-a6d4-8a7bb0cc757e', 'tags': [], 'metadata': {}, 'name': 'correct_tool', 'data': {'chunk': 'olleh'}}\n",
|
||||
"{'event': 'on_tool_end', 'name': 'correct_tool', 'run_id': '384f1710-612e-4022-a6d4-8a7bb0cc757e', 'tags': [], 'metadata': {}, 'data': {'output': 'olleh'}}\n"
|
||||
"{'event': 'on_tool_start', 'run_id': '4263aca5-f221-4eb7-b07e-60a89fb76c5c', 'name': 'correct_tool', 'tags': [], 'metadata': {}, 'data': {'input': 'hello'}}\n",
|
||||
"{'event': 'on_chain_start', 'name': 'reverse_word', 'run_id': '65e3679b-e238-47ce-a875-ee74480e696e', 'tags': [], 'metadata': {}, 'data': {'input': 'hello'}}\n",
|
||||
"{'event': 'on_chain_end', 'name': 'reverse_word', 'run_id': '65e3679b-e238-47ce-a875-ee74480e696e', 'tags': [], 'metadata': {}, 'data': {'input': 'hello', 'output': 'olleh'}}\n",
|
||||
"{'event': 'on_tool_stream', 'run_id': '4263aca5-f221-4eb7-b07e-60a89fb76c5c', 'tags': [], 'metadata': {}, 'name': 'correct_tool', 'data': {'chunk': 'olleh'}}\n",
|
||||
"{'event': 'on_tool_end', 'name': 'correct_tool', 'run_id': '4263aca5-f221-4eb7-b07e-60a89fb76c5c', 'tags': [], 'metadata': {}, 'data': {'output': 'olleh'}}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -1307,7 +1295,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 25,
|
||||
"execution_count": 26,
|
||||
"id": "0ac0a3c1-f3a4-4157-b053-4fec8d2e698c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -1315,11 +1303,11 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'event': 'on_chain_start', 'run_id': '4fe56c7b-6982-4999-a42d-79ba56151176', 'name': 'reverse_and_double', 'tags': [], 'metadata': {}, 'data': {'input': '1234'}}\n",
|
||||
"{'event': 'on_chain_start', 'name': 'reverse_word', 'run_id': '335fe781-8944-4464-8d2e-81f61d1f85f5', 'tags': [], 'metadata': {}, 'data': {'input': '1234'}}\n",
|
||||
"{'event': 'on_chain_end', 'name': 'reverse_word', 'run_id': '335fe781-8944-4464-8d2e-81f61d1f85f5', 'tags': [], 'metadata': {}, 'data': {'input': '1234', 'output': '4321'}}\n",
|
||||
"{'event': 'on_chain_stream', 'run_id': '4fe56c7b-6982-4999-a42d-79ba56151176', 'tags': [], 'metadata': {}, 'name': 'reverse_and_double', 'data': {'chunk': '43214321'}}\n",
|
||||
"{'event': 'on_chain_end', 'name': 'reverse_and_double', 'run_id': '4fe56c7b-6982-4999-a42d-79ba56151176', 'tags': [], 'metadata': {}, 'data': {'output': '43214321'}}\n"
|
||||
"{'event': 'on_chain_start', 'run_id': '714d22d4-a3c3-45fc-b2f1-913aa7f0fc22', 'name': 'reverse_and_double', 'tags': [], 'metadata': {}, 'data': {'input': '1234'}}\n",
|
||||
"{'event': 'on_chain_start', 'name': 'reverse_word', 'run_id': '35a6470c-db65-4fe1-8dff-4e3418601d2f', 'tags': [], 'metadata': {}, 'data': {'input': '1234'}}\n",
|
||||
"{'event': 'on_chain_end', 'name': 'reverse_word', 'run_id': '35a6470c-db65-4fe1-8dff-4e3418601d2f', 'tags': [], 'metadata': {}, 'data': {'input': '1234', 'output': '4321'}}\n",
|
||||
"{'event': 'on_chain_stream', 'run_id': '714d22d4-a3c3-45fc-b2f1-913aa7f0fc22', 'tags': [], 'metadata': {}, 'name': 'reverse_and_double', 'data': {'chunk': '43214321'}}\n",
|
||||
"{'event': 'on_chain_end', 'name': 'reverse_and_double', 'run_id': '714d22d4-a3c3-45fc-b2f1-913aa7f0fc22', 'tags': [], 'metadata': {}, 'data': {'output': '43214321'}}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -1349,7 +1337,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 26,
|
||||
"execution_count": 27,
|
||||
"id": "c896bb94-9d10-41ff-8fe2-d6b05b1ed74b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -1357,11 +1345,11 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'event': 'on_chain_start', 'run_id': '7485eedb-1854-429c-a2f8-03d01452daef', 'name': 'reverse_and_double', 'tags': [], 'metadata': {}, 'data': {'input': '1234'}}\n",
|
||||
"{'event': 'on_chain_start', 'name': 'reverse_word', 'run_id': 'e7cddab2-9b95-4e80-abaf-4b2429117835', 'tags': [], 'metadata': {}, 'data': {'input': '1234'}}\n",
|
||||
"{'event': 'on_chain_end', 'name': 'reverse_word', 'run_id': 'e7cddab2-9b95-4e80-abaf-4b2429117835', 'tags': [], 'metadata': {}, 'data': {'input': '1234', 'output': '4321'}}\n",
|
||||
"{'event': 'on_chain_stream', 'run_id': '7485eedb-1854-429c-a2f8-03d01452daef', 'tags': [], 'metadata': {}, 'name': 'reverse_and_double', 'data': {'chunk': '43214321'}}\n",
|
||||
"{'event': 'on_chain_end', 'name': 'reverse_and_double', 'run_id': '7485eedb-1854-429c-a2f8-03d01452daef', 'tags': [], 'metadata': {}, 'data': {'output': '43214321'}}\n"
|
||||
"{'event': 'on_chain_start', 'run_id': '17c89289-9c71-406d-90de-86f76b5e798b', 'name': 'reverse_and_double', 'tags': [], 'metadata': {}, 'data': {'input': '1234'}}\n",
|
||||
"{'event': 'on_chain_start', 'name': 'reverse_word', 'run_id': 'b1105188-9196-43c1-9603-4f2f58e51de4', 'tags': [], 'metadata': {}, 'data': {'input': '1234'}}\n",
|
||||
"{'event': 'on_chain_end', 'name': 'reverse_word', 'run_id': 'b1105188-9196-43c1-9603-4f2f58e51de4', 'tags': [], 'metadata': {}, 'data': {'input': '1234', 'output': '4321'}}\n",
|
||||
"{'event': 'on_chain_stream', 'run_id': '17c89289-9c71-406d-90de-86f76b5e798b', 'tags': [], 'metadata': {}, 'name': 'reverse_and_double', 'data': {'chunk': '43214321'}}\n",
|
||||
"{'event': 'on_chain_end', 'name': 'reverse_and_double', 'run_id': '17c89289-9c71-406d-90de-86f76b5e798b', 'tags': [], 'metadata': {}, 'data': {'output': '43214321'}}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -1397,7 +1385,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.2"
|
||||
"version": "3.11.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
@@ -93,3 +93,6 @@ Head to the reference section for full documentation of all classes and methods
### [Developer's guide](/docs/contributing)
Check out the developer's guide for guidelines on contributing and help getting your dev environment set up.

### [Community](/docs/community)
Head to the [Community navigator](/docs/community) to find places to ask questions, share feedback, meet other developers, and dream about the future of LLMs.
@@ -184,6 +184,7 @@ A Retriever can be backed by anything - a SQL table, the internet, etc - but in

First, we need to load the data that we want to index. In order to do this, we will use the WebBaseLoader. This requires installing [BeautifulSoup](https://beautiful-soup-4.readthedocs.io/en/latest/):

```
```shell
pip install beautifulsoup4
```
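Once installed, a minimal sketch of the loading step looks like this (the URL here is just a placeholder; point it at the page you want to index):

```python
from langchain_community.document_loaders import WebBaseLoader

# Fetch the page and parse it into LangChain Document objects
loader = WebBaseLoader("https://docs.smith.langchain.com/overview")
docs = loader.load()
```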
@@ -581,10 +582,7 @@ Using this, we can interact with the served chain as if it were running client-s
from langserve import RemoteRunnable

remote_chain = RemoteRunnable("http://localhost:8000/agent/")
remote_chain.invoke({
    "input": "how can langsmith help with testing?",
    "chat_history": []  # Providing an empty list as this is the first call
})
remote_chain.invoke({"input": "how can langsmith help with testing?"})
```

To learn more about the many other features of LangServe [head here](/docs/langserve).
@@ -98,7 +98,7 @@ The LLM landscape is evolving at an unprecedented pace, with new libraries and m

### Model composition

Deploying systems like LangChain demands the ability to piece together different models and connect them via logic. Take the example of building a natural language input SQL query engine. Querying an LLM and obtaining the SQL command is only part of the system. You need to extract metadata from the connected database, construct a prompt for the LLM, run the SQL query on an engine, collect and feedback the response to the LLM as the query runs, and present the results to the user. This demonstrates the need to seamlessly integrate various complex components built in Python into a dynamic chain of logical blocks that can be served together.
Deploying systems like LangChain demands the ability to piece together different models and connect them via logic. Take the example of building a natural language input SQL query engine. Querying an LLM and obtaining the SQL command is only part of the system. You need to extract metadata from the connected database, construct a prompt for the LLM, run the SQL query on an engine, collect and feed back the response to the LLM as the query runs, and present the results to the user. This demonstrates the need to seamlessly integrate various complex components built in Python into a dynamic chain of logical blocks that can be served together.

## Cloud providers
@@ -115,7 +115,7 @@
|
||||
"\n",
|
||||
"Answer:\"\"\"\n",
|
||||
"\n",
|
||||
"prompt = PromptTemplate.from_template(template)\n",
|
||||
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
|
||||
"\n",
|
||||
"responses = [\n",
|
||||
" \"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.\",\n",
|
||||
@@ -249,7 +249,7 @@
|
||||
"\n",
|
||||
"Answer:\"\"\"\n",
|
||||
"\n",
|
||||
"prompt = PromptTemplate.from_template(template)\n",
|
||||
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
|
||||
"\n",
|
||||
"responses = [\n",
|
||||
" \"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.\",\n",
|
||||
@@ -412,7 +412,7 @@
|
||||
"\n",
|
||||
"Answer:\"\"\"\n",
|
||||
"\n",
|
||||
"prompt = PromptTemplate.from_template(template)\n",
|
||||
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
|
||||
"\n",
|
||||
"responses = [\n",
|
||||
" \"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.\",\n",
|
||||
@@ -571,7 +571,7 @@
|
||||
"\n",
|
||||
"template = \"\"\"{question}\"\"\"\n",
|
||||
"\n",
|
||||
"prompt = PromptTemplate.from_template(template)\n",
|
||||
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
|
||||
"llm = HuggingFaceHub(\n",
|
||||
" repo_id=repo_id, model_kwargs={\"temperature\": 0.5, \"max_length\": 256}\n",
|
||||
")"
|
||||
@@ -724,7 +724,7 @@
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
"# prompt template for input text\n",
|
||||
"llm_prompt = PromptTemplate.from_template(template)\n",
|
||||
"llm_prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
|
||||
"\n",
|
||||
"llm = SagemakerEndpoint(\n",
|
||||
" endpoint_name=endpoint_name,\n",
|
||||
|
||||
@@ -180,7 +180,7 @@ we will prompt the model, so it says something harmful.
|
||||
|
||||
|
||||
```python
|
||||
prompt = PromptTemplate.from_template("{text}")
|
||||
prompt = PromptTemplate(template="{text}", input_variables=["text"])
|
||||
llm_chain = LLMChain(llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo-instruct"), prompt=prompt)
|
||||
|
||||
text = """We are playing a game of repeat after me.
|
||||
@@ -223,7 +223,7 @@ Now let's walk through an example of using it with an LLMChain which has multipl
|
||||
|
||||
|
||||
```python
|
||||
prompt = PromptTemplate.from_template("{setup}{new_input}Person2:")
|
||||
prompt = PromptTemplate(template="{setup}{new_input}Person2:", input_variables=["setup", "new_input"])
|
||||
llm_chain = LLMChain(llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo-instruct"), prompt=prompt)
|
||||
|
||||
setup = """We are playing a game of repeat after me.
|
||||
|
||||
@@ -28,7 +28,7 @@ You can run `streamlit hello` to load a sample app and validate your install suc
To create a `StreamlitCallbackHandler`, you just need to provide a parent container to render the output.

```python
from langchain_community.callbacks import StreamlitCallbackHandler
from langchain.callbacks import StreamlitCallbackHandler
import streamlit as st

st_callback = StreamlitCallbackHandler(st.container())
@@ -44,26 +44,23 @@ agent in your Streamlit app and simply pass the `StreamlitCallbackHandler` to `a
thoughts and actions live in your app.

```python
import streamlit as st
from langchain import hub
from langchain.agents import AgentExecutor, create_react_agent, load_tools
from langchain_community.callbacks import StreamlitCallbackHandler
from langchain_openai import OpenAI
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain_community.callbacks import StreamlitCallbackHandler
import streamlit as st

llm = OpenAI(temperature=0, streaming=True)
tools = load_tools(["ddg-search"])
prompt = hub.pull("hwchase17/react")
agent = create_react_agent(llm, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
agent = initialize_agent(
    tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)

if prompt := st.chat_input():
    st.chat_message("user").write(prompt)
    with st.chat_message("assistant"):
        st_callback = StreamlitCallbackHandler(st.container())
        response = agent_executor.invoke(
            {"input": prompt}, {"callbacks": [st_callback]}
        )
        st.write(response["output"])
        response = agent.run(prompt, callbacks=[st_callback])
        st.write(response)
```

**Note:** You will need to set `OPENAI_API_KEY` for the above app code to run successfully.
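For instance, one way to provide the key before the app starts (the value shown is a hypothetical placeholder):

```python
import os

# Placeholder value; substitute your real OpenAI API key
os.environ["OPENAI_API_KEY"] = "sk-..."
```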
@@ -90,20 +90,16 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"system = (\n",
|
||||
" \"You are a helpful assistant that translates {input_language} to {output_language}.\"\n",
|
||||
")\n",
|
||||
"system = \"You are a helpful assistant that translates {input_language} to {output_language}.\"\n",
|
||||
"human = \"{text}\"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages([(\"system\", system), (\"human\", human)])\n",
|
||||
"\n",
|
||||
"chain = prompt | chat\n",
|
||||
"chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"input_language\": \"English\",\n",
|
||||
" \"output_language\": \"Korean\",\n",
|
||||
" \"text\": \"I love Python\",\n",
|
||||
" }\n",
|
||||
")"
|
||||
"chain.invoke({\n",
|
||||
" \"input_language\": \"English\",\n",
|
||||
" \"output_language\": \"Korean\",\n",
|
||||
" \"text\": \"I love Python\",\n",
|
||||
"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -51,18 +51,10 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Alternatively, you can set your API key with:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"BAICHUAN_API_KEY\"] = \"YOUR_API_KEY\""
|
||||
"or you can set `api_key` in your environment variables\n",
|
||||
"```bash\n",
|
||||
"export BAICHUAN_API_KEY=YOUR_API_KEY\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -424,7 +424,9 @@
|
||||
"human = \"{text}\"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages([(\"system\", system), (\"human\", human)])\n",
|
||||
"\n",
|
||||
"chat = ChatVertexAI(model_name=\"chat-bison\", max_output_tokens=1000, temperature=0.5)\n",
|
||||
"chat = ChatVertexAI(\n",
|
||||
" model_name=\"chat-bison\", max_output_tokens=1000, temperature=0.5\n",
|
||||
")\n",
|
||||
"chain = prompt | chat\n",
|
||||
"\n",
|
||||
"asyncio.run(\n",
|
||||
|
||||
@@ -15,23 +15,39 @@
|
||||
"source": [
|
||||
"# ChatKonko\n",
|
||||
"\n",
|
||||
"# Konko\n",
|
||||
"\n",
|
||||
">[Konko](https://www.konko.ai/) API is a fully managed Web API designed to help application developers:\n",
|
||||
"\n",
|
||||
"Konko API is a fully managed API designed to help application developers:\n",
|
||||
"\n",
|
||||
"1. **Select** the right open source or proprietary LLMs for their application\n",
|
||||
"2. **Build** applications faster with integrations to leading application frameworks and fully managed APIs\n",
|
||||
"3. **Fine tune** smaller open-source LLMs to achieve industry-leading performance at a fraction of the cost\n",
|
||||
"4. **Deploy production-scale APIs** that meet security, privacy, throughput, and latency SLAs without infrastructure set-up or administration using Konko AI's SOC 2 compliant, multi-cloud infrastructure\n",
|
||||
"1. Select the right LLM(s) for their application\n",
|
||||
"2. Prototype with various open-source and proprietary LLMs\n",
|
||||
"3. Access Fine Tuning for open-source LLMs to get industry-leading performance at a fraction of the cost\n",
|
||||
"4. Setup low-cost production APIs according to security, privacy, throughput, latency SLAs without infrastructure set-up or administration using Konko AI's SOC 2 compliant, multi-cloud infrastructure\n",
|
||||
"\n",
|
||||
"### Steps to Access Models\n",
|
||||
"1. **Explore Available Models:** Start by browsing through the [available models](https://docs.konko.ai/docs/list-of-models) on Konko. Each model caters to different use cases and capabilities.\n",
|
||||
"\n",
|
||||
"2. **Identify Suitable Endpoints:** Determine which [endpoint](https://docs.konko.ai/docs/list-of-models#list-of-available-models) (ChatCompletion or Completion) supports your selected model.\n",
|
||||
"\n",
|
||||
"3. **Selecting a Model:** [Choose a model](https://docs.konko.ai/docs/list-of-models#list-of-available-models) based on its metadata and how well it fits your use case.\n",
|
||||
"\n",
|
||||
"4. **Prompting Guidelines:** Once a model is selected, refer to the [prompting guidelines](https://docs.konko.ai/docs/prompting) to effectively communicate with it.\n",
|
||||
"\n",
|
||||
"5. **Using the API:** Finally, use the appropriate Konko [API endpoint](https://docs.konko.ai/docs/quickstart-for-completion-and-chat-completion-endpoint) to call the model and receive responses.\n",
|
||||
"\n",
|
||||
"To run this notebook, you'll need Konko API key. You can create one by signing up on [Konko](https://www.konko.ai/).\n",
|
||||
"\n",
|
||||
"This example goes over how to use LangChain to interact with `Konko` ChatCompletion [models](https://docs.konko.ai/docs/list-of-models#konko-hosted-models-for-chatcompletion)\n",
|
||||
"\n",
|
||||
"To run this notebook, you'll need Konko API key. Sign in to our web app to [create an API key](https://platform.konko.ai/settings/api-keys) to access models\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"To run this notebook, you'll need Konko API key. You can create one by signing up on [Konko](https://www.konko.ai/)."
|
||||
]
|
||||
},
|
||||
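{
"cell_type": "markdown",
"metadata": {},
"source": [
"For orientation, a minimal `ChatKonko` call looks roughly like this (a sketch; the model name is an assumption, pick any from the model list linked above):\n",
"\n",
"```python\n",
"from langchain_community.chat_models import ChatKonko\n",
"from langchain_core.messages import HumanMessage\n",
"\n",
"# Assumes KONKO_API_KEY is set in the environment (see the setup steps below)\n",
"chat = ChatKonko(model=\"meta-llama/llama-2-13b-chat\")\n",
"chat.invoke([HumanMessage(content=\"Say hello in one sentence\")])\n",
"```"
]
},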
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
@@ -48,7 +64,11 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Set Environment Variables\n",
|
||||
"## 2. Set API Keys\n",
|
||||
"\n",
|
||||
"<br />\n",
|
||||
"\n",
|
||||
"### Option 1: Set Environment Variables\n",
|
||||
"\n",
|
||||
"1. You can set environment variables for \n",
|
||||
" 1. KONKO_API_KEY (Required)\n",
|
||||
@@ -58,7 +78,18 @@
|
||||
"```shell\n",
|
||||
"export KONKO_API_KEY={your_KONKO_API_KEY_here}\n",
|
||||
"export OPENAI_API_KEY={your_OPENAI_API_KEY_here} #Optional\n",
|
||||
"```"
|
||||
"```\n",
|
||||
"\n",
|
||||
"Alternatively, you can add the above lines directly to your shell startup script (such as .bashrc or .bash_profile for Bash shell and .zshrc for Zsh shell) to have them set automatically every time a new shell session starts.\n",
|
||||
"\n",
|
||||
"### Option 2: Set API Keys Programmatically\n",
|
||||
"\n",
|
||||
"If you prefer to set your API keys directly within your Python script or Jupyter notebook, you can use the following commands:\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"konko.set_api_key('your_KONKO_API_KEY_here') \n",
|
||||
"konko.set_openai_api_key('your_OPENAI_API_KEY_here') # Optional\n",
|
||||
"```\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -67,7 +98,7 @@
|
||||
"source": [
|
||||
"## Calling a model\n",
|
||||
"\n",
|
||||
"Find a model on the [Konko overview page](https://docs.konko.ai/docs/list-of-models)\n",
|
||||
"Find a model on the [Konko overview page](https://docs.konko.ai/v0.5.0/docs/list-of-models)\n",
|
||||
"\n",
|
||||
"Another way to find the list of models running on the Konko instance is through this [endpoint](https://docs.konko.ai/reference/get-models).\n",
|
||||
"\n",
|
||||
|
||||
@@ -15,53 +15,16 @@
|
||||
"id": "bf733a38-db84-4363-89e2-de6735c37230",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# MistralAI\n",
|
||||
"# ChatMistralAI\n",
|
||||
"\n",
|
||||
"This notebook covers how to get started with MistralAI chat models, via their [API](https://docs.mistral.ai/api/).\n",
|
||||
"\n",
|
||||
"A valid [API key](https://console.mistral.ai/users/api-keys/) is needed to communicate with the API.\n",
|
||||
"\n",
|
||||
"Head to the [API reference](https://api.python.langchain.com/en/latest/chat_models/langchain_mistralai.chat_models.ChatMistralAI.html) for detailed documentation of all attributes and methods."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "cc686b8f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"You will need the `langchain-core` and `langchain-mistralai` package to use the API. You can install these with:\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"pip install -U langchain-core langchain-mistralai\n",
|
||||
"\n",
|
||||
"We'll also need to get a [Mistral API key](https://console.mistral.ai/users/api-keys/)"
|
||||
"A valid [API key](https://console.mistral.ai/users/api-keys/) is needed to communicate with the API."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "c3fd4184",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"\n",
|
||||
"mistral_api_key = getpass.getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "502127fd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Usage"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 1,
|
||||
"id": "d4a7c55d-b235-4ca4-a579-c90cc9570da9",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -74,20 +37,23 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": 2,
|
||||
"id": "70cf04e8-423a-4ff6-8b09-f11fb711c817",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"mistral_api_key = os.environ.get(\"MISTRAL_API_KEY\")\n",
|
||||
"# If mistral_api_key is not passed, default behavior is to use the `MISTRAL_API_KEY` environment variable.\n",
|
||||
"chat = ChatMistralAI(mistral_api_key=mistral_api_key)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"execution_count": 3,
|
||||
"id": "8199ef8f-eb8b-4253-9ea0-6c24a013ca4c",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -96,16 +62,16 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"Who's there? I was just about to ask the same thing! How can I assist you today?\")"
|
||||
"AIMessage(content=\"Hello! I'm here to assist you. How can I help you today? If you have any questions or need information on a particular topic, feel free to ask. I'm ready to provide accurate and helpful answers to the best of my ability.\")"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"messages = [HumanMessage(content=\"knock knock\")]\n",
|
||||
"messages = [HumanMessage(content=\"say a brief hello\")]\n",
|
||||
"chat.invoke(messages)"
|
||||
]
|
||||
},
|
||||
@@ -114,12 +80,12 @@
|
||||
"id": "c361ab1e-8c0c-4206-9e3c-9d1424a12b9c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Async"
|
||||
"## `ChatMistralAI` also supports async and streaming functionality:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"execution_count": 4,
|
||||
"id": "c5fac0e9-05a4-4fc1-a3b3-e5bbb24b971b",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -128,10 +94,10 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Who\\'s there?\\n\\n(You can then continue the \"knock knock\" joke by saying the name of the person or character who should be responding. For example, if I say \"Banana,\" you could respond with \"Banana who?\" and I would say \"Banana bunch! Get it? Because a group of bananas is called a \\'bunch\\'!\" and then we would both laugh and have a great time. But really, you can put anything you want in the spot where I put \"Banana\" and it will still technically be a \"knock knock\" joke. The possibilities are endless!)')"
|
||||
"AIMessage(content=\"Hello! I'm glad you're here. If you have any questions or need assistance with something related to programming or software development, feel free to ask. I'll do my best to help you out. Have a great day!\")"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -140,17 +106,9 @@
|
||||
"await chat.ainvoke(messages)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "86ccef97",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Streaming\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"execution_count": 5,
|
||||
"id": "025be980-e50d-4a68-93dc-c9c7b500ce34",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -160,27 +118,7 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Who's there?\n",
|
||||
"\n",
|
||||
"(After this, the conversation can continue as a call and response \"who's there\" joke. Here is an example of how it could go:\n",
|
||||
"\n",
|
||||
"You say: Orange.\n",
|
||||
"I say: Orange who?\n",
|
||||
"You say: Orange you glad I didn't say banana!?)\n",
|
||||
"\n",
|
||||
"But since you asked for a knock knock joke specifically, here's one for you:\n",
|
||||
"\n",
|
||||
"Knock knock.\n",
|
||||
"\n",
|
||||
"Me: Who's there?\n",
|
||||
"\n",
|
||||
"You: Lettuce.\n",
|
||||
"\n",
|
||||
"Me: Lettuce who?\n",
|
||||
"\n",
|
||||
"You: Lettuce in, it's too cold out here!\n",
|
||||
"\n",
|
||||
"I hope this brings a smile to your face! Do you have a favorite knock knock joke you'd like to share? I'd love to hear it."
|
||||
"Hello! I'm happy to assist you. Is there a specific question or topic you would like to discuss? I can provide information and answer questions on a wide variety of subjects."
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -188,79 +126,6 @@
|
||||
"for chunk in chat.stream(messages):\n",
|
||||
" print(chunk.content, end=\"\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f6189577",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Batch"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "e63aebcb",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[AIMessage(content=\"Who's there? I was just about to ask the same thing! Go ahead and tell me who's there. I love a good knock-knock joke.\")]"
|
||||
]
|
||||
},
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chat.batch([messages])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "38e39e71",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"You can also easily combine with a prompt template for easy structuring of user input. We can do this using [LCEL](/docs/expression_language)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "ee43a1ae",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_template(\"Tell me a joke about {topic}\")\n",
|
||||
"chain = prompt | chat"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "0dc49212",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Why do bears hate shoes so much? They like to run around in their bear feet.')"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.invoke({\"topic\": \"bears\"})"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
@@ -279,7 +144,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.12"
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -6,7 +6,7 @@
|
||||
"source": [
|
||||
"# GitHub\n",
|
||||
"\n",
|
||||
"This notebooks shows how you can load issues and pull requests (PRs) for a given repository on [GitHub](https://github.com/). Also shows how you can load github files for a given repository on [GitHub](https://github.com/). We will use the LangChain Python repository as an example."
|
||||
"This notebooks shows how you can load issues and pull requests (PRs) for a given repository on [GitHub](https://github.com/). We will use the LangChain Python repository as an example."
|
||||
]
|
||||
},
|
||||
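{
"cell_type": "markdown",
"metadata": {},
"source": [
"For orientation, a minimal issues-and-PRs load looks roughly like this (a sketch; `ACCESS_TOKEN` is assumed to hold the personal access token created above):\n",
"\n",
"```python\n",
"from langchain.document_loaders import GitHubIssuesLoader\n",
"\n",
"# Load both issues and pull requests from the example repository\n",
"loader = GitHubIssuesLoader(\n",
"    repo=\"langchain-ai/langchain\",\n",
"    access_token=ACCESS_TOKEN,\n",
"    include_prs=True,\n",
")\n",
"docs = loader.load()\n",
"```"
]
},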
{
|
||||
@@ -46,7 +46,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 10,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
@@ -57,7 +57,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -91,7 +91,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -100,9 +100,27 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"# Creates GitHubLoader (#5257)\r\n",
|
||||
"\r\n",
|
||||
"GitHubLoader is a DocumentLoader that loads issues and PRs from GitHub.\r\n",
|
||||
"\r\n",
|
||||
"Fixes #5257\r\n",
|
||||
"\r\n",
|
||||
"Community members can review the PR once tests pass. Tag maintainers/contributors who might be interested:\r\n",
|
||||
"DataLoaders\r\n",
|
||||
"- @eyurtsev\r\n",
|
||||
"\n",
|
||||
"{'url': 'https://github.com/langchain-ai/langchain/pull/5408', 'title': 'DocumentLoader for GitHub', 'creator': 'UmerHA', 'created_at': '2023-05-29T14:50:53Z', 'comments': 0, 'state': 'open', 'labels': ['enhancement', 'lgtm', 'doc loader'], 'assignee': None, 'milestone': None, 'locked': False, 'number': 5408, 'is_pull_request': True}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(docs[0].page_content)\n",
|
||||
"print(docs[0].metadata)"
|
||||
@@ -124,7 +142,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -139,68 +157,84 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 15,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"### System Info\n",
|
||||
"\n",
|
||||
"LangChain version = 0.0.167\r\n",
|
||||
"Python version = 3.11.0\r\n",
|
||||
"System = Windows 11 (using Jupyter)\n",
|
||||
"\n",
|
||||
"### Who can help?\n",
|
||||
"\n",
|
||||
"- @hwchase17\r\n",
|
||||
"- @agola11\r\n",
|
||||
"- @UmerHA (I have a fix ready, will submit a PR)\n",
|
||||
"\n",
|
||||
"### Information\n",
|
||||
"\n",
|
||||
"- [ ] The official example notebooks/scripts\n",
|
||||
"- [X] My own modified scripts\n",
|
||||
"\n",
|
||||
"### Related Components\n",
|
||||
"\n",
|
||||
"- [X] LLMs/Chat Models\n",
|
||||
"- [ ] Embedding Models\n",
|
||||
"- [X] Prompts / Prompt Templates / Prompt Selectors\n",
|
||||
"- [ ] Output Parsers\n",
|
||||
"- [ ] Document Loaders\n",
|
||||
"- [ ] Vector Stores / Retrievers\n",
|
||||
"- [ ] Memory\n",
|
||||
"- [ ] Agents / Agent Executors\n",
|
||||
"- [ ] Tools / Toolkits\n",
|
||||
"- [ ] Chains\n",
|
||||
"- [ ] Callbacks/Tracing\n",
|
||||
"- [ ] Async\n",
|
||||
"\n",
|
||||
"### Reproduction\n",
|
||||
"\n",
|
||||
"```\r\n",
|
||||
"import os\r\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = \"...\"\r\n",
|
||||
"\r\n",
|
||||
"from langchain.chains import LLMChain\r\n",
|
||||
"from langchain_openai import ChatOpenAI\r\n",
|
||||
"from langchain.prompts import PromptTemplate\r\n",
|
||||
"from langchain.prompts.chat import ChatPromptTemplate\r\n",
|
||||
"from langchain.schema import messages_from_dict\r\n",
|
||||
"\r\n",
|
||||
"role_strings = [\r\n",
|
||||
" (\"system\", \"you are a bird expert\"), \r\n",
|
||||
" (\"human\", \"which bird has a point beak?\")\r\n",
|
||||
"]\r\n",
|
||||
"prompt = ChatPromptTemplate.from_role_strings(role_strings)\r\n",
|
||||
"chain = LLMChain(llm=ChatOpenAI(), prompt=prompt)\r\n",
|
||||
"chain.run({})\r\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"### Expected behavior\n",
|
||||
"\n",
|
||||
"Chain should run\n",
|
||||
"{'url': 'https://github.com/langchain-ai/langchain/issues/5027', 'title': \"ChatOpenAI models don't work with prompts created via ChatPromptTemplate.from_role_strings\", 'creator': 'UmerHA', 'created_at': '2023-05-20T10:39:18Z', 'comments': 1, 'state': 'open', 'labels': [], 'assignee': None, 'milestone': None, 'locked': False, 'number': 5027, 'is_pull_request': False}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(docs[0].page_content)\n",
|
||||
"print(docs[0].metadata)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Load Github File Content\n",
|
||||
"\n",
|
||||
"For below code, loads all markdown file in rpeo `langchain-ai/langchain`"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import GithubFileLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = GithubFileLoader(\n",
|
||||
" repo=\"langchain-ai/langchain\", # the repo name\n",
|
||||
" access_token=ACCESS_TOKEN,\n",
|
||||
" github_api_url=\"https://api.github.com\",\n",
|
||||
" file_filter=lambda file_path: file_path.endswith(\n",
|
||||
" \".md\"\n",
|
||||
" ), # load all markdowns files.\n",
|
||||
")\n",
|
||||
"documents = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"example output of one of document: \n",
|
||||
"\n",
|
||||
"```json\n",
|
||||
"documents.metadata: \n",
|
||||
" {\n",
|
||||
" \"path\": \"README.md\",\n",
|
||||
" \"sha\": \"82f1c4ea88ecf8d2dfsfx06a700e84be4\",\n",
|
||||
" \"source\": \"https://github.com/langchain-ai/langchain/blob/master/README.md\"\n",
|
||||
" }\n",
|
||||
"documents.content:\n",
|
||||
" mock content\n",
|
||||
"```"
|
||||
]
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
@@ -219,7 +253,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.11.3"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -27,17 +27,17 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": 5,
|
||||
"id": "0cb0f937-b610-42a2-b765-336eed037031",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"name": "stdin",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"········\n"
|
||||
" ········\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -51,20 +51,21 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 6,
|
||||
"id": "6fb585dd",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import LLMChain\n",
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"from langchain_community.llms import AlephAlpha"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 7,
|
||||
"id": "f81a230d",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -75,12 +76,12 @@
|
||||
"\n",
|
||||
"A:\"\"\"\n",
|
||||
"\n",
|
||||
"prompt = PromptTemplate.from_template(template)"
|
||||
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 8,
|
||||
"id": "f0d26e48",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -97,19 +98,19 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"execution_count": 9,
|
||||
"id": "6811d621",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm_chain = prompt | llm"
|
||||
"llm_chain = LLMChain(prompt=prompt, llm=llm)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": 10,
|
||||
"id": "3058e63f",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -118,10 +119,10 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"' Artificial Intelligence is the simulation of human intelligence processes by machines.\\n\\n'"
|
||||
"' Artificial Intelligence (AI) is the simulation of human intelligence processes by machines, especially computer systems.\\n'"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -129,16 +130,8 @@
|
||||
"source": [
|
||||
"question = \"What is AI?\"\n",
|
||||
"\n",
|
||||
"llm_chain.invoke({\"question\": question})"
|
||||
"llm_chain.run(question)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a3544eff",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
@@ -157,7 +150,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.12"
|
||||
"version": "3.10.6"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
|
||||
@@ -66,7 +66,7 @@
|
||||
"\n",
|
||||
"Answer: Let's think step by step.\"\"\"\n",
|
||||
"\n",
|
||||
"prompt = PromptTemplate.from_template(template)"
|
||||
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -90,7 +90,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm_chain = prompt | llm"
|
||||
"llm_chain = LLMChain(prompt=prompt, llm=llm)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -104,7 +104,7 @@
|
||||
"source": [
|
||||
"question = \"When was George Washington president?\"\n",
|
||||
"\n",
|
||||
"llm_chain.invoke({\"question\": question})"
|
||||
"llm_chain.run(question)"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -151,7 +151,7 @@
|
||||
"template = \"\"\"Question: {question}\n",
|
||||
"\n",
|
||||
"Answer: Let's think step by step.\"\"\"\n",
|
||||
"prompt = PromptTemplate.from_template(template)\n",
|
||||
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
|
||||
"\n",
|
||||
"llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
|
||||
"\n",
|
||||
|
||||
@@ -1,97 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Baichuan LLM\n",
|
||||
"Baichuan Inc. (https://www.baichuan-ai.com/) is a Chinese startup in the era of AGI, dedicated to addressing fundamental human needs: Efficiency, Health, and Happiness."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Prerequisite\n",
|
||||
"An API key is required to access Baichuan LLM API. Visit https://platform.baichuan-ai.com/ to get your API key."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Use Baichuan LLM"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"BAICHUAN_API_KEY\"] = \"YOUR_API_KEY\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.llms import BaichuanLLM\n",
|
||||
"\n",
|
||||
"# Load the model\n",
|
||||
"llm = BaichuanLLM()\n",
|
||||
"\n",
|
||||
"res = llm(\"What's your name?\")\n",
|
||||
"print(res)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"res = llm.generate(prompts=[\"你好!\"])\n",
|
||||
"res"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for res in llm.stream(\"Who won the second world war?\"):\n",
|
||||
" print(res)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import asyncio\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"async def run_aio_stream():\n",
|
||||
" async for res in llm.astream(\"Write a poem about the sun.\"):\n",
|
||||
" print(res)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"asyncio.run(run_aio_stream())"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -66,7 +66,7 @@
|
||||
"\n",
|
||||
"Answer: Let's think step by step.\"\"\"\n",
|
||||
"\n",
|
||||
"prompt = PromptTemplate.from_template(template)"
|
||||
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -107,43 +107,12 @@
"conversation.predict(input=\"Hi there!\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Custom models"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"custom_llm = Bedrock(\n",
"    credentials_profile_name=\"bedrock-admin\",\n",
"    provider=\"cohere\",\n",
"    model_id=\"<Custom model ARN>\",  # ARN like 'arn:aws:bedrock:...' obtained via provisioning the custom model\n",
"    model_kwargs={\"temperature\": 1},\n",
"    streaming=True,\n",
"    callbacks=[StreamingStdOutCallbackHandler()],\n",
")\n",
"\n",
"conversation = ConversationChain(\n",
"    llm=custom_llm, verbose=True, memory=ConversationBufferMemory()\n",
")\n",
"conversation.predict(input=\"What is the recipe of mayonnaise?\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Guardrails for Amazon Bedrock example \n",
"\n",
"## Guardrails for Amazon Bedrock (Preview) \n",
"[Guardrails for Amazon Bedrock](https://aws.amazon.com/bedrock/guardrails/) evaluates user inputs and model responses based on use case specific policies, and provides an additional layer of safeguards regardless of the underlying model. Guardrails can be applied across models, including Anthropic Claude, Meta Llama 2, Cohere Command, AI21 Labs Jurassic, and Amazon Titan Text, as well as fine-tuned models.\n",
"**Note**: Guardrails for Amazon Bedrock is currently in preview and not generally available. Reach out through your usual AWS Support contacts if you’d like access to this feature.\n",
"In this section, we are going to set up a Bedrock language model with specific guardrails that include tracing capabilities. "
]
},
@@ -167,7 +136,7 @@
"        print(f\"Guardrails: {kwargs}\")\n",
"\n",
"\n",
"# Guardrails for Amazon Bedrock with trace\n",
"# guardrails for Amazon Bedrock with trace\n",
"llm = Bedrock(\n",
"    credentials_profile_name=\"bedrock-admin\",\n",
"    model_id=\"<Model_ID>\",\n",
@@ -194,7 +163,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.7"
"version": "3.10.12"
}
},
"nbformat": 4,
@@ -92,7 +92,7 @@
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"\n",
"prompt = PromptTemplate.from_template(template)\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
"\n",
"# System parameter in NIBittensorLLM is optional but you can set whatever you want to perform with model\n",
"llm = NIBittensorLLM(\n",
@@ -101,7 +101,7 @@
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"prompt = PromptTemplate.from_template(template)"
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
]
},
{
@@ -11,102 +11,7 @@
"\n",
"[ChatGLM2-6B](https://github.com/THUDM/ChatGLM2-6B) is the second-generation version of the open-source bilingual (Chinese-English) chat model ChatGLM-6B. It retains the smooth conversation flow and low deployment threshold of the first-generation model, while introducing the new features like better performance, longer context and more efficient inference.\n",
"\n",
"[ChatGLM3](https://github.com/THUDM/ChatGLM3) is a new generation of pre-trained dialogue models jointly released by Zhipu AI and Tsinghua KEG. ChatGLM3-6B is the open-source model in the ChatGLM3 series"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Install required dependencies\n",
"\n",
"%pip install -qU langchain langchain-community"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## ChatGLM3\n",
"\n",
"This examples goes over how to use LangChain to interact with ChatGLM3-6B Inference for text completion."
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"from langchain.chains import LLMChain\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain.schema.messages import AIMessage\n",
"from langchain_community.llms.chatglm3 import ChatGLM3"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"template = \"\"\"{question}\"\"\"\n",
"prompt = PromptTemplate.from_template(template)"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"endpoint_url = \"http://127.0.0.1:8000/v1/chat/completions\"\n",
"\n",
"messages = [\n",
"    AIMessage(content=\"我将从美国到中国来旅游,出行前希望了解中国的城市\"),\n",
"    AIMessage(content=\"欢迎问我任何问题。\"),\n",
"]\n",
"\n",
"llm = ChatGLM3(\n",
"    endpoint_url=endpoint_url,\n",
"    max_tokens=80000,\n",
"    prefix_messages=messages,\n",
"    top_p=0.9,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'北京和上海是中国两个不同的城市,它们在很多方面都有所不同。\\n\\n北京是中国的首都,也是历史悠久的城市之一。它有着丰富的历史文化遗产,如故宫、颐和园等,这些景点吸引着众多游客前来观光。北京也是一个政治、文化和教育中心,有很多政府机构和学术机构总部设在北京。\\n\\n上海则是一个现代化的城市,它是中国的经济中心之一。上海拥有许多高楼大厦和国际化的金融机构,是中国最国际化的城市之一。上海也是一个美食和购物天堂,有许多著名的餐厅和购物中心。\\n\\n北京和上海的气候也不同。北京属于温带大陆性气候,冬季寒冷干燥,夏季炎热多风;而上海属于亚热带季风气候,四季分明,春秋宜人。\\n\\n北京和上海有很多不同之处,但都是中国非常重要的城市,每个城市都有自己独特的魅力和特色。'"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
"question = \"北京和上海两座城市有什么不同?\"\n",
"\n",
"llm_chain.run(question)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## ChatGLM and ChatGLM2\n",
"\n",
"The following example shows how to use LangChain to interact with the ChatGLM2-6B Inference to complete text.\n",
"This example goes over how to use LangChain to interact with ChatGLM2-6B Inference for text completion.\n",
"ChatGLM-6B and ChatGLM2-6B has the same api specs, so this example should work with both."
]
},
@@ -130,7 +35,7 @@
"outputs": [],
"source": [
"template = \"\"\"{question}\"\"\"\n",
"prompt = PromptTemplate.from_template(template)"
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
]
},
{
@@ -201,7 +106,7 @@
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"display_name": "langchain-dev",
"language": "python",
"name": "python3"
},
@@ -215,9 +120,9 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
"version": "3.10.12"
}
},
"nbformat": 4,
"nbformat_minor": 4
"nbformat_minor": 2
}
@@ -114,7 +114,7 @@
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"prompt = PromptTemplate.from_template(template)"
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
]
},
{
@@ -26,7 +26,7 @@
"\n",
"AI Assistant: \"\"\"\n",
"\n",
"prompt = PromptTemplate.from_template(template)"
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
]
},
{
@@ -109,7 +109,7 @@
"\n",
"Answer:\"\"\"\n",
"\n",
"prompt = PromptTemplate.from_template(template)\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
"\n",
"llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
"\n",
@@ -201,7 +201,7 @@
"template = \"\"\"{question}\n",
"\n",
"Let's think step by step. \"\"\"\n",
"prompt = PromptTemplate.from_template(template)\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
"\n",
"llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
"\n",
@@ -146,7 +146,7 @@
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"prompt = PromptTemplate.from_template(template)"
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
]
},
{
@@ -97,7 +97,7 @@
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"prompt = PromptTemplate.from_template(template)"
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
]
},
{
@@ -80,7 +80,7 @@
"\n",
"template = \"What is capital of {country}?\"\n",
"\n",
"prompt = PromptTemplate.from_template(template)\n",
"prompt = PromptTemplate(template=template, input_variables=[\"country\"])\n",
"\n",
"llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
"\n",
@@ -111,7 +111,7 @@
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"prompt = PromptTemplate.from_template(template)"
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
]
},
{
@@ -73,7 +73,7 @@
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"prompt = PromptTemplate.from_template(template)"
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
]
},
{
@@ -175,7 +175,7 @@
"\n",
"Answer: \"\"\"\n",
"\n",
"prompt = PromptTemplate.from_template(template)"
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
]
},
{
@@ -118,7 +118,7 @@
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"prompt = PromptTemplate.from_template(template)"
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
]
},
{
@@ -1,27 +1,20 @@
{
"cells": [
{
"cell_type": "raw",
"metadata": {},
"source": [
"---\n",
"sidebar_label: Konko\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "136d9ba6-c42a-435b-9e19-77ebcc7a3145",
"metadata": {},
"source": [
"# Konko\n",
"# ChatKonko\n",
"\n",
">[Konko](https://www.konko.ai/) API is a fully managed Web API designed to help application developers:\n",
"\n",
"1. **Select** the right open source or proprietary LLMs for their application\n",
"2. **Build** applications faster with integrations to leading application frameworks and fully managed APIs\n",
"3. **Fine tune** smaller open-source LLMs to achieve industry-leading performance at a fraction of the cost\n",
"4. **Deploy production-scale APIs** that meet security, privacy, throughput, and latency SLAs without infrastructure set-up or administration using Konko AI's SOC 2 compliant, multi-cloud infrastructure\n"
"Konko API is a fully managed API designed to help application developers:\n",
"\n",
"1. Select the right LLM(s) for their application\n",
"2. Prototype with various open-source and proprietary LLMs\n",
"3. Access Fine Tuning for open-source LLMs to get industry-leading performance at a fraction of the cost\n",
"4. Setup low-cost production APIs according to security, privacy, throughput, latency SLAs without infrastructure set-up or administration using Konko AI's SOC 2 compliant, multi-cloud infrastructure\n"
]
},
{
@@ -29,44 +22,25 @@
"id": "0d896d07-82b4-4f38-8c37-f0bc8b0e4fe1",
"metadata": {},
"source": [
"### Steps to Access Models\n",
"1. **Explore Available Models:** Start by browsing through the [available models](https://docs.konko.ai/docs/list-of-models) on Konko. Each model caters to different use cases and capabilities.\n",
"\n",
"2. **Identify Suitable Endpoints:** Determine which [endpoint](https://docs.konko.ai/docs/list-of-models#list-of-available-models) (ChatCompletion or Completion) supports your selected model.\n",
"\n",
"3. **Selecting a Model:** [Choose a model](https://docs.konko.ai/docs/list-of-models#list-of-available-models) based on its metadata and how well it fits your use case.\n",
"\n",
"4. **Prompting Guidelines:** Once a model is selected, refer to the [prompting guidelines](https://docs.konko.ai/docs/prompting) to effectively communicate with it.\n",
"\n",
"5. **Using the API:** Finally, use the appropriate Konko [API endpoint](https://docs.konko.ai/docs/quickstart-for-completion-and-chat-completion-endpoint) to call the model and receive responses.\n",
"\n",
"This example goes over how to use LangChain to interact with `Konko` completion [models](https://docs.konko.ai/docs/list-of-models#konko-hosted-models-for-completion)\n",
"\n",
"To run this notebook, you'll need Konko API key. Sign in to our web app to [create an API key](https://platform.konko.ai/settings/api-keys) to access models"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Set Environment Variables\n",
"\n",
"1. You can set environment variables for \n",
"    1. KONKO_API_KEY (Required)\n",
"    2. OPENAI_API_KEY (Optional)\n",
"2. In your current shell session, use the export command:\n",
"\n",
"```shell\n",
"export KONKO_API_KEY={your_KONKO_API_KEY_here}\n",
"export OPENAI_API_KEY={your_OPENAI_API_KEY_here} #Optional\n",
"```"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Calling a model\n",
"\n",
"Find a model on the [Konko overview page](https://docs.konko.ai/docs/list-of-models)\n",
"\n",
"Another way to find the list of models running on the Konko instance is through this [endpoint](https://docs.konko.ai/reference/get-models).\n",
"\n",
"From here, we can initialize our model:"
"To run this notebook, you'll need Konko API key. You can create one by signing up on [Konko](https://www.konko.ai/)."
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 1,
"id": "dd70bccb-7a65-42d0-a3f2-8116f3549da7",
"metadata": {},
"outputs": [
@@ -234,7 +234,7 @@
"\n",
"Answer: Let's work this out in a step by step way to be sure we have the right answer.\"\"\"\n",
"\n",
"prompt = PromptTemplate.from_template(template)"
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
]
},
{
@@ -91,7 +91,7 @@
"\n",
"\n",
"CONCISE SUMMARY:\"\"\"\n",
"prompt = PromptTemplate.from_template(_prompt)\n",
"prompt = PromptTemplate(template=_prompt, input_variables=[\"text\"])\n",
"\n",
"text_splitter = CharacterTextSplitter()\n",
"\n",
@@ -113,7 +113,7 @@
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"prompt = PromptTemplate.from_template(template)"
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
]
},
{
@@ -122,7 +122,7 @@
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"prompt = PromptTemplate.from_template(template)"
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
]
},
{
@@ -55,7 +55,7 @@
"source": [
"template = \"\"\"Question: {question}\"\"\"\n",
"\n",
"prompt = PromptTemplate.from_template(template)"
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
]
},
{
@@ -90,7 +90,7 @@
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"prompt = PromptTemplate.from_template(template)"
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
]
},
{
@@ -26,19 +26,19 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 13,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"os.environ[\"OCTOAI_API_TOKEN\"] = \"OCTOAI_API_TOKEN\"\n",
"os.environ[\"ENDPOINT_URL\"] = \"https://text.octoai.run/v1/chat/completions\""
"os.environ[\"ENDPOINT_URL\"] = \"https://mpt-7b-demo-f1kzsig6xes9.octoai.run/generate\""
]
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 14,
"metadata": {},
"outputs": [],
"source": [
@@ -56,50 +56,46 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 15,
"metadata": {},
"outputs": [],
"source": [
"template = \"\"\"Below is an instruction that describes a task. Write a response that appropriately completes the request.\\n Instruction:\\n{question}\\n Response: \"\"\"\n",
"prompt = PromptTemplate.from_template(template)"
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
]
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 30,
"metadata": {},
"outputs": [],
"source": [
"llm = OctoAIEndpoint(\n",
"    model_kwargs={\n",
"        \"model\": \"llama-2-13b-chat-fp16\",\n",
"        \"max_tokens\": 128,\n",
"        \"presence_penalty\": 0,\n",
"        \"temperature\": 0.1,\n",
"        \"top_p\": 0.9,\n",
"        \"messages\": [\n",
"            {\n",
"                \"role\": \"system\",\n",
"                \"content\": \"You are a helpful assistant. Keep your responses limited to one short paragraph if possible.\",\n",
"            },\n",
"        ],\n",
"        \"max_new_tokens\": 200,\n",
"        \"temperature\": 0.75,\n",
"        \"top_p\": 0.95,\n",
"        \"repetition_penalty\": 1,\n",
"        \"seed\": None,\n",
"        \"stop\": [],\n",
"    },\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 31,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
" Sure thing! Here's my response:\n",
"\n",
"Leonardo da Vinci was a true Renaissance man - an Italian polymath who excelled in various fields, including painting, sculpture, engineering, mathematics, anatomy, and geology. He is widely considered one of the greatest painters of all time, and his inventive and innovative works continue to inspire and influence artists and thinkers to this day. Some of his most famous works include the Mona Lisa, The Last Supper, and Vitruvian Man. \n"
]
"data": {
"text/plain": [
"'\\nLeonardo da Vinci was an Italian polymath and painter regarded by many as one of the greatest painters of all time. He is best known for his masterpieces including Mona Lisa, The Last Supper, and The Virgin of the Rocks. He was a draftsman, sculptor, architect, and one of the most important figures in the history of science. Da Vinci flew gliders, experimented with water turbines and windmills, and invented the catapult and a joystick-type human-powered aircraft control. He may have pioneered helicopters. As a scholar, he was interested in anatomy, geology, botany, engineering, mathematics, and astronomy.\\nOther painters and patrons claimed to be more talented, but Leonardo da Vinci was an incredibly productive artist, sculptor, engineer, anatomist, and scientist.'"
]
},
"execution_count": 31,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
@@ -244,7 +244,7 @@
"\n",
"def plt_img_base64(img_base64):\n",
"    \"\"\"\n",
"    Display base64 encoded string as image\n",
"    Disply base64 encoded string as image\n",
"\n",
"    :param img_base64: Base64 string\n",
"    \"\"\"\n",
@@ -84,7 +84,7 @@
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"prompt = PromptTemplate.from_template(template)"
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
]
},
{
@@ -119,7 +119,7 @@
"\n",
"template = \"What is a good name for a company that makes {product}?\"\n",
"\n",
"prompt = PromptTemplate.from_template(template)\n",
"prompt = PromptTemplate(template=template, input_variables=[\"product\"])\n",
"\n",
"llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
"\n",
@@ -97,7 +97,7 @@
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"prompt = PromptTemplate.from_template(template)\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
"\n",
"for model in [\"text-davinci-003\", \"huggingface.co/gpt2\"]:\n",
"    llm = OpenLM(model=model)\n",
@@ -4,9 +4,8 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"# Alibaba Cloud PAI EAS\n",
"\n",
">[Machine Learning Platform for AI of Alibaba Cloud](https://www.alibabacloud.com/help/en/pai) is a machine learning or deep learning engineering platform intended for enterprises and developers. It provides easy-to-use, cost-effective, high-performance, and easy-to-scale plug-ins that can be applied to various industry scenarios. With over 140 built-in optimization algorithms, `Machine Learning Platform for AI` provides whole-process AI engineering capabilities including data labeling (`PAI-iTAG`), model building (`PAI-Designer` and `PAI-DSW`), model training (`PAI-DLC`), compilation optimization, and inference deployment (`PAI-EAS`). `PAI-EAS` supports different types of hardware resources, including CPUs and GPUs, and features high throughput and low latency. It allows you to deploy large-scale complex models with a few clicks and perform elastic scale-ins and scale-outs in real time. It also provides a comprehensive O&M and monitoring system."
"# AliCloud PAI EAS\n",
"Machine Learning Platform for AI of Alibaba Cloud is a machine learning or deep learning engineering platform intended for enterprises and developers. It provides easy-to-use, cost-effective, high-performance, and easy-to-scale plug-ins that can be applied to various industry scenarios. With over 140 built-in optimization algorithms, Machine Learning Platform for AI provides whole-process AI engineering capabilities including data labeling (PAI-iTAG), model building (PAI-Designer and PAI-DSW), model training (PAI-DLC), compilation optimization, and inference deployment (PAI-EAS). PAI-EAS supports different types of hardware resources, including CPUs and GPUs, and features high throughput and low latency. It allows you to deploy large-scale complex models with a few clicks and perform elastic scale-ins and scale-outs in real time. It also provides a comprehensive O&M and monitoring system."
]
},
{
@@ -23,14 +22,14 @@
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"prompt = PromptTemplate.from_template(template)"
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"One who wants to use EAS LLMs must set up EAS service first. When the EAS service is launched, `EAS_SERVICE_URL` and `EAS_SERVICE_TOKEN` can be obtained. Users can refer to https://www.alibabacloud.com/help/en/pai/user-guide/service-deployment/ for more information,"
"One who want to use eas llms must set up eas service first. When the eas service is launched, eas_service_rul and eas_service token can be got. Users can refer to https://www.alibabacloud.com/help/en/pai/user-guide/service-deployment/ for more information,"
]
},
{
@@ -51,7 +50,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 10,
"metadata": {},
"outputs": [
{
@@ -66,16 +65,16 @@
}
],
"source": [
"llm_chain = prompt | llm\n",
"llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
"\n",
"question = \"What NFL team won the Super Bowl in the year Justin Beiber was born?\"\n",
"llm_chain.invoke({\"question\": question})"
"llm_chain.run(question)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
@@ -89,9 +88,10 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12"
}
"version": "3.10.11"
},
"orig_nbformat": 4
},
"nbformat": 4,
"nbformat_minor": 4
"nbformat_minor": 2
}
@@ -133,7 +133,7 @@
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"prompt = PromptTemplate.from_template(template)"
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
]
},
{
@@ -107,7 +107,7 @@
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"prompt = PromptTemplate.from_template(template)"
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
]
},
{
@@ -118,7 +118,7 @@
"Query: {query}\n",
"\n",
"Result: \"\"\"\n",
"prompt = PromptTemplate.from_template(template)"
"prompt = PromptTemplate(template=template, input_variables=[\"query\"])"
]
},
{
@@ -191,7 +191,7 @@
"template = \"\"\"Question: {question}\n",
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"prompt = PromptTemplate.from_template(template)\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
"llm_chain = LLMChain(prompt=prompt, llm=pgllm, verbose=True)\n",
"\n",
"question = \"What NFL team won the Super Bowl in the year Justin Beiber was born?\"\n",
@@ -209,7 +209,7 @@
"outputs": [],
"source": [
"template = \"\"\"Write a {adjective} poem about {subject}.\"\"\"\n",
"prompt = PromptTemplate.from_template(template)\n",
"prompt = PromptTemplate(template=template, input_variables=[\"adjective\", \"subject\"])\n",
"llm_chain = LLMChain(prompt=prompt, llm=pgllm, verbose=True)\n",
"\n",
"llm_chain.predict(adjective=\"sad\", subject=\"ducks\")"
@@ -83,7 +83,7 @@
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"prompt = PromptTemplate.from_template(template)"
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
]
},
{
@@ -96,7 +96,7 @@
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"prompt = PromptTemplate.from_template(template)"
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
]
},
{
@@ -53,7 +53,7 @@
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"\n",
"prompt = PromptTemplate.from_template(template)\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
"llm = TextGen(model_url=model_url)\n",
"llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
"question = \"What NFL team won the Super Bowl in the year Justin Bieber was born?\"\n",
@@ -104,7 +104,7 @@
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"\n",
"prompt = PromptTemplate.from_template(template)\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
"llm = TextGen(\n",
"    model_url=model_url, streaming=True, callbacks=[StreamingStdOutCallbackHandler()]\n",
")\n",
@@ -146,7 +146,7 @@
"\n",
"template = \"What is the capital of {country}\"\n",
"\n",
"prompt = PromptTemplate.from_template(template)\n",
"prompt = PromptTemplate(template=template, input_variables=[\"country\"])\n",
"\n",
"llm_chain = LLMChain(llm=llm, prompt=prompt)\n",
"\n",
@@ -95,7 +95,7 @@
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"prompt = PromptTemplate.from_template(template)"
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
]
},
{
@@ -82,7 +82,7 @@
"    temperature=0.8,\n",
")\n",
"\n",
"print(llm.invoke(\"What is the capital of France ?\"))"
"print(llm(\"What is the capital of France ?\"))"
]
},
{
@@ -117,7 +117,8 @@
"1. The first Pokemon game was released in 1996.\n",
"2. The president was Bill Clinton.\n",
"3. Clinton was president from 1993 to 2001.\n",
"4. The answer is Clinton.\n"
"4. The answer is Clinton.\n",
"\n"
]
},
{
@@ -135,13 +136,13 @@
"template = \"\"\"Question: {question}\n",
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"prompt = PromptTemplate.from_template(template)\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
"\n",
"llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
"\n",
"question = \"Who was the US president in the year the first Pokemon game was released?\"\n",
"\n",
"print(llm_chain.invoke(question))"
"print(llm_chain.run(question))"
]
},
{
@@ -171,36 +172,7 @@
"    trust_remote_code=True,  # mandatory for hf models\n",
")\n",
"\n",
"llm.invoke(\"What is the future of AI?\")"
]
},
{
"cell_type": "markdown",
"id": "d6ca8fd911d25faa",
"metadata": {
"collapsed": false
},
"source": [
"## Quantization\n",
"\n",
"vLLM supports `awq` quantization. To enable it, pass `quantization` to `vllm_kwargs`."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2cada3174c46a0ea",
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"llm_q = VLLM(\n",
"    model=\"TheBloke/Llama-2-7b-Chat-AWQ\",\n",
"    trust_remote_code=True,\n",
"    max_new_tokens=512,\n",
"    vllm_kwargs={\"quantization\": \"awq\"},\n",
")"
"llm(\"What is the future of AI?\")"
]
},
{
@@ -244,7 +216,7 @@
"    model_name=\"tiiuae/falcon-7b\",\n",
"    model_kwargs={\"stop\": [\".\"]},\n",
")\n",
"print(llm.invoke(\"Rome is\"))"
"print(llm(\"Rome is\"))"
]
}
],
@@ -7,9 +7,8 @@
"source": [
"# IBM watsonx.ai\n",
"\n",
">[WatsonxLLM](https://ibm.github.io/watsonx-ai-python-sdk/fm_extensions.html#langchain) is a wrapper for IBM [watsonx.ai](https://www.ibm.com/products/watsonx-ai) foundation models.\n",
"\n",
"This example shows how to communicate with `watsonx.ai` models using `LangChain`."
"[WatsonxLLM](https://ibm.github.io/watsonx-ai-python-sdk/fm_extensions.html#langchain) is a wrapper for IBM [watsonx.ai](https://www.ibm.com/products/watsonx-ai) foundation models.\n",
"This example shows how to communicate with watsonx.ai models using LangChain."
]
},
{
@@ -17,8 +16,6 @@
"id": "ea35b2b7",
"metadata": {},
"source": [
"## Setting up\n",
"\n",
"Install the package [`ibm-watsonx-ai`](https://ibm.github.io/watsonx-ai-python-sdk/install.html)."
]
},
@@ -63,7 +60,6 @@
"metadata": {},
"source": [
"## Load the model\n",
"\n",
"You might need to adjust model `parameters` for different models or tasks. For details, refer to [documentation](https://ibm.github.io/watsonx-ai-python-sdk/fm_model.html#metanames.GenTextParamsMetaNames)."
]
},
@@ -332,7 +328,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12"
"version": "3.10.13"
}
},
"nbformat": 4,
@@ -72,7 +72,7 @@
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"prompt = PromptTemplate.from_template(template)"
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
]
},
{
@@ -126,7 +126,7 @@
"\n",
"template = \"Where can we visit in the capital of {country}?\"\n",
"\n",
"prompt = PromptTemplate.from_template(template)\n",
"prompt = PromptTemplate(template=template, input_variables=[\"country\"])\n",
"\n",
"llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
"\n",
@@ -56,7 +56,7 @@
"outputs": [],
"source": [
"template = \"What is the capital of {country}?\"\n",
"prompt = PromptTemplate.from_template(template)"
"prompt = PromptTemplate(template=template, input_variables=[\"country\"])"
]
},
{
@@ -207,11 +207,15 @@ from langchain_community.vectorstores import MatchingEngine
> [Google BigQuery](https://cloud.google.com/bigquery),
> BigQuery is a serverless and cost-effective enterprise data warehouse in Google Cloud.
>
> [Google BigQuery Vector Search](https://cloud.google.com/bigquery/docs/vector-search-intro)
> Google BigQuery Vector Search
> BigQuery vector search lets you use GoogleSQL to do semantic search, using vector indexes for fast but approximate results, or using brute force for exact results.

> It can calculate Euclidean or Cosine distance. With LangChain, we default to use Euclidean distance.

> This is a private preview (experimental) feature. Please submit this
> [enrollment form](https://docs.google.com/forms/d/18yndSb4dTf2H0orqA9N7NAchQEDQekwWiD5jYfEkGWk/viewform?edit_requested=true)
> if you want to enroll BigQuery Vector Search Experimental.

We need to install several python packages.

```bash
@@ -1,27 +0,0 @@
---
sidebar_position: 0
sidebar_class_name: hidden
---

# Providers

LangChain integrates with many providers

## Partner Packages

- [OpenAI](/docs/integrations/platforms/openai)
- [Anthropic](/docs/integrations/platforms/anthropic)
- [Google](/docs/integrations/platforms/google)
- [MistralAI](/docs/integrations/providers/mistralai)
- [NVIDIA AI](/docs/integrations/providers/nvidia)
- [Together AI](/docs/integrations/providers/together)
- [Robocorp](/docs/integrations/providers/robocorp)
- [Exa Search](/docs/integrations/providers/exa_search)
- [Nomic](/docs/integrations/providers/nomic)


## Featured Community Providers

- [AWS](/docs/integrations/platforms/aws)
- [Hugging Face](/docs/integrations/platforms/huggingface)
- [Microsoft](/docs/integrations/platforms/microsoft)
@@ -6,11 +6,8 @@
Visit us at https://www.baichuan-ai.com/.
Register and get an API key if you are trying out our APIs.

## Baichuan LLM Endpoint
An example is available at [example](/docs/integrations/llms/baichuan)

## Baichuan Chat Model
An example is available at [example](/docs/integrations/chat/baichuan).

## Baichuan Text Embedding Model
An example is available at [example](/docs/integrations/text_embedding/baichuan)
An example is available at [example] (/docs/integrations/text_embedding/baichuan)
@@ -1,24 +0,0 @@
# BREEBS (Open Knowledge)

[BREEBS](https://www.breebs.com/) is an open collaborative knowledge platform.
Anybody can create a Breeb, a knowledge capsule based on PDFs stored on a Google Drive folder.
A breeb can be used by any LLM/chatbot to improve its expertise, reduce hallucinations and give access to sources.
Behind the scenes, Breebs implements several Retrieval Augmented Generation (RAG) models to seamlessly provide useful context at each iteration.

## List of available Breebs

To get the full list of Breebs, including their key (breeb_key) and description :
https://breebs.promptbreeders.com/web/listbreebs.
Dozens of Breebs have already been created by the community and are freely available for use. They cover a wide range of expertise, from organic chemistry to mythology, as well as tips on seduction and decentralized finance.

## Creating a new Breeb

To generate a new Breeb, simply compile PDF files in a publicly shared Google Drive folder and initiate the creation process on the [BREEBS website](https://www.breebs.com/) by clicking the "Create Breeb" button. You can currently include up to 120 files, with a total character limit of 15 million.

## Retriever
```python
from langchain.retrievers import BreebsRetriever
```

# Example
[See usage example (Retrieval & ConversationalRetrievalChain)](https://python.langchain.com/docs/integrations/retrievers/breebs)
@@ -185,7 +185,7 @@
" \n",
"- **Output:** An engaging tweet that correctly answers the question from the retrieved info.\n",
" \n",
"Let's use LangChain's expression language (LCEL) to illustrate this. Any prompt here will do, we will optimize the final prompt with DSPy.\n",
"Let's use LangChain's expression langugage (LCEL) to illustrate this. Any prompt here will do, we will optimize the final prompt with DSPy.\n",
"\n",
"Considering that, let's just keep it to the barebones: **Given {context}, answer the question {question} as a tweet.**"
]
@@ -1,77 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Exa Search\n",
"\n",
"Exa's search integration exists in its own [partner package](https://pypi.org/project/langchain-exa/). You can install it with:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain-exa"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"In order to use the package, you will also need to set the `EXA_API_KEY` environment variable to your Exa API key.\n",
"\n",
"## Retriever\n",
"\n",
"You can use the [`ExaSearchRetriever`](/docs/integrations/tools/exa_search#using-exasearchretriever) in a standard retrieval pipeline. You can import it as follows"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"id": "y8ku6X96sebl"
},
"outputs": [],
"source": [
"from langchain_exa import ExaSearchRetriever"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Tools\n",
"\n",
"You can use Exa as an agent tool as described in the [Exa tool calling docs](/docs/integrations/tools/exa_search#using-the-exa-sdk-as-langchain-agent-tools).\n"
]
}
],
"metadata": {
"colab": {
"provenance": []
},
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.11"
}
},
"nbformat": 4,
"nbformat_minor": 1
}
@@ -1,45 +1,81 @@
# Konko
All functionality related to Konko
This page covers how to run models on Konko within LangChain.

>[Konko AI](https://www.konko.ai/) provides a fully managed API to help application developers
Konko API is a fully managed API designed to help application developers:

>1. **Select** the right open source or proprietary LLMs for their application
>2. **Build** applications faster with integrations to leading application frameworks and fully managed APIs
>3. **Fine tune** smaller open-source LLMs to achieve industry-leading performance at a fraction of the cost
>4. **Deploy production-scale APIs** that meet security, privacy, throughput, and latency SLAs without infrastructure set-up or administration using Konko AI's SOC 2 compliant, multi-cloud infrastructure
Select the right LLM(s) for their application
Prototype with various open-source and proprietary LLMs
Move to production in-line with their security, privacy, throughput, latency SLAs without infrastructure set-up or administration using Konko AI's SOC 2 compliant infrastructure

## Installation and Setup

1. Sign in to our web app to [create an API key](https://platform.konko.ai/settings/api-keys) to access models via our endpoints for [chat completions](https://docs.konko.ai/reference/post-chat-completions) and [completions](https://docs.konko.ai/reference/post-completions).
2. Enable a Python3.8+ environment
3. Install the SDK
### First you'll need an API key
You can request it by messaging [support@konko.ai](mailto:support@konko.ai)

```bash
pip install konko
```
### Install Konko AI's Python SDK

4. Set API Keys as environment variables(`KONKO_API_KEY`,`OPENAI_API_KEY`)
#### 1. Enable a Python3.8+ environment

```bash
#### 2. Set API Keys

##### Option 1: Set Environment Variables

1. You can set environment variables for
    1. KONKO_API_KEY (Required)
    2. OPENAI_API_KEY (Optional)

2. In your current shell session, use the export command:

```shell
export KONKO_API_KEY={your_KONKO_API_KEY_here}
export OPENAI_API_KEY={your_OPENAI_API_KEY_here} #Optional
```

Please see [the Konko docs](https://docs.konko.ai/docs/getting-started) for more details.
Alternatively, you can add the above lines directly to your shell startup script (such as .bashrc or .bash_profile for Bash shell and .zshrc for Zsh shell) to have them set automatically every time a new shell session starts.

##### Option 2: Set API Keys Programmatically

If you prefer to set your API keys directly within your Python script or Jupyter notebook, you can use the following commands:

```python
konko.set_api_key('your_KONKO_API_KEY_here')
konko.set_openai_api_key('your_OPENAI_API_KEY_here') # Optional
```

#### 3. Install the SDK


## LLM
```shell
pip install konko
```

**Explore Available Models:** Start by browsing through the [available models](https://docs.konko.ai/docs/list-of-models) on Konko. Each model caters to different use cases and capabilities.
#### 4. Verify Installation & Authentication

Another way to find the list of models running on the Konko instance is through this [endpoint](https://docs.konko.ai/reference/get-models).
```python
#Confirm konko has installed successfully
import konko
#Confirm API keys from Konko and OpenAI are set properly
konko.Model.list()
```

See a usage [example](/docs/integrations/llms/konko).
## Calling a model

### Examples of Endpoint Usage
Find a model on the [Konko Introduction page](https://docs.konko.ai/docs/list-of-models)

Another way to find the list of models running on the Konko instance is through this [endpoint](https://docs.konko.ai/reference/listmodels).

## Examples of Endpoint Usage


- **ChatCompletion with Mistral-7B:**
```python
chat_instance = ChatKonko(max_tokens=10, model = 'mistralai/mistral-7b-instruct-v0.1')
msg = HumanMessage(content="Hi")
chat_response = chat_instance([msg])

```

- **Completion with mistralai/Mistral-7B-v0.1:**

```python
from langchain.llms import Konko
llm = Konko(max_tokens=800, model='mistralai/Mistral-7B-v0.1')
@@ -47,19 +83,4 @@ See a usage [example](/docs/integrations/llms/konko).
response = llm(prompt)
```

## Chat Models

See a usage [example](/docs/integrations/chat/konko).


- **ChatCompletion with Mistral-7B:**

```python
from langchain.schema import HumanMessage
from langchain_community.chat_models import ChatKonko
chat_instance = ChatKonko(max_tokens=10, model = 'mistralai/mistral-7b-instruct-v0.1')
msg = HumanMessage(content="Hi")
chat_response = chat_instance([msg])
```

For further assistance, contact [support@konko.ai](mailto:support@konko.ai) or join our [Discord](https://discord.gg/TXV2s3z7RZ).
@@ -1,78 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# MistralAI\n",
"\n",
"Mistral AI is a platform that offers hosting for their powerful open source models.\n",
"\n",
"You can access them via their [API](https://docs.mistral.ai/api/).\n",
"\n",
"A valid [API key](https://console.mistral.ai/users/api-keys/) is needed to communicate with the API.\n",
"\n",
"You will also need the `langchain-mistralai` package:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain-core langchain-mistralai"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"id": "y8ku6X96sebl"
},
"outputs": [],
"source": [
"from langchain_mistralai import ChatMistralAI, MistralAIEmbeddings"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"See the docs for their\n",
"\n",
"- [Chat Model](/docs/integrations/chat/mistralai)\n",
"- [Embeddings Model](/docs/integrations/text_embedding/mistralai)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": []
}
],
"metadata": {
"colab": {
"provenance": []
},
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.11"
}
},
"nbformat": 4,
"nbformat_minor": 1
}
@@ -1,69 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Nomic\n",
"\n",
"Nomic currently offers two products:\n",
"\n",
"- Atlas: their Visual Data Engine\n",
"- GPT4All: their Open Source Edge Language Model Ecosystem\n",
"\n",
"The Nomic integration exists in its own [partner package](https://pypi.org/project/langchain-nomic/). You can install it with:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain-nomic"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Currently, you can import their hosted [embedding model](/docs/integrations/text_embedding/nomic) as follows:"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"id": "y8ku6X96sebl"
},
"outputs": [],
"source": [
"from langchain_nomic import NomicEmbeddings"
]
}
],
"metadata": {
"colab": {
"provenance": []
},
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.11"
}
},
"nbformat": 4,
"nbformat_minor": 1
}
@@ -21,7 +21,7 @@ To use, you should set up the environment variables `ANYSCALE_API_BASE` and
## LLM

```python
from langchain_community.llms import Ollama
from langchain.llms import Ollama
```

See the notebook example [here](/docs/integrations/llms/ollama).
@@ -31,7 +31,7 @@ See the notebook example [here](/docs/integrations/llms/ollama).
### Chat Ollama

```python
from langchain_community.chat_models import ChatOllama
from langchain.chat_models import ChatOllama
```

See the notebook example [here](/docs/integrations/chat/ollama).
@@ -47,7 +47,7 @@ See the notebook example [here](/docs/integrations/chat/ollama_functions).
## Embedding models

```python
from langchain_community.embeddings import OllamaEmbeddings
from langchain.embeddings import OllamaEmbeddings
```

See the notebook example [here](/docs/integrations/text_embedding/ollama).
@@ -1,21 +0,0 @@
# Ontotext GraphDB

>[Ontotext GraphDB](https://graphdb.ontotext.com/) is a graph database and knowledge discovery tool compliant with RDF and SPARQL.

## Dependencies

Install the [rdflib](https://github.com/RDFLib/rdflib) package with
```bash
pip install rdflib==7.0.0
```

## Graph QA Chain

Connect your GraphDB Database with a chat model to get insights on your data.

See the notebook example [here](/docs/use_cases/graph/graph_ontotext_graphdb_qa).

```python
from langchain_community.graphs import OntotextGraphDBGraph
from langchain.chains import OntotextGraphDBQAChain
```
@@ -55,7 +55,7 @@ Head to stories to get ALL the deets on each box! 👆 BONUS: Save 50% on your f
Query: {query}

Result: """
prompt = PromptTemplate.from_template(template)
prompt = PromptTemplate(template=template, input_variables=["query"])

# With "guarding" or controlling the output of the LLM. See the
# Prediction Guard docs (https://docs.predictionguard.com) to learn how to
@@ -93,7 +93,7 @@ pgllm = PredictionGuard(model="OpenAI-gpt-3.5-turbo-instruct")
template = """Question: {question}

Answer: Let's think step by step."""
prompt = PromptTemplate.from_template(template)
prompt = PromptTemplate(template=template, input_variables=["question"])
llm_chain = LLMChain(prompt=prompt, llm=pgllm, verbose=True)

question = "What NFL team won the Super Bowl in the year Justin Beiber was born?"
@@ -135,7 +135,7 @@
"        # We initialize the LLM, template and the chain here\n",
"        llm = OpenAI(openai_api_key=OPENAI_API_KEY)\n",
"        template = \"Question: {question}\\n\\nAnswer: Let's think step by step.\"\n",
"        prompt = PromptTemplate.from_template(template)\n",
"        prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
"        self.chain = LLMChain(llm=llm, prompt=prompt)\n",
"\n",
"    def _run_chain(self, text: str):\n",
@@ -33,7 +33,7 @@ template = """Question: {question}

# Answer: Let's think step by step."""

prompt = PromptTemplate.from_template(template)
prompt = PromptTemplate(template=template, input_variables=["question"])

llm_chain = LLMChain(prompt=prompt, llm=llm)

Some files were not shown because too many files have changed in this diff