mirror of https://github.com/hwchase17/langchain.git (synced 2026-02-11 19:49:54 +00:00)

Compare commits: 46 commits, jacob/peop ... erick/test
| Author | SHA1 | Date |
|---|---|---|
|  | 648949e009 |  |
|  | afc1def49b |  |
|  | f6a98032e4 |  |
|  | cd806400fc |  |
|  | 9678797625 |  |
|  | 15e42f1799 |  |
|  | 50ba3c68bb |  |
|  | 6ef12fdfd2 |  |
|  | c05cbf0533 |  |
|  | ed789be8f4 |  |
|  | 971d29e718 |  |
|  | b0cfb86c48 |  |
|  | b5f8cf9509 |  |
|  | f685d2f50c |  |
|  | 29660f8918 |  |
|  | 9b0b0032c2 |  |
|  | e8633e53c4 |  |
|  | 78521caf51 |  |
|  | 4f88a5130e |  |
|  | 9775de46cc |  |
|  | f6e3aa9770 |  |
|  | d068e8ea54 |  |
|  | e237dcec91 |  |
|  | 9cf6661dc5 |  |
|  | a51a257575 |  |
|  | ecd72d26cf |  |
|  | a53370a060 |  |
|  | e5e38e89ce |  |
|  | da957a22cc |  |
|  | 919b8a387f |  |
|  | 7248e98b9e |  |
|  | 1ec8199c8e |  |
|  | 42f158c128 |  |
|  | 0e26b16930 |  |
|  | 66e1005898 |  |
|  | 3d91be94b1 |  |
|  | c524bf31f5 |  |
|  | 3019a594b7 |  |
|  | 815ec74298 |  |
|  | 375051a64e |  |
|  | 762f49162a |  |
|  | 9e54c227f1 |  |
|  | 242981b8f0 |  |
|  | 581095b9b5 |  |
|  | ed0b7c3b72 |  |
|  | 5019951a5d |  |
.github/actions/people/app/main.py (vendored, 9 lines changed)

```diff
@@ -113,6 +113,9 @@ query Q($after: String) {
           login
           avatarUrl
           url
+          ... on User {
+            twitterUsername
+          }
         }
         title
         createdAt
@@ -123,6 +126,9 @@ query Q($after: String) {
           login
           avatarUrl
           url
+          ... on User {
+            twitterUsername
+          }
         }
         state
       }
@@ -139,6 +145,7 @@ class Author(BaseModel):
     login: str
     avatarUrl: str
     url: str
+    twitterUsername: Union[str, None] = None


 # Issues and Discussions
@@ -501,6 +508,7 @@ def get_top_users(
                 "login": commentor,
                 "count": count,
                 "avatarUrl": author.avatarUrl,
+                "twitterUsername": author.twitterUsername,
                 "url": author.url,
             }
         )
@@ -550,6 +558,7 @@ if __name__ == "__main__":
                 "login": login,
                 "count": contributors[login],  # + question_commentors[login],
                 "avatarUrl": user.avatarUrl,
+                "twitterUsername": user.twitterUsername,
                 "url": user.url,
             }
         )
```
.github/scripts/check_diff.py (vendored, 4 lines changed)

```diff
@@ -48,3 +48,7 @@ if __name__ == "__main__":
         pass
     json_output = json.dumps(list(dirs_to_run))
     print(f"dirs-to-run={json_output}")  # noqa: T201
+
+    extended_test_dirs = [d for d in dirs_to_run if not d.startswith("libs/partners")]
+    json_output_extended = json.dumps(extended_test_dirs)
+    print(f"dirs-to-run-extended={json_output_extended}")  # noqa: T201
```
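The filtering logic added above is easy to check in isolation; a minimal sketch with hypothetical directory names (only the list comprehension itself is taken from the diff):

```python
import json

# Hypothetical sample of the directories the diff script might collect.
dirs_to_run = {"libs/core", "libs/community", "libs/partners/groq"}

# Extended tests run everywhere except partner packages, per the diff above.
extended_test_dirs = [d for d in dirs_to_run if not d.startswith("libs/partners")]

# Both lists are emitted as JSON so the workflow can parse them with fromJson().
print(f"dirs-to-run={json.dumps(sorted(dirs_to_run))}")
print(f"dirs-to-run-extended={json.dumps(sorted(extended_test_dirs))}")
```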
.github/workflows/_all_ci.yml (vendored, 110 lines deleted); its jobs move into check_diffs.yml below.

```yaml
---
name: langchain CI

on:
  workflow_call:
    inputs:
      working-directory:
        required: true
        type: string
        description: "From which folder this pipeline executes"
  workflow_dispatch:
    inputs:
      working-directory:
        required: true
        type: choice
        default: 'libs/langchain'
        options:
          - libs/langchain
          - libs/core
          - libs/experimental
          - libs/community


# If another push to the same PR or branch happens while this workflow is still running,
# cancel the earlier run in favor of the next run.
#
# There's no point in testing an outdated version of the code. GitHub only allows
# a limited number of job runners to be active at the same time, so it's better to cancel
# pointless jobs early so that more useful jobs can run sooner.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}-${{ inputs.working-directory }}
  cancel-in-progress: true

env:
  POETRY_VERSION: "1.7.1"

jobs:
  lint:
    name: "-"
    uses: ./.github/workflows/_lint.yml
    with:
      working-directory: ${{ inputs.working-directory }}
    secrets: inherit

  test:
    name: "-"
    uses: ./.github/workflows/_test.yml
    with:
      working-directory: ${{ inputs.working-directory }}
    secrets: inherit

  compile-integration-tests:
    name: "-"
    uses: ./.github/workflows/_compile_integration_test.yml
    with:
      working-directory: ${{ inputs.working-directory }}
    secrets: inherit

  dependencies:
    name: "-"
    uses: ./.github/workflows/_dependencies.yml
    with:
      working-directory: ${{ inputs.working-directory }}
    secrets: inherit

  extended-tests:
    name: "make extended_tests #${{ matrix.python-version }}"
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version:
          - "3.8"
          - "3.9"
          - "3.10"
          - "3.11"
    defaults:
      run:
        working-directory: ${{ inputs.working-directory }}
    if: ${{ ! startsWith(inputs.working-directory, 'libs/partners/') }}
    steps:
      - uses: actions/checkout@v4

      - name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
        uses: "./.github/actions/poetry_setup"
        with:
          python-version: ${{ matrix.python-version }}
          poetry-version: ${{ env.POETRY_VERSION }}
          working-directory: ${{ inputs.working-directory }}
          cache-key: extended

      - name: Install dependencies
        shell: bash
        run: |
          echo "Running extended tests, installing dependencies with poetry..."
          poetry install -E extended_testing --with test

      - name: Run extended tests
        run: make extended_tests

      - name: Ensure the tests did not create any additional files
        shell: bash
        run: |
          set -eu

          STATUS="$(git status)"
          echo "$STATUS"

          # grep will exit non-zero if the target message isn't found,
          # and `set -e` above will cause the step to fail.
          echo "$STATUS" | grep 'nothing to commit, working tree clean'
```
.github/workflows/_release.yml (vendored, 1 line changed)

```diff
@@ -181,6 +181,7 @@ jobs:
           NVIDIA_API_KEY: ${{ secrets.NVIDIA_API_KEY }}
           GOOGLE_SEARCH_API_KEY: ${{ secrets.GOOGLE_SEARCH_API_KEY }}
           GOOGLE_CSE_ID: ${{ secrets.GOOGLE_CSE_ID }}
+          GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }}
           EXA_API_KEY: ${{ secrets.EXA_API_KEY }}
           NOMIC_API_KEY: ${{ secrets.NOMIC_API_KEY }}
           WATSONX_APIKEY: ${{ secrets.WATSONX_APIKEY }}
```
.github/workflows/check_diffs.yml (vendored, 107 lines changed)

```diff
@@ -16,6 +16,9 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true

+env:
+  POETRY_VERSION: "1.7.1"
+
 jobs:
   build:
     runs-on: ubuntu-latest
@@ -31,14 +34,114 @@ jobs:
           python .github/scripts/check_diff.py ${{ steps.files.outputs.all }} >> $GITHUB_OUTPUT
     outputs:
       dirs-to-run: ${{ steps.set-matrix.outputs.dirs-to-run }}
-  ci:
+      dirs-to-run-extended: ${{ steps.set-matrix.outputs.dirs-to-run-extended }}
+  lint:
     name: cd ${{ matrix.working-directory }}
     needs: [ build ]
     strategy:
       matrix:
         working-directory: ${{ fromJson(needs.build.outputs.dirs-to-run) }}
-    uses: ./.github/workflows/_all_ci.yml
+    uses: ./.github/workflows/_lint.yml
     with:
       working-directory: ${{ matrix.working-directory }}
     secrets: inherit
+
+  test:
+    name: cd ${{ matrix.working-directory }}
+    needs: [ build ]
+    strategy:
+      matrix:
+        working-directory: ${{ fromJson(needs.build.outputs.dirs-to-run) }}
+    uses: ./.github/workflows/_test.yml
+    with:
+      working-directory: ${{ matrix.working-directory }}
+    secrets: inherit
+
+  compile-integration-tests:
+    name: cd ${{ matrix.working-directory }}
+    needs: [ build ]
+    strategy:
+      matrix:
+        working-directory: ${{ fromJson(needs.build.outputs.dirs-to-run) }}
+    uses: ./.github/workflows/_compile_integration_test.yml
+    with:
+      working-directory: ${{ matrix.working-directory }}
+    secrets: inherit
+
+  dependencies:
+    name: cd ${{ matrix.working-directory }}
+    needs: [ build ]
+    strategy:
+      matrix:
+        working-directory: ${{ fromJson(needs.build.outputs.dirs-to-run) }}
+    uses: ./.github/workflows/_dependencies.yml
+    with:
+      working-directory: ${{ matrix.working-directory }}
+    secrets: inherit
+
+  extended-tests:
+    name: "cd ${{ matrix.working-directory }} / make extended_tests #${{ matrix.python-version }}"
+    needs: [ build ]
+    strategy:
+      matrix:
+        # note different variable for extended test dirs
+        working-directory: ${{ fromJson(needs.build.outputs.dirs-to-run-extended) }}
+        python-version:
+          - "3.8"
+          - "3.9"
+          - "3.10"
+          - "3.11"
+    runs-on: ubuntu-latest
+    defaults:
+      run:
+        working-directory: ${{ matrix.working-directory }}
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
+        uses: "./.github/actions/poetry_setup"
+        with:
+          python-version: ${{ matrix.python-version }}
+          poetry-version: ${{ env.POETRY_VERSION }}
+          working-directory: ${{ matrix.working-directory }}
+          cache-key: extended
+
+      - name: Install dependencies
+        shell: bash
+        run: |
+          echo "Running extended tests, installing dependencies with poetry..."
+          poetry install -E extended_testing --with test
+
+      - name: Run extended tests
+        run: make extended_tests
+
+      - name: Ensure the tests did not create any additional files
+        shell: bash
+        run: |
+          set -eu
+
+          STATUS="$(git status)"
+          echo "$STATUS"
+
+          # grep will exit non-zero if the target message isn't found,
+          # and `set -e` above will cause the step to fail.
+          echo "$STATUS" | grep 'nothing to commit, working tree clean'
+
+  ci_end:
+    name: "CI Success"
+    needs: [build, lint, test, compile-integration-tests, dependencies, extended-tests]
+    if: ${{ always() }}
+    runs-on: ubuntu-latest
+    steps:
+      - name: "CI Success"
+        if: ${{ !failure() }}
+        run: |
+          echo "Success"
+          exit 0
+      - name: "CI Failure"
+        if: ${{ failure() }}
+        run: |
+          echo "Failure"
+          exit 1
```
```diff
@@ -520,7 +520,7 @@
    "source": [
     "import re\n",
     "\n",
-    "from langchain.schema import Document\n",
+    "from langchain_core.documents import Document\n",
     "from langchain_core.runnables import RunnableLambda\n",
     "\n",
     "\n",
```
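This hunk is the first of many below that move imports from `langchain.schema` to the new `langchain_core` packages. A minimal sketch of the new-style imports in use, assuming `langchain-core` is installed:

```python
from langchain_core.documents import Document
from langchain_core.runnables import RunnableLambda

# Only the import paths changed; Document and RunnableLambda behave as before.
doc = Document(page_content="hello world", metadata={"source": "example"})
shout = RunnableLambda(lambda d: d.page_content.upper())
print(shout.invoke(doc))  # -> HELLO WORLD
```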
```diff
@@ -167,7 +167,7 @@
     "from langchain.llms import LlamaCpp\n",
     "from langchain.memory import ConversationTokenBufferMemory\n",
     "from langchain.prompts import PromptTemplate, load_prompt\n",
-    "from langchain.schema import SystemMessage\n",
+    "from langchain_core.messages import SystemMessage\n",
     "from langchain_experimental.chat_models import Llama2Chat\n",
     "from quixstreams import Application, State, message_key\n",
     "\n",
```

```diff
@@ -42,9 +42,9 @@
     ")\n",
     "from langchain.chains import LLMChain\n",
     "from langchain.prompts import StringPromptTemplate\n",
-    "from langchain.schema import AgentAction, AgentFinish\n",
     "from langchain_community.agent_toolkits import NLAToolkit\n",
     "from langchain_community.tools.plugin import AIPlugin\n",
+    "from langchain_core.agents import AgentAction, AgentFinish\n",
     "from langchain_openai import OpenAI"
    ]
   },
@@ -114,8 +114,8 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from langchain.schema import Document\n",
     "from langchain_community.vectorstores import FAISS\n",
+    "from langchain_core.documents import Document\n",
     "from langchain_openai import OpenAIEmbeddings"
    ]
   },
```

```diff
@@ -67,9 +67,9 @@
     ")\n",
     "from langchain.chains import LLMChain\n",
     "from langchain.prompts import StringPromptTemplate\n",
-    "from langchain.schema import AgentAction, AgentFinish\n",
     "from langchain_community.agent_toolkits import NLAToolkit\n",
     "from langchain_community.tools.plugin import AIPlugin\n",
+    "from langchain_core.agents import AgentAction, AgentFinish\n",
     "from langchain_openai import OpenAI"
    ]
   },
@@ -138,8 +138,8 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from langchain.schema import Document\n",
     "from langchain_community.vectorstores import FAISS\n",
+    "from langchain_core.documents import Document\n",
     "from langchain_openai import OpenAIEmbeddings"
    ]
   },
```

```diff
@@ -40,8 +40,8 @@
     ")\n",
     "from langchain.chains import LLMChain\n",
     "from langchain.prompts import StringPromptTemplate\n",
-    "from langchain.schema import AgentAction, AgentFinish\n",
     "from langchain_community.utilities import SerpAPIWrapper\n",
+    "from langchain_core.agents import AgentAction, AgentFinish\n",
     "from langchain_openai import OpenAI"
    ]
   },
@@ -103,8 +103,8 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from langchain.schema import Document\n",
     "from langchain_community.vectorstores import FAISS\n",
+    "from langchain_core.documents import Document\n",
     "from langchain_openai import OpenAIEmbeddings"
    ]
   },
```

```diff
@@ -72,7 +72,7 @@
    "source": [
     "from typing import Any, List, Tuple, Union\n",
     "\n",
-    "from langchain.schema import AgentAction, AgentFinish\n",
+    "from langchain_core.agents import AgentAction, AgentFinish\n",
     "\n",
     "\n",
     "class FakeAgent(BaseMultiActionAgent):\n",
```

```diff
@@ -73,8 +73,9 @@
     "    AsyncCallbackManagerForRetrieverRun,\n",
     "    CallbackManagerForRetrieverRun,\n",
     ")\n",
-    "from langchain.schema import BaseRetriever, Document\n",
     "from langchain_community.utilities import GoogleSerperAPIWrapper\n",
+    "from langchain_core.documents import Document\n",
+    "from langchain_core.retrievers import BaseRetriever\n",
     "from langchain_openai import ChatOpenAI, OpenAI"
    ]
   },
```

```diff
@@ -358,7 +358,7 @@
     "\n",
     "from langchain.chains.openai_functions import create_qa_with_structure_chain\n",
     "from langchain.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate\n",
-    "from langchain.schema import HumanMessage, SystemMessage\n",
+    "from langchain_core.messages import HumanMessage, SystemMessage\n",
     "from pydantic import BaseModel, Field"
    ]
   },
```
```diff
@@ -19,7 +19,9 @@
    "source": [
     "## Setup\n",
     "\n",
-    "For this example, we will use Pinecone and some fake data"
+    "For this example, we will use Pinecone and some fake data. To configure Pinecone, set the following environment variable:\n",
+    "\n",
+    "- `PINECONE_API_KEY`: Your Pinecone API key"
    ]
   },
   {
@@ -29,11 +31,8 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "import pinecone\n",
-    "from langchain_community.vectorstores import Pinecone\n",
     "from langchain_openai import OpenAIEmbeddings\n",
-    "\n",
-    "pinecone.init(api_key=\"...\", environment=\"...\")"
+    "from langchain_pinecone import PineconeVectorStore"
    ]
   },
   {
@@ -64,7 +63,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "vectorstore = Pinecone.from_texts(\n",
+    "vectorstore = PineconeVectorStore.from_texts(\n",
     "    list(all_documents.values()), OpenAIEmbeddings(), index_name=\"rag-fusion\"\n",
     ")"
    ]
@@ -162,7 +161,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "vectorstore = Pinecone.from_existing_index(\"rag-fusion\", OpenAIEmbeddings())\n",
+    "vectorstore = PineconeVectorStore.from_existing_index(\"rag-fusion\", OpenAIEmbeddings())\n",
     "retriever = vectorstore.as_retriever()"
    ]
   },
```
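Taken together, these hunks replace the `pinecone.init(...)` client setup with the `langchain-pinecone` package, which reads `PINECONE_API_KEY` from the environment. A minimal sketch of the migrated usage, assuming that package is installed, an OpenAI key is configured for the embeddings, and the `rag-fusion` index already exists:

```python
import os

from langchain_openai import OpenAIEmbeddings
from langchain_pinecone import PineconeVectorStore

# No pinecone.init(...) call: the API key comes from the environment.
os.environ.setdefault("PINECONE_API_KEY", "...")

# Reconnect to the existing index and expose it as a retriever.
vectorstore = PineconeVectorStore.from_existing_index("rag-fusion", OpenAIEmbeddings())
retriever = vectorstore.as_retriever()
print(retriever.invoke("query about rag fusion"))
```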
```diff
@@ -51,10 +51,10 @@
     "from langchain.chains.base import Chain\n",
     "from langchain.prompts import PromptTemplate\n",
     "from langchain.prompts.base import StringPromptTemplate\n",
-    "from langchain.schema import AgentAction, AgentFinish\n",
     "from langchain.text_splitter import CharacterTextSplitter\n",
     "from langchain_community.llms import BaseLLM\n",
     "from langchain_community.vectorstores import Chroma\n",
+    "from langchain_core.agents import AgentAction, AgentFinish\n",
     "from langchain_openai import ChatOpenAI, OpenAI, OpenAIEmbeddings\n",
     "from pydantic import BaseModel, Field"
    ]
```

```diff
@@ -401,7 +401,7 @@
     ")\n",
     "from langchain.chains import LLMChain\n",
     "from langchain.prompts import StringPromptTemplate\n",
-    "from langchain.schema import AgentAction, AgentFinish"
+    "from langchain_core.agents import AgentAction, AgentFinish"
    ]
   },
   {
```

```diff
@@ -49,7 +49,7 @@ class ExampleLinksDirective(SphinxDirective):
         class_or_func_name = self.arguments[0]
         links = imported_classes.get(class_or_func_name, {})
         list_node = nodes.bullet_list()
-        for doc_name, link in links.items():
+        for doc_name, link in sorted(links.items()):
             item_node = nodes.list_item()
             para_node = nodes.paragraph()
             link_node = nodes.reference()
```

```diff
@@ -351,7 +351,7 @@ def main() -> None:
         # Skip any hidden directories
         # Some of these could be present by mistake in the code base
         # e.g., .pytest_cache from running tests from the wrong location.
-        if not dir.startswith("."):
+        if dir.startswith("."):
             print("Skipping dir:", dir)
             continue

```

File diff suppressed because it is too large.

```diff
@@ -7,7 +7,7 @@
    "source": [
     "# Agents\n",
     "\n",
-    "You can pass a Runnable into an agent."
+    "You can pass a Runnable into an agent. Make sure you have `langchainhub` installed: `pip install langchainhub`"
    ]
   },
   {
@@ -98,7 +98,7 @@
    "source": [
     "Building an agent from a runnable usually involves a few things:\n",
     "\n",
-    "1. Data processing for the intermediate steps. These need to represented in a way that the language model can recognize them. This should be pretty tightly coupled to the instructions in the prompt\n",
+    "1. Data processing for the intermediate steps. These need to be represented in a way that the language model can recognize them. This should be pretty tightly coupled to the instructions in the prompt\n",
     "\n",
     "2. The prompt itself\n",
     "\n",
```

```diff
@@ -47,7 +47,7 @@
    "source": [
     "from operator import itemgetter\n",
     "\n",
-    "from langchain.schema import StrOutputParser\n",
+    "from langchain_core.output_parsers import StrOutputParser\n",
     "from langchain_core.prompts import ChatPromptTemplate\n",
     "from langchain_openai import ChatOpenAI\n",
     "\n",
```

```diff
@@ -169,8 +169,8 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from langchain.schema import format_document\n",
     "from langchain_core.messages import AIMessage, HumanMessage, get_buffer_string\n",
+    "from langchain_core.prompts import format_document\n",
     "from langchain_core.runnables import RunnableParallel"
    ]
   },
```

```diff
@@ -29,7 +29,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from langchain.schema import StrOutputParser\n",
+    "from langchain_core.output_parsers import StrOutputParser\n",
     "from langchain_core.prompts import ChatPromptTemplate\n",
     "from langchain_core.runnables import RunnablePassthrough\n",
     "from langchain_openai import ChatOpenAI"
```

```diff
@@ -68,7 +68,7 @@
    "source": [
     "# Showing the example using anthropic, but you can use\n",
     "# your favorite chat model!\n",
-    "from langchain.chat_models import ChatAnthropic\n",
+    "from langchain_community.chat_models import ChatAnthropic\n",
     "\n",
     "model = ChatAnthropic()\n",
     "\n",
```

```diff
@@ -35,7 +35,7 @@
     "\n",
     "from langchain.chains import LLMChain\n",
     "from langchain.evaluation import AgentTrajectoryEvaluator\n",
-    "from langchain.schema import AgentAction\n",
+    "from langchain_core.agents import AgentAction\n",
     "from langchain_openai import ChatOpenAI\n",
     "\n",
     "\n",
```

```diff
@@ -90,7 +90,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from langchain.schema import Document\n",
+    "from langchain_core.documents import Document\n",
     "\n",
     "documents = [Document(page_content=document_content)]"
    ]
@@ -879,7 +879,7 @@
    "outputs": [],
    "source": [
     "from langchain.prompts.prompt import PromptTemplate\n",
-    "from langchain.schema import format_document\n",
+    "from langchain_core.prompts import format_document\n",
     "\n",
     "DEFAULT_DOCUMENT_PROMPT = PromptTemplate.from_template(template=\"{page_content}\")\n",
     "\n",
```

```diff
@@ -242,7 +242,7 @@
    "outputs": [],
    "source": [
     "from langchain.callbacks import LabelStudioCallbackHandler\n",
-    "from langchain.schema import HumanMessage, SystemMessage\n",
+    "from langchain_core.messages import HumanMessage, SystemMessage\n",
     "from langchain_openai import ChatOpenAI\n",
     "\n",
     "chat_llm = ChatOpenAI(\n",
```

````diff
@@ -53,7 +53,7 @@ Example:

 ```python
 from langchain_openai import ChatOpenAI
-from langchain.schema import SystemMessage, HumanMessage
+from langchain_core.messages import SystemMessage, HumanMessage
 from langchain.agents import OpenAIFunctionsAgent, AgentExecutor, tool
 from langchain.callbacks import LLMonitorCallbackHandler

````

```diff
@@ -267,7 +267,7 @@
    "outputs": [],
    "source": [
     "from langchain.callbacks import TrubricsCallbackHandler\n",
-    "from langchain.schema import HumanMessage, SystemMessage\n",
+    "from langchain_core.messages import HumanMessage, SystemMessage\n",
     "from langchain_openai import ChatOpenAI"
    ]
   },
```

```diff
@@ -83,7 +83,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from langchain.schema import HumanMessage"
+    "from langchain_core.messages import HumanMessage"
    ]
   },
   {
```

```diff
@@ -109,7 +109,7 @@
    "source": [
     "import asyncio\n",
     "\n",
-    "from langchain.schema import HumanMessage, SystemMessage\n",
+    "from langchain_core.messages import HumanMessage, SystemMessage\n",
     "\n",
     "messages = [\n",
     "    SystemMessage(content=\"You are a helpful AI that shares everything you know.\"),\n",
```

```diff
@@ -31,7 +31,7 @@
    "source": [
     "import os\n",
     "\n",
-    "from langchain.schema import HumanMessage\n",
+    "from langchain_core.messages import HumanMessage\n",
     "from langchain_openai import AzureChatOpenAI"
    ]
   },
```

```diff
@@ -74,11 +74,11 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from langchain.schema import HumanMessage\n",
     "from langchain_community.chat_models.azureml_endpoint import (\n",
     "    AzureMLEndpointApiType,\n",
     "    LlamaChatContentFormatter,\n",
-    ")"
+    ")\n",
+    "from langchain_core.messages import HumanMessage"
    ]
   },
   {
@@ -105,8 +105,8 @@
    }
   ],
   "source": [
-    "from langchain.schema import HumanMessage\n",
     "from langchain_community.chat_models.azureml_endpoint import LlamaContentFormatter\n",
+    "from langchain_core.messages import HumanMessage\n",
     "\n",
     "chat = AzureMLChatOnlineEndpoint(\n",
     "    endpoint_url=\"https://<your-endpoint>.<your_region>.inference.ml.azure.com/score\",\n",
```

```diff
@@ -29,8 +29,8 @@
    },
    "outputs": [],
    "source": [
-    "from langchain.schema import HumanMessage\n",
-    "from langchain_community.chat_models import ChatBaichuan"
+    "from langchain_community.chat_models import ChatBaichuan\n",
+    "from langchain_core.messages import HumanMessage"
    ]
   },
   {
```

```diff
@@ -47,8 +47,8 @@
    },
    "outputs": [],
    "source": [
-    "from langchain.schema import HumanMessage\n",
-    "from langchain_community.chat_models import BedrockChat"
+    "from langchain_community.chat_models import BedrockChat\n",
+    "from langchain_core.messages import HumanMessage"
    ]
   },
   {
```

```diff
@@ -68,8 +68,8 @@
    },
    "outputs": [],
    "source": [
-    "from langchain.chat_models import ChatDeepInfra\n",
-    "from langchain.schema import HumanMessage"
+    "from langchain_community.chat_models import ChatDeepInfra\n",
+    "from langchain_core.messages import HumanMessage"
    ]
   },
   {
@@ -216,7 +216,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.9.1"
+   "version": "3.11.4"
   }
  },
 "nbformat": 4,
```

```diff
@@ -76,8 +76,8 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from langchain.schema import HumanMessage\n",
     "from langchain_community.chat_models import ErnieBotChat\n",
+    "from langchain_core.messages import HumanMessage\n",
     "\n",
     "chat = ErnieBotChat(\n",
     "    ernie_client_id=\"YOUR_CLIENT_ID\", ernie_client_secret=\"YOUR_CLIENT_SECRET\"\n",
```

```diff
@@ -73,8 +73,8 @@
    }
   ],
   "source": [
-    "from langchain.schema import HumanMessage, SystemMessage\n",
     "from langchain_community.chat_models import ChatEverlyAI\n",
+    "from langchain_core.messages import HumanMessage, SystemMessage\n",
     "\n",
     "messages = [\n",
     "    SystemMessage(content=\"You are a helpful AI that shares everything you know.\"),\n",
@@ -127,8 +127,8 @@
   ],
   "source": [
     "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
-    "from langchain.schema import HumanMessage, SystemMessage\n",
     "from langchain_community.chat_models import ChatEverlyAI\n",
+    "from langchain_core.messages import HumanMessage, SystemMessage\n",
     "\n",
     "messages = [\n",
     "    SystemMessage(content=\"You are a humorous AI that delights people.\"),\n",
@@ -185,8 +185,8 @@
   ],
   "source": [
     "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
-    "from langchain.schema import HumanMessage, SystemMessage\n",
     "from langchain_community.chat_models import ChatEverlyAI\n",
+    "from langchain_core.messages import HumanMessage, SystemMessage\n",
     "\n",
     "messages = [\n",
     "    SystemMessage(content=\"You are a humorous AI that delights people.\"),\n",
```

```diff
@@ -37,8 +37,8 @@
    "source": [
     "import os\n",
     "\n",
-    "from langchain.schema import HumanMessage, SystemMessage\n",
-    "from langchain_community.chat_models.fireworks import ChatFireworks"
+    "from langchain_community.chat_models.fireworks import ChatFireworks\n",
+    "from langchain_core.messages import HumanMessage, SystemMessage"
    ]
   },
   {
```

```diff
@@ -75,7 +75,7 @@
    }
   ],
   "source": [
-    "from langchain.schema import HumanMessage, SystemMessage\n",
+    "from langchain_core.messages import HumanMessage, SystemMessage\n",
     "\n",
     "messages = [\n",
     "    SystemMessage(\n",
```

```diff
@@ -70,9 +70,9 @@
    },
    "outputs": [],
    "source": [
-    "from langchain.schema import HumanMessage\n",
     "from langchain_community.chat_models import GPTRouter\n",
-    "from langchain_community.chat_models.gpt_router import GPTRouterModel"
+    "from langchain_community.chat_models.gpt_router import GPTRouterModel\n",
+    "from langchain_core.messages import HumanMessage"
    ]
   },
   {
```
docs/docs/integrations/chat/groq.ipynb (new file, 181 lines)

````json
{
 "cells": [
  {
   "cell_type": "raw",
   "metadata": {},
   "source": [
    "---\n",
    "sidebar_label: Groq\n",
    "---"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Groq\n",
    "\n",
    "Install the langchain-groq package if not already installed:\n",
    "\n",
    "```bash\n",
    "pip install langchain-groq\n",
    "```\n",
    "\n",
    "Request an [API key](https://wow.groq.com) and set it as an environment variable:\n",
    "\n",
    "```bash\n",
    "export GROQ_API_KEY=<YOUR API KEY>\n",
    "```\n",
    "\n",
    "Alternatively, you may configure the API key when you initialize ChatGroq."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Import the ChatGroq class and initialize it with a model:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_core.prompts import ChatPromptTemplate\n",
    "from langchain_groq import ChatGroq"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [],
   "source": [
    "chat = ChatGroq(temperature=0, model_name=\"mixtral-8x7b-32768\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "You can view the available models [here](https://console.groq.com/docs/models).\n",
    "\n",
    "If you do not want to set your API key in the environment, you can pass it directly to the client:\n",
    "```python\n",
    "chat = ChatGroq(temperature=0, groq_api_key=\"YOUR_API_KEY\", model_name=\"mixtral-8x7b-32768\")\n",
    "\n",
    "```"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Write a prompt and invoke ChatGroq to create completions:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "AIMessage(content='Low Latency Large Language Models (LLMs) are a type of artificial intelligence model that can understand and generate human-like text. The term \"low latency\" refers to the model\\'s ability to process and respond to inputs quickly, with minimal delay.\\n\\nThe importance of low latency in LLMs can be explained through the following points:\\n\\n1. Improved user experience: In real-time applications such as chatbots, virtual assistants, and interactive games, users expect quick and responsive interactions. Low latency LLMs can provide instant feedback and responses, creating a more seamless and engaging user experience.\\n\\n2. Better decision-making: In time-sensitive scenarios, such as financial trading or autonomous vehicles, low latency LLMs can quickly process and analyze vast amounts of data, enabling faster and more informed decision-making.\\n\\n3. Enhanced accessibility: For individuals with disabilities, low latency LLMs can help create more responsive and inclusive interfaces, such as voice-controlled assistants or real-time captioning systems.\\n\\n4. Competitive advantage: In industries where real-time data analysis and decision-making are crucial, low latency LLMs can provide a competitive edge by enabling businesses to react more quickly to market changes, customer needs, or emerging opportunities.\\n\\n5. Scalability: Low latency LLMs can efficiently handle a higher volume of requests and interactions, making them more suitable for large-scale applications and services.\\n\\nIn summary, low latency is an essential aspect of LLMs, as it significantly impacts user experience, decision-making, accessibility, competitiveness, and scalability. By minimizing delays and response times, low latency LLMs can unlock new possibilities and applications for artificial intelligence in various industries and scenarios.')"
      ]
     },
     "execution_count": 29,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "system = \"You are a helpful assistant.\"\n",
    "human = \"{text}\"\n",
    "prompt = ChatPromptTemplate.from_messages([(\"system\", system), (\"human\", human)])\n",
    "\n",
    "chain = prompt | chat\n",
    "chain.invoke({\n",
    "    \"text\": \"Explain the importance of low latency LLMs.\"\n",
    "})"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## `ChatGroq` also supports async and streaming functionality:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "AIMessage(content=\"There's a star that shines up in the sky,\\nThe Sun, that makes the day bright and spry.\\nIt rises and sets,\\nIn a daily, predictable bet,\\nGiving life to the world, oh my!\")"
      ]
     },
     "execution_count": 32,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "chat = ChatGroq(temperature=0, model_name=\"mixtral-8x7b-32768\")\n",
    "prompt = ChatPromptTemplate.from_messages([(\"human\", \"Write a Limerick about {topic}\")])\n",
    "chain = prompt | chat\n",
    "await chain.ainvoke({\"topic\": \"The Sun\"})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "The moon's gentle glow\n",
      "Illuminates the night sky\n",
      "Peaceful and serene"
     ]
    }
   ],
   "source": [
    "chat = ChatGroq(temperature=0, model_name=\"llama2-70b-4096\")\n",
    "prompt = ChatPromptTemplate.from_messages([(\"human\", \"Write a haiku about {topic}\")])\n",
    "chain = prompt | chat\n",
    "for chunk in chain.stream({\"topic\": \"The Moon\"}):\n",
    "    print(chunk.content, end=\"\", flush=True)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": ".venv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
````
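The notebook condenses to a short script; a sketch assuming `langchain-groq` is installed and using an inline `groq_api_key` placeholder instead of the environment variable:

```python
from langchain_core.prompts import ChatPromptTemplate
from langchain_groq import ChatGroq

# groq_api_key can be omitted if GROQ_API_KEY is set in the environment.
chat = ChatGroq(
    temperature=0,
    groq_api_key="YOUR_API_KEY",
    model_name="mixtral-8x7b-32768",
)

prompt = ChatPromptTemplate.from_messages(
    [("system", "You are a helpful assistant."), ("human", "{text}")]
)
chain = prompt | chat

# One-shot completion.
print(chain.invoke({"text": "Explain the importance of low latency LLMs."}).content)

# Streaming.
for chunk in chain.stream({"text": "Write a haiku about the Moon."}):
    print(chunk.content, end="", flush=True)
```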
```diff
@@ -24,8 +24,8 @@
     "    HumanMessagePromptTemplate,\n",
     "    SystemMessagePromptTemplate,\n",
     ")\n",
-    "from langchain.schema import HumanMessage, SystemMessage\n",
-    "from langchain_community.chat_models import JinaChat"
+    "from langchain_community.chat_models import JinaChat\n",
+    "from langchain_core.messages import HumanMessage, SystemMessage"
    ]
   },
   {
```
docs/docs/integrations/chat/kinetica.ipynb (new file, 654 lines)

````json
{
 "cells": [
  {
   "cell_type": "raw",
   "metadata": {},
   "source": [
    "---\n",
    "sidebar_label: Kinetica\n",
    "---"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Kinetica SqlAssist LLM Demo\n",
    "\n",
    "This notebook demonstrates how to use Kinetica to transform natural language into SQL\n",
    "and simplify the process of data retrieval. This demo is intended to show the mechanics\n",
    "of creating and using a chain as opposed to the capabilities of the LLM.\n",
    "\n",
    "## Overview\n",
    "\n",
    "With the Kinetica LLM workflow you create an LLM context in the database that provides\n",
    "information needed for inferencing that includes tables, annotations, rules, and\n",
    "samples. Invoking ``ChatKinetica.load_messages_from_context()`` will retrieve the\n",
    "context information from the database so that it can be used to create a chat prompt.\n",
    "\n",
    "The chat prompt consists of a ``SystemMessage`` and pairs of\n",
    "``HumanMessage``/``AIMessage`` that contain the samples, which are question/SQL\n",
    "pairs. You can append sample pairs to this list, but it is not intended to\n",
    "facilitate a typical natural language conversation.\n",
    "\n",
    "When you create a chain from the chat prompt and execute it, the Kinetica LLM will\n",
    "generate SQL from the input. Optionally you can use ``KineticaSqlOutputParser`` to\n",
    "execute the SQL and return the result as a dataframe.\n",
    "\n",
    "Currently, two LLMs are supported for SQL generation:\n",
    "\n",
    "1. **Kinetica SQL-GPT**: This LLM is based on the OpenAI ChatGPT API.\n",
    "2. **Kinetica SqlAssist**: This LLM is purpose-built to integrate with the Kinetica\n",
    "   database and can run on secure customer premises.\n",
    "\n",
    "For this demo we will be using **SqlAssist**. See the [Kinetica Documentation\n",
    "site](https://docs.kinetica.com/7.1/sql-gpt/concepts/) for more information.\n",
    "\n",
    "## Prerequisites\n",
    "\n",
    "To get started you will need a Kinetica DB instance. If you don't have one you can\n",
    "obtain a [free development instance](https://cloud.kinetica.com/trynow).\n",
    "\n",
    "You will need to install the following packages..."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Note: you may need to restart the kernel to use updated packages.\n",
      "Note: you may need to restart the kernel to use updated packages.\n"
     ]
    }
   ],
   "source": [
    "# Install LangChain community and core packages\n",
    "%pip install --upgrade --quiet langchain-core langchain-community\n",
    "\n",
    "# Install Kinetica DB connection package\n",
    "%pip install --upgrade --quiet gpudb typeguard\n",
    "\n",
    "# Install packages needed for this tutorial\n",
    "%pip install --upgrade --quiet faker"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Database Connection\n",
    "\n",
    "You must set the database connection in the following environment variables. If you are using a virtual environment you can set them in the `.env` file of the project:\n",
    "* `KINETICA_URL`: Database connection URL\n",
    "* `KINETICA_USER`: Database user\n",
    "* `KINETICA_PASSWD`: Secure password.\n",
    "\n",
    "If you can create an instance of `ChatKinetica` then you are successfully connected."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_community.chat_models.kinetica import ChatKinetica\n",
    "\n",
    "kinetica_llm = ChatKinetica()\n",
    "\n",
    "# Test table we will create\n",
    "table_name = \"demo.user_profiles\"\n",
    "\n",
    "# LLM Context we will create\n",
    "kinetica_ctx = \"demo.test_llm_ctx\""
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Create test data\n",
    "\n",
    "Before we can generate SQL we will need to create a Kinetica table and an LLM context that can inference the table.\n",
    "\n",
    "### Create some fake user profiles\n",
    "\n",
    "We will use the `faker` package to create a dataframe with 100 fake profiles."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>username</th>\n",
       "      <th>name</th>\n",
       "      <th>sex</th>\n",
       "      <th>address</th>\n",
       "      <th>mail</th>\n",
       "      <th>birthdate</th>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>id</th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>eduardo69</td>\n",
       "      <td>Haley Beck</td>\n",
       "      <td>F</td>\n",
       "      <td>59836 Carla Causeway Suite 939\\nPort Eugene, I...</td>\n",
       "      <td>meltondenise@yahoo.com</td>\n",
       "      <td>1997-09-09</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>lbarrera</td>\n",
       "      <td>Joshua Stephens</td>\n",
       "      <td>M</td>\n",
       "      <td>3108 Christina Forges\\nPort Timothychester, KY...</td>\n",
       "      <td>erica80@hotmail.com</td>\n",
       "      <td>1924-05-05</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>bburton</td>\n",
       "      <td>Paula Kaiser</td>\n",
       "      <td>F</td>\n",
       "      <td>Unit 7405 Box 3052\\nDPO AE 09858</td>\n",
       "      <td>timothypotts@gmail.com</td>\n",
       "      <td>1933-09-06</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>melissa49</td>\n",
       "      <td>Wendy Reese</td>\n",
       "      <td>F</td>\n",
       "      <td>6408 Christopher Hill Apt. 459\\nNew Benjamin, ...</td>\n",
       "      <td>dadams@gmail.com</td>\n",
       "      <td>1988-07-28</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>melissacarter</td>\n",
       "      <td>Manuel Rios</td>\n",
       "      <td>M</td>\n",
       "      <td>2241 Bell Gardens Suite 723\\nScottside, CA 38463</td>\n",
       "      <td>williamayala@gmail.com</td>\n",
       "      <td>1930-12-19</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "               username             name sex  \\\n",
       "id                                             \n",
       "0             eduardo69       Haley Beck   F   \n",
       "1              lbarrera  Joshua Stephens   M   \n",
       "2               bburton     Paula Kaiser   F   \n",
       "3             melissa49      Wendy Reese   F   \n",
       "4         melissacarter      Manuel Rios   M   \n",
       "\n",
       "                                              address                    mail  \\\n",
       "id                                                                              \n",
       "0   59836 Carla Causeway Suite 939\\nPort Eugene, I...  meltondenise@yahoo.com   \n",
       "1   3108 Christina Forges\\nPort Timothychester, KY...     erica80@hotmail.com   \n",
       "2                    Unit 7405 Box 3052\\nDPO AE 09858  timothypotts@gmail.com   \n",
       "3   6408 Christopher Hill Apt. 459\\nNew Benjamin, ...        dadams@gmail.com   \n",
       "4    2241 Bell Gardens Suite 723\\nScottside, CA 38463  williamayala@gmail.com   \n",
       "\n",
       "     birthdate  \n",
       "id              \n",
       "0   1997-09-09  \n",
       "1   1924-05-05  \n",
       "2   1933-09-06  \n",
       "3   1988-07-28  \n",
       "4   1930-12-19  "
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from typing import Generator\n",
    "\n",
    "import pandas as pd\n",
    "from faker import Faker\n",
    "\n",
    "Faker.seed(5467)\n",
    "faker = Faker(locale=\"en-US\")\n",
    "\n",
    "\n",
    "def profile_gen(count: int) -> Generator:\n",
    "    for id in range(0, count):\n",
    "        rec = dict(id=id, **faker.simple_profile())\n",
    "        rec[\"birthdate\"] = pd.Timestamp(rec[\"birthdate\"])\n",
    "        yield rec\n",
    "\n",
    "\n",
    "load_df = pd.DataFrame.from_records(data=profile_gen(100), index=\"id\")\n",
    "load_df.head()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Create a Kinetica table from the Dataframe"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>name</th>\n",
       "      <th>type</th>\n",
       "      <th>properties</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>username</td>\n",
       "      <td>string</td>\n",
       "      <td>[char32]</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>name</td>\n",
       "      <td>string</td>\n",
       "      <td>[char32]</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>sex</td>\n",
       "      <td>string</td>\n",
       "      <td>[char1]</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>address</td>\n",
       "      <td>string</td>\n",
       "      <td>[char64]</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>mail</td>\n",
       "      <td>string</td>\n",
       "      <td>[char32]</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>5</th>\n",
       "      <td>birthdate</td>\n",
       "      <td>long</td>\n",
       "      <td>[timestamp]</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "        name    type   properties\n",
       "0   username  string     [char32]\n",
       "1       name  string     [char32]\n",
       "2        sex  string      [char1]\n",
       "3    address  string     [char64]\n",
       "4       mail  string     [char32]\n",
       "5  birthdate    long  [timestamp]"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from gpudb import GPUdbTable\n",
    "\n",
    "gpudb_table = GPUdbTable.from_df(\n",
    "    load_df,\n",
    "    db=kinetica_llm.kdbc,\n",
    "    table_name=table_name,\n",
    "    clear_table=True,\n",
    "    load_data=True,\n",
    ")\n",
    "\n",
    "# See the Kinetica column types\n",
    "gpudb_table.type_as_df()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Create the LLM context\n",
    "\n",
    "You can create an LLM Context using the Kinetica Workbench UI or you can manually create it with the `CREATE OR REPLACE CONTEXT` syntax.\n",
    "\n",
    "Here we create a context from the SQL syntax referencing the table we created."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'status': 'OK',\n",
       " 'message': '',\n",
       " 'data_type': 'execute_sql_response',\n",
       " 'response_time': 0.0148}"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# create an LLM context for the table.\n",
    "\n",
    "from gpudb import GPUdbException\n",
    "\n",
    "sql = f\"\"\"\n",
    "CREATE OR REPLACE CONTEXT {kinetica_ctx}\n",
    "(\n",
    "    TABLE = demo.test_profiles\n",
    "    COMMENT = 'Contains user profiles.'\n",
    "),\n",
    "(\n",
    "    SAMPLES = (\n",
    "        'How many male users are there?' = \n",
    "        'select count(1) as num_users\n",
    "    from demo.test_profiles\n",
    "    where sex = ''M'';')\n",
    ")\n",
    "\"\"\"\n",
    "\n",
    "\n",
    "def _check_error(response: dict) -> None:\n",
    "    status = response[\"status_info\"][\"status\"]\n",
    "    if status != \"OK\":\n",
    "        message = response[\"status_info\"][\"message\"]\n",
    "        raise GPUdbException(\"[%s]: %s\" % (status, message))\n",
    "\n",
    "\n",
    "response = kinetica_llm.kdbc.execute_sql(sql)\n",
    "_check_error(response)\n",
    "response[\"status_info\"]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Use LangChain for inferencing\n",
    "\n",
    "In the example below we will create a chain from the previously created table and LLM context. This chain will generate SQL and return the resulting data as a dataframe.\n",
    "\n",
    "### Load the chat prompt from the Kinetica DB\n",
    "\n",
    "The `load_messages_from_context()` function will retrieve a context from the DB and convert it into a list of chat messages that we use to create a ``ChatPromptTemplate``."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "================================\u001b[1m System Message \u001b[0m================================\n",
      "\n",
      "CREATE TABLE demo.test_profiles AS\n",
      "(\n",
      "   username VARCHAR (32) NOT NULL,\n",
      "   name VARCHAR (32) NOT NULL,\n",
      "   sex VARCHAR (1) NOT NULL,\n",
      "   address VARCHAR (64) NOT NULL,\n",
      "   mail VARCHAR (32) NOT NULL,\n",
      "   birthdate TIMESTAMP NOT NULL\n",
      ");\n",
      "COMMENT ON TABLE demo.test_profiles IS 'Contains user profiles.';\n",
      "\n",
      "================================\u001b[1m Human Message \u001b[0m=================================\n",
      "\n",
      "How many male users are there?\n",
      "\n",
      "==================================\u001b[1m Ai Message \u001b[0m==================================\n",
      "\n",
      "select count(1) as num_users\n",
      "    from demo.test_profiles\n",
      "    where sex = 'M';\n",
      "\n",
      "================================\u001b[1m Human Message \u001b[0m=================================\n",
      "\n",
      "\u001b[33;1m\u001b[1;3m{input}\u001b[0m\n"
     ]
    }
   ],
   "source": [
    "from langchain_core.prompts import ChatPromptTemplate\n",
    "\n",
    "# load the context from the database\n",
    "ctx_messages = kinetica_llm.load_messages_from_context(kinetica_ctx)\n",
    "\n",
    "# Add the input prompt. This is where input question will be substituted.\n",
    "ctx_messages.append((\"human\", \"{input}\"))\n",
    "\n",
    "# Create the prompt template.\n",
    "prompt_template = ChatPromptTemplate.from_messages(ctx_messages)\n",
    "prompt_template.pretty_print()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Create the chain\n",
    "\n",
    "The last element of this chain is `KineticaSqlOutputParser`, which will execute the SQL and return a dataframe. This is optional; if we left it out then only SQL would be returned."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_community.chat_models.kinetica import (\n",
    "    KineticaSqlOutputParser,\n",
    "    KineticaSqlResponse,\n",
    ")\n",
    "\n",
    "chain = prompt_template | kinetica_llm | KineticaSqlOutputParser(kdbc=kinetica_llm.kdbc)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Generate the SQL\n",
    "\n",
    "The chain we created will take a question as input and return a ``KineticaSqlResponse`` containing the generated SQL and data. The question must be relevant to the LLM context we used to create the prompt."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "SQL: SELECT username, name\n",
      "    FROM demo.test_profiles\n",
      "    WHERE sex = 'F'\n",
      "    ORDER BY username;\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>username</th>\n",
       "      <th>name</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>alexander40</td>\n",
       "      <td>Tina Ramirez</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>bburton</td>\n",
       "      <td>Paula Kaiser</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>brian12</td>\n",
       "      <td>Stefanie Williams</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>brownanna</td>\n",
       "      <td>Jennifer Rowe</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>carl19</td>\n",
       "      <td>Amanda Potts</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "      username               name\n",
       "0  alexander40       Tina Ramirez\n",
       "1      bburton       Paula Kaiser\n",
       "2      brian12  Stefanie Williams\n",
       "3    brownanna      Jennifer Rowe\n",
       "4       carl19       Amanda Potts"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Here you must ask a question relevant to the LLM context provided in the prompt template.\n",
    "response: KineticaSqlResponse = chain.invoke(\n",
    "    {\"input\": \"What are the female users ordered by username?\"}\n",
    ")\n",
    "\n",
    "print(f\"SQL: {response.sql}\")\n",
    "response.dataframe.head()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "langchain",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
````
@@ -40,8 +40,8 @@
},
"outputs": [],
"source": [
"from langchain.schema import HumanMessage, SystemMessage\n",
"from langchain_community.chat_models import ChatKonko"
"from langchain_community.chat_models import ChatKonko\n",
"from langchain_core.messages import HumanMessage, SystemMessage"
]
},
{

@@ -32,8 +32,8 @@
},
"outputs": [],
"source": [
"from langchain.schema import HumanMessage\n",
"from langchain_community.chat_models import ChatLiteLLM"
"from langchain_community.chat_models import ChatLiteLLM\n",
"from langchain_core.messages import HumanMessage"
]
},
{

@@ -38,8 +38,8 @@
},
"outputs": [],
"source": [
"from langchain.schema import HumanMessage\n",
"from langchain_community.chat_models import ChatLiteLLMRouter\n",
"from langchain_core.messages import HumanMessage\n",
"from litellm import Router"
]
},

@@ -54,7 +54,7 @@
"    HumanMessagePromptTemplate,\n",
"    MessagesPlaceholder,\n",
")\n",
"from langchain.schema import SystemMessage\n",
"from langchain_core.messages import SystemMessage\n",
"\n",
"template_messages = [\n",
"    SystemMessage(content=\"You are a helpful assistant.\"),\n",

@@ -39,8 +39,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema import HumanMessage\n",
"from langchain_community.chat_models import MiniMaxChat"
"from langchain_community.chat_models import MiniMaxChat\n",
"from langchain_core.messages import HumanMessage"
]
},
{

@@ -278,7 +278,7 @@
}
],
"source": [
"from langchain.schema import HumanMessage\n",
"from langchain_core.messages import HumanMessage\n",
"\n",
"messages = [\n",
"    HumanMessage(\n",
@@ -313,8 +313,8 @@
"source": [
"import json\n",
"\n",
"from langchain.schema import HumanMessage\n",
"from langchain_community.chat_models import ChatOllama\n",
"from langchain_core.messages import HumanMessage\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
@@ -463,8 +463,8 @@
}
],
"source": [
"from langchain.schema import HumanMessage\n",
"from langchain_community.chat_models import ChatOllama\n",
"from langchain_core.messages import HumanMessage\n",
"\n",
"llm = ChatOllama(model=\"bakllava\", temperature=0)\n",
"\n",

@@ -102,7 +102,7 @@
}
],
"source": [
"from langchain.schema import HumanMessage\n",
"from langchain_core.messages import HumanMessage\n",
"\n",
"model.invoke(\"what is the weather in Boston?\")"
]

@@ -34,7 +34,7 @@
"    HumanMessagePromptTemplate,\n",
"    SystemMessagePromptTemplate,\n",
")\n",
"from langchain.schema import HumanMessage, SystemMessage\n",
"from langchain_core.messages import HumanMessage, SystemMessage\n",
"from langchain_openai import ChatOpenAI"
]
},

@@ -62,8 +62,8 @@
"source": [
"import os\n",
"\n",
"from langchain.schema import HumanMessage\n",
"from langchain_community.chat_models import PromptLayerChatOpenAI"
"from langchain_community.chat_models import PromptLayerChatOpenAI\n",
"from langchain_core.messages import HumanMessage"
]
},
{

@@ -30,8 +30,8 @@
"outputs": [],
"source": [
"\"\"\"For basic init and call\"\"\"\n",
"from langchain.chat_models import ChatSparkLLM\n",
"from langchain.schema import HumanMessage\n",
"from langchain_community.chat_models import ChatSparkLLM\n",
"from langchain_core.messages import HumanMessage\n",
"\n",
"chat = ChatSparkLLM(\n",
"    spark_app_id=\"<app_id>\", spark_api_key=\"<api_key>\", spark_api_secret=\"<api_secret>\"\n",

@@ -36,8 +36,8 @@
},
"outputs": [],
"source": [
"from langchain.schema import HumanMessage\n",
"from langchain_community.chat_models import ChatHunyuan"
"from langchain_community.chat_models import ChatHunyuan\n",
"from langchain_core.messages import HumanMessage"
]
},
{

@@ -100,8 +100,8 @@
}
],
"source": [
"from langchain.schema import HumanMessage\n",
"from langchain_community.chat_models.tongyi import ChatTongyi\n",
"from langchain_core.messages import HumanMessage\n",
"\n",
"chatLLM = ChatTongyi(\n",
"    streaming=True,\n",
@@ -128,7 +128,7 @@
}
],
"source": [
"from langchain.schema import HumanMessage, SystemMessage\n",
"from langchain_core.messages import HumanMessage, SystemMessage\n",
"\n",
"messages = [\n",
"    SystemMessage(\n",

@@ -36,7 +36,7 @@
"    HumanMessagePromptTemplate,\n",
"    SystemMessagePromptTemplate,\n",
")\n",
"from langchain.schema import HumanMessage, SystemMessage\n",
"from langchain_core.messages import HumanMessage, SystemMessage\n",
"from langchain_openai import ChatOpenAI"
]
},

@@ -48,8 +48,8 @@
},
"outputs": [],
"source": [
"from langchain.schema import HumanMessage\n",
"from langchain_community.chat_models import VolcEngineMaasChat"
"from langchain_community.chat_models import VolcEngineMaasChat\n",
"from langchain_core.messages import HumanMessage"
]
},
{

@@ -58,8 +58,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema import HumanMessage, SystemMessage\n",
"from langchain_community.chat_models import ChatYandexGPT"
"from langchain_community.chat_models import ChatYandexGPT\n",
"from langchain_core.messages import HumanMessage, SystemMessage"
]
},
{

@@ -79,8 +79,8 @@
"import re\n",
"from typing import Iterator, List\n",
"\n",
"from langchain.schema import BaseMessage, HumanMessage\n",
"from langchain_community.chat_loaders import base as chat_loaders\n",
"from langchain_core.messages import BaseMessage, HumanMessage\n",
"\n",
"logger = logging.getLogger()\n",
"\n",

@@ -22,7 +22,7 @@
"import json\n",
"\n",
"from langchain.adapters.openai import convert_message_to_dict\n",
"from langchain.schema import AIMessage"
"from langchain_core.messages import AIMessage"
]
},
{

@@ -78,8 +78,8 @@
"import re\n",
"from typing import Iterator, List\n",
"\n",
"from langchain.schema import BaseMessage, HumanMessage\n",
"from langchain_community.chat_loaders import base as chat_loaders\n",
"from langchain_core.messages import BaseMessage, HumanMessage\n",
"\n",
"logger = logging.getLogger()\n",
"\n",

@@ -198,8 +198,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema import Document\n",
"from langchain_community.document_loaders import TensorflowDatasetLoader\n",
"from langchain_core.documents import Document\n",
"\n",
"loader = TensorflowDatasetLoader(\n",
"    dataset_name=\"mlqa/en\",\n",
189
docs/docs/integrations/document_loaders/tidb.ipynb
Normal file
@@ -0,0 +1,189 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# TiDB\n",
"\n",
"> [TiDB](https://github.com/pingcap/tidb) is an open-source, cloud-native, distributed, MySQL-Compatible database for elastic scale and real-time analytics.\n",
"\n",
"This notebook introduces how to use `TiDBLoader` to load data from TiDB in LangChain."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Prerequisites\n",
"\n",
"Before using the `TiDBLoader`, we will install the following dependencies:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Then, we will configure the connection to a TiDB cluster. In this notebook, we will follow the standard connection method provided by TiDB Cloud to establish a secure and efficient database connection."
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"\n",
"# Copy this from the TiDB Cloud console and replace it with your own\n",
"tidb_connection_string_template = \"mysql+pymysql://<USER>:<PASSWORD>@<HOST>:4000/<DB>?ssl_ca=/etc/ssl/cert.pem&ssl_verify_cert=true&ssl_verify_identity=true\"\n",
|
||||
"tidb_password = getpass.getpass(\"Input your TiDB password:\")\n",
|
||||
"tidb_connection_string = tidb_connection_string_template.replace(\n",
|
||||
" \"<PASSWORD>\", tidb_password\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Load Data from TiDB\n",
|
||||
"\n",
|
||||
"Here's a breakdown of some key arguments you can use to customize the behavior of the `TiDBLoader`:\n",
|
||||
"\n",
|
||||
"- `query` (str): This is the SQL query to be executed against the TiDB database. The query should select the data you want to load into your `Document` objects. \n",
|
||||
" For instance, you might use a query like `\"SELECT * FROM my_table\"` to fetch all data from `my_table`.\n",
|
||||
"\n",
|
||||
"- `page_content_columns` (Optional[List[str]]): Specifies the list of column names whose values should be included in the `page_content` of each `Document` object. \n",
|
||||
" If set to `None` (the default), all columns returned by the query are included in `page_content`. This allows you to tailor the content of each document based on specific columns of your data.\n",
|
||||
"\n",
|
||||
"- `metadata_columns` (Optional[List[str]]): Specifies the list of column names whose values should be included in the `metadata` of each `Document` object. \n",
|
||||
" By default, this list is empty, meaning no metadata will be included unless explicitly specified. This is useful for including additional information about each document that doesn't form part of the main content but is still valuable for processing or analysis."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from sqlalchemy import Column, Integer, MetaData, String, Table, create_engine\n",
|
||||
"\n",
|
||||
"# Connect to the database\n",
|
||||
"engine = create_engine(tidb_connection_string)\n",
|
||||
"metadata = MetaData()\n",
|
||||
"table_name = \"test_tidb_loader\"\n",
|
||||
"\n",
|
||||
"# Create a table\n",
|
||||
"test_table = Table(\n",
|
||||
" table_name,\n",
|
||||
" metadata,\n",
|
||||
" Column(\"id\", Integer, primary_key=True),\n",
|
||||
" Column(\"name\", String(255)),\n",
|
||||
" Column(\"description\", String(255)),\n",
|
||||
")\n",
|
||||
"metadata.create_all(engine)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"with engine.connect() as connection:\n",
|
||||
" transaction = connection.begin()\n",
|
||||
" try:\n",
|
||||
" connection.execute(\n",
|
||||
" test_table.insert(),\n",
|
||||
" [\n",
|
||||
" {\"name\": \"Item 1\", \"description\": \"Description of Item 1\"},\n",
|
||||
" {\"name\": \"Item 2\", \"description\": \"Description of Item 2\"},\n",
|
||||
" {\"name\": \"Item 3\", \"description\": \"Description of Item 3\"},\n",
|
||||
" ],\n",
|
||||
" )\n",
|
||||
" transaction.commit()\n",
|
||||
" except:\n",
|
||||
" transaction.rollback()\n",
|
||||
" raise"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"------------------------------\n",
|
||||
"content: name: Item 1\n",
|
||||
"description: Description of Item 1\n",
|
||||
"metada: {'id': 1}\n",
|
||||
"------------------------------\n",
|
||||
"content: name: Item 2\n",
|
||||
"description: Description of Item 2\n",
|
||||
"metada: {'id': 2}\n",
|
||||
"------------------------------\n",
|
||||
"content: name: Item 3\n",
|
||||
"description: Description of Item 3\n",
|
||||
"metada: {'id': 3}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import TiDBLoader\n",
|
||||
"\n",
|
||||
"# Setup TiDBLoader to retrieve data\n",
|
||||
"loader = TiDBLoader(\n",
|
||||
" connection_string=tidb_connection_string,\n",
|
||||
" query=f\"SELECT * FROM {table_name};\",\n",
|
||||
" page_content_columns=[\"name\", \"description\"],\n",
|
||||
" metadata_columns=[\"id\"],\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Load data\n",
|
||||
"documents = loader.load()\n",
|
||||
"\n",
|
||||
"# Display the loaded documents\n",
|
||||
"for doc in documents:\n",
|
||||
" print(\"-\" * 30)\n",
|
||||
" print(f\"content: {doc.page_content}\\nmetada: {doc.metadata}\")"
|
||||
]
|
||||
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"test_table.drop(bind=engine)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "langchain",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.13"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
@@ -32,8 +32,8 @@
"source": [
"import json\n",
"\n",
"from langchain.schema import Document\n",
"from langchain_community.document_transformers import DoctranPropertyExtractor"
"from langchain_community.document_transformers import DoctranPropertyExtractor\n",
"from langchain_core.documents import Document"
]
},
{

@@ -30,8 +30,8 @@
"source": [
"import json\n",
"\n",
"from langchain.schema import Document\n",
"from langchain_community.document_transformers import DoctranQATransformer"
"from langchain_community.document_transformers import DoctranQATransformer\n",
"from langchain_core.documents import Document"
]
},
{

@@ -28,8 +28,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema import Document\n",
"from langchain_community.document_transformers import DoctranTextTranslator"
"from langchain_community.document_transformers import DoctranTextTranslator\n",
"from langchain_core.documents import Document"
]
},
{

@@ -31,8 +31,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema import Document\n",
"from langchain_community.document_transformers import GoogleTranslateTransformer"
"from langchain_community.document_transformers import GoogleTranslateTransformer\n",
"from langchain_core.documents import Document"
]
},
{

@@ -21,10 +21,10 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema import Document\n",
"from langchain_community.document_transformers.openai_functions import (\n",
"    create_metadata_tagger,\n",
")\n",
"from langchain_core.documents import Document\n",
"from langchain_openai import ChatOpenAI"
]
},

@@ -70,11 +70,11 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema import HumanMessage\n",
"from langchain_community.llms.azureml_endpoint import (\n",
"    AzureMLEndpointApiType,\n",
"    LlamaContentFormatter,\n",
")\n",
"from langchain_core.messages import HumanMessage\n",
"\n",
"llm = AzureMLOnlineEndpoint(\n",
"    endpoint_url=\"https://<your-endpoint>.<your_region>.inference.ml.azure.com/score\",\n",
@@ -117,11 +117,11 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema import HumanMessage\n",
"from langchain_community.llms.azureml_endpoint import (\n",
"    AzureMLEndpointApiType,\n",
"    LlamaContentFormatter,\n",
")\n",
"from langchain_core.messages import HumanMessage\n",
"\n",
"llm = AzureMLOnlineEndpoint(\n",
"    endpoint_url=\"https://<your-endpoint>.<your_region>.inference.ml.azure.com/v1/completions\",\n",

@@ -19,17 +19,17 @@
"source": [
"## Setting up\n",
"\n",
"Install the package [`ibm-watsonx-ai`](https://ibm.github.io/watsonx-ai-python-sdk/install.html)."
"Install the package `langchain-ibm`."
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 4,
"id": "2f1fff4e",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet ibm-watsonx-ai"
"!pip install -qU langchain-ibm"
]
},
{
@@ -57,6 +57,30 @@
"os.environ[\"WATSONX_APIKEY\"] = watsonx_api_key"
]
},
{
"cell_type": "markdown",
"id": "c59782a7",
"metadata": {},
"source": [
"Additionaly you are able to pass additional secrets as an environment variable. "
|
||||
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f98c573c",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"os.environ[\"WATSONX_URL\"] = \"your service instance url\"\n",
"os.environ[\"WATSONX_TOKEN\"] = \"your token for accessing the CPD cluster\"\n",
"os.environ[\"WATSONX_PASSWORD\"] = \"your password for accessing the CPD cluster\"\n",
"os.environ[\"WATSONX_USERNAME\"] = \"your username for accessing the CPD cluster\"\n",
"os.environ[\"WATSONX_INSTANCE_ID\"] = \"your instance_id for accessing the CPD cluster\""
]
},
{
"cell_type": "markdown",
"id": "e36acbef",
@@ -74,15 +98,13 @@
"metadata": {},
"outputs": [],
"source": [
"from ibm_watsonx_ai.metanames import GenTextParamsMetaNames as GenParams\n",
"\n",
"parameters = {\n",
"    GenParams.DECODING_METHOD: \"sample\",\n",
"    GenParams.MAX_NEW_TOKENS: 100,\n",
"    GenParams.MIN_NEW_TOKENS: 1,\n",
"    GenParams.TEMPERATURE: 0.5,\n",
"    GenParams.TOP_K: 50,\n",
"    GenParams.TOP_P: 1,\n",
"    \"decoding_method\": \"sample\",\n",
"    \"max_new_tokens\": 100,\n",
"    \"min_new_tokens\": 1,\n",
"    \"temperature\": 0.5,\n",
"    \"top_k\": 50,\n",
"    \"top_p\": 1,\n",
"}"
]
},
@@ -99,12 +121,15 @@
"- To provide context for the API call, you must add `project_id` or `space_id`. For more information see [documentation](https://www.ibm.com/docs/en/watsonx-as-a-service?topic=projects).\n",
"- Depending on the region of your provisioned service instance, use one of the urls described [here](https://ibm.github.io/watsonx-ai-python-sdk/setup_cloud.html#authentication).\n",
"\n",
"In this example, we’ll use the `project_id` and Dallas url."
"In this example, we’ll use the `project_id` and Dallas url.\n",
"\n",
"\n",
"You need to specify `model_id` that will be used for inferencing. All avaliable models you can find in [documentation](https://ibm.github.io/watsonx-ai-python-sdk/fm_model.html#ibm_watsonx_ai.foundation_models.utils.enums.ModelTypes)."
|
||||
]
},
{
"cell_type": "code",
"execution_count": 25,
"execution_count": 4,
"id": "359898de",
"metadata": {},
"outputs": [],
@@ -112,7 +137,7 @@
"from langchain_ibm import WatsonxLLM\n",
"\n",
"watsonx_llm = WatsonxLLM(\n",
"    model_id=\"google/flan-ul2\",\n",
"    model_id=\"ibm/granite-13b-instruct-v2\",\n",
"    url=\"https://us-south.ml.cloud.ibm.com\",\n",
"    project_id=\"PASTE YOUR PROJECT_ID HERE\",\n",
"    params=parameters,\n",
@@ -135,7 +160,7 @@
"outputs": [],
"source": [
"watsonx_llm = WatsonxLLM(\n",
"    model_id=\"google/flan-ul2\",\n",
"    model_id=\"ibm/granite-13b-instruct-v2\",\n",
"    url=\"PASTE YOUR URL HERE\",\n",
"    username=\"PASTE YOUR USERNAME HERE\",\n",
"    password=\"PASTE YOUR PASSWORD HERE\",\n",
@@ -180,7 +205,7 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 5,
"id": "c7d80c05",
"metadata": {},
"outputs": [],
@@ -201,18 +226,17 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 10,
"id": "dc076c56",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'topic': 'dog',\n",
" 'text': 'What is the name of the dog that is the most popular in the world?'}"
"{'topic': 'dog', 'text': 'Why do dogs howl?'}"
]
},
"execution_count": 4,
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
@@ -235,17 +259,17 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 7,
"id": "beea2b5b",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'dog'"
"\"Man's best friend is his dog. \""
]
},
"execution_count": 6,
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
@@ -258,17 +282,17 @@
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 11,
"id": "8ab1a25a",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"LLMResult(generations=[[Generation(text='greyhounds', generation_info={'generated_token_count': 4, 'input_token_count': 8, 'finish_reason': 'eos_token'})], [Generation(text='The Basenji is a dog breed from South Africa.', generation_info={'generated_token_count': 13, 'input_token_count': 7, 'finish_reason': 'eos_token'})]], llm_output={'model_id': 'google/flan-ul2'}, run=[RunInfo(run_id=UUID('03c73a42-db68-428e-ab8d-8ae10abc84fc')), RunInfo(run_id=UUID('c289f67a-87d6-4c8b-a8b7-0b5012c94ca8'))])"
"LLMResult(generations=[[Generation(text='The fastest dog in the world is the greyhound, which can run up to 45 miles per hour. This is about the same speed as a human running down a track. Greyhounds are very fast because they have long legs, a streamlined body, and a strong tail. They can run this fast for short distances, but they can also run for long distances, like a marathon. ', generation_info={'finish_reason': 'eos_token'})], [Generation(text='The Beagle is a scent hound, meaning it is bred to hunt by following a trail of scents.', generation_info={'finish_reason': 'eos_token'})]], llm_output={'token_usage': {'generated_token_count': 106, 'input_token_count': 13}, 'model_id': 'ibm/granite-13b-instruct-v2', 'deployment_id': ''}, run=[RunInfo(run_id=UUID('52cb421d-b63f-4c5f-9b04-d4770c664725')), RunInfo(run_id=UUID('df2ea606-1622-4ed7-8d5d-8f6e068b71c4'))])"
]
},
"execution_count": 9,
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
@@ -296,7 +320,7 @@
},
{
"cell_type": "code",
"execution_count": 45,
"execution_count": 12,
"id": "3f63166a",
"metadata": {},
"outputs": [
@@ -304,7 +328,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
"The golden retriever is my favorite dog because it is very friendly and good with children."
"My favorite breed of dog is a Labrador Retriever. Labradors are my favorite because they are extremely smart, very friendly, and love to be with people. They are also very playful and love to run around and have a lot of energy. "
]
}
],
@@ -332,7 +356,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12"
"version": "3.10.13"
}
},
"nbformat": 4,
@@ -180,8 +180,8 @@
}
],
"source": [
"from langchain.schema import HumanMessage, SystemMessage\n",
"from langchain_community.chat_models import ChatJavelinAIGateway\n",
"from langchain_core.messages import HumanMessage, SystemMessage\n",
"\n",
"messages = [\n",
"    SystemMessage(\n",

@@ -8,6 +8,13 @@
"Tongyi Qwen is a large-scale language model developed by Alibaba's Damo Academy. It is capable of understanding user intent through natural language understanding and semantic analysis, based on user input in natural language. It provides services and assistance to users in different domains and tasks. By providing clear and detailed instructions, you can obtain results that better align with your expectations."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setting up"
]
},
{
"cell_type": "code",
"execution_count": null,
@@ -25,7 +32,7 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 1,
"metadata": {
"ExecuteTime": {
"end_time": "2023-07-10T19:55:38.553933Z",
@@ -34,10 +41,10 @@
},
"outputs": [
{
"name": "stdout",
"name": "stdin",
"output_type": "stream",
"text": [
"········\n"
" ········\n"
]
}
],
@@ -50,7 +57,7 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 2,
"metadata": {
"ExecuteTime": {
"end_time": "2023-07-10T19:55:38.554152Z",
@@ -66,7 +73,7 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 3,
"metadata": {
"ExecuteTime": {
"end_time": "2023-07-10T19:55:39.812664Z",
@@ -75,14 +82,57 @@
},
"outputs": [],
"source": [
"from langchain.chains import LLMChain\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain_community.llms import Tongyi"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Justin Bieber was born on March 1, 1994. The Super Bowl that took place in the same year was Super Bowl XXVIII, which was played on January 30, 1994. The winner of that Super Bowl was the Dallas Cowboys, who defeated the Buffalo Bills with a score of 30-13.'"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"Tongyi().invoke(\"What NFL team won the Super Bowl in the year Justin Bieber was born?\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Using in a chain"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.prompts import PromptTemplate"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"llm = Tongyi()"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {
"ExecuteTime": {
"end_time": "2023-07-10T19:55:39.817327Z",
@@ -98,52 +148,36 @@
"prompt = PromptTemplate.from_template(template)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"llm = Tongyi()"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"llm_chain = LLMChain(prompt=prompt, llm=llm)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"chain = prompt | llm"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"\"The year Justin Bieber was born was 1994. The Denver Broncos won the Super Bowl in 1997, which means they would have been the team that won the Super Bowl during Justin Bieber's birth year. So the answer is the Denver Broncos.\""
"'Justin Bieber was born on March 1, 1994. The Super Bowl that took place in the same calendar year was Super Bowl XXVIII, which was played on January 30, 1994. The winner of Super Bowl XXVIII was the Dallas Cowboys, who defeated the Buffalo Bills with a score of 30-13.'"
]
},
"execution_count": 8,
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"question = \"What NFL team won the Super Bowl in the year Justin Beiber was born?\"\n",
"question = \"What NFL team won the Super Bowl in the year Justin Bieber was born?\"\n",
"\n",
"llm_chain.run(question)"
"chain.invoke({\"question\": question})"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
@@ -162,9 +196,9 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12"
"version": "3.11.7"
}
},
"nbformat": 4,
"nbformat_minor": 1
"nbformat_minor": 4
}
@@ -52,8 +52,8 @@
"from langchain.agents import AgentType, Tool, initialize_agent\n",
"from langchain.memory import ZepMemory\n",
"from langchain.retrievers import ZepRetriever\n",
"from langchain.schema import AIMessage, HumanMessage\n",
"from langchain_community.utilities import WikipediaAPIWrapper\n",
"from langchain_core.messages import AIMessage, HumanMessage\n",
"from langchain_openai import OpenAI\n",
"\n",
"# Set this to your Zep server URL\n",

@@ -4,7 +4,7 @@ All functionality related to [Google Cloud Platform](https://cloud.google.com/)

## Chat models

### Google AI
### Google Generative AI

Access GoogleAI `Gemini` models such as `gemini-pro` and `gemini-pro-vision` through the `ChatGoogleGenerativeAI` class.

@@ -25,14 +25,14 @@ llm = ChatGoogleGenerativeAI(model="gemini-pro")
llm.invoke("Sing a ballad of LangChain.")
```

Gemini vision model supports image inputs when providing a single chat message. Example:
Gemini vision model supports image inputs when providing a single chat message.

```python
from langchain_core.messages import HumanMessage
from langchain_google_genai import ChatGoogleGenerativeAI

llm = ChatGoogleGenerativeAI(model="gemini-pro-vision")
# example

message = HumanMessage(
    content=[
        {
@@ -69,29 +69,27 @@ See a [usage example](/docs/integrations/chat/google_vertex_ai_palm).
from langchain_google_vertexai import ChatVertexAI
```

## Document Loaders
### Google BigQuery
## LLMs

> [Google BigQuery](https://cloud.google.com/bigquery) is a serverless and cost-effective enterprise data warehouse that works across clouds and scales with your data.
`BigQuery` is a part of the `Google Cloud Platform`.
### Google Generative AI

We need to install `google-cloud-bigquery` python package.
Access GoogleAI `Gemini` models such as `gemini-pro` and `gemini-pro-vision` through the `GoogleGenerativeAI` class.

Install python package.

```bash
pip install google-cloud-bigquery
pip install langchain-google-genai
```

See a [usage example](/docs/integrations/document_loaders/google_bigquery).
See a [usage example](/docs/integrations/llms/google_ai).

```python
from langchain_community.document_loaders import BigQueryLoader
from langchain_google_genai import GoogleGenerativeAI
```
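
A minimal invocation sketch (assuming a `GOOGLE_API_KEY` environment variable; the model name is illustrative):

```python
from langchain_google_genai import GoogleGenerativeAI

# Assumes GOOGLE_API_KEY is set in the environment
llm = GoogleGenerativeAI(model="gemini-pro")
print(llm.invoke("Sing a ballad of LangChain."))
```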

## LLMs

### Vertex AI

Access to `Gemini` and `PaLM` LLMs (like `text-bison` and `code-bison`) via `Google Vertex AI`.
Access to `Gemini` and `PaLM` LLMs (like `text-bison` and `code-bison`) via `Vertex AI` on Google Cloud.

We need to install `langchain-google-vertexai` python package.

@@ -107,7 +105,7 @@ from langchain_google_vertexai import VertexAI

### Model Garden

Access PaLM and hundreds of OSS models via `Vertex AI Model Garden`.
Access PaLM and hundreds of OSS models via `Vertex AI Model Garden` on Google Cloud.

We need to install `langchain-google-vertexai` python package.

@@ -121,71 +119,11 @@ See a [usage example](/docs/integrations/llms/google_vertex_ai_palm#vertex-model
from langchain_google_vertexai import VertexAIModelGarden
```


### Google Cloud Storage

>[Google Cloud Storage](https://en.wikipedia.org/wiki/Google_Cloud_Storage) is a managed service for storing unstructured data.

We need to install `google-cloud-storage` python package.

```bash
pip install google-cloud-storage
```

There are two loaders for the `Google Cloud Storage`: the `Directory` and the `File` loaders.

See a [usage example](/docs/integrations/document_loaders/google_cloud_storage_directory).

```python
from langchain_community.document_loaders import GCSDirectoryLoader
```
See a [usage example](/docs/integrations/document_loaders/google_cloud_storage_file).

```python
from langchain_community.document_loaders import GCSFileLoader
```

### Google Drive

>[Google Drive](https://en.wikipedia.org/wiki/Google_Drive) is a file storage and synchronization service developed by Google.

Currently, only `Google Docs` are supported.

We need to install several python packages.

```bash
pip install google-api-python-client google-auth-httplib2 google-auth-oauthlib
```

See a [usage example and authorization instructions](/docs/integrations/document_loaders/google_drive).

```python
from langchain_community.document_loaders import GoogleDriveLoader
```

### Speech-to-Text

> [Google Cloud Speech-to-Text](https://cloud.google.com/speech-to-text) is an audio transcription API powered by Google's speech recognition models.

This document loader transcribes audio files and outputs the text results as Documents.

First, we need to install the python package.

```bash
pip install google-cloud-speech
```

See a [usage example and authorization instructions](/docs/integrations/document_loaders/google_speech_to_text).

```python
from langchain_community.document_loaders import GoogleSpeechToTextLoader
```

## Vector Stores

### Google Vertex AI Vector Search
### Vertex AI Vector Search

> [Google Vertex AI Vector Search](https://cloud.google.com/vertex-ai/docs/matching-engine/overview),
> [Vertex AI Vector Search](https://cloud.google.com/vertex-ai/docs/matching-engine/overview) from Google Cloud,
> formerly known as `Vertex AI Matching Engine`, provides the industry's leading high-scale
> low latency vector database. These vector databases are commonly
> referred to as vector similarity-matching or an approximate nearest neighbor (ANN) service.
@@ -202,12 +140,12 @@ See a [usage example](/docs/integrations/vectorstores/google_vertex_ai_vector_se
from langchain_community.vectorstores import MatchingEngine
```

### Google BigQuery Vector Search
### BigQuery

> [Google BigQuery](https://cloud.google.com/bigquery),
> [BigQuery](https://cloud.google.com/bigquery),
> BigQuery is a serverless and cost-effective enterprise data warehouse in Google Cloud.
>
> [Google BigQuery Vector Search](https://cloud.google.com/bigquery/docs/vector-search-intro)
> [BigQuery Vector Search](https://cloud.google.com/bigquery/docs/vector-search-intro)
> BigQuery vector search lets you use GoogleSQL to do semantic search, using vector indexes for fast but approximate results, or using brute force for exact results.

> It can calculate Euclidean or Cosine distance. With LangChain, we default to use Euclidean distance.
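
As a rough sketch of how this could look from LangChain (the `BigQueryVectorSearch` parameters shown here are assumptions, and the project, dataset, and table names are hypothetical; check the linked docs):

```python
from langchain_community.vectorstores import BigQueryVectorSearch
from langchain_openai import OpenAIEmbeddings

# Hypothetical project/dataset/table names, for illustration only
store = BigQueryVectorSearch(
    embedding=OpenAIEmbeddings(),
    project_id="my-project",
    dataset_name="my_dataset",
    table_name="doc_chunks",
)
store.add_texts(["BigQuery is a serverless data warehouse."])
docs = store.similarity_search("data warehouse", k=1)
```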
@@ -265,11 +203,10 @@ See a [usage example and authorization instructions](/docs/integrations/retrieve
from langchain_googledrive.retrievers import GoogleDriveRetriever
```


### Vertex AI Search

> [Google Cloud Vertex AI Search](https://cloud.google.com/generative-ai-app-builder/docs/introduction)
> allows developers to quickly build generative AI powered search engines for customers and employees.
> [Vertex AI Search](https://cloud.google.com/generative-ai-app-builder/docs/introduction)
> from Google Cloud allows developers to quickly build generative AI powered search engines for customers and employees.

We need to install the `google-cloud-discoveryengine` python package.

@@ -284,10 +221,10 @@ from langchain.retrievers import GoogleVertexAISearchRetriever
```

### Document AI Warehouse
> [Google Cloud Document AI Warehouse](https://cloud.google.com/document-ai-warehouse)
> allows enterprises to search, store, govern, and manage documents and their AI-extracted

> [Document AI Warehouse](https://cloud.google.com/document-ai-warehouse)
> from Google Cloud allows enterprises to search, store, govern, and manage documents and their AI-extracted
> data and metadata in a single platform.
>

```python
from langchain.retrievers import GoogleDocumentAIWarehouseRetriever
@@ -300,11 +237,136 @@ documents = docai_wh_retriever.get_relevant_documents(
)
```

## Document Loaders

### BigQuery

> [BigQuery](https://cloud.google.com/bigquery) is a serverless and cost-effective enterprise data warehouse that works across clouds and scales with your data in Google Cloud.

We need to install `google-cloud-bigquery` python package.

```bash
pip install google-cloud-bigquery
```

See a [usage example](/docs/integrations/document_loaders/google_bigquery).

```python
from langchain_community.document_loaders import BigQueryLoader
```

### Cloud Storage

>[Cloud Storage](https://en.wikipedia.org/wiki/Google_Cloud_Storage) is a managed service for storing unstructured data in Google Cloud.

We need to install `google-cloud-storage` python package.

```bash
pip install google-cloud-storage
```

There are two loaders for the `Google Cloud Storage`: the `Directory` and the `File` loaders.

See a [usage example](/docs/integrations/document_loaders/google_cloud_storage_directory).

```python
from langchain_community.document_loaders import GCSDirectoryLoader
```
See a [usage example](/docs/integrations/document_loaders/google_cloud_storage_file).

```python
from langchain_community.document_loaders import GCSFileLoader
```

### Google Drive

>[Google Drive](https://en.wikipedia.org/wiki/Google_Drive) is a file storage and synchronization service developed by Google.

Currently, only `Google Docs` are supported.

We need to install several python packages.

```bash
pip install google-api-python-client google-auth-httplib2 google-auth-oauthlib
```

See a [usage example and authorization instructions](/docs/integrations/document_loaders/google_drive).

```python
from langchain_community.document_loaders import GoogleDriveLoader
```

### Speech-to-Text

> [Speech-to-Text](https://cloud.google.com/speech-to-text) is an audio transcription API powered by Google's speech recognition models in Google Cloud.

This document loader transcribes audio files and outputs the text results as Documents.

First, we need to install the python package.

```bash
pip install google-cloud-speech
```

See a [usage example and authorization instructions](/docs/integrations/document_loaders/google_speech_to_text).

```python
from langchain_community.document_loaders import GoogleSpeechToTextLoader
```

## Document Transformers

### Document AI

>[Document AI](https://cloud.google.com/document-ai/docs/overview) is a Google Cloud
> service that transforms unstructured data from documents into structured data, making it easier
> to understand, analyze, and consume.

We need to set up a [`GCS` bucket and create your own OCR processor](https://cloud.google.com/document-ai/docs/create-processor).
The `GCS_OUTPUT_PATH` should be a path to a folder on GCS (starting with `gs://`)
and a processor name should look like `projects/PROJECT_NUMBER/locations/LOCATION/processors/PROCESSOR_ID`.
We can get it either programmatically or copy from the `Prediction endpoint` section of the `Processor details`
tab in the Google Cloud Console.

```bash
pip install google-cloud-documentai
pip install google-cloud-documentai-toolbox
```

See a [usage example](/docs/integrations/document_transformers/docai).

```python
from langchain_community.document_loaders.blob_loaders import Blob
from langchain_community.document_loaders.parsers import DocAIParser
```

### Google Translate

> [Google Translate](https://translate.google.com/) is a multilingual neural machine
> translation service developed by Google to translate text, documents and websites
> from one language into another.

The `GoogleTranslateTransformer` allows you to translate text and HTML with the [Google Cloud Translation API](https://cloud.google.com/translate).

To use it, you should have the `google-cloud-translate` python package installed, and a Google Cloud project with the [Translation API enabled](https://cloud.google.com/translate/docs/setup). This transformer uses the [Advanced edition (v3)](https://cloud.google.com/translate/docs/intro-to-v3).

First, we need to install the python package.

```bash
pip install google-cloud-translate
```

See a [usage example and authorization instructions](/docs/integrations/document_transformers/google_translate).

```python
from langchain_community.document_transformers import GoogleTranslateTransformer
```

## Tools

### Google Cloud Text-to-Speech
### Text-to-Speech

>[Google Cloud Text-to-Speech](https://cloud.google.com/text-to-speech) enables developers to
>[Text-to-Speech](https://cloud.google.com/text-to-speech) is a Google Cloud service that enables developers to
> synthesize natural-sounding speech with 100+ voices, available in multiple languages and variants.
> It applies DeepMind’s groundbreaking research in WaveNet and Google’s powerful neural networks
> to deliver the highest fidelity possible.
@@ -321,7 +383,6 @@ See a [usage example and authorization instructions](/docs/integrations/tools/go
from langchain.tools import GoogleCloudTextToSpeechTool
```


### Google Drive

We need to install several python packages.
@@ -439,55 +500,6 @@ from langchain_community.tools.google_trends import GoogleTrendsQueryRun
from langchain_community.utilities.google_trends import GoogleTrendsAPIWrapper
```


## Document Transformers

### Google Document AI

>[Document AI](https://cloud.google.com/document-ai/docs/overview) is a `Google Cloud Platform`
> service that transforms unstructured data from documents into structured data, making it easier
> to understand, analyze, and consume.

We need to set up a [`GCS` bucket and create your own OCR processor](https://cloud.google.com/document-ai/docs/create-processor).
The `GCS_OUTPUT_PATH` should be a path to a folder on GCS (starting with `gs://`)
and a processor name should look like `projects/PROJECT_NUMBER/locations/LOCATION/processors/PROCESSOR_ID`.
We can get it either programmatically or copy from the `Prediction endpoint` section of the `Processor details`
tab in the Google Cloud Console.

```bash
pip install google-cloud-documentai
pip install google-cloud-documentai-toolbox
```

See a [usage example](/docs/integrations/document_transformers/docai).

```python
from langchain_community.document_loaders.blob_loaders import Blob
from langchain_community.document_loaders.parsers import DocAIParser
```

### Google Translate

> [Google Translate](https://translate.google.com/) is a multilingual neural machine
> translation service developed by Google to translate text, documents and websites
> from one language into another.

The `GoogleTranslateTransformer` allows you to translate text and HTML with the [Google Cloud Translation API](https://cloud.google.com/translate).

To use it, you should have the `google-cloud-translate` python package installed, and a Google Cloud project with the [Translation API enabled](https://cloud.google.com/translate/docs/setup). This transformer uses the [Advanced edition (v3)](https://cloud.google.com/translate/docs/intro-to-v3).

First, we need to install the python package.

```bash
pip install google-cloud-translate
```

See a [usage example and authorization instructions](/docs/integrations/document_transformers/google_translate).

```python
from langchain_community.document_transformers import GoogleTranslateTransformer
```

## Toolkits

### GMail
@@ -509,9 +521,9 @@ from langchain_community.agent_toolkits import GmailToolkit

## Memory

### Cloud Firestore
### Firestore

> [`Cloud Firestore`](https://cloud.google.com/firestore) is a NoSQL document database built for automatic scaling, high performance, and ease of application development.
> [`Firestore`](https://cloud.google.com/firestore) is a NoSQL document database built for automatic scaling, high performance, and ease of application development in Google Cloud.

First, we need to install the python package.

@@ -556,7 +568,7 @@ See [usage examples and authorization instructions](/docs/integrations/tools/sea
from langchain_community.utilities import SearchApiAPIWrapper
```

### SerpAPI
### SerpApi

>[SerpApi](https://serpapi.com/) provides a 3rd-party API to access Google search results.

@@ -627,4 +639,4 @@ See a [usage example](/docs/integrations/document_loaders/youtube_transcript).

```python
from langchain_community.document_loaders import YoutubeLoader
```
```
@@ -11,15 +11,21 @@ LangChain integrates with many providers.

These providers have standalone `langchain-{provider}` packages for improved versioning, dependency management and testing.

- [OpenAI](/docs/integrations/platforms/openai)
- [AI21](/docs/integrations/providers/ai21)
- [Anthropic](/docs/integrations/platforms/anthropic)
- [Google](/docs/integrations/platforms/google)
- [MistralAI](/docs/integrations/providers/mistralai)
- [NVIDIA AI](/docs/integrations/providers/nvidia)
- [Together AI](/docs/integrations/providers/together)
- [Robocorp](/docs/integrations/providers/robocorp)
- [Astra DB](/docs/integrations/providers/astradb)
- [Exa Search](/docs/integrations/providers/exa_search)
- [Google Generative AI](/docs/integrations/platforms/google)
- [Google Vertex AI](/docs/integrations/platforms/google)
- [IBM](/docs/integrations/providers/ibm)
- [MistralAI](/docs/integrations/providers/mistralai)
- [Nomic](/docs/integrations/providers/nomic)
- [Nvidia AI Endpoints](/docs/integrations/providers/nvidia)
- [Nvidia AI](/docs/integrations/providers/nvidia)
- [OpenAI](/docs/integrations/platforms/openai)
- [Pinecone](/docs/integrations/providers/pinecone)
- [Robocorp](/docs/integrations/providers/robocorp)
- [Together AI](/docs/integrations/providers/together)


## Featured Community Providers

@@ -28,7 +28,7 @@
"source": [
"from langchain.callbacks import ArthurCallbackHandler\n",
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
"from langchain.schema import HumanMessage\n",
"from langchain_core.messages import HumanMessage\n",
"from langchain_openai import ChatOpenAI"
]
},

@@ -27,7 +27,7 @@ Get a [Cohere api key](https://dashboard.cohere.ai/) and set it as an environmen

```python
from langchain_community.chat_models import ChatCohere
from langchain.schema import HumanMessage
from langchain_core.messages import HumanMessage
chat = ChatCohere()
messages = [HumanMessage(content="knock knock")]
print(chat(messages))

@@ -30,7 +30,7 @@ from langchain.callbacks import FlyteCallbackHandler
from langchain.chains import LLMChain
from langchain_openai import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.schema import HumanMessage
from langchain_core.messages import HumanMessage
```

Set up the necessary environment variables to utilize the OpenAI API and Serp API:
28
docs/docs/integrations/providers/groq.mdx
Normal file
@@ -0,0 +1,28 @@
# Groq

Welcome to Groq! 🚀 At Groq, we've developed the world's first Language Processing Unit™, or LPU. The Groq LPU has a deterministic, single core streaming architecture that sets the standard for GenAI inference speed with predictable and repeatable performance for any given workload.

Beyond the architecture, our software is designed to empower developers like you with the tools you need to create innovative, powerful AI applications. With Groq as your engine, you can:

* Achieve uncompromised low latency and performance for real-time AI and HPC inferences 🔥
* Know the exact performance and compute time for any given workload 🔮
* Take advantage of our cutting-edge technology to stay ahead of the competition 💪

Want more Groq? Check out our [website](https://groq.com) for more resources and join our [Discord community](https://discord.gg/JvNsBDKeCG) to connect with our developers!


## Installation and Setup
Install the integration package:

```bash
pip install langchain-groq
```

Request an [API key](https://wow.groq.com) and set it as an environment variable:

```bash
export GROQ_API_KEY=gsk_...
```

## Chat Model
See a [usage example](/docs/integrations/chat/groq).
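
A minimal sketch of the chat model in use (the model name is only an example; it assumes `GROQ_API_KEY` is set as above):

```python
from langchain_core.messages import HumanMessage
from langchain_groq import ChatGroq

# Assumes GROQ_API_KEY is set in the environment
chat = ChatGroq(model_name="mixtral-8x7b-32768")
print(chat.invoke([HumanMessage(content="Explain what an LPU is in one sentence.")]))
```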
39
docs/docs/integrations/providers/ibm.mdx
Normal file
@@ -0,0 +1,39 @@
# IBM

The `LangChain` integrations related to the [IBM watsonx.ai](https://www.ibm.com/products/watsonx-ai) platform.

IBM® watsonx.ai™ AI studio is part of the IBM [watsonx](https://www.ibm.com/watsonx)™ AI and data platform, bringing together new generative
AI capabilities powered by [foundation models](https://www.ibm.com/products/watsonx-ai/foundation-models) and traditional machine learning (ML)
into a powerful studio spanning the AI lifecycle. Tune and guide models with your enterprise data to meet your needs with easy-to-use tools for
building and refining performant prompts. With watsonx.ai, you can build AI applications in a fraction of the time and with a fraction of the data.
Watsonx.ai offers:

- **Multi-model variety and flexibility:** Choose from IBM-developed, open-source and third-party models, or build your own model.
- **Differentiated client protection:** IBM stands behind IBM-developed models and indemnifies the client against third-party IP claims.
- **End-to-end AI governance:** Enterprises can scale and accelerate the impact of AI with trusted data across the business, using data wherever it resides.
- **Hybrid, multi-cloud deployments:** IBM provides the flexibility to integrate and deploy your AI workloads into your hybrid-cloud stack of choice.


## Installation and Setup

Install the integration package with
```bash
pip install -qU langchain-ibm
```

Get an IBM watsonx.ai api key and set it as an environment variable (`WATSONX_APIKEY`):
```python
import os

os.environ["WATSONX_APIKEY"] = "your IBM watsonx.ai api key"
```

## LLMs

### WatsonxLLM

See a [usage example](/docs/integrations/llms/ibm_watsonx).

```python
from langchain_ibm import WatsonxLLM
```
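
A minimal sketch mirroring the notebook linked above (the model id and Dallas url come from that notebook; the project id is a placeholder):

```python
from langchain_ibm import WatsonxLLM

watsonx_llm = WatsonxLLM(
    model_id="ibm/granite-13b-instruct-v2",
    url="https://us-south.ml.cloud.ibm.com",
    project_id="PASTE YOUR PROJECT_ID HERE",
    params={"decoding_method": "sample", "max_new_tokens": 100},
)
print(watsonx_llm.invoke("Who is man's best friend?"))
```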
@@ -66,7 +66,7 @@ print(embeddings.embed_documents(["hello"]))
|
||||
## Chat Example
|
||||
```python
|
||||
from langchain_community.chat_models import ChatJavelinAIGateway
|
||||
from langchain.schema import HumanMessage, SystemMessage
|
||||
from langchain_core.messages import HumanMessage, SystemMessage
|
||||
|
||||
messages = [
|
||||
SystemMessage(
|
||||
|
28 docs/docs/integrations/providers/kinetica.mdx Normal file
@@ -0,0 +1,28 @@
# Kinetica

[Kinetica](https://www.kinetica.com/) is a real-time database purpose-built for enabling analytics and generative AI on time-series & spatial data.

## Chat Model

The Kinetica LLM wrapper uses the [Kinetica SqlAssist LLM](https://docs.kinetica.com/7.2/sql-gpt/concepts/) to transform natural language into SQL, simplifying the process of data retrieval.

See the [Kinetica SqlAssist LLM Demo](/docs/integrations/chat/kinetica) for usage.

```python
from langchain_community.chat_models.kinetica import ChatKinetica
```
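A rough sketch of the typical flow, assuming connection settings (e.g. `KINETICA_URL`, `KINETICA_USER`, `KINETICA_PASSWD`) are provided via the environment and that a SQL context named `"demo_ctx"` already exists on the Kinetica side — see the demo above for the authoritative setup:

```python
from langchain_community.chat_models.kinetica import ChatKinetica
from langchain_core.prompts import ChatPromptTemplate

# Assumes Kinetica connection settings are available in the environment.
kinetica_llm = ChatKinetica()

# "demo_ctx" is a hypothetical SQL context created on the Kinetica side.
ctx_messages = kinetica_llm.load_messages_from_context("demo_ctx")
ctx_messages.append(("human", "{input}"))

# Natural-language questions flow through the context prompt into SqlAssist.
prompt_template = ChatPromptTemplate.from_messages(ctx_messages)
chain = prompt_template | kinetica_llm
```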
## Vector Store

The Kinetica vectorstore wrapper leverages Kinetica's native support for [vector similarity search](https://docs.kinetica.com/7.2/vector_search/).

See the [Kinetica Vectorstore API](/docs/integrations/vectorstores/kinetica) for usage.

```python
from langchain_community.vectorstores import Kinetica
```
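A hedged sketch, assuming the `KineticaSettings` connection helper and placeholder credentials — consult the API page above for the authoritative parameters:

```python
from langchain_community.vectorstores import Kinetica, KineticaSettings
from langchain_openai import OpenAIEmbeddings

# Placeholder connection settings; point these at your own Kinetica instance.
config = KineticaSettings(host="http://127.0.0.1", username="user", password="password")

db = Kinetica.from_texts(
    ["Kinetica supports native vector similarity search."],
    OpenAIEmbeddings(),
    collection_name="langchain_demo",  # hypothetical collection name
    config=config,
)
print(db.similarity_search("vector search", k=1))
```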
@@ -55,7 +55,7 @@ See a usage [example](/docs/integrations/chat/konko).

 - **ChatCompletion with Mistral-7B:**

 ```python
-from langchain.schema import HumanMessage
+from langchain_core.messages import HumanMessage
 from langchain_community.chat_models import ChatKonko
 chat_instance = ChatKonko(max_tokens=10, model='mistralai/mistral-7b-instruct-v0.1')
 msg = HumanMessage(content="Hi")
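The fragment above is cut off mid-example; for illustration, a sketch of completing the call via the standard chat-model interface (the final lines are assumptions, not the file's actual continuation):

```python
from langchain_core.messages import HumanMessage
from langchain_community.chat_models import ChatKonko

# Assumes KONKO_API_KEY is set in the environment.
chat_instance = ChatKonko(max_tokens=10, model="mistralai/mistral-7b-instruct-v0.1")
msg = HumanMessage(content="Hi")
response = chat_instance.invoke([msg])
print(response.content)
```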
@@ -18,7 +18,7 @@ Integration with log10 is a simple one-line `log10_callback` integration as shown below:

 ```python
 from langchain_openai import ChatOpenAI
-from langchain.schema import HumanMessage
+from langchain_core.messages import HumanMessage

 from log10.langchain import Log10Callback
 from log10.llm import Log10Config
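Piecing the fragments together, a minimal sketch of the one-line integration, following the `callbacks=[log10_callback]` pattern visible in the hunk context below (`Log10Config` is assumed to pick up credentials such as `LOG10_TOKEN` from the environment — see log10's docs):

```python
from langchain_openai import ChatOpenAI
from langchain_core.messages import HumanMessage

from log10.langchain import Log10Callback
from log10.llm import Log10Config

# Log10Config is assumed to read credentials (e.g. LOG10_TOKEN) from the environment.
log10_callback = Log10Callback(log10_config=Log10Config())

llm = ChatOpenAI(model_name="gpt-3.5-turbo", callbacks=[log10_callback])
print(llm.invoke([HumanMessage(content="Ping")]).content)
```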
@@ -43,7 +43,7 @@ llm = ChatOpenAI(model_name="gpt-3.5-turbo", callbacks=[log10_callback])

 from langchain_openai import OpenAI
 from langchain_community.chat_models import ChatAnthropic
 from langchain_openai import ChatOpenAI
-from langchain.schema import HumanMessage
+from langchain_core.messages import HumanMessage

 from log10.langchain import Log10Callback
 from log10.llm import Log10Config
@@ -100,7 +100,7 @@ print(embeddings.embed_documents(["hello"]))

 ```python
 from langchain_community.chat_models import ChatMlflow
-from langchain.schema import HumanMessage, SystemMessage
+from langchain_core.messages import HumanMessage, SystemMessage

 chat = ChatMlflow(
     target_uri="http://127.0.0.1:5000",
@@ -113,7 +113,7 @@ print(embeddings.embed_documents(["hello"]))

 ```python
 from langchain_community.chat_models import ChatMLflowAIGateway
-from langchain.schema import HumanMessage, SystemMessage
+from langchain_core.messages import HumanMessage, SystemMessage

 chat = ChatMLflowAIGateway(
     gateway_uri="http://127.0.0.1:5000",
@@ -18,7 +18,7 @@ There exists a wrapper around Pinecone indexes, allowing you to use it as a vectorstore,
 whether for semantic search or example selection.

 ```python
-from langchain_community.vectorstores import Pinecone
+from langchain_pinecone import PineconeVectorStore
 ```

 For a more detailed walkthrough of the Pinecone vectorstore, see [this notebook](/docs/integrations/vectorstores/pinecone)
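With the import now coming from the dedicated `langchain-pinecone` package, a minimal sketch of wiring it up might look like this (the index name is a placeholder, and `PINECONE_API_KEY` is assumed to be set in the environment):

```python
from langchain_openai import OpenAIEmbeddings
from langchain_pinecone import PineconeVectorStore

# "langchain-demo" is a placeholder; use an existing Pinecone index name.
# PINECONE_API_KEY is assumed to be set in the environment.
vectorstore = PineconeVectorStore(
    index_name="langchain-demo",
    embedding=OpenAIEmbeddings(),
)
docs = vectorstore.similarity_search("What is Pinecone?", k=3)
```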
@@ -276,7 +276,7 @@
     "from langchain.chains.openai_functions import (\n",
     "    create_structured_output_chain,\n",
     ")\n",
-    "from langchain.schema import HumanMessage, SystemMessage\n",
+    "from langchain_core.messages import HumanMessage, SystemMessage\n",
     "from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate\n",
     "from langchain_openai import ChatOpenAI\n",
     "from pydantic import BaseModel, Field"
@@ -81,7 +81,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from langchain.schema import Document\n",
+    "from langchain_core.documents import Document\n",
     "\n",
     "retriever = BM25Retriever.from_documents(\n",
     "    [\n",
@@ -34,8 +34,8 @@
     "\n",
     "import pandas as pd\n",
     "from langchain.retrievers import MultiVectorRetriever\n",
-    "from langchain.schema import Document\n",
     "from langchain_community.vectorstores import FAISS\n",
+    "from langchain_core.documents import Document\n",
     "from langchain_core.stores import BaseStore\n",
     "from langchain_core.vectorstores import VectorStore\n",
     "from langchain_openai import OpenAIEmbeddings\n",
@@ -194,7 +194,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from langchain.schema import StrOutputParser\n",
+    "from langchain_core.output_parsers import StrOutputParser\n",
     "from langchain_core.prompts import ChatPromptTemplate\n",
     "from langchain_core.runnables import RunnablePassthrough\n",
     "from langchain_openai import ChatOpenAI\n",
@@ -119,13 +119,8 @@
     "import pinecone\n",
     "\n",
-    "api_key = os.getenv(\"PINECONE_API_KEY\") or \"PINECONE_API_KEY\"\n",
-    "# find environment next to your API key in the Pinecone console\n",
-    "env = os.getenv(\"PINECONE_ENVIRONMENT\") or \"PINECONE_ENVIRONMENT\"\n",
-    "\n",
-    "index_name = \"langchain-pinecone-hybrid-search\"\n",
-    "\n",
-    "pinecone.init(api_key=api_key, environment=env)\n",
-    "pinecone.whoami()"
+    "index_name = \"langchain-pinecone-hybrid-search\""
    ]
   },
   {
@@ -83,8 +83,8 @@
    },
    "outputs": [],
    "source": [
-    "from langchain.schema import Document\n",
     "from langchain_community.vectorstores import DeepLake\n",
+    "from langchain_core.documents import Document\n",
     "from langchain_openai import OpenAIEmbeddings\n",
     "\n",
     "embeddings = OpenAIEmbeddings()"
@@ -84,8 +84,8 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from langchain.schema import Document\n",
     "from langchain.vectorstores import AstraDB\n",
+    "from langchain_core.documents import Document\n",
     "\n",
     "docs = [\n",
     "    Document(\n",
@@ -87,8 +87,8 @@
    },
    "outputs": [],
    "source": [
-    "from langchain.schema import Document\n",
     "from langchain_community.vectorstores import Chroma\n",
+    "from langchain_core.documents import Document\n",
     "from langchain_openai import OpenAIEmbeddings\n",
     "\n",
     "embeddings = OpenAIEmbeddings()"
@@ -92,9 +92,9 @@
    },
    "outputs": [],
    "source": [
-    "from langchain.schema import Document\n",
     "from langchain_community.embeddings import DashScopeEmbeddings\n",
     "from langchain_community.vectorstores import DashVector\n",
+    "from langchain_core.documents import Document\n",
     "\n",
     "embeddings = DashScopeEmbeddings()\n",
     "\n",
@@ -60,8 +60,8 @@
     "import getpass\n",
     "import os\n",
     "\n",
-    "from langchain.schema import Document\n",
     "from langchain_community.vectorstores import ElasticsearchStore\n",
+    "from langchain_core.documents import Document\n",
     "from langchain_openai import OpenAIEmbeddings\n",
     "\n",
     "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n",
@@ -67,8 +67,8 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from langchain.schema import Document\n",
     "from langchain_community.vectorstores import Milvus\n",
+    "from langchain_core.documents import Document\n",
     "from langchain_openai import OpenAIEmbeddings\n",
     "\n",
     "embeddings = OpenAIEmbeddings()"
@@ -57,8 +57,8 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from langchain.schema import Document\n",
     "from langchain_community.vectorstores import MongoDBAtlasVectorSearch\n",
+    "from langchain_core.documents import Document\n",
     "from langchain_openai import OpenAIEmbeddings\n",
     "from pymongo import MongoClient\n",
     "\n",
@@ -78,8 +78,8 @@
    },
    "outputs": [],
    "source": [
-    "from langchain.schema import Document\n",
     "from langchain_community.vectorstores import MyScale\n",
+    "from langchain_core.documents import Document\n",
     "from langchain_openai import OpenAIEmbeddings\n",
     "\n",
     "embeddings = OpenAIEmbeddings()"
Some files were not shown because too many files have changed in this diff.