docs: migrate integrations using langchain-cli (#21929)
Migrate integration docs
This commit is contained in:
parent c98bd8505f
commit 8ed2ba9301
@@ -29,6 +29,7 @@
 "import os\n",
 "\n",
 "import comet_llm\n",
+"from langchain_openai import OpenAI\n",
 "\n",
 "os.environ[\"LANGCHAIN_COMET_TRACING\"] = \"true\"\n",
 "\n",
@@ -40,8 +41,7 @@
 "# here we are configuring the comet project\n",
 "os.environ[\"COMET_PROJECT_NAME\"] = \"comet-example-langchain-tracing\"\n",
 "\n",
-"from langchain.agents import AgentType, initialize_agent, load_tools\n",
-"from langchain.llms import OpenAI"
+"from langchain.agents import AgentType, initialize_agent, load_tools"
 ]
 },
 {
@@ -114,10 +114,7 @@
 "source": [
 "import os\n",
 "\n",
-"from langchain.schema import (\n",
-"    HumanMessage,\n",
-"    SystemMessage,\n",
-")\n",
+"from langchain_core.messages import HumanMessage, SystemMessage\n",
 "from langchain_openai import ChatOpenAI\n",
 "\n",
 "token = os.environ[\"CONTEXT_API_TOKEN\"]\n",
@@ -94,9 +94,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.schema import (\n",
-"    HumanMessage,\n",
-")\n",
+"from langchain_core.messages import HumanMessage\n",
 "from langchain_openai import ChatOpenAI\n",
 "\n",
 "chat_llm = ChatOpenAI(\n",
@@ -127,7 +127,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.callbacks import get_openai_callback"
+"from langchain_community.callbacks import get_openai_callback"
 ]
 },
 {
@@ -161,8 +161,7 @@
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
 "version": "3.11.4"
-},
-"orig_nbformat": 4
+}
 },
 "nbformat": 4,
 "nbformat_minor": 2
@@ -86,7 +86,13 @@
 {
 "data": {
 "text/plain": [
-"AIMessage(content='为你找到关于coze的信息如下:\n\nCoze是一个由字节跳动推出的AI聊天机器人和应用程序编辑开发平台。\n\n用户无论是否有编程经验,都可以通过该平台快速创建各种类型的聊天机器人、智能体、AI应用和插件,并将其部署在社交平台和即时聊天应用程序中。\n\n国际版使用的模型比国内版更强大。')"
+"AIMessage(content='为你找到关于coze的信息如下:\n",
+"\n",
+"Coze是一个由字节跳动推出的AI聊天机器人和应用程序编辑开发平台。\n",
+"\n",
+"用户无论是否有编程经验,都可以通过该平台快速创建各种类型的聊天机器人、智能体、AI应用和插件,并将其部署在社交平台和即时聊天应用程序中。\n",
+"\n",
+"国际版使用的模型比国内版更强大。')"
 ]
 },
 "execution_count": 3,
@@ -173,8 +179,7 @@
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
 "version": "3.11.4"
-},
-"orig_nbformat": 4
+}
 },
 "nbformat": 4,
 "nbformat_minor": 2
@@ -67,7 +67,7 @@
 },
 "outputs": [],
 "source": [
-"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler"
+"from langchain_core.callbacks import StreamingStdOutCallbackHandler"
 ]
 },
 {
@@ -133,8 +133,7 @@
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
 "version": "3.11.4"
-},
-"orig_nbformat": 4
+}
 },
 "nbformat": 4,
 "nbformat_minor": 2
@@ -126,8 +126,8 @@
 }
 ],
 "source": [
-"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
 "from langchain_community.chat_models import ChatEverlyAI\n",
+"from langchain_core.callbacks import StreamingStdOutCallbackHandler\n",
 "from langchain_core.messages import HumanMessage, SystemMessage\n",
 "\n",
 "messages = [\n",
@@ -184,8 +184,8 @@
 }
 ],
 "source": [
-"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
 "from langchain_community.chat_models import ChatEverlyAI\n",
+"from langchain_core.callbacks import StreamingStdOutCallbackHandler\n",
 "from langchain_core.messages import HumanMessage, SystemMessage\n",
 "\n",
 "messages = [\n",
@@ -143,8 +143,7 @@
 },
 "outputs": [],
 "source": [
-"from langchain.callbacks.manager import CallbackManager\n",
-"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler"
+"from langchain_core.callbacks import CallbackManager, StreamingStdOutCallbackHandler"
 ]
 },
 {
@@ -94,8 +94,7 @@
 },
 "outputs": [],
 "source": [
-"from langchain.callbacks.manager import CallbackManager\n",
-"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler"
+"from langchain_core.callbacks import CallbackManager, StreamingStdOutCallbackHandler"
 ]
 },
 {
@@ -122,8 +122,7 @@
 },
 "outputs": [],
 "source": [
-"from langchain.callbacks.manager import CallbackManager\n",
-"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler"
+"from langchain_core.callbacks import CallbackManager, StreamingStdOutCallbackHandler"
 ]
 },
 {
@@ -173,8 +173,8 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.document_loaders import OnlinePDFLoader\n",
-"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
+"from langchain_community.document_loaders import OnlinePDFLoader\n",
+"from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
 "\n",
 "# Loading the COMVEST 2024 notice\n",
 "loader = OnlinePDFLoader(\n",
@@ -202,7 +202,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.retrievers import BM25Retriever\n",
+"from langchain_community.retrievers import BM25Retriever\n",
 "\n",
 "retriever = BM25Retriever.from_documents(texts)"
 ]
@@ -71,8 +71,7 @@
 "metadata": {
 "language_info": {
 "name": "python"
-},
-"orig_nbformat": 4
+}
 },
 "nbformat": 4,
 "nbformat_minor": 2
@@ -66,10 +66,8 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.schema import (\n",
-"    HumanMessage,\n",
-")\n",
 "from langchain_community.chat_models.mlx import ChatMLX\n",
+"from langchain_core.messages import HumanMessage\n",
 "\n",
 "messages = [\n",
 "    HumanMessage(\n",
@@ -78,8 +78,7 @@
 "metadata": {
 "language_info": {
 "name": "python"
-},
-"orig_nbformat": 4
+}
 },
 "nbformat": 4,
 "nbformat_minor": 2
@@ -34,7 +34,9 @@
 "outputs": [
 {
 "data": {
-"text/plain": "AIMessage(content='Hello! How can I help you today?')"
+"text/plain": [
+"AIMessage(content='Hello! How can I help you today?')"
+]
 },
 "execution_count": 10,
 "metadata": {},
@@ -2,17 +2,17 @@
 "cells": [
 {
 "cell_type": "raw",
-"source": [
-"---\n",
-"sidebar_label: Yuan2.0\n",
-"---"
-],
 "metadata": {
 "collapsed": false,
 "pycharm": {
 "name": "#%% raw\n"
 }
-}
+},
+"source": [
+"---\n",
+"sidebar_label: Yuan2.0\n",
+"---"
+]
 },
 {
 "cell_type": "markdown",
@@ -216,7 +216,7 @@
 },
 "outputs": [],
 "source": [
-"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
+"from langchain_core.callbacks import StreamingStdOutCallbackHandler\n",
 "\n",
 "chat = ChatYuan2(\n",
 "    yuan2_api_base=\"http://127.0.0.1:8001/v1\",\n",
@@ -199,28 +199,32 @@
 },
 {
 "cell_type": "markdown",
+"metadata": {
+"collapsed": false
+},
 "source": [
 "### Using With Functions Call\n",
 "\n",
 "GLM-4 Model can be used with the function call as well,use the following code to run a simple LangChain json_chat_agent."
-],
-"metadata": {
-"collapsed": false
-}
+]
 },
 {
 "cell_type": "code",
+"execution_count": null,
+"metadata": {
+"collapsed": false
+},
 "outputs": [],
 "source": [
 "os.environ[\"TAVILY_API_KEY\"] = \"tavily_api_key\""
-],
-"metadata": {
-"collapsed": false
-},
-"execution_count": null
+]
 },
 {
 "cell_type": "code",
+"execution_count": null,
+"metadata": {
+"collapsed": false
+},
 "outputs": [],
 "source": [
 "from langchain import hub\n",
@@ -235,22 +239,18 @@
 "agent_executor = AgentExecutor(\n",
 "    agent=agent, tools=tools, verbose=True, handle_parsing_errors=True\n",
 ")"
-],
-"metadata": {
-"collapsed": false
-},
-"execution_count": null
+]
 },
 {
 "cell_type": "code",
-"outputs": [],
-"source": [
-"agent_executor.invoke({\"input\": \"what is LangChain?\"})"
-],
+"execution_count": null,
 "metadata": {
 "collapsed": false
 },
-"execution_count": null
+"outputs": [],
+"source": [
+"agent_executor.invoke({\"input\": \"what is LangChain?\"})"
+]
 }
 ],
 "metadata": {
@@ -135,8 +135,8 @@
 "source": [
 "from pprint import pprint\n",
 "\n",
-"from langchain.utils.openai_functions import convert_pydantic_to_openai_function\n",
 "from langchain_core.pydantic_v1 import BaseModel\n",
+"from langchain_core.utils.function_calling import convert_pydantic_to_openai_function\n",
 "\n",
 "openai_function_def = convert_pydantic_to_openai_function(Calculator)\n",
 "pprint(openai_function_def)"
@@ -149,7 +149,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.output_parsers.openai_functions import PydanticOutputFunctionsParser\n",
+"from langchain_core.output_parsers.openai_functions import PydanticOutputFunctionsParser\n",
 "from langchain_core.prompts import ChatPromptTemplate\n",
 "from langchain_openai import ChatOpenAI\n",
 "\n",
@@ -166,7 +166,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain_community.docstore.document import Document\n",
+"from langchain_core.documents import Document\n",
 "\n",
 "\n",
 "def handle_record(record, id):\n",
@@ -149,7 +149,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain_community.docstore.document import Document\n",
+"from langchain_core.documents import Document\n",
 "\n",
 "\n",
 "def handle_record(record, id):\n",
@@ -151,7 +151,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain_community.docstore.document import Document\n",
+"from langchain_core.documents import Document\n",
 "\n",
 "\n",
 "def handle_record(record, id):\n",
@@ -156,7 +156,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain_community.docstore.document import Document\n",
+"from langchain_core.documents import Document\n",
 "\n",
 "\n",
 "def handle_record(record, id):\n",
@@ -152,7 +152,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain_community.docstore.document import Document\n",
+"from langchain_core.documents import Document\n",
 "\n",
 "\n",
 "def handle_record(record, id):\n",
@@ -149,7 +149,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain_community.docstore.document import Document\n",
+"from langchain_core.documents import Document\n",
 "\n",
 "\n",
 "def handle_record(record, id):\n",
@@ -152,7 +152,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain_community.docstore.document import Document\n",
+"from langchain_core.documents import Document\n",
 "\n",
 "\n",
 "def handle_record(record, id):\n",
@@ -153,7 +153,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain_community.docstore.document import Document\n",
+"from langchain_core.documents import Document\n",
 "\n",
 "\n",
 "def handle_record(record, id):\n",
@@ -101,8 +101,8 @@
 "outputs": [],
 "source": [
 "from langchain.indexes import VectorstoreIndexCreator\n",
-"from langchain_community.docstore.document import Document\n",
-"from langchain_community.document_loaders import ApifyDatasetLoader"
+"from langchain_community.document_loaders import ApifyDatasetLoader\n",
+"from langchain_core.documents import Document"
 ]
 },
 {
@@ -208,8 +208,7 @@
 "metadata": {
 "language_info": {
 "name": "python"
-},
-"orig_nbformat": 4
+}
 },
 "nbformat": 4,
 "nbformat_minor": 2
@@ -80,21 +80,21 @@
 },
 {
 "cell_type": "code",
+"execution_count": 4,
+"metadata": {
+"ExecuteTime": {
+"end_time": "2024-01-08T12:41:22.643335Z",
+"start_time": "2024-01-08T12:40:57.759116Z"
+},
+"collapsed": false
+},
 "outputs": [],
 "source": [
 "from getpass import getpass\n",
 "\n",
 "ASTRA_DB_API_ENDPOINT = input(\"ASTRA_DB_API_ENDPOINT = \")\n",
 "ASTRA_DB_APPLICATION_TOKEN = getpass(\"ASTRA_DB_APPLICATION_TOKEN = \")"
-],
-"metadata": {
-"collapsed": false,
-"ExecuteTime": {
-"end_time": "2024-01-08T12:41:22.643335Z",
-"start_time": "2024-01-08T12:40:57.759116Z"
-}
-},
-"execution_count": 4
+]
 },
 {
 "cell_type": "code",
@@ -118,18 +118,18 @@
 },
 {
 "cell_type": "code",
-"outputs": [],
-"source": [
-"docs = loader.load()"
-],
+"execution_count": 7,
 "metadata": {
-"collapsed": false,
 "ExecuteTime": {
 "end_time": "2024-01-08T12:42:30.236489Z",
 "start_time": "2024-01-08T12:42:29.612133Z"
-}
 },
-"execution_count": 7
+"collapsed": false
+},
+"outputs": [],
+"source": [
+"docs = loader.load()"
+]
 },
 {
 "cell_type": "code",
@@ -143,7 +143,9 @@
 "outputs": [
 {
 "data": {
-"text/plain": "Document(page_content='{\"_id\": \"659bdffa16cbc4586b11a423\", \"title\": \"Dangerous Men\", \"reviewtext\": \"\\\\\"Dangerous Men,\\\\\" the picture\\'s production notes inform, took 26 years to reach the big screen. After having seen it, I wonder: What was the rush?\"}', metadata={'namespace': 'default_keyspace', 'api_endpoint': 'https://01234567-89ab-cdef-0123-456789abcdef-us-east1.apps.astra.datastax.com', 'collection': 'movie_reviews'})"
+"text/plain": [
+"Document(page_content='{\"_id\": \"659bdffa16cbc4586b11a423\", \"title\": \"Dangerous Men\", \"reviewtext\": \"\\\\\"Dangerous Men,\\\\\" the picture\\'s production notes inform, took 26 years to reach the big screen. After having seen it, I wonder: What was the rush?\"}', metadata={'namespace': 'default_keyspace', 'api_endpoint': 'https://01234567-89ab-cdef-0123-456789abcdef-us-east1.apps.astra.datastax.com', 'collection': 'movie_reviews'})"
+]
 },
 "execution_count": 8,
 "metadata": {},
@@ -104,37 +104,37 @@
 },
 {
 "cell_type": "markdown",
+"id": "91a7ac07",
+"metadata": {},
 "source": [
 "## Configuring the AWS Boto3 client\n",
 "You can configure the AWS [Boto3](https://boto3.amazonaws.com/v1/documentation/api/latest/index.html) client by passing\n",
 "named arguments when creating the S3DirectoryLoader.\n",
 "This is useful for instance when AWS credentials can't be set as environment variables.\n",
 "See the [list of parameters](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html#boto3.session.Session) that can be configured."
-],
-"metadata": {},
-"id": "91a7ac07"
+]
 },
 {
 "cell_type": "code",
 "execution_count": null,
+"id": "f485ec8c",
+"metadata": {},
 "outputs": [],
 "source": [
 "loader = S3DirectoryLoader(\n",
 "    \"testing-hwc\", aws_access_key_id=\"xxxx\", aws_secret_access_key=\"yyyy\"\n",
 ")"
-],
-"metadata": {},
-"id": "f485ec8c"
+]
 },
 {
 "cell_type": "code",
 "execution_count": null,
+"id": "c0fa76ae",
+"metadata": {},
 "outputs": [],
 "source": [
 "loader.load()"
-],
-"metadata": {},
-"id": "c0fa76ae"
+]
 }
 ],
 "metadata": {
@@ -80,24 +80,24 @@
 {
 "cell_type": "code",
 "execution_count": null,
+"id": "43106ee8",
+"metadata": {},
 "outputs": [],
 "source": [
 "loader = S3FileLoader(\n",
 "    \"testing-hwc\", \"fake.docx\", aws_access_key_id=\"xxxx\", aws_secret_access_key=\"yyyy\"\n",
 ")"
-],
-"metadata": {},
-"id": "43106ee8"
+]
 },
 {
 "cell_type": "code",
 "execution_count": null,
+"id": "1764a727",
+"metadata": {},
 "outputs": [],
 "source": [
 "loader.load()"
-],
-"metadata": {},
-"id": "1764a727"
+]
 }
 ],
 "metadata": {
@@ -96,8 +96,7 @@
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
 "version": "3.10.9"
-},
-"orig_nbformat": 4
+}
 },
 "nbformat": 4,
 "nbformat_minor": 2
@@ -17,7 +17,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain_community.docstore.document import Document"
+"from langchain_core.documents import Document"
 ]
 },
 {
@@ -145,8 +145,7 @@
 "metadata": {
 "language_info": {
 "name": "python"
-},
-"orig_nbformat": 4
+}
 },
 "nbformat": 4,
 "nbformat_minor": 2
@@ -76,8 +76,7 @@
 "metadata": {
 "language_info": {
 "name": "python"
-},
-"orig_nbformat": 4
+}
 },
 "nbformat": 4,
 "nbformat_minor": 2
@@ -198,8 +198,7 @@
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
 "version": "3.11.0"
-},
-"orig_nbformat": 4
+}
 },
 "nbformat": 4,
 "nbformat_minor": 2
@@ -13,14 +13,14 @@
 },
 {
 "cell_type": "markdown",
+"metadata": {
+"collapsed": false
+},
 "source": [
 "## Initializing the lakeFS loader\n",
 "\n",
 "Replace `ENDPOINT`, `LAKEFS_ACCESS_KEY`, and `LAKEFS_SECRET_KEY` values with your own."
-],
-"metadata": {
-"collapsed": false
-}
+]
 },
 {
 "cell_type": "code",
@@ -50,15 +50,15 @@
 },
 {
 "cell_type": "markdown",
+"metadata": {
+"collapsed": false
+},
 "source": [
 "## Specifying a path\n",
 "You can specify a prefix or a complete object path to control which files to load.\n",
 "\n",
 "Specify the repository, reference (branch, commit id, or tag), and path in the corresponding `REPO`, `REF`, and `PATH` to load the documents from:"
-],
-"metadata": {
-"collapsed": false
-}
+]
 },
 {
 "cell_type": "code",
@@ -103,7 +103,9 @@
 "outputs": [
 {
 "data": {
-"text/plain": "Document(page_content='Abstract\\nWe study how to apply large language models to write grounded and organized long-form articles from scratch, with comparable breadth and depth to Wikipedia pages.\\nThis underexplored problem poses new challenges at the pre-writing stage, including how to research the topic and prepare an outline prior to writing.\\nWe propose STORM, a writing system for the Synthesis of Topic Outlines through\\nReferences\\nFull-length Article\\nTopic\\nOutline\\n2022 Winter Olympics\\nOpening Ceremony\\nResearch via Question Asking\\nRetrieval and Multi-perspective Question Asking.\\nSTORM models the pre-writing stage by\\nLLM\\n(1) discovering diverse perspectives in researching the given topic, (2) simulating conversations where writers carrying different perspectives pose questions to a topic expert grounded on trusted Internet sources, (3) curating the collected information to create an outline.\\nFor evaluation, we curate FreshWiki, a dataset of recent high-quality Wikipedia articles, and formulate outline assessments to evaluate the pre-writing stage.\\nWe further gather feedback from experienced Wikipedia editors.\\nCompared to articles generated by an outlinedriven retrieval-augmented baseline, more of STORM’s articles are deemed to be organized (by a 25% absolute increase) and broad in coverage (by 10%).\\nThe expert feedback also helps identify new challenges for generating grounded long articles, such as source bias transfer and over-association of unrelated facts.\\n1. Can you provide any information about the transportation arrangements for the opening ceremony?\\nLLM\\n2. Can you provide any information about the budget for the 2022 Winter Olympics opening ceremony?…\\nLLM- Role1\\nLLM- Role2\\nLLM- Role1', metadata={'source': 'https://arxiv.org/pdf/2402.14207.pdf', 'section_number': 1, 'section_title': 'Abstract'})"
+"text/plain": [
+"Document(page_content='Abstract\\nWe study how to apply large language models to write grounded and organized long-form articles from scratch, with comparable breadth and depth to Wikipedia pages.\\nThis underexplored problem poses new challenges at the pre-writing stage, including how to research the topic and prepare an outline prior to writing.\\nWe propose STORM, a writing system for the Synthesis of Topic Outlines through\\nReferences\\nFull-length Article\\nTopic\\nOutline\\n2022 Winter Olympics\\nOpening Ceremony\\nResearch via Question Asking\\nRetrieval and Multi-perspective Question Asking.\\nSTORM models the pre-writing stage by\\nLLM\\n(1) discovering diverse perspectives in researching the given topic, (2) simulating conversations where writers carrying different perspectives pose questions to a topic expert grounded on trusted Internet sources, (3) curating the collected information to create an outline.\\nFor evaluation, we curate FreshWiki, a dataset of recent high-quality Wikipedia articles, and formulate outline assessments to evaluate the pre-writing stage.\\nWe further gather feedback from experienced Wikipedia editors.\\nCompared to articles generated by an outlinedriven retrieval-augmented baseline, more of STORM’s articles are deemed to be organized (by a 25% absolute increase) and broad in coverage (by 10%).\\nThe expert feedback also helps identify new challenges for generating grounded long articles, such as source bias transfer and over-association of unrelated facts.\\n1. Can you provide any information about the transportation arrangements for the opening ceremony?\\nLLM\\n2. Can you provide any information about the budget for the 2022 Winter Olympics opening ceremony?…\\nLLM- Role1\\nLLM- Role2\\nLLM- Role1', metadata={'source': 'https://arxiv.org/pdf/2402.14207.pdf', 'section_number': 1, 'section_title': 'Abstract'})"
+]
 },
 "execution_count": 6,
 "metadata": {},
@@ -128,7 +130,9 @@
 "outputs": [
 {
 "data": {
-"text/plain": "79"
+"text/plain": [
+"79"
+]
 },
 "execution_count": 7,
 "metadata": {},
@@ -188,7 +192,9 @@
 "outputs": [
 {
 "data": {
-"text/plain": "Document(page_content='Assisting in Writing Wikipedia-like Articles From Scratch with Large Language Models\\nStanford University {shaoyj, yuchengj, tkanell, peterxu, okhattab}@stanford.edu lam@cs.stanford.edu', metadata={'source': 'https://arxiv.org/pdf/2402.14207.pdf', 'chunk_number': 1, 'chunk_type': 'para'})"
+"text/plain": [
+"Document(page_content='Assisting in Writing Wikipedia-like Articles From Scratch with Large Language Models\\nStanford University {shaoyj, yuchengj, tkanell, peterxu, okhattab}@stanford.edu lam@cs.stanford.edu', metadata={'source': 'https://arxiv.org/pdf/2402.14207.pdf', 'chunk_number': 1, 'chunk_type': 'para'})"
+]
 },
 "execution_count": 9,
 "metadata": {},
@@ -213,7 +219,9 @@
 "outputs": [
 {
 "data": {
-"text/plain": "306"
+"text/plain": [
+"306"
+]
 },
 "execution_count": 10,
 "metadata": {},
@@ -273,7 +281,9 @@
 "outputs": [
 {
 "data": {
-"text/plain": "'<html><h1>Assisting in Writing Wikipedia-like Articles From Scratch with Large Language Models</h1><table><th><td colSpan=1>Yijia Shao</td><td colSpan=1>Yucheng Jiang</td><td colSpan=1>Theodore A. Kanell</td><td colSpan=1>Peter Xu</td></th><tr><td colSpan=1></td><td colSpan=1>Omar Khattab</td><td colSpan=1>Monica S. Lam</td><td colSpan=1></td></tr></table><p>Stanford University {shaoyj, yuchengj, '"
+"text/plain": [
+"'<html><h1>Assisting in Writing Wikipedia-like Articles From Scratch with Large Language Models</h1><table><th><td colSpan=1>Yijia Shao</td><td colSpan=1>Yucheng Jiang</td><td colSpan=1>Theodore A. Kanell</td><td colSpan=1>Peter Xu</td></th><tr><td colSpan=1></td><td colSpan=1>Omar Khattab</td><td colSpan=1>Monica S. Lam</td><td colSpan=1></td></tr></table><p>Stanford University {shaoyj, yuchengj, '"
+]
 },
 "execution_count": 12,
 "metadata": {},
@@ -298,7 +308,9 @@
 "outputs": [
 {
 "data": {
-"text/plain": "1"
+"text/plain": [
+"1"
+]
 },
 "execution_count": 13,
 "metadata": {},
@@ -358,7 +370,9 @@
 "outputs": [
 {
 "data": {
-"text/plain": "'Assisting in Writing Wikipedia-like Articles From Scratch with Large Language Models\\n | Yijia Shao | Yucheng Jiang | Theodore A. Kanell | Peter Xu\\n | --- | --- | --- | ---\\n | | Omar Khattab | Monica S. Lam | \\n\\nStanford University {shaoyj, yuchengj, tkanell, peterxu, okhattab}@stanford.edu lam@cs.stanford.edu\\nAbstract\\nWe study how to apply large language models to write grounded and organized long'"
+"text/plain": [
+"'Assisting in Writing Wikipedia-like Articles From Scratch with Large Language Models\\n | Yijia Shao | Yucheng Jiang | Theodore A. Kanell | Peter Xu\\n | --- | --- | --- | ---\\n | | Omar Khattab | Monica S. Lam | \\n\\nStanford University {shaoyj, yuchengj, tkanell, peterxu, okhattab}@stanford.edu lam@cs.stanford.edu\\nAbstract\\nWe study how to apply large language models to write grounded and organized long'"
+]
 },
 "execution_count": 3,
 "metadata": {},
@@ -383,7 +397,9 @@
 "outputs": [
 {
 "data": {
-"text/plain": "1"
+"text/plain": [
+"1"
+]
 },
 "execution_count": 4,
 "metadata": {},
@@ -121,8 +121,7 @@
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
 "version": "3.8.10"
-},
-"orig_nbformat": 4
+}
 },
 "nbformat": 4,
 "nbformat_minor": 2
@@ -81,13 +81,13 @@
 },
 {
 "cell_type": "markdown",
-"source": [
-"Use nlp=True to run nlp analysis and generate keywords + summary"
-],
+"id": "98ac26c488315bff",
 "metadata": {
 "collapsed": false
 },
-"id": "98ac26c488315bff"
+"source": [
+"Use nlp=True to run nlp analysis and generate keywords + summary"
+]
 },
 {
 "cell_type": "code",
@@ -120,10 +120,34 @@
 {
 "cell_type": "code",
 "execution_count": 5,
+"id": "ae37e004e0284b1d",
+"metadata": {
+"ExecuteTime": {
+"end_time": "2023-08-02T21:18:19.585758200Z",
+"start_time": "2023-08-02T21:18:19.585758200Z"
+},
+"collapsed": false
+},
 "outputs": [
 {
 "data": {
-"text/plain": "['powell',\n 'know',\n 'donald',\n 'trump',\n 'review',\n 'indictment',\n 'telling',\n 'view',\n 'reasonable',\n 'person',\n 'testimony',\n 'coconspirators',\n 'riot',\n 'representatives',\n 'claims']"
+"text/plain": [
+"['powell',\n",
+" 'know',\n",
+" 'donald',\n",
+" 'trump',\n",
+" 'review',\n",
+" 'indictment',\n",
+" 'telling',\n",
+" 'view',\n",
+" 'reasonable',\n",
+" 'person',\n",
+" 'testimony',\n",
+" 'coconspirators',\n",
+" 'riot',\n",
+" 'representatives',\n",
+" 'claims']"
+]
 },
 "execution_count": 5,
 "metadata": {},
@@ -132,23 +156,25 @@
 ],
 "source": [
 "data[0].metadata[\"keywords\"]"
-],
-"metadata": {
-"collapsed": false,
-"ExecuteTime": {
-"end_time": "2023-08-02T21:18:19.585758200Z",
-"start_time": "2023-08-02T21:18:19.585758200Z"
-}
-},
-"id": "ae37e004e0284b1d"
+]
 },
 {
 "cell_type": "code",
 "execution_count": 6,
+"id": "7676155fb175e53e",
+"metadata": {
+"ExecuteTime": {
+"end_time": "2023-08-02T21:18:19.598966800Z",
+"start_time": "2023-08-02T21:18:19.594950200Z"
+},
+"collapsed": false
+},
 "outputs": [
 {
 "data": {
-"text/plain": "'In testimony to the congressional committee examining the 6 January riot, Mrs Powell said she did not review all of the many claims of election fraud she made, telling them that \"no reasonable person\" would view her claims as fact.\\nNeither she nor her representatives have commented.'"
+"text/plain": [
+"'In testimony to the congressional committee examining the 6 January riot, Mrs Powell said she did not review all of the many claims of election fraud she made, telling them that \"no reasonable person\" would view her claims as fact.\\nNeither she nor her representatives have commented.'"
+]
 },
 "execution_count": 6,
 "metadata": {},
@@ -157,15 +183,7 @@
 ],
 "source": [
 "data[0].metadata[\"summary\"]"
-],
-"metadata": {
-"collapsed": false,
-"ExecuteTime": {
-"end_time": "2023-08-02T21:18:19.598966800Z",
-"start_time": "2023-08-02T21:18:19.594950200Z"
-}
-},
-"id": "7676155fb175e53e"
+]
 }
 ],
 "metadata": {
@@ -32,7 +32,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.document_loaders.csv_loader import CSVLoader\n",
+"from langchain_community.document_loaders import CSVLoader\n",
 "\n",
 "loader = CSVLoader(\"data/corp_sens_data.csv\")\n",
 "documents = loader.load()\n",
@@ -52,8 +52,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.document_loaders.csv_loader import CSVLoader\n",
-"from langchain_community.document_loaders import PebbloSafeLoader\n",
+"from langchain_community.document_loaders import CSVLoader, PebbloSafeLoader\n",
 "\n",
 "loader = PebbloSafeLoader(\n",
 "    CSVLoader(\"data/corp_sens_data.csv\"),\n",
@@ -80,8 +79,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.document_loaders.csv_loader import CSVLoader\n",
-"from langchain_community.document_loaders import PebbloSafeLoader\n",
+"from langchain_community.document_loaders import CSVLoader, PebbloSafeLoader\n",
 "\n",
 "loader = PebbloSafeLoader(\n",
 "    CSVLoader(\"data/corp_sens_data.csv\"),\n",
@@ -109,8 +107,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.document_loaders.csv_loader import CSVLoader\n",
-"from langchain_community.document_loaders import PebbloSafeLoader\n",
+"from langchain_community.document_loaders import CSVLoader, PebbloSafeLoader\n",
 "\n",
 "loader = PebbloSafeLoader(\n",
 "    CSVLoader(\"data/corp_sens_data.csv\"),\n",
@@ -239,8 +239,7 @@
 "language_info": {
 "name": "python",
 "version": "3.11.4"
-},
-"orig_nbformat": 4
+}
 },
 "nbformat": 4,
 "nbformat_minor": 2
@@ -296,8 +296,7 @@
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
 "version": "3.9.13"
-},
-"orig_nbformat": 4
+}
 },
 "nbformat": 4,
 "nbformat_minor": 2
@@ -66,7 +66,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.document_loaders import TextLoader\n",
+"from langchain_community.document_loaders import TextLoader\n",
 "from langchain_community.vectorstores import FAISS\n",
 "from langchain_huggingface import HuggingFaceEmbeddings\n",
 "from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
@@ -646,8 +646,7 @@
 ],
 "source": [
 "from langchain.chains import LLMChain\n",
-"from langchain.docstore.document import Document\n",
-"from langchain.prompts import PromptTemplate\n",
+"from langchain_core.documents import Document\n",
+"from langchain_core.prompts import PromptTemplate\n",
 "from langchain_core.runnables import RunnableParallel, RunnablePassthrough\n",
 "from langchain_google_vertexai import VertexAI\n",
@@ -12,8 +12,7 @@
 ">\n",
 ">[Cypher](https://en.wikipedia.org/wiki/Cypher_(query_language)) is a declarative graph query language that allows for expressive and efficient data querying in a property graph.\n",
 ">\n",
-">[openCypher](https://opencypher.org/) is an open-source implementation of Cypher.",
-"# Neptune Open Cypher QA Chain\n",
+">[openCypher](https://opencypher.org/) is an open-source implementation of Cypher.# Neptune Open Cypher QA Chain\n",
 "This QA chain queries Amazon Neptune using openCypher and returns human readable response\n",
 "\n",
 "LangChain supports both [Neptune Database](https://docs.aws.amazon.com/neptune/latest/userguide/intro.html) and [Neptune Analytics](https://docs.aws.amazon.com/neptune-analytics/latest/userguide/what-is-neptune-analytics.html) with `NeptuneOpenCypherQAChain` \n",
@@ -66,9 +66,9 @@
 "source": [
 "import nest_asyncio\n",
 "from langchain.chains.graph_qa.gremlin import GremlinQAChain\n",
-"from langchain.schema import Document\n",
 "from langchain_community.graphs import GremlinGraph\n",
 "from langchain_community.graphs.graph_document import GraphDocument, Node, Relationship\n",
+"from langchain_core.documents import Document\n",
 "from langchain_openai import AzureChatOpenAI"
 ]
 },
@@ -54,11 +54,11 @@
 "execution_count": 5,
 "id": "035dea0f",
 "metadata": {
-"tags": [],
 "ExecuteTime": {
 "end_time": "2024-03-05T20:58:44.465443Z",
 "start_time": "2024-03-05T20:58:42.399724Z"
-}
+},
+"tags": []
 },
 "outputs": [],
 "source": [
@@ -83,16 +83,18 @@
 "execution_count": 6,
 "id": "98f70927a87e4745",
 "metadata": {
-"collapsed": false,
 "ExecuteTime": {
 "end_time": "2024-03-05T20:58:45.859265Z",
 "start_time": "2024-03-05T20:58:44.466637Z"
-}
+},
+"collapsed": false
 },
 "outputs": [
 {
 "data": {
-"text/plain": "'\\nLangChain is a (database)\\nLangChain is a database for storing and processing documents'"
+"text/plain": [
+"'\\nLangChain is a (database)\\nLangChain is a database for storing and processing documents'"
+]
 },
 "execution_count": 6,
 "metadata": {},
@@ -118,6 +120,10 @@
 },
 {
 "cell_type": "markdown",
+"id": "9965c10269159ed1",
+"metadata": {
+"collapsed": false
+},
 "source": [
 "# AI21 Contextual Answer\n",
 "\n",
@@ -126,14 +132,19 @@
 "\n",
 "This means that if the answer to your question is not in the document,\n",
 "the model will indicate it (instead of providing a false answer)"
-],
-"metadata": {
-"collapsed": false
-},
-"id": "9965c10269159ed1"
+]
 },
 {
 "cell_type": "code",
+"execution_count": 9,
+"id": "411adf42eab80829",
+"metadata": {
+"ExecuteTime": {
+"end_time": "2024-03-05T20:59:00.943426Z",
+"start_time": "2024-03-05T20:59:00.263497Z"
+},
+"collapsed": false
+},
 "outputs": [],
 "source": [
 "from langchain_ai21 import AI21ContextualAnswers\n",
@@ -141,29 +152,29 @@
 "tsm = AI21ContextualAnswers()\n",
 "\n",
 "response = tsm.invoke(input={\"context\": \"Your context\", \"question\": \"Your question\"})"
-],
-"metadata": {
-"collapsed": false,
-"ExecuteTime": {
-"end_time": "2024-03-05T20:59:00.943426Z",
-"start_time": "2024-03-05T20:59:00.263497Z"
-}
-},
-"id": "411adf42eab80829",
-"execution_count": 9
+]
 },
 {
 "cell_type": "markdown",
-"source": [
-"You can also use it with chains and output parsers and vector DBs"
-],
+"id": "af59ffdbf4964875",
 "metadata": {
 "collapsed": false
 },
-"id": "af59ffdbf4964875"
+"source": [
+"You can also use it with chains and output parsers and vector DBs"
+]
 },
 {
 "cell_type": "code",
+"execution_count": 10,
+"id": "bc63830f921b4ac9",
+"metadata": {
+"ExecuteTime": {
+"end_time": "2024-03-05T20:59:07.719225Z",
+"start_time": "2024-03-05T20:59:07.102950Z"
+},
+"collapsed": false
+},
 "outputs": [],
 "source": [
 "from langchain_ai21 import AI21ContextualAnswers\n",
@@ -175,16 +186,7 @@
 "response = chain.invoke(\n",
 "    {\"context\": \"Your context\", \"question\": \"Your question\"},\n",
 ")"
-],
-"metadata": {
-"collapsed": false,
-"ExecuteTime": {
-"end_time": "2024-03-05T20:59:07.719225Z",
-"start_time": "2024-03-05T20:59:07.102950Z"
-}
-},
-"id": "bc63830f921b4ac9",
-"execution_count": 10
+]
 }
 ],
 "metadata": {
@@ -149,7 +149,9 @@
 "outputs": [
 {
 "data": {
-"text/plain": "\" Why couldn't the bicycle stand up by itself?\\n\\nBecause it was two-tired!\""
+"text/plain": [
+"\" Why couldn't the bicycle stand up by itself?\\n\\nBecause it was two-tired!\""
+]
 },
 "execution_count": 5,
 "metadata": {},
@@ -179,7 +181,7 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"\u001B[1mAzureOpenAI\u001B[0m\n",
+"\u001b[1mAzureOpenAI\u001b[0m\n",
 "Params: {'deployment_name': 'gpt-35-turbo-instruct-0914', 'model_name': 'gpt-3.5-turbo-instruct', 'temperature': 0.7, 'top_p': 1, 'frequency_penalty': 0, 'presence_penalty': 0, 'n': 1, 'logit_bias': {}, 'max_tokens': 256}\n"
 ]
 }
@@ -254,7 +254,6 @@
 "pygments_lexer": "ipython3",
 "version": "3.11.4"
 },
-"orig_nbformat": 4,
 "vscode": {
 "interpreter": {
 "hash": "6fa70026b407ae751a5c9e6bd7f7d482379da8ad616f98512780b705c84ee157"
@@ -83,8 +83,8 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
 "from langchain_community.llms import Bedrock\n",
+"from langchain_core.callbacks import StreamingStdOutCallbackHandler\n",
 "\n",
 "llm = Bedrock(\n",
 "    credentials_profile_name=\"bedrock-admin\",\n",
@@ -118,8 +118,8 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.tools import Tool\n",
 "from langchain_community.utilities import GoogleSearchAPIWrapper\n",
+"from langchain_core.tools import Tool\n",
 "\n",
 "search = GoogleSearchAPIWrapper()\n",
 "\n",
@@ -41,8 +41,8 @@
 "outputs": [],
 "source": [
 "from langchain.chains import LLMChain\n",
-"from langchain.schema.messages import AIMessage\n",
 "from langchain_community.llms.chatglm3 import ChatGLM3\n",
+"from langchain_core.messages import AIMessage\n",
 "from langchain_core.prompts import PromptTemplate"
 ]
 },
@@ -119,8 +119,7 @@
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
 "version": "3.9.18"
-},
-"orig_nbformat": 4
+}
 },
 "nbformat": 4,
 "nbformat_minor": 2
@@ -79,7 +79,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
+"from langchain_core.callbacks import StreamingStdOutCallbackHandler\n",
 "\n",
 "llm = CTransformers(\n",
 "    model=\"marella/gpt-2-ggml\", callbacks=[StreamingStdOutCallbackHandler()]\n",
@@ -120,8 +120,7 @@
 "metadata": {
 "language_info": {
 "name": "python"
-},
-"orig_nbformat": 4
+}
 },
 "nbformat": 4,
 "nbformat_minor": 2
@@ -229,7 +229,6 @@
 "pygments_lexer": "ipython3",
 "version": "3.10.12"
 },
-"orig_nbformat": 4,
 "vscode": {
 "interpreter": {
 "hash": "d1d3a3c58a58885896c5459933a599607cdbb9917d7e1ad7516c8786c51f2dd2"
@@ -298,8 +298,7 @@
 {
 "attachments": {},
 "cell_type": "markdown",
-"metadata": {
-},
+"metadata": {},
 "source": [
 "## Wrapping a cluster driver proxy app\n",
 "\n",
@@ -189,8 +189,8 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
 "from langchain_community.llms import EdenAI\n",
+"from langchain_core.callbacks import StreamingStdOutCallbackHandler\n",
 "\n",
 "llm = EdenAI(\n",
 "    callbacks=[StreamingStdOutCallbackHandler()],\n",
@@ -47,9 +47,9 @@
 },
 "outputs": [],
 "source": [
-"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
 "from langchain.chains import LLMChain\n",
 "from langchain_community.llms import GPT4All\n",
+"from langchain_core.callbacks import StreamingStdOutCallbackHandler\n",
 "from langchain_core.prompts import PromptTemplate"
 ]
 },
@@ -192,7 +192,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
+"from langchain_core.callbacks import StreamingStdOutCallbackHandler\n",
 "from langchain_huggingface import HuggingFaceEndpoint\n",
 "\n",
 "llm = HuggingFaceEndpoint(\n",
@@ -63,7 +63,7 @@
 "import os\n",
 "\n",
 "import requests\n",
-"from langchain.tools import tool\n",
+"from langchain_core.tools import tool\n",
 "\n",
 "HF_TOKEN = os.environ.get(\"HUGGINGFACE_API_KEY\")\n",
 "\n",
@@ -46,7 +46,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.cache import InMemoryCache\n",
+"from langchain_community.cache import InMemoryCache\n",
 "\n",
 "set_llm_cache(InMemoryCache())"
 ]
@@ -141,7 +141,7 @@
 "outputs": [],
 "source": [
 "# We can do the same thing with a SQLite cache\n",
-"from langchain.cache import SQLiteCache\n",
+"from langchain_community.cache import SQLiteCache\n",
 "\n",
 "set_llm_cache(SQLiteCache(database_path=\".langchain.db\"))"
 ]
@@ -235,7 +235,7 @@
 "outputs": [],
 "source": [
 "import langchain\n",
-"from langchain.cache import UpstashRedisCache\n",
+"from langchain_community.cache import UpstashRedisCache\n",
 "from upstash_redis import Redis\n",
 "\n",
 "URL = \"<UPSTASH_REDIS_REST_URL>\"\n",
@@ -335,7 +335,7 @@
 "source": [
 "# We can do the same thing with a Redis cache\n",
 "# (make sure your local Redis instance is running first before running this example)\n",
-"from langchain.cache import RedisCache\n",
+"from langchain_community.cache import RedisCache\n",
 "from redis import Redis\n",
 "\n",
 "set_llm_cache(RedisCache(redis_=Redis()))"
@@ -419,7 +419,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.cache import RedisSemanticCache\n",
+"from langchain_community.cache import RedisSemanticCache\n",
 "from langchain_openai import OpenAIEmbeddings\n",
 "\n",
 "set_llm_cache(\n",
@@ -517,7 +517,7 @@
 "from gptcache import Cache\n",
 "from gptcache.manager.factory import manager_factory\n",
 "from gptcache.processor.pre import get_prompt\n",
-"from langchain.cache import GPTCache\n",
+"from langchain_community.cache import GPTCache\n",
 "\n",
 "\n",
 "def get_hashed_name(name):\n",
@@ -616,7 +616,7 @@
 "\n",
 "from gptcache import Cache\n",
 "from gptcache.adapter.api import init_similar_cache\n",
-"from langchain.cache import GPTCache\n",
+"from langchain_community.cache import GPTCache\n",
 "\n",
 "\n",
 "def get_hashed_name(name):\n",
@@ -765,7 +765,7 @@
 "source": [
 "from datetime import timedelta\n",
 "\n",
-"from langchain.cache import MomentoCache\n",
+"from langchain_community.cache import MomentoCache\n",
 "\n",
 "cache_name = \"langchain\"\n",
 "ttl = timedelta(days=1)\n",
@@ -879,7 +879,7 @@
 "source": [
 "# You can define your own declarative SQLAlchemyCache child class to customize the schema used for caching. For example, to support high-speed fulltext prompt indexing with Postgres, use:\n",
 "\n",
-"from langchain.cache import SQLAlchemyCache\n",
+"from langchain_community.cache import SQLAlchemyCache\n",
 "from sqlalchemy import Column, Computed, Index, Integer, Sequence, String, create_engine\n",
 "from sqlalchemy.ext.declarative import declarative_base\n",
 "from sqlalchemy_utils import TSVectorType\n",
@@ -1296,8 +1296,8 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.cache import AstraDBCache\n",
 "from langchain.globals import set_llm_cache\n",
+"from langchain_astradb import AstraDBCache\n",
 "\n",
 "set_llm_cache(\n",
 "    AstraDBCache(\n",
@@ -1384,7 +1384,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.cache import AstraDBSemanticCache\n",
+"from langchain_astradb import AstraDBSemanticCache\n",
 "\n",
 "set_llm_cache(\n",
 "    AstraDBSemanticCache(\n",
@@ -1868,7 +1868,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain_community.docstore.document import Document\n",
+"from langchain_core.documents import Document\n",
 "\n",
 "docs = [Document(page_content=t) for t in texts[:3]]\n",
 "from langchain.chains.summarize import load_summarize_chain"
@@ -169,8 +169,7 @@
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
 "version": "3.10.4"
-},
-"orig_nbformat": 4
+}
 },
 "nbformat": 4,
 "nbformat_minor": 2
@@ -77,8 +77,7 @@
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
 "version": "3.10.4"
-},
-"orig_nbformat": 4
+}
 },
 "nbformat": 4,
 "nbformat_minor": 2
@@ -117,10 +117,10 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.schema.output_parser import StrOutputParser\n",
-"from langchain.schema.runnable import RunnablePassthrough\n",
 "from langchain_community.embeddings import OCIGenAIEmbeddings\n",
 "from langchain_community.vectorstores import FAISS\n",
+"from langchain_core.output_parsers import StrOutputParser\n",
+"from langchain_core.runnables import RunnablePassthrough\n",
 "\n",
 "embeddings = OCIGenAIEmbeddings(\n",
 "    model_id=\"MY_EMBEDDING_MODEL\",\n",
@@ -58,11 +58,11 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.callbacks.stdout import StdOutCallbackHandler\n",
 "from langchain.chains import LLMChain\n",
 "from langchain.globals import set_debug, set_verbose\n",
 "from langchain.memory import ConversationBufferWindowMemory\n",
 "from langchain_community.llms import OpaquePrompts\n",
+"from langchain_core.callbacks import StdOutCallbackHandler\n",
 "from langchain_core.prompts import PromptTemplate\n",
 "from langchain_openai import OpenAI\n",
 "\n",
@@ -291,7 +291,6 @@
 "pygments_lexer": "ipython3",
 "version": "3.8.9"
 },
-"orig_nbformat": 4,
 "vscode": {
 "interpreter": {
 "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6"
@@ -311,7 +311,7 @@
 }
 ],
 "source": [
-"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
+"from langchain_core.callbacks import StreamingStdOutCallbackHandler\n",
 "\n",
 "llm = Replicate(\n",
 "    streaming=True,\n",
@@ -58,7 +58,7 @@
 },
 "outputs": [],
 "source": [
-"from langchain_community.docstore.document import Document"
+"from langchain_core.documents import Document"
 ]
 },
 {
@@ -81,7 +81,9 @@
 "outputs": [
 {
 "data": {
-"text/plain": "LLMResult(generations=[[Generation(text='Hello! How can I assist you today?')]], llm_output=None, run=[RunInfo(run_id=UUID('d8cdcd41-a698-4cbf-a28d-e74f9cd2037b'))])"
+"text/plain": [
+"LLMResult(generations=[[Generation(text='Hello! How can I assist you today?')]], llm_output=None, run=[RunInfo(run_id=UUID('d8cdcd41-a698-4cbf-a28d-e74f9cd2037b'))])"
+]
 },
 "execution_count": 9,
 "metadata": {},
@@ -91,10 +91,10 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
 "from langchain.chains import LLMChain\n",
 "from langchain.globals import set_debug\n",
 "from langchain_community.llms import TextGen\n",
+"from langchain_core.callbacks import StreamingStdOutCallbackHandler\n",
 "from langchain_core.prompts import PromptTemplate\n",
 "\n",
 "set_debug(True)\n",
@@ -29,11 +29,9 @@
 "source": [
 "import time\n",
 "\n",
-"from langchain.callbacks.manager import CallbackManager\n",
-"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
-"\n",
 "# Note importing TitanTakeoffPro instead of TitanTakeoff will work as well both use same object under the hood\n",
 "from langchain_community.llms import TitanTakeoff\n",
+"from langchain_core.callbacks import CallbackManager, StreamingStdOutCallbackHandler\n",
 "from langchain_core.prompts import PromptTemplate"
 ]
 },
@@ -87,7 +87,9 @@
 "outputs": [
 {
 "data": {
-"text/plain": "'好的,下面是一个笑话:\\n\\n大学暑假我配了隐形眼镜,回家给爷爷说,我现在配了隐形眼镜。\\n爷爷让我给他看看,于是,我用小镊子夹了一片给爷爷看。\\n爷爷看完便准备出门,边走还边说:“真高级啊,还真是隐形眼镜!”\\n等爷爷出去后我才发现,我刚没夹起来!'"
+"text/plain": [
+"'好的,下面是一个笑话:\\n\\n大学暑假我配了隐形眼镜,回家给爷爷说,我现在配了隐形眼镜。\\n爷爷让我给他看看,于是,我用小镊子夹了一片给爷爷看。\\n爷爷看完便准备出门,边走还边说:“真高级啊,还真是隐形眼镜!”\\n等爷爷出去后我才发现,我刚没夹起来!'"
+]
 },
 "execution_count": 8,
 "metadata": {},
@@ -168,8 +168,7 @@
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
 "version": "3.10.11"
-},
-"orig_nbformat": 4
+}
 },
 "nbformat": 4,
 "nbformat_minor": 2
@@ -2,6 +2,9 @@
 "cells": [
 {
 "cell_type": "markdown",
+"metadata": {
+"collapsed": false
+},
 "source": [
 "# Zep\n",
 "> Recall, understand, and extract data from chat histories. Power personalized AI experiences.\n",
@@ -29,10 +32,7 @@
 "2. Running an agent and having message automatically added to the store.\n",
 "3. Viewing the enriched messages.\n",
 "4. Vector search over the conversation history."
-],
-"metadata": {
-"collapsed": false
-}
+]
 },
 {
 "cell_type": "code",
@@ -47,11 +47,12 @@
 "source": [
 "from uuid import uuid4\n",
 "\n",
-"from langchain.agents import AgentType, Tool, initialize_agent\n",
+"from langchain.agents import AgentType, initialize_agent\n",
 "from langchain.memory import ZepMemory\n",
-"from langchain.retrievers import ZepRetriever\n",
+"from langchain_community.retrievers import ZepRetriever\n",
 "from langchain_community.utilities import WikipediaAPIWrapper\n",
 "from langchain_core.messages import AIMessage, HumanMessage\n",
+"from langchain_core.tools import Tool\n",
 "from langchain_openai import OpenAI\n",
 "\n",
 "# Set this to your Zep server URL\n",
@@ -257,11 +258,11 @@
 "text": [
 "\n",
 "\n",
-"\u001B[1m> Entering new chain...\u001B[0m\n",
-"\u001B[32;1m\u001B[1;3mThought: Do I need to use a tool? No\n",
-"AI: Parable of the Sower is a prescient novel that speaks to the challenges facing contemporary society, such as climate change, inequality, and violence. It is a cautionary tale that warns of the dangers of unchecked greed and the need for individuals to take responsibility for their own lives and the lives of those around them.\u001B[0m\n",
+"\u001b[1m> Entering new chain...\u001b[0m\n",
+"\u001b[32;1m\u001b[1;3mThought: Do I need to use a tool? No\n",
+"AI: Parable of the Sower is a prescient novel that speaks to the challenges facing contemporary society, such as climate change, inequality, and violence. It is a cautionary tale that warns of the dangers of unchecked greed and the need for individuals to take responsibility for their own lives and the lives of those around them.\u001b[0m\n",
 "\n",
-"\u001B[1m> Finished chain.\u001B[0m\n"
+"\u001b[1m> Finished chain.\u001b[0m\n"
 ]
 },
 {
@ -65,7 +65,8 @@
|
||||
"import os\n",
|
||||
"from datetime import datetime\n",
|
||||
"\n",
|
||||
"from langchain.callbacks import AimCallbackHandler, StdOutCallbackHandler\n",
|
||||
"from langchain_community.callbacks import AimCallbackHandler\n",
|
||||
"from langchain_core.callbacks import StdOutCallbackHandler\n",
|
||||
"from langchain_openai import OpenAI"
|
||||
]
|
||||
},
|
||||
|
|
@ -55,8 +55,8 @@
},
"outputs": [],
"source": [
"from langchain.callbacks import ArthurCallbackHandler\n",
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
"from langchain_community.callbacks import ArthurCallbackHandler\n",
"from langchain_core.callbacks import StreamingStdOutCallbackHandler\n",
"from langchain_core.messages import HumanMessage\n",
"from langchain_openai import ChatOpenAI"
]
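For orientation, a hedged sketch of the streaming-chat pattern these Arthur imports support; the model id is a placeholder, and `from_credentials` is assumed to pick up Arthur credentials from the environment or an interactive prompt.

from langchain_community.callbacks import ArthurCallbackHandler
from langchain_core.callbacks import StreamingStdOutCallbackHandler
from langchain_core.messages import HumanMessage
from langchain_openai import ChatOpenAI

arthur = ArthurCallbackHandler.from_credentials("your-arthur-model-id")  # placeholder id

chat = ChatOpenAI(
    streaming=True,
    temperature=0.1,
    callbacks=[StreamingStdOutCallbackHandler(), arthur],  # stream tokens and log to Arthur
)
chat.invoke([HumanMessage(content="What is a callback handler?")])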
|
@ -88,7 +88,7 @@
},
"outputs": [],
"source": [
"from langchain.callbacks import ClearMLCallbackHandler"
"from langchain_community.callbacks import ClearMLCallbackHandler"
]
},
{
@ -105,7 +105,7 @@
}
],
"source": [
"from langchain.callbacks import StdOutCallbackHandler\n",
"from langchain_core.callbacks import StdOutCallbackHandler\n",
"from langchain_openai import OpenAI\n",
"\n",
"# Setup and use the ClearML Callback\n",
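A sketch of the setup the comment above introduces, assuming ClearML credentials are already configured; project and task names are illustrative.

from langchain_community.callbacks import ClearMLCallbackHandler
from langchain_core.callbacks import StdOutCallbackHandler
from langchain_openai import OpenAI

clearml_callback = ClearMLCallbackHandler(
    task_type="inference",
    project_name="langchain_callback_demo",  # illustrative project name
    task_name="llm",
    visualize=True,
    complexity_metrics=True,  # log text-complexity stats alongside the run
    stream_logs=True,
)
llm = OpenAI(temperature=0, callbacks=[StdOutCallbackHandler(), clearml_callback])
llm.invoke("Tell me a joke")
clearml_callback.flush_tracker(langchain_asset=llm, name="simple_sequential")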
|
@ -121,7 +121,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.callbacks import CometCallbackHandler, StdOutCallbackHandler\n",
"from langchain_community.callbacks import CometCallbackHandler\n",
"from langchain_core.callbacks import StdOutCallbackHandler\n",
"from langchain_openai import OpenAI\n",
"\n",
"comet_callback = CometCallbackHandler(\n",
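The hunk cuts off inside the `CometCallbackHandler(` call; the constructor arguments below are one illustrative configuration, not the values elided by the diff.

from langchain_community.callbacks import CometCallbackHandler
from langchain_core.callbacks import StdOutCallbackHandler
from langchain_openai import OpenAI

comet_callback = CometCallbackHandler(
    project_name="comet-example-langchain",  # illustrative
    complexity_metrics=True,
    stream_logs=True,
    tags=["llm"],
)
llm = OpenAI(temperature=0.9, callbacks=[comet_callback, StdOutCallbackHandler()])
llm.invoke("Tell me a joke")
comet_callback.flush_tracker(llm, finish=True)  # finalize the Comet experiment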
@ -152,8 +153,9 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.callbacks import CometCallbackHandler, StdOutCallbackHandler\n",
"from langchain.chains import LLMChain\n",
"from langchain_community.callbacks import CometCallbackHandler\n",
"from langchain_core.callbacks import StdOutCallbackHandler\n",
"from langchain_core.prompts import PromptTemplate\n",
"from langchain_openai import OpenAI\n",
"\n",
@ -191,7 +193,8 @@
"outputs": [],
"source": [
"from langchain.agents import initialize_agent, load_tools\n",
"from langchain.callbacks import CometCallbackHandler, StdOutCallbackHandler\n",
"from langchain_community.callbacks import CometCallbackHandler\n",
"from langchain_core.callbacks import StdOutCallbackHandler\n",
"from langchain_openai import OpenAI\n",
"\n",
"comet_callback = CometCallbackHandler(\n",
@ -249,8 +252,9 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.callbacks import CometCallbackHandler, StdOutCallbackHandler\n",
"from langchain.chains import LLMChain\n",
"from langchain_community.callbacks import CometCallbackHandler\n",
"from langchain_core.callbacks import StdOutCallbackHandler\n",
"from langchain_core.prompts import PromptTemplate\n",
"from langchain_openai import OpenAI\n",
"from rouge_score import rouge_scorer\n",
@ -339,7 +343,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.callbacks.tracers.comet import CometTracer"
"from langchain_community.callbacks.tracers.comet import CometTracer"
]
}
],
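For context, the tracer migrated above is passed like any other callback; the model and prompt here are illustrative.

from langchain_community.callbacks.tracers.comet import CometTracer
from langchain_openai import ChatOpenAI

# Every chain/LLM event in this call is traced to Comet.
chat = ChatOpenAI(temperature=0, callbacks=[CometTracer()])
chat.invoke("Summarize what a tracer callback does, in one sentence.")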
|
@ -75,8 +75,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.cache import SQLiteCache\n",
"from langchain.globals import set_llm_cache\n",
"from langchain_community.cache import SQLiteCache\n",
"from langchain_openai import OpenAI\n",
"\n",
"set_llm_cache(SQLiteCache(database_path=\"cache.db\"))\n",
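A short sketch of how the cache behaves once set; the prompt is illustrative.

from langchain.globals import set_llm_cache
from langchain_community.cache import SQLiteCache
from langchain_openai import OpenAI

set_llm_cache(SQLiteCache(database_path="cache.db"))
llm = OpenAI(temperature=0)

llm.invoke("Tell me a joke")  # first call hits the API and writes to cache.db
llm.invoke("Tell me a joke")  # identical prompt is served from SQLite, no API call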
|
@ -77,7 +77,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.callbacks import MlflowCallbackHandler\n",
"from langchain_community.callbacks import MlflowCallbackHandler\n",
"from langchain_openai import OpenAI"
]
},
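A hedged sketch of the MLflow handler in use; it assumes a default local MLflow tracking setup.

from langchain_community.callbacks import MlflowCallbackHandler
from langchain_openai import OpenAI

mlflow_callback = MlflowCallbackHandler()  # assumes default/local MLflow tracking
llm = OpenAI(temperature=0, callbacks=[mlflow_callback])
llm.invoke("Tell me a joke")
mlflow_callback.flush_tracker(llm)  # write buffered metrics and artifacts to MLflow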
|
@ -227,8 +227,7 @@
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.9"
},
"orig_nbformat": 4
}
},
"nbformat": 4,
"nbformat_minor": 2
|
@ -177,7 +177,7 @@
"outputs": [],
"source": [
"from langchain.chains import SimpleSequentialChain, TransformChain\n",
"from langchain.sql_database import SQLDatabase\n",
"from langchain_community.utilities import SQLDatabase\n",
"from langchain_experimental.sql import SQLDatabaseChain"
]
},
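For orientation, a minimal sketch combining these imports; the SQLite URI and question are placeholders.

from langchain_community.utilities import SQLDatabase
from langchain_experimental.sql import SQLDatabaseChain
from langchain_openai import OpenAI

db = SQLDatabase.from_uri("sqlite:///example.db")  # placeholder database
sql_chain = SQLDatabaseChain.from_llm(OpenAI(temperature=0), db, verbose=True)
sql_chain.run("How many rows does the largest table contain?")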
|
@ -592,13 +592,13 @@
},
"outputs": [],
"source": [
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
"from langchain.chains.conversational_retrieval.prompts import (\n",
"    CONDENSE_QUESTION_PROMPT,\n",
"    QA_PROMPT,\n",
")\n",
"from langchain.chains.llm import LLMChain\n",
"from langchain.chains.question_answering import load_qa_chain\n",
"from langchain_core.callbacks import StreamingStdOutCallbackHandler\n",
"\n",
"# Construct a ConversationalRetrievalChain with a streaming llm for combine docs\n",
"# and a separate, non-streaming llm for question generation\n",
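A sketch of the assembly those two comment lines describe, assuming a `vectorstore` built earlier in the notebook:

from langchain.chains import ConversationalRetrievalChain
from langchain.chains.conversational_retrieval.prompts import (
    CONDENSE_QUESTION_PROMPT,
    QA_PROMPT,
)
from langchain.chains.llm import LLMChain
from langchain.chains.question_answering import load_qa_chain
from langchain_core.callbacks import StreamingStdOutCallbackHandler
from langchain_openai import OpenAI

llm = OpenAI(temperature=0)  # quiet model for question generation
streaming_llm = OpenAI(
    streaming=True, callbacks=[StreamingStdOutCallbackHandler()], temperature=0
)

question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)
doc_chain = load_qa_chain(streaming_llm, chain_type="stuff", prompt=QA_PROMPT)

qa = ConversationalRetrievalChain(
    retriever=vectorstore.as_retriever(),  # assumed from earlier cells
    combine_docs_chain=doc_chain,
    question_generator=question_generator,
)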
|
@ -30,6 +30,8 @@
"source": [
"import os\n",
"\n",
"from langchain_community.callbacks import wandb_tracing_enabled\n",
"\n",
"os.environ[\"LANGCHAIN_WANDB_TRACING\"] = \"true\"\n",
"\n",
"# wandb documentation to configure wandb using env variables\n",
@ -38,7 +40,6 @@
"os.environ[\"WANDB_PROJECT\"] = \"langchain-tracing\"\n",
"\n",
"from langchain.agents import AgentType, initialize_agent, load_tools\n",
"from langchain.callbacks import wandb_tracing_enabled\n",
"from langchain_openai import OpenAI"
]
},
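For context, a sketch of scoping a single trace with the context manager imported above; the agent and tool choice are illustrative.

from langchain.agents import AgentType, initialize_agent, load_tools
from langchain_community.callbacks import wandb_tracing_enabled
from langchain_openai import OpenAI

llm = OpenAI(temperature=0)
tools = load_tools(["llm-math"], llm=llm)
agent = initialize_agent(
    tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)

# With LANGCHAIN_WANDB_TRACING unset, only this block is traced to W&B.
with wandb_tracing_enabled():
    agent.run("What is 2 raised to the 0.123243 power?")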
|
@ -62,7 +62,8 @@
"source": [
"from datetime import datetime\n",
"\n",
"from langchain.callbacks import StdOutCallbackHandler, WandbCallbackHandler\n",
"from langchain_community.callbacks import WandbCallbackHandler\n",
"from langchain_core.callbacks import StdOutCallbackHandler\n",
"from langchain_openai import OpenAI"
]
},
|
@ -83,7 +83,7 @@
},
"outputs": [],
"source": [
"from langchain.callbacks import WhyLabsCallbackHandler"
"from langchain_community.callbacks import WhyLabsCallbackHandler"
]
},
{
|
@ -151,11 +151,11 @@
"source": [
"import os\n",
"\n",
"from langchain.document_loaders import DirectoryLoader, TextLoader\n",
"from langchain.text_splitter import TokenTextSplitter\n",
"from langchain.vectorstores import AzureSearch\n",
"from langchain_community.document_loaders import DirectoryLoader, TextLoader\n",
"from langchain_community.retrievers import AzureAISearchRetriever\n",
"from langchain_community.vectorstores import AzureSearch\n",
"from langchain_openai import AzureOpenAIEmbeddings, OpenAIEmbeddings\n",
"from langchain_text_splitters import TokenTextSplitter\n",
"\n",
"os.environ[\"AZURE_AI_SEARCH_SERVICE_NAME\"] = \"<YOUR_SEARCH_SERVICE_NAME>\"\n",
"os.environ[\"AZURE_AI_SEARCH_INDEX_NAME\"] = \"langchain-vector-demo\"\n",
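A minimal sketch of querying the index with the retriever imported above; it assumes the env vars set in this cell plus an AZURE_AI_SEARCH_API_KEY, and content_key/top_k are illustrative.

from langchain_community.retrievers import AzureAISearchRetriever

# Service and index names are read from the environment configured above.
retriever = AzureAISearchRetriever(content_key="content", top_k=3)
docs = retriever.invoke("What is Azure AI Search?")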
|
@ -32,6 +32,7 @@
"# This is from https://langchain.readthedocs.io/en/latest/modules/document_loaders/examples/csv.html\n",
"\n",
"from langchain_community.document_loaders import CSVLoader\n",
"from langchain_core.documents import Document\n",
"\n",
"loader = CSVLoader(\n",
"    file_path=\"../../document_loaders/examples/example_data/mlb_teams_2012.csv\"\n",
@ -45,8 +46,6 @@
"import json\n",
"from typing import List\n",
"\n",
"from langchain_community.docstore.document import Document\n",
"\n",
"\n",
"def write_json(path: str, documents: List[Document]) -> None:\n",
"    results = [{\"text\": doc.page_content} for doc in documents]\n",
|
@ -647,7 +647,7 @@
}
],
"source": [
"from langchain.retrievers import DocArrayRetriever\n",
"from langchain_community.retrievers import DocArrayRetriever\n",
"\n",
"# create a retriever\n",
"retriever = DocArrayRetriever(\n",
@ -687,7 +687,7 @@
}
],
"source": [
"from langchain.retrievers import DocArrayRetriever\n",
"from langchain_community.retrievers import DocArrayRetriever\n",
"\n",
"# create a retriever\n",
"retriever = DocArrayRetriever(\n",
@ -729,7 +729,7 @@
}
],
"source": [
"from langchain.retrievers import DocArrayRetriever\n",
"from langchain_community.retrievers import DocArrayRetriever\n",
"\n",
"# create a retriever\n",
"retriever = DocArrayRetriever(\n",
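The three hunks above all cut off inside the same constructor; a hedged sketch of its typical shape, where `db` and `embeddings` stand in for the DocArray index and embedding model built in the surrounding notebook:

from langchain_community.retrievers import DocArrayRetriever

# create a retriever over an existing DocArray index
retriever = DocArrayRetriever(
    index=db,  # assumed DocArray document index from earlier cells
    embeddings=embeddings,  # assumed embedding model from earlier cells
    search_field="embedding",
    content_field="text",
)
doc = retriever.invoke("some query about the indexed documents")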
|
@ -37,7 +37,7 @@
"\n",
"from elasticsearch import Elasticsearch\n",
"from elasticsearch.helpers import bulk\n",
"from langchain.embeddings import DeterministicFakeEmbedding\n",
"from langchain_community.embeddings import DeterministicFakeEmbedding\n",
"from langchain_core.documents import Document\n",
"from langchain_core.embeddings import Embeddings\n",
"from langchain_elasticsearch import ElasticsearchRetriever"
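For context, the fake embedding migrated above produces stable vectors, which keeps the Elasticsearch examples reproducible without an embedding API:

from langchain_community.embeddings import DeterministicFakeEmbedding

embeddings = DeterministicFakeEmbedding(size=256)  # fixed output dimensionality
vec = embeddings.embed_query("hello")  # same text always yields the same 256 floats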
|
@ -62,14 +62,14 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.docstore.document import Document\n",
"from langchain.text_splitter import CharacterTextSplitter\n",
"from langchain_community.document_loaders import TextLoader\n",
"from langchain_community.vectorstores import (\n",
"    Kinetica,\n",
"    KineticaSettings,\n",
")\n",
"from langchain_openai import OpenAIEmbeddings"
"from langchain_core.documents import Document\n",
"from langchain_openai import OpenAIEmbeddings\n",
"from langchain_text_splitters import CharacterTextSplitter"
]
},
{
|
@ -87,7 +87,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.vectorstores import AstraDB\n",
"from langchain_community.vectorstores import AstraDB\n",
"from langchain_core.documents import Document\n",
"\n",
"docs = [\n",
|
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains.query_constructor.base import AttributeInfo\n",
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain.retrievers.self_query.base import SelfQueryRetriever\n",
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"\n",
|
||||
"metadata_field_info = [\n",
|
||||
" AttributeInfo(\n",
|
||||
|
@ -72,8 +72,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema import Document\n",
"from langchain_community.vectorstores import Dingo\n",
"from langchain_core.documents import Document\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"embeddings = OpenAIEmbeddings()\n",
|
@ -288,7 +288,12 @@
"outputs": [
{
"data": {
"text/plain": "[Document(page_content='The Dark Knight is a 2008 superhero film directed by Christopher Nolan.', metadata={'year': 2008, 'rating': '9.0', 'genre': 'science fiction', 'director': 'Christopher Nolan'}),\n Document(page_content='The Avengers is a 2012 American superhero film based on the Marvel Comics superhero team of the same name.', metadata={'year': 2012, 'rating': '8.0', 'genre': 'science fiction', 'director': 'Joss Whedon'}),\n Document(page_content='Black Panther is a 2018 American superhero film based on the Marvel Comics character of the same name.', metadata={'year': 2018, 'rating': '7.3', 'genre': 'science fiction', 'director': 'Ryan Coogler'}),\n Document(page_content='The Godfather is a 1972 American crime film directed by Francis Ford Coppola.', metadata={'year': 1972, 'rating': '9.2', 'genre': 'crime', 'director': 'Francis Ford Coppola'})]"
"text/plain": [
"[Document(page_content='The Dark Knight is a 2008 superhero film directed by Christopher Nolan.', metadata={'year': 2008, 'rating': '9.0', 'genre': 'science fiction', 'director': 'Christopher Nolan'}),\n",
" Document(page_content='The Avengers is a 2012 American superhero film based on the Marvel Comics superhero team of the same name.', metadata={'year': 2012, 'rating': '8.0', 'genre': 'science fiction', 'director': 'Joss Whedon'}),\n",
" Document(page_content='Black Panther is a 2018 American superhero film based on the Marvel Comics character of the same name.', metadata={'year': 2018, 'rating': '7.3', 'genre': 'science fiction', 'director': 'Ryan Coogler'}),\n",
" Document(page_content='The Godfather is a 1972 American crime film directed by Francis Ford Coppola.', metadata={'year': 1972, 'rating': '9.2', 'genre': 'crime', 'director': 'Francis Ford Coppola'})]"
]
},
"execution_count": 6,
"metadata": {},
@ -314,7 +319,10 @@
"outputs": [
{
"data": {
"text/plain": "[Document(page_content='The Avengers is a 2012 American superhero film based on the Marvel Comics superhero team of the same name.', metadata={'year': 2012, 'rating': '8.0', 'genre': 'science fiction', 'director': 'Joss Whedon'}),\n Document(page_content='Black Panther is a 2018 American superhero film based on the Marvel Comics character of the same name.', metadata={'year': 2018, 'rating': '7.3', 'genre': 'science fiction', 'director': 'Ryan Coogler'})]"
"text/plain": [
"[Document(page_content='The Avengers is a 2012 American superhero film based on the Marvel Comics superhero team of the same name.', metadata={'year': 2012, 'rating': '8.0', 'genre': 'science fiction', 'director': 'Joss Whedon'}),\n",
" Document(page_content='Black Panther is a 2018 American superhero film based on the Marvel Comics character of the same name.', metadata={'year': 2018, 'rating': '7.3', 'genre': 'science fiction', 'director': 'Ryan Coogler'})]"
]
},
"execution_count": 7,
"metadata": {},
@ -340,7 +348,10 @@
"outputs": [
{
"data": {
"text/plain": "[Document(page_content='The Avengers is a 2012 American superhero film based on the Marvel Comics superhero team of the same name.', metadata={'year': 2012, 'rating': '8.0', 'genre': 'science fiction', 'director': 'Joss Whedon'}),\n Document(page_content='Black Panther is a 2018 American superhero film based on the Marvel Comics character of the same name.', metadata={'year': 2018, 'rating': '7.3', 'genre': 'science fiction', 'director': 'Ryan Coogler'})]"
"text/plain": [
"[Document(page_content='The Avengers is a 2012 American superhero film based on the Marvel Comics superhero team of the same name.', metadata={'year': 2012, 'rating': '8.0', 'genre': 'science fiction', 'director': 'Joss Whedon'}),\n",
" Document(page_content='Black Panther is a 2018 American superhero film based on the Marvel Comics character of the same name.', metadata={'year': 2018, 'rating': '7.3', 'genre': 'science fiction', 'director': 'Ryan Coogler'})]"
]
},
"execution_count": 8,
"metadata": {},
@ -403,7 +414,10 @@
"outputs": [
{
"data": {
"text/plain": "[Document(page_content='The Dark Knight is a 2008 superhero film directed by Christopher Nolan.', metadata={'year': 2008, 'rating': '9.0', 'genre': 'science fiction', 'director': 'Christopher Nolan'}),\n Document(page_content='The Avengers is a 2012 American superhero film based on the Marvel Comics superhero team of the same name.', metadata={'year': 2012, 'rating': '8.0', 'genre': 'science fiction', 'director': 'Joss Whedon'})]"
"text/plain": [
"[Document(page_content='The Dark Knight is a 2008 superhero film directed by Christopher Nolan.', metadata={'year': 2008, 'rating': '9.0', 'genre': 'science fiction', 'director': 'Christopher Nolan'}),\n",
" Document(page_content='The Avengers is a 2012 American superhero film based on the Marvel Comics superhero team of the same name.', metadata={'year': 2012, 'rating': '8.0', 'genre': 'science fiction', 'director': 'Joss Whedon'})]"
]
},
"execution_count": 10,
"metadata": {},
|
@ -2,6 +2,9 @@
"cells": [
{
"cell_type": "markdown",
"metadata": {
"collapsed": false
},
"source": [
"# Zep\n",
"## Retriever Example for [Zep](https://docs.getzep.com/)\n",
@ -18,10 +21,7 @@
"\n",
"> Zep Open Source project: [https://github.com/getzep/zep](https://github.com/getzep/zep)\n",
"> Zep Open Source Docs: [https://docs.getzep.com/](https://docs.getzep.com/)"
],
"metadata": {
"collapsed": false
}
]
},
{
"attachments": {},
|
@ -132,7 +132,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.embeddings import CacheBackedEmbeddings, OpenAIEmbeddings\n",
"from langchain.embeddings import CacheBackedEmbeddings\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"embeddings = CacheBackedEmbeddings(\n",
"    underlying_embeddings=OpenAIEmbeddings(), document_embedding_store=store\n",
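The hunk cuts off inside the constructor; note the cache is more commonly built via `from_bytes_store`, sketched here with the `store` created earlier in the notebook:

from langchain.embeddings import CacheBackedEmbeddings
from langchain_openai import OpenAIEmbeddings

underlying = OpenAIEmbeddings()
embeddings = CacheBackedEmbeddings.from_bytes_store(
    underlying, store, namespace=underlying.model  # namespace avoids cross-model collisions
)
embeddings.embed_documents(["hello"])  # repeated identical calls read from the cache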
|
@ -43,7 +43,7 @@
}
],
"source": [
"from langchain.storage import RedisStore\n",
"from langchain_community.storage import RedisStore\n",
"\n",
"store = RedisStore(redis_url=\"redis://localhost:6379\")\n",
"\n",
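For context, RedisStore is a byte store, so values round-trip as bytes; a minimal sketch against the local Redis above:

from langchain_community.storage import RedisStore

store = RedisStore(redis_url="redis://localhost:6379")
store.mset([("key1", b"value1"), ("key2", b"value2")])
print(store.mget(["key1", "key2"]))  # [b'value1', b'value2']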