mirror of
https://github.com/hwchase17/langchain.git
synced 2026-02-12 04:01:05 +00:00
Compare commits
141 Commits
eugene/run
...
v0.1.17rc1
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
402298e376 | ||
|
|
989e4a92c2 | ||
|
|
2fa0ff1a2d | ||
|
|
267ee9db4c | ||
|
|
078c5d9bc6 | ||
|
|
d4aec8fc8f | ||
|
|
bc7af5fd7e | ||
|
|
abf1f4c124 | ||
|
|
bf16cefd18 | ||
|
|
c9fc0447ec | ||
|
|
38eccab3ae | ||
|
|
e1c2e2fdfa | ||
|
|
84b8e67c9c | ||
|
|
465fbaa30b | ||
|
|
12c906f6ce | ||
|
|
5653f36adc | ||
|
|
fe1304afc4 | ||
|
|
6598757037 | ||
|
|
d95e9fb67f | ||
|
|
9281841cfe | ||
|
|
7d8d0229fa | ||
|
|
4c437ebb9c | ||
|
|
c262cef1fb | ||
|
|
26455d156d | ||
|
|
891ae37437 | ||
|
|
28df4750ef | ||
|
|
5f1d1666e3 | ||
|
|
b54b19ba1c | ||
|
|
5e60d65917 | ||
|
|
898362de81 | ||
|
|
63a07f52df | ||
|
|
fd1061e7bf | ||
|
|
dc921f0823 | ||
|
|
a5028b6356 | ||
|
|
1202017c56 | ||
|
|
f386f71bb3 | ||
|
|
05ae8ca7d4 | ||
|
|
fdabd3cdf5 | ||
|
|
6986e44959 | ||
|
|
b8db73233c | ||
|
|
52896258ee | ||
|
|
520972fd0f | ||
|
|
748a6ae609 | ||
|
|
37cbbc00a9 | ||
|
|
a6b8ff23bd | ||
|
|
eca3640af7 | ||
|
|
82b5bdc7a1 | ||
|
|
baefbfb14e | ||
|
|
92969d49cb | ||
|
|
53bb7dbd29 | ||
|
|
ed26149a29 | ||
|
|
5b83130855 | ||
|
|
540f384197 | ||
|
|
ffad3985a1 | ||
|
|
6ccecf2363 | ||
|
|
9e694963a4 | ||
|
|
5da9dd1195 | ||
|
|
7c5063ef60 | ||
|
|
c2d09a5186 | ||
|
|
a936f696a6 | ||
|
|
2cd907ad7e | ||
|
|
2968f20970 | ||
|
|
481d3855dc | ||
|
|
9b7fb381a4 | ||
|
|
9e983c9500 | ||
|
|
6353991498 | ||
|
|
a9e2e98708 | ||
|
|
1aef8116de | ||
|
|
c8fd51e8c8 | ||
|
|
5ecebf168c | ||
|
|
243ba71b28 | ||
|
|
43c041cda5 | ||
|
|
a1614b88ac | ||
|
|
493afe4d8d | ||
|
|
9efab3ed66 | ||
|
|
13751c3297 | ||
|
|
0186e4e633 | ||
|
|
12e5ec6de3 | ||
|
|
8c95ac3145 | ||
|
|
477eb1745c | ||
|
|
a9c7d47c03 | ||
|
|
5ab3f9a995 | ||
|
|
70ae59bcfe | ||
|
|
8d1167b32f | ||
|
|
87d31a3ec0 | ||
|
|
d8aa72f51d | ||
|
|
3bcfbcc871 | ||
|
|
30e48c9878 | ||
|
|
6debadaa70 | ||
|
|
7984206c95 | ||
|
|
9111d3a636 | ||
|
|
06b04b80b8 | ||
|
|
5a3c65a756 | ||
|
|
ddc2274aea | ||
|
|
6622829c67 | ||
|
|
a7c347ab35 | ||
|
|
72f720fa38 | ||
|
|
ceea324071 | ||
|
|
42de5168b1 | ||
|
|
1544c9d050 | ||
|
|
7a78068cd4 | ||
|
|
d91fd8cdcb | ||
|
|
1f15b0885d | ||
|
|
4f16714195 | ||
|
|
f8598a7e48 | ||
|
|
30c7951505 | ||
|
|
5560cc448c | ||
|
|
1c89e45c14 | ||
|
|
ad6b5f84e5 | ||
|
|
4f67ce485a | ||
|
|
a2cc9b55ba | ||
|
|
9428923bab | ||
|
|
645b1e142e | ||
|
|
7a922f3e48 | ||
|
|
b481b73805 | ||
|
|
ed980601e1 | ||
|
|
be51cd3bc9 | ||
|
|
c807f0a6dd | ||
|
|
dc61e23886 | ||
|
|
6a0d44d632 | ||
|
|
fa4d6f9f8b | ||
|
|
0ae5027d98 | ||
|
|
eb18f4e155 | ||
|
|
2a11a30572 | ||
|
|
936c6cc74a | ||
|
|
38adbfdf34 | ||
|
|
ce23f8293a | ||
|
|
c010ec8b71 | ||
|
|
939d113d10 | ||
|
|
bb69819267 | ||
|
|
1c7b3c75a7 | ||
|
|
d0cee65cdc | ||
|
|
5ae738c4fe | ||
|
|
cb6e5e56c2 | ||
|
|
c909ae0152 | ||
|
|
06d18c106d | ||
|
|
d6470aab60 | ||
|
|
3a750e130c | ||
|
|
f111efeb6e | ||
|
|
e5f5d9ff56 | ||
|
|
75ffe51bbe |
12
.github/workflows/_release.yml
vendored
12
.github/workflows/_release.yml
vendored
@@ -13,6 +13,11 @@ on:
|
||||
required: true
|
||||
type: string
|
||||
default: 'libs/langchain'
|
||||
dangerous-nonmaster-release:
|
||||
required: false
|
||||
type: boolean
|
||||
default: false
|
||||
description: "Release from a non-master branch (danger!)"
|
||||
|
||||
env:
|
||||
PYTHON_VERSION: "3.11"
|
||||
@@ -20,7 +25,7 @@ env:
|
||||
|
||||
jobs:
|
||||
build:
|
||||
if: github.ref == 'refs/heads/master'
|
||||
if: github.ref == 'refs/heads/master' || inputs.dangerous-nonmaster-release
|
||||
environment: Scheduled testing
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
@@ -75,6 +80,7 @@ jobs:
|
||||
./.github/workflows/_test_release.yml
|
||||
with:
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
dangerous-nonmaster-release: ${{ inputs.dangerous-nonmaster-release }}
|
||||
secrets: inherit
|
||||
|
||||
pre-release-checks:
|
||||
@@ -112,7 +118,7 @@ jobs:
|
||||
PKG_NAME: ${{ needs.build.outputs.pkg-name }}
|
||||
VERSION: ${{ needs.build.outputs.version }}
|
||||
# Here we use:
|
||||
# - The default regular PyPI index as the *primary* index, meaning
|
||||
# - The default regular PyPI index as the *primary* index, meaning
|
||||
# that it takes priority (https://pypi.org/simple)
|
||||
# - The test PyPI index as an extra index, so that any dependencies that
|
||||
# are not found on test PyPI can be resolved and installed anyway.
|
||||
@@ -301,4 +307,4 @@ jobs:
|
||||
draft: false
|
||||
generateReleaseNotes: true
|
||||
tag: v${{ needs.build.outputs.version }}
|
||||
commit: master
|
||||
commit: ${{ github.sha }}
|
||||
|
||||
7
.github/workflows/_test_release.yml
vendored
7
.github/workflows/_test_release.yml
vendored
@@ -7,6 +7,11 @@ on:
|
||||
required: true
|
||||
type: string
|
||||
description: "From which folder this pipeline executes"
|
||||
dangerous-nonmaster-release:
|
||||
required: false
|
||||
type: boolean
|
||||
default: false
|
||||
description: "Release from a non-master branch (danger!)"
|
||||
|
||||
env:
|
||||
POETRY_VERSION: "1.7.1"
|
||||
@@ -14,7 +19,7 @@ env:
|
||||
|
||||
jobs:
|
||||
build:
|
||||
if: github.ref == 'refs/heads/master'
|
||||
if: github.ref == 'refs/heads/master' || inputs.dangerous-nonmaster-release
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
outputs:
|
||||
|
||||
6
.github/workflows/scheduled_test.yml
vendored
6
.github/workflows/scheduled_test.yml
vendored
@@ -19,11 +19,11 @@ jobs:
|
||||
working-directory:
|
||||
- "libs/partners/openai"
|
||||
- "libs/partners/anthropic"
|
||||
# - "libs/partners/ai21" # standard-tests broken
|
||||
- "libs/partners/ai21"
|
||||
- "libs/partners/fireworks"
|
||||
# - "libs/partners/groq" # rate-limited
|
||||
- "libs/partners/groq"
|
||||
- "libs/partners/mistralai"
|
||||
# - "libs/partners/together" # rate-limited
|
||||
- "libs/partners/together"
|
||||
name: Python ${{ matrix.python-version }} - ${{ matrix.working-directory }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
@@ -604,7 +604,7 @@
|
||||
"source": [
|
||||
"# Check retrieval\n",
|
||||
"query = \"Give me company names that are interesting investments based on EV / NTM and NTM rev growth. Consider EV / NTM multiples vs historical?\"\n",
|
||||
"docs = retriever_multi_vector_img.get_relevant_documents(query, limit=6)\n",
|
||||
"docs = retriever_multi_vector_img.invoke(query, limit=6)\n",
|
||||
"\n",
|
||||
"# We get 4 docs\n",
|
||||
"len(docs)"
|
||||
@@ -630,7 +630,7 @@
|
||||
"source": [
|
||||
"# Check retrieval\n",
|
||||
"query = \"What are the EV / NTM and NTM rev growth for MongoDB, Cloudflare, and Datadog?\"\n",
|
||||
"docs = retriever_multi_vector_img.get_relevant_documents(query, limit=6)\n",
|
||||
"docs = retriever_multi_vector_img.invoke(query, limit=6)\n",
|
||||
"\n",
|
||||
"# We get 4 docs\n",
|
||||
"len(docs)"
|
||||
|
||||
@@ -256,7 +256,7 @@
|
||||
" \"\"\"Make image summary\"\"\"\n",
|
||||
" model = ChatVertexAI(model_name=\"gemini-pro-vision\", max_output_tokens=1024)\n",
|
||||
"\n",
|
||||
" msg = model(\n",
|
||||
" msg = model.invoke(\n",
|
||||
" [\n",
|
||||
" HumanMessage(\n",
|
||||
" content=[\n",
|
||||
@@ -604,7 +604,7 @@
|
||||
],
|
||||
"source": [
|
||||
"query = \"What are the EV / NTM and NTM rev growth for MongoDB, Cloudflare, and Datadog?\"\n",
|
||||
"docs = retriever_multi_vector_img.get_relevant_documents(query, limit=1)\n",
|
||||
"docs = retriever_multi_vector_img.invoke(query, limit=1)\n",
|
||||
"\n",
|
||||
"# We get 2 docs\n",
|
||||
"len(docs)"
|
||||
|
||||
@@ -47,6 +47,7 @@ Notebook | Description
|
||||
[press_releases.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/press_releases.ipynb) | Retrieve and query company press release data powered by [Kay.ai](https://kay.ai).
|
||||
[program_aided_language_model.i...](https://github.com/langchain-ai/langchain/tree/master/cookbook/program_aided_language_model.ipynb) | Implement program-aided language models as described in the provided research paper.
|
||||
[qa_citations.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/qa_citations.ipynb) | Different ways to get a model to cite its sources.
|
||||
[rag_upstage_layout_analysis_groundedness_check.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/rag_upstage_layout_analysis_groundedness_check.ipynb) | End-to-end RAG example using Upstage Layout Analysis and Groundedness Check.
|
||||
[retrieval_in_sql.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/retrieval_in_sql.ipynb) | Perform retrieval-augmented-generation (rag) on a PostgreSQL database using pgvector.
|
||||
[sales_agent_with_context.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/sales_agent_with_context.ipynb) | Implement a context-aware ai sales agent, salesgpt, that can have natural sales conversations, interact with other systems, and use a product knowledge base to discuss a company's offerings.
|
||||
[self_query_hotel_search.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/self_query_hotel_search.ipynb) | Build a hotel room search feature with self-querying retrieval, using a specific hotel recommendation dataset.
|
||||
|
||||
@@ -75,7 +75,7 @@
|
||||
"\n",
|
||||
"Apply to the [`LLaMA2`](https://arxiv.org/pdf/2307.09288.pdf) paper. \n",
|
||||
"\n",
|
||||
"We use the Unstructured [`partition_pdf`](https://unstructured-io.github.io/unstructured/bricks/partition.html#partition-pdf), which segments a PDF document by using a layout model. \n",
|
||||
"We use the Unstructured [`partition_pdf`](https://unstructured-io.github.io/unstructured/core/partition.html#partition-pdf), which segments a PDF document by using a layout model. \n",
|
||||
"\n",
|
||||
"This layout model makes it possible to extract elements, such as tables, from pdfs. \n",
|
||||
"\n",
|
||||
|
||||
@@ -562,9 +562,7 @@
|
||||
],
|
||||
"source": [
|
||||
"# We can retrieve this table\n",
|
||||
"retriever.get_relevant_documents(\n",
|
||||
" \"What are results for LLaMA across across domains / subjects?\"\n",
|
||||
")[1]"
|
||||
"retriever.invoke(\"What are results for LLaMA across across domains / subjects?\")[1]"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -614,9 +612,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"retriever.get_relevant_documents(\"Images / figures with playful and creative examples\")[\n",
|
||||
" 1\n",
|
||||
"]"
|
||||
"retriever.invoke(\"Images / figures with playful and creative examples\")[1]"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -501,9 +501,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"retriever.get_relevant_documents(\"Images / figures with playful and creative examples\")[\n",
|
||||
" 0\n",
|
||||
"]"
|
||||
"retriever.invoke(\"Images / figures with playful and creative examples\")[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -342,7 +342,7 @@
|
||||
"# Testing on retrieval\n",
|
||||
"query = \"What percentage of CPI is dedicated to Housing, and how does it compare to the combined percentage of Medical Care, Apparel, and Other Goods and Services?\"\n",
|
||||
"suffix_for_images = \" Include any pie charts, graphs, or tables.\"\n",
|
||||
"docs = retriever_multi_vector_img.get_relevant_documents(query + suffix_for_images)"
|
||||
"docs = retriever_multi_vector_img.invoke(query + suffix_for_images)"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -90,7 +90,7 @@
|
||||
" ) -> AIMessage:\n",
|
||||
" messages = self.update_messages(input_message)\n",
|
||||
"\n",
|
||||
" output_message = self.model(messages)\n",
|
||||
" output_message = self.model.invoke(messages)\n",
|
||||
" self.update_messages(output_message)\n",
|
||||
"\n",
|
||||
" return output_message"
|
||||
|
||||
@@ -169,7 +169,7 @@
|
||||
"\n",
|
||||
"def get_tools(query):\n",
|
||||
" # Get documents, which contain the Plugins to use\n",
|
||||
" docs = retriever.get_relevant_documents(query)\n",
|
||||
" docs = retriever.invoke(query)\n",
|
||||
" # Get the toolkits, one for each plugin\n",
|
||||
" tool_kits = [toolkits_dict[d.metadata[\"plugin_name\"]] for d in docs]\n",
|
||||
" # Get the tools: a separate NLAChain for each endpoint\n",
|
||||
|
||||
@@ -193,7 +193,7 @@
|
||||
"\n",
|
||||
"def get_tools(query):\n",
|
||||
" # Get documents, which contain the Plugins to use\n",
|
||||
" docs = retriever.get_relevant_documents(query)\n",
|
||||
" docs = retriever.invoke(query)\n",
|
||||
" # Get the toolkits, one for each plugin\n",
|
||||
" tool_kits = [toolkits_dict[d.metadata[\"plugin_name\"]] for d in docs]\n",
|
||||
" # Get the tools: a separate NLAChain for each endpoint\n",
|
||||
|
||||
@@ -142,7 +142,7 @@
|
||||
"\n",
|
||||
"\n",
|
||||
"def get_tools(query):\n",
|
||||
" docs = retriever.get_relevant_documents(query)\n",
|
||||
" docs = retriever.invoke(query)\n",
|
||||
" return [ALL_TOOLS[d.metadata[\"index\"]] for d in docs]"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -362,7 +362,7 @@
|
||||
],
|
||||
"source": [
|
||||
"llm = OpenAI()\n",
|
||||
"llm(query)"
|
||||
"llm.invoke(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -108,7 +108,7 @@
|
||||
" return obs_message\n",
|
||||
"\n",
|
||||
" def _act(self):\n",
|
||||
" act_message = self.model(self.message_history)\n",
|
||||
" act_message = self.model.invoke(self.message_history)\n",
|
||||
" self.message_history.append(act_message)\n",
|
||||
" action = int(self.action_parser.parse(act_message.content)[\"action\"])\n",
|
||||
" return action\n",
|
||||
|
||||
@@ -206,7 +206,7 @@
|
||||
" print(\"---RETRIEVE---\")\n",
|
||||
" state_dict = state[\"keys\"]\n",
|
||||
" question = state_dict[\"question\"]\n",
|
||||
" documents = retriever.get_relevant_documents(question)\n",
|
||||
" documents = retriever.invoke(question)\n",
|
||||
" return {\"keys\": {\"documents\": documents, \"question\": question}}\n",
|
||||
"\n",
|
||||
"\n",
|
||||
|
||||
@@ -213,7 +213,7 @@
|
||||
" print(\"---RETRIEVE---\")\n",
|
||||
" state_dict = state[\"keys\"]\n",
|
||||
" question = state_dict[\"question\"]\n",
|
||||
" documents = retriever.get_relevant_documents(question)\n",
|
||||
" documents = retriever.invoke(question)\n",
|
||||
" return {\"keys\": {\"documents\": documents, \"question\": question}}\n",
|
||||
"\n",
|
||||
"\n",
|
||||
|
||||
@@ -435,7 +435,7 @@
|
||||
" display(HTML(image_html))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"docs = retriever.get_relevant_documents(\"Woman with children\", k=10)\n",
|
||||
"docs = retriever.invoke(\"Woman with children\", k=10)\n",
|
||||
"for doc in docs:\n",
|
||||
" if is_base64(doc.page_content):\n",
|
||||
" plt_img_base64(doc.page_content)\n",
|
||||
|
||||
@@ -443,7 +443,7 @@
|
||||
"\n",
|
||||
"\n",
|
||||
"query = \"Woman with children\"\n",
|
||||
"docs = retriever.get_relevant_documents(query, k=10)\n",
|
||||
"docs = retriever.invoke(query, k=10)\n",
|
||||
"\n",
|
||||
"for doc in docs:\n",
|
||||
" if is_base64(doc.page_content):\n",
|
||||
|
||||
@@ -74,7 +74,7 @@
|
||||
" Applies the chatmodel to the message history\n",
|
||||
" and returns the message string\n",
|
||||
" \"\"\"\n",
|
||||
" message = self.model(\n",
|
||||
" message = self.model.invoke(\n",
|
||||
" [\n",
|
||||
" self.system_message,\n",
|
||||
" HumanMessage(content=\"\\n\".join(self.message_history + [self.prefix])),\n",
|
||||
|
||||
@@ -79,7 +79,7 @@
|
||||
" Applies the chatmodel to the message history\n",
|
||||
" and returns the message string\n",
|
||||
" \"\"\"\n",
|
||||
" message = self.model(\n",
|
||||
" message = self.model.invoke(\n",
|
||||
" [\n",
|
||||
" self.system_message,\n",
|
||||
" HumanMessage(content=\"\\n\".join(self.message_history + [self.prefix])),\n",
|
||||
@@ -234,7 +234,7 @@
|
||||
" termination_clause=self.termination_clause if self.stop else \"\",\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" self.response = self.model(\n",
|
||||
" self.response = self.model.invoke(\n",
|
||||
" [\n",
|
||||
" self.system_message,\n",
|
||||
" HumanMessage(content=response_prompt),\n",
|
||||
@@ -263,7 +263,7 @@
|
||||
" speaker_names=speaker_names,\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" choice_string = self.model(\n",
|
||||
" choice_string = self.model.invoke(\n",
|
||||
" [\n",
|
||||
" self.system_message,\n",
|
||||
" HumanMessage(content=choice_prompt),\n",
|
||||
@@ -299,7 +299,7 @@
|
||||
" ),\n",
|
||||
" next_speaker=self.next_speaker,\n",
|
||||
" )\n",
|
||||
" message = self.model(\n",
|
||||
" message = self.model.invoke(\n",
|
||||
" [\n",
|
||||
" self.system_message,\n",
|
||||
" HumanMessage(content=next_prompt),\n",
|
||||
|
||||
@@ -71,7 +71,7 @@
|
||||
" Applies the chatmodel to the message history\n",
|
||||
" and returns the message string\n",
|
||||
" \"\"\"\n",
|
||||
" message = self.model(\n",
|
||||
" message = self.model.invoke(\n",
|
||||
" [\n",
|
||||
" self.system_message,\n",
|
||||
" HumanMessage(content=\"\\n\".join(self.message_history + [self.prefix])),\n",
|
||||
@@ -164,7 +164,7 @@
|
||||
" message_history=\"\\n\".join(self.message_history),\n",
|
||||
" recent_message=self.message_history[-1],\n",
|
||||
" )\n",
|
||||
" bid_string = self.model([SystemMessage(content=prompt)]).content\n",
|
||||
" bid_string = self.model.invoke([SystemMessage(content=prompt)]).content\n",
|
||||
" return bid_string"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -129,7 +129,7 @@
|
||||
" return obs_message\n",
|
||||
"\n",
|
||||
" def _act(self):\n",
|
||||
" act_message = self.model(self.message_history)\n",
|
||||
" act_message = self.model.invoke(self.message_history)\n",
|
||||
" self.message_history.append(act_message)\n",
|
||||
" action = int(self.action_parser.parse(act_message.content)[\"action\"])\n",
|
||||
" return action\n",
|
||||
|
||||
@@ -168,7 +168,7 @@
|
||||
"\n",
|
||||
"retriever = vector_store.as_retriever(search_type=\"similarity\", search_kwargs={\"k\": 3})\n",
|
||||
"\n",
|
||||
"retrieved_docs = retriever.get_relevant_documents(\"<your question>\")\n",
|
||||
"retrieved_docs = retriever.invoke(\"<your question>\")\n",
|
||||
"\n",
|
||||
"print(retrieved_docs[0].page_content)\n",
|
||||
"\n",
|
||||
|
||||
@@ -0,0 +1,80 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# RAG using Upstage Layout Analysis and Groundedness Check\n",
|
||||
"This example illustrates RAG using [Upstage](https://python.langchain.com/docs/integrations/providers/upstage/) Layout Analysis and Groundedness Check."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import List\n",
|
||||
"\n",
|
||||
"from langchain_community.vectorstores import DocArrayInMemorySearch\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.runnables import RunnablePassthrough\n",
|
||||
"from langchain_core.runnables.base import RunnableSerializable\n",
|
||||
"from langchain_upstage import (\n",
|
||||
" ChatUpstage,\n",
|
||||
" UpstageEmbeddings,\n",
|
||||
" UpstageGroundednessCheck,\n",
|
||||
" UpstageLayoutAnalysisLoader,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"model = ChatUpstage()\n",
|
||||
"\n",
|
||||
"files = [\"/PATH/TO/YOUR/FILE.pdf\", \"/PATH/TO/YOUR/FILE2.pdf\"]\n",
|
||||
"\n",
|
||||
"loader = UpstageLayoutAnalysisLoader(file_path=files, split=\"element\")\n",
|
||||
"\n",
|
||||
"docs = loader.load()\n",
|
||||
"\n",
|
||||
"vectorstore = DocArrayInMemorySearch.from_documents(docs, embedding=UpstageEmbeddings())\n",
|
||||
"retriever = vectorstore.as_retriever()\n",
|
||||
"\n",
|
||||
"template = \"\"\"Answer the question based only on the following context:\n",
|
||||
"{context}\n",
|
||||
"\n",
|
||||
"Question: {question}\n",
|
||||
"\"\"\"\n",
|
||||
"prompt = ChatPromptTemplate.from_template(template)\n",
|
||||
"output_parser = StrOutputParser()\n",
|
||||
"\n",
|
||||
"retrieved_docs = retriever.get_relevant_documents(\"How many parameters in SOLAR model?\")\n",
|
||||
"\n",
|
||||
"groundedness_check = UpstageGroundednessCheck()\n",
|
||||
"groundedness = \"\"\n",
|
||||
"while groundedness != \"grounded\":\n",
|
||||
" chain: RunnableSerializable = RunnablePassthrough() | prompt | model | output_parser\n",
|
||||
"\n",
|
||||
" result = chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"context\": retrieved_docs,\n",
|
||||
" \"question\": \"How many parameters in SOLAR model?\",\n",
|
||||
" }\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" groundedness = groundedness_check.invoke(\n",
|
||||
" {\n",
|
||||
" \"context\": retrieved_docs,\n",
|
||||
" \"answer\": result,\n",
|
||||
" }\n",
|
||||
" )"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -1227,7 +1227,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"results = retriever.get_relevant_documents(\n",
|
||||
"results = retriever.invoke(\n",
|
||||
" \"I want to stay somewhere highly rated along the coast. I want a room with a patio and a fireplace.\"\n",
|
||||
")\n",
|
||||
"for res in results:\n",
|
||||
|
||||
@@ -84,7 +84,7 @@
|
||||
" Applies the chatmodel to the message history\n",
|
||||
" and returns the message string\n",
|
||||
" \"\"\"\n",
|
||||
" message = self.model(\n",
|
||||
" message = self.model.invoke(\n",
|
||||
" [\n",
|
||||
" self.system_message,\n",
|
||||
" HumanMessage(content=\"\\n\".join(self.message_history + [self.prefix])),\n",
|
||||
|
||||
@@ -70,7 +70,7 @@
|
||||
" Applies the chatmodel to the message history\n",
|
||||
" and returns the message string\n",
|
||||
" \"\"\"\n",
|
||||
" message = self.model(\n",
|
||||
" message = self.model.invoke(\n",
|
||||
" [\n",
|
||||
" self.system_message,\n",
|
||||
" HumanMessage(content=\"\\n\".join(self.message_history + [self.prefix])),\n",
|
||||
|
||||
@@ -19,6 +19,9 @@ poetry run python scripts/copy_templates.py
|
||||
wget -q https://raw.githubusercontent.com/langchain-ai/langserve/main/README.md -O docs/langserve.md
|
||||
wget -q https://raw.githubusercontent.com/langchain-ai/langgraph/main/README.md -O docs/langgraph.md
|
||||
|
||||
yarn
|
||||
|
||||
poetry run quarto preview docs
|
||||
poetry run quarto render docs
|
||||
poetry run python scripts/generate_api_reference_links.py --docs_dir docs
|
||||
|
||||
yarn
|
||||
yarn start
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -194,7 +194,7 @@ Prompt templates convert raw user input to better input to the LLM.
|
||||
```python
|
||||
from langchain_core.prompts import ChatPromptTemplate
|
||||
prompt = ChatPromptTemplate.from_messages([
|
||||
("system", "You are world class technical documentation writer."),
|
||||
("system", "You are a world class technical documentation writer."),
|
||||
("user", "{input}")
|
||||
])
|
||||
```
|
||||
|
||||
@@ -9,7 +9,7 @@
|
||||
"\n",
|
||||
"This notebook shows how to prevent prompt injection attacks using the text classification model from `HuggingFace`.\n",
|
||||
"\n",
|
||||
"By default, it uses a *[laiyer/deberta-v3-base-prompt-injection](https://huggingface.co/laiyer/deberta-v3-base-prompt-injection)* model trained to identify prompt injections. \n",
|
||||
"By default, it uses a *[protectai/deberta-v3-base-prompt-injection-v2](https://huggingface.co/protectai/deberta-v3-base-prompt-injection-v2)* model trained to identify prompt injections. \n",
|
||||
"\n",
|
||||
"In this notebook, we will use the ONNX version of the model to speed up the inference. "
|
||||
]
|
||||
@@ -49,11 +49,15 @@
|
||||
"from optimum.onnxruntime import ORTModelForSequenceClassification\n",
|
||||
"from transformers import AutoTokenizer, pipeline\n",
|
||||
"\n",
|
||||
"# Using https://huggingface.co/laiyer/deberta-v3-base-prompt-injection\n",
|
||||
"model_path = \"laiyer/deberta-v3-base-prompt-injection\"\n",
|
||||
"tokenizer = AutoTokenizer.from_pretrained(model_path)\n",
|
||||
"tokenizer.model_input_names = [\"input_ids\", \"attention_mask\"] # Hack to run the model\n",
|
||||
"model = ORTModelForSequenceClassification.from_pretrained(model_path, subfolder=\"onnx\")\n",
|
||||
"# Using https://huggingface.co/protectai/deberta-v3-base-prompt-injection-v2\n",
|
||||
"model_path = \"laiyer/deberta-v3-base-prompt-injection-v2\"\n",
|
||||
"revision = None # We recommend specifiying the revision to avoid breaking changes or supply chain attacks\n",
|
||||
"tokenizer = AutoTokenizer.from_pretrained(\n",
|
||||
" model_path, revision=revision, model_input_names=[\"input_ids\", \"attention_mask\"]\n",
|
||||
")\n",
|
||||
"model = ORTModelForSequenceClassification.from_pretrained(\n",
|
||||
" model_path, revision=revision, subfolder=\"onnx\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"classifier = pipeline(\n",
|
||||
" \"text-classification\",\n",
|
||||
|
||||
@@ -194,7 +194,7 @@
|
||||
"llm = OpenAI(\n",
|
||||
" temperature=0, callbacks=[LabelStudioCallbackHandler(project_name=\"My Project\")]\n",
|
||||
")\n",
|
||||
"print(llm(\"Tell me a joke\"))"
|
||||
"print(llm.invoke(\"Tell me a joke\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -270,7 +270,7 @@
|
||||
" )\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"llm_results = chat_llm(\n",
|
||||
"llm_results = chat_llm.invoke(\n",
|
||||
" [\n",
|
||||
" SystemMessage(content=\"Always use a lot of emojis\"),\n",
|
||||
" HumanMessage(content=\"Tell me a joke\"),\n",
|
||||
|
||||
@@ -107,7 +107,7 @@ User tracking allows you to identify your users, track their cost, conversations
|
||||
from langchain_community.callbacks.llmonitor_callback import LLMonitorCallbackHandler, identify
|
||||
|
||||
with identify("user-123"):
|
||||
llm("Tell me a joke")
|
||||
llm.invoke("Tell me a joke")
|
||||
|
||||
with identify("user-456", user_props={"email": "user456@test.com"}):
|
||||
agen.run("Who is Leo DiCaprio's girlfriend?")
|
||||
|
||||
@@ -103,7 +103,7 @@
|
||||
" temperature=0,\n",
|
||||
" callbacks=[PromptLayerCallbackHandler(pl_tags=[\"chatopenai\"])],\n",
|
||||
")\n",
|
||||
"llm_results = chat_llm(\n",
|
||||
"llm_results = chat_llm.invoke(\n",
|
||||
" [\n",
|
||||
" HumanMessage(content=\"What comes after 1,2,3 ?\"),\n",
|
||||
" HumanMessage(content=\"Tell me another joke?\"),\n",
|
||||
@@ -129,10 +129,11 @@
|
||||
"from langchain_community.llms import GPT4All\n",
|
||||
"\n",
|
||||
"model = GPT4All(model=\"./models/gpt4all-model.bin\", n_ctx=512, n_threads=8)\n",
|
||||
"callbacks = [PromptLayerCallbackHandler(pl_tags=[\"langchain\", \"gpt4all\"])]\n",
|
||||
"\n",
|
||||
"response = model(\n",
|
||||
"response = model.invoke(\n",
|
||||
" \"Once upon a time, \",\n",
|
||||
" callbacks=[PromptLayerCallbackHandler(pl_tags=[\"langchain\", \"gpt4all\"])],\n",
|
||||
" config={\"callbacks\": callbacks},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@@ -181,7 +182,7 @@
|
||||
")\n",
|
||||
"\n",
|
||||
"example_prompt = promptlayer.prompts.get(\"example\", version=1, langchain=True)\n",
|
||||
"openai_llm(example_prompt.format(product=\"toasters\"))"
|
||||
"openai_llm.invoke(example_prompt.format(product=\"toasters\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -315,7 +315,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chat_res = chat_llm(\n",
|
||||
"chat_res = chat_llm.invoke(\n",
|
||||
" [\n",
|
||||
" SystemMessage(content=\"Every answer of yours must be about OpenAI.\"),\n",
|
||||
" HumanMessage(content=\"Tell me a joke\"),\n",
|
||||
|
||||
@@ -72,7 +72,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"output = chat([HumanMessage(content=\"write a funny joke\")])\n",
|
||||
"output = chat.invoke([HumanMessage(content=\"write a funny joke\")])\n",
|
||||
"print(\"output:\", output)"
|
||||
]
|
||||
},
|
||||
@@ -90,7 +90,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"kwargs = {\"temperature\": 0.8, \"top_p\": 0.8, \"top_k\": 5}\n",
|
||||
"output = chat([HumanMessage(content=\"write a funny joke\")], **kwargs)\n",
|
||||
"output = chat.invoke([HumanMessage(content=\"write a funny joke\")], **kwargs)\n",
|
||||
"print(\"output:\", output)"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -62,7 +62,7 @@
|
||||
"messages = [system_message, user_message]\n",
|
||||
"\n",
|
||||
"# chat with wasm-chat service\n",
|
||||
"response = chat(messages)\n",
|
||||
"response = chat.invoke(messages)\n",
|
||||
"\n",
|
||||
"print(f\"[Bot] {response.content}\")"
|
||||
]
|
||||
|
||||
@@ -184,7 +184,7 @@
|
||||
"\n",
|
||||
"query = \"Qual o tempo máximo para realização da prova?\"\n",
|
||||
"\n",
|
||||
"docs = retriever.get_relevant_documents(query)\n",
|
||||
"docs = retriever.invoke(query)\n",
|
||||
"\n",
|
||||
"chain.invoke(\n",
|
||||
" {\"input_documents\": docs, \"query\": query}\n",
|
||||
|
||||
@@ -142,11 +142,70 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
"source": [
|
||||
"## Tool Calling\n",
|
||||
"ChatTongyi supports tool calling API that lets you describe tools and their arguments, and have the model return a JSON object with a tool to invoke and the inputs to that tool."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='', additional_kwargs={'tool_calls': [{'function': {'name': 'get_current_weather', 'arguments': '{\"location\": \"San Francisco\"}'}, 'id': '', 'type': 'function'}]}, response_metadata={'model_name': 'qwen-turbo', 'finish_reason': 'tool_calls', 'request_id': 'dae79197-8780-9b7e-8c15-6a83e2a53534', 'token_usage': {'input_tokens': 229, 'output_tokens': 19, 'total_tokens': 248}}, id='run-9e06f837-582b-473b-bb1f-5e99a68ecc10-0', tool_calls=[{'name': 'get_current_weather', 'args': {'location': 'San Francisco'}, 'id': ''}])"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.chat_models.tongyi import ChatTongyi\n",
|
||||
"from langchain_core.messages import HumanMessage, SystemMessage\n",
|
||||
"\n",
|
||||
"tools = [\n",
|
||||
" {\n",
|
||||
" \"type\": \"function\",\n",
|
||||
" \"function\": {\n",
|
||||
" \"name\": \"get_current_time\",\n",
|
||||
" \"description\": \"当你想知道现在的时间时非常有用。\",\n",
|
||||
" \"parameters\": {},\n",
|
||||
" },\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"type\": \"function\",\n",
|
||||
" \"function\": {\n",
|
||||
" \"name\": \"get_current_weather\",\n",
|
||||
" \"description\": \"当你想查询指定城市的天气时非常有用。\",\n",
|
||||
" \"parameters\": {\n",
|
||||
" \"type\": \"object\",\n",
|
||||
" \"properties\": {\n",
|
||||
" \"location\": {\n",
|
||||
" \"type\": \"string\",\n",
|
||||
" \"description\": \"城市或县区,比如北京市、杭州市、余杭区等。\",\n",
|
||||
" }\n",
|
||||
" },\n",
|
||||
" },\n",
|
||||
" \"required\": [\"location\"],\n",
|
||||
" },\n",
|
||||
" },\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"messages = [\n",
|
||||
" SystemMessage(content=\"You are a helpful assistant.\"),\n",
|
||||
" HumanMessage(content=\"What is the weather like in San Francisco?\"),\n",
|
||||
"]\n",
|
||||
"chatLLM = ChatTongyi()\n",
|
||||
"llm_kwargs = {\"tools\": tools, \"result_format\": \"message\"}\n",
|
||||
"ai_message = chatLLM.bind(**llm_kwargs).invoke(messages)\n",
|
||||
"ai_message"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
|
||||
@@ -119,7 +119,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"response = chat(messages)\n",
|
||||
"response = chat.invoke(messages)\n",
|
||||
"print(response.content) # Displays the AI-generated poem"
|
||||
]
|
||||
},
|
||||
|
||||
122
docs/docs/integrations/document_loaders/browserbase.ipynb
Normal file
122
docs/docs/integrations/document_loaders/browserbase.ipynb
Normal file
@@ -0,0 +1,122 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Browserbase\n",
|
||||
"\n",
|
||||
"[Browserbase](https://browserbase.com) is a serverless platform for running headless browsers, it offers advanced debugging, session recordings, stealth mode, integrated proxies and captcha solving.\n",
|
||||
"\n",
|
||||
"## Installation\n",
|
||||
"\n",
|
||||
"- Get an API key from [browserbase.com](https://browserbase.com) and set it in environment variables (`BROWSERBASE_API_KEY`).\n",
|
||||
"- Install the [Browserbase SDK](http://github.com/browserbase/python-sdk):"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"% pip install browserbase"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Loading documents"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can load webpages into LangChain using `BrowserbaseLoader`. Optionally, you can set `text_content` parameter to convert the pages to text-only representation."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import BrowserbaseLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = BrowserbaseLoader(\n",
|
||||
" urls=[\n",
|
||||
" \"https://example.com\",\n",
|
||||
" ],\n",
|
||||
" # Text mode\n",
|
||||
" text_content=False,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"docs = loader.load()\n",
|
||||
"print(docs[0].page_content[:61])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Loading images\n",
|
||||
"\n",
|
||||
"You can also load screenshots of webpages (as bytes) for multi-modal models.\n",
|
||||
"\n",
|
||||
"Full example using GPT-4V:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from browserbase import Browserbase\n",
|
||||
"from browserbase.helpers.gpt4 import GPT4VImage, GPT4VImageDetail\n",
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"chat = ChatOpenAI(model=\"gpt-4-vision-preview\", max_tokens=256)\n",
|
||||
"browser = Browserbase()\n",
|
||||
"\n",
|
||||
"screenshot = browser.screenshot(\"https://browserbase.com\")\n",
|
||||
"\n",
|
||||
"result = chat.invoke(\n",
|
||||
" [\n",
|
||||
" HumanMessage(\n",
|
||||
" content=[\n",
|
||||
" {\"type\": \"text\", \"text\": \"What color is the logo?\"},\n",
|
||||
" GPT4VImage(screenshot, GPT4VImageDetail.auto),\n",
|
||||
" ]\n",
|
||||
" )\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"print(result.content)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python",
|
||||
"version": "3.9.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -630,7 +630,7 @@
|
||||
],
|
||||
"source": [
|
||||
"# Query retriever, should return parents (using MMR since that was set as search_type above)\n",
|
||||
"retrieved_parent_docs = retriever.get_relevant_documents(\n",
|
||||
"retrieved_parent_docs = retriever.invoke(\n",
|
||||
" \"what signs does Birch Street allow on their property?\"\n",
|
||||
")\n",
|
||||
"for chunk in retrieved_parent_docs:\n",
|
||||
|
||||
@@ -97,7 +97,7 @@
|
||||
" # delete the gpt-4 model_name to use the default gpt-3.5 turbo for faster results\n",
|
||||
" gpt_4 = ChatOpenAI(temperature=0.02, model_name=\"gpt-4\")\n",
|
||||
" # Use the retriever's 'get_relevant_documents' method if needed to filter down longer docs\n",
|
||||
" relevant_nodes = figma_doc_retriever.get_relevant_documents(human_input)\n",
|
||||
" relevant_nodes = figma_doc_retriever.invoke(human_input)\n",
|
||||
" conversation = [system_message_prompt, human_message_prompt]\n",
|
||||
" chat_prompt = ChatPromptTemplate.from_messages(conversation)\n",
|
||||
" response = gpt_4(\n",
|
||||
|
||||
@@ -50,7 +50,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import GoogleDriveLoader"
|
||||
"from langchain_google_community import GoogleDriveLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -339,7 +339,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import GoogleDriveLoader\n",
|
||||
"from langchain_google_community import GoogleDriveLoader\n",
|
||||
"\n",
|
||||
"loader = GoogleDriveLoader(\n",
|
||||
" folder_id=folder_id,\n",
|
||||
|
||||
125
docs/docs/integrations/document_loaders/kinetica.ipynb
Normal file
125
docs/docs/integrations/document_loaders/kinetica.ipynb
Normal file
@@ -0,0 +1,125 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Kinetica\n",
|
||||
"\n",
|
||||
"This notebooks goes over how to load documents from Kinetica"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install gpudb==7.2.0.1"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders.kinetica_loader import KineticaLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"## Loading Environment Variables\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"from dotenv import load_dotenv\n",
|
||||
"from langchain_community.vectorstores import (\n",
|
||||
" KineticaSettings,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"load_dotenv()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Kinetica needs the connection to the database.\n",
|
||||
"# This is how to set it up.\n",
|
||||
"HOST = os.getenv(\"KINETICA_HOST\", \"http://127.0.0.1:9191\")\n",
|
||||
"USERNAME = os.getenv(\"KINETICA_USERNAME\", \"\")\n",
|
||||
"PASSWORD = os.getenv(\"KINETICA_PASSWORD\", \"\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def create_config() -> KineticaSettings:\n",
|
||||
" return KineticaSettings(host=HOST, username=USERNAME, password=PASSWORD)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders.kinetica_loader import KineticaLoader\n",
|
||||
"\n",
|
||||
"# The following `QUERY` is an example which will not run; this\n",
|
||||
"# needs to be substituted with a valid `QUERY` that will return\n",
|
||||
"# data and the `SCHEMA.TABLE` combination must exist in Kinetica.\n",
|
||||
"\n",
|
||||
"QUERY = \"select text, survey_id from SCHEMA.TABLE limit 10\"\n",
|
||||
"kinetica_loader = KineticaLoader(\n",
|
||||
" QUERY,\n",
|
||||
" HOST,\n",
|
||||
" USERNAME,\n",
|
||||
" PASSWORD,\n",
|
||||
")\n",
|
||||
"kinetica_documents = kinetica_loader.load()\n",
|
||||
"print(kinetica_documents)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders.kinetica_loader import KineticaLoader\n",
|
||||
"\n",
|
||||
"# The following `QUERY` is an example which will not run; this\n",
|
||||
"# needs to be substituted with a valid `QUERY` that will return\n",
|
||||
"# data and the `SCHEMA.TABLE` combination must exist in Kinetica.\n",
|
||||
"\n",
|
||||
"QUERY = \"select text, survey_id as source from SCHEMA.TABLE limit 10\"\n",
|
||||
"snowflake_loader = KineticaLoader(\n",
|
||||
" query=QUERY,\n",
|
||||
" host=HOST,\n",
|
||||
" username=USERNAME,\n",
|
||||
" password=PASSWORD,\n",
|
||||
" metadata_columns=[\"source\"],\n",
|
||||
")\n",
|
||||
"kinetica_documents = snowflake_loader.load()\n",
|
||||
"print(kinetica_documents)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python",
|
||||
"version": "3.8.10"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -69,7 +69,7 @@
|
||||
"source": [
|
||||
"### Send semantic topics and identities to Pebblo cloud server\n",
|
||||
"\n",
|
||||
"To send semantic data to pebblo-cloud, pass api-key to PebbloSafeLoader as an argument or alternatively, put the api-ket in `PEBBLO_API_KEY` environment variable."
|
||||
"To send semantic data to pebblo-cloud, pass api-key to PebbloSafeLoader as an argument or alternatively, put the api-key in `PEBBLO_API_KEY` environment variable."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -91,6 +91,41 @@
|
||||
"documents = loader.load()\n",
|
||||
"print(documents)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Add semantic topics and identities to loaded metadata\n",
|
||||
"\n",
|
||||
"To add semantic topics and sematic entities to metadata of loaded documents, set load_semantic to True as an argument or alternatively, define a new environment variable `PEBBLO_LOAD_SEMANTIC`, and setting it to True."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders.csv_loader import CSVLoader\n",
|
||||
"from langchain_community.document_loaders import PebbloSafeLoader\n",
|
||||
"\n",
|
||||
"loader = PebbloSafeLoader(\n",
|
||||
" CSVLoader(\"data/corp_sens_data.csv\"),\n",
|
||||
" name=\"acme-corp-rag-1\", # App name (Mandatory)\n",
|
||||
" owner=\"Joe Smith\", # Owner (Optional)\n",
|
||||
" description=\"Support productivity RAG application\", # Description (Optional)\n",
|
||||
" api_key=\"my-api-key\", # API key (Optional, can be set in the environment variable PEBBLO_API_KEY)\n",
|
||||
" load_semantic=True, # Load semantic data (Optional, default is False, can be set in the environment variable PEBBLO_LOAD_SEMANTIC)\n",
|
||||
")\n",
|
||||
"documents = loader.load()\n",
|
||||
"print(documents[0].metadata)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
|
||||
@@ -99,7 +99,7 @@
|
||||
],
|
||||
"source": [
|
||||
"# Test the retriever\n",
|
||||
"spreedly_doc_retriever.get_relevant_documents(\"CRC\")"
|
||||
"spreedly_doc_retriever.invoke(\"CRC\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
120
docs/docs/integrations/document_loaders/upstage.ipynb
Normal file
120
docs/docs/integrations/document_loaders/upstage.ipynb
Normal file
@@ -0,0 +1,120 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "910f5772b6af13c9",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Upstage\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "433f5422ad8e1efa",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"source": [
|
||||
"# UpstageLayoutAnalysisLoader\n",
|
||||
"\n",
|
||||
"This notebook covers how to get started with `UpstageLayoutAnalysisLoader`.\n",
|
||||
"\n",
|
||||
"## Installation\n",
|
||||
"\n",
|
||||
"Install `langchain-upstage` package.\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"pip install -U langchain-upstage\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e6e5941c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Environment Setup\n",
|
||||
"\n",
|
||||
"Make sure to set the following environment variables:\n",
|
||||
"\n",
|
||||
"- `UPSTAGE_DOCUMENT_AI_API_KEY`: Your Upstage Document AI API key. Read [Upstage developers document](https://developers.upstage.ai/docs/getting-started/quick-start) to get your API key.\n",
|
||||
"\n",
|
||||
"> As of April 2024, you need separate access tokens for Solar and Layout Analysis. The access tokens will be consolidated soon (hopefully in May) and you'll need just one key for all features."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "21e72f3d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Usage"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a05efd34",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"UPSTAGE_DOCUMENT_AI_API_KEY\"] = \"YOUR_API_KEY\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "2b914a7b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"page_content='SOLAR 10.7B: Scaling Large Language Models with Simple yet Effective\\nDepth Up-Scaling Dahyun Kim* , Chanjun Park*1, Sanghoon Kim*+, Wonsung Lee*†, Wonho Song*\\nYunsu Kim* , Hyeonwoo Kim* , Yungi Kim, Hyeonju Lee, Jihoo Kim\\nChangbae Ahn, Seonghoon Yang, Sukyung Lee, Hyunbyung Park, Gyoungjin Gim\\nMikyoung Cha, Hwalsuk Leet , Sunghun Kim+ Upstage AI, South Korea {kdahyun, chan jun · park, limerobot, wonsung · lee, hwalsuk lee, hunkim} @ upstage · ai Abstract We introduce SOLAR 10.7B, a large language\\nmodel (LLM) with 10.7 billion parameters,\\ndemonstrating superior performance in various\\nnatural language processing (NLP) tasks. In-\\nspired by recent efforts to efficiently up-scale\\nLLMs, we present a method for scaling LLMs\\ncalled depth up-scaling (DUS), which encom-\\npasses depthwise scaling and continued pre-\\ntraining. In contrast to other LLM up-scaling\\nmethods that use mixture-of-experts, DUS does\\nnot require complex changes to train and infer-\\nence efficiently. We show experimentally that\\nDUS is simple yet effective in scaling up high-\\nperformance LLMs from small ones. Building\\non the DUS model, we additionally present SO-\\nLAR 10.7B-Instruct, a variant fine-tuned for\\ninstruction-following capabilities, surpassing\\nMixtral-8x7B-Instruct. SOLAR 10.7B is pub-\\nlicly available under the Apache 2.0 license,\\npromoting broad access and application in the\\nLLM field 1 1 Introduction The field of natural language processing (NLP)\\nhas been significantly transformed by the introduc-\\ntion of large language models (LLMs), which have\\nenhanced our understanding and interaction with\\nhuman language (Zhao et al., 2023). These ad-\\nvancements bring challenges such as the increased\\nneed to train ever larger models (Rae et al., 2021;\\nWang et al., 2023; Pan et al., 2023; Lian, 2023;\\nYao et al., 2023; Gesmundo and Maile, 2023) OW-\\ning to the performance scaling law (Kaplan et al.,\\n2020; Hernandez et al., 2021; Anil et al., 2023;\\nKaddour et al., 2023). To efficiently tackle the\\nabove, recent works in scaling language models\\nsuch as a mixture of experts (MoE) (Shazeer et al.,\\n2017; Komatsuzaki et al., 2022) have been pro-\\nposed. While those approaches are able to effi- ciently and effectively scale-up LLMs, they often\\nrequire non-trivial changes to the training and infer-\\nence framework (Gale et al., 2023), which hinders\\nwidespread applicability. Effectively and efficiently\\nscaling up LLMs whilst also retaining the simplic-\\nity for ease of use is an important problem (Alberts\\net al., 2023; Fraiwan and Khasawneh, 2023; Sallam\\net al., 2023; Bahrini et al., 2023). Inspired by Komatsuzaki et al. (2022), we\\npresent depth up-scaling (DUS), an effective and\\nefficient method to up-scale LLMs whilst also re-\\nmaining straightforward to use. DUS consists of\\nscaling the number of layers in the base model and\\ncontinually pretraining the scaled model. Unlike\\n(Komatsuzaki et al., 2022), DUS does not scale\\nthe model using MoE and rather use a depthwise\\nscaling method analogous to Tan and Le (2019)\\nwhich is adapted for the LLM architecture. Thus,\\nthere are no additional modules or dynamism as\\nwith MoE, making DUS immediately compatible\\nwith easy-to-use LLM frameworks such as Hug-\\ngingFace (Wolf et al., 2019) with no changes to\\nthe training or inference framework for maximal\\nefficiency. Furthermore, DUS is applicable to all\\ntransformer architectures, opening up new gate-\\nways to effectively and efficiently scale-up LLMs\\nin a simple manner. Using DUS, we release SO-\\nLAR 10.7B, an LLM with 10.7 billion parameters,\\nthat outperforms existing models like Llama 2 (Tou-\\nvron et al., 2023) and Mistral 7B (Jiang et al., 2023)\\nin various benchmarks. We have also developed SOLAR 10.7B-Instruct,\\na variant fine-tuned for tasks requiring strict adher-\\nence to complex instructions. It significantly out-\\nperforms the Mixtral-8x7B-Instruct model across\\nvarious evaluation metrics, evidencing an advanced\\nproficiency that exceeds the capabilities of even\\nlarger models in terms of benchmark performance. * Equal Contribution 1 Corresponding Author\\nhttps : / /huggingface.co/upstage/\\nSOLAR-1 0 · 7B-v1 . 0 By releasing SOLAR 10.7B under the Apache\\n2.0 license, we aim to promote collaboration and in-\\nnovation in NLP. This open-source approach allows 2024\\nApr\\n4\\n[cs.CL]\\narxiv:2...117.7.13' metadata={'page': 1, 'type': 'text', 'split': 'page'}\n",
|
||||
"page_content=\"Step 1-1 Step 1-2\\nOutput Output Output\\nOutput Output Output\\n24 Layers 24Layers\\nMerge\\n8Layers\\n---- 48 Layers\\nCopy\\n8 Layers Continued\\n32Layers 32Layers\\nPretraining\\n24Layers\\n24 Layers Input\\nInput Input Input Input Input\\nStep 1. Depthwise Scaling Step2. Continued Pretraining Figure 1: Depth up-scaling for the case with n = 32, s = 48, and m = 8. Depth up-scaling is achieved through a\\ndual-stage process of depthwise scaling followed by continued pretraining. for wider access and application of these models\\nby researchers and developers globally. 2 Depth Up-Scaling To efficiently scale-up LLMs, we aim to utilize pre-\\ntrained weights of base models to scale up to larger\\nLLMs (Komatsuzaki et al., 2022). While exist-\\ning methods such as Komatsuzaki et al. (2022) use\\nMoE (Shazeer et al., 2017) to scale-up the model ar-\\nchitecture, we opt for a different depthwise scaling\\nstrategy inspired by Tan and Le (2019). We then\\ncontinually pretrain the scaled model as just scaling\\nthe model without further pretraining degrades the\\nperformance. Base model. Any n-layer transformer architec-\\nture can be used but we select the 32-layer Llama\\n2 architecture as our base model. We initialize the\\nLlama 2 architecture with pretrained weights from\\nMistral 7B, as it is one of the top performers com-\\npatible with the Llama 2 architecture. By adopting\\nthe Llama 2 architecture for our base model, we\\naim to leverage the vast pool of community re-\\nsources while introducing novel modifications to\\nfurther enhance its capabilities. Depthwise scaling. From the base model with n\\nlayers, we set the target layer count s for the scaled\\nmodel, which is largely dictated by the available\\nhardware. With the above, the depthwise scaling process\\nis as follows. The base model with n layers is\\nduplicated for subsequent modification. Then, we\\nremove the final m layers from the original model\\nand the initial m layers from its duplicate, thus\\nforming two distinct models with n - m layers.\\nThese two models are concatenated to form a scaled\\nmodel with s = 2· (n-m) layers. Note that n = 32\\nfrom our base model and we set s = 48 considering our hardware constraints and the efficiency of the\\nscaled model, i.e., fitting between 7 and 13 billion\\nparameters. Naturally, this leads to the removal of\\nm = 8 layers. The depthwise scaling process with\\nn = 32, s = 48, and m = 8 is depicted in 'Step 1:\\nDepthwise Scaling' of Fig. 1. We note that a method in the community that also\\n2 'Step 1:\\nscale the model in the same manner as\\nDepthwise Scaling' of Fig. 1 has been concurrently\\ndeveloped. Continued pretraining. The performance of the\\ndepthwise scaled model initially drops below that\\nof the base LLM. Thus, we additionally apply\\nthe continued pretraining step as shown in 'Step\\n2: Continued Pretraining' of Fig. 1. Experimen-\\ntally, we observe rapid performance recovery of\\nthe scaled model during continued pretraining, a\\nphenomenon also observed in Komatsuzaki et al.\\n(2022). We consider that the particular way of\\ndepthwise scaling has isolated the heterogeneity\\nin the scaled model which allowed for this fast\\nperformance recovery. Delving deeper into the heterogeneity of the\\nscaled model, a simpler alternative to depthwise\\nscaling could be to just repeat its layers once more,\\ni.e., from n to 2n layers. Then, the 'layer distance',\\nor the difference in the layer indices in the base\\nmodel, is only bigger than 1 where layers n and\\nn + 1 are connected, i.e., at the seam. However, this results in maximum layer distance\\nat the seam, which may be too significant of a\\ndiscrepancy for continued pretraining to quickly\\nresolve. Instead, depthwise scaling sacrifices the\\n2m middle layers, thereby reducing the discrep-\\nancy at the seam and making it easier for continued 2https : / /huggingface · co/Undi 95/\\nMistral-11B-v0 · 1\" metadata={'page': 2, 'type': 'text', 'split': 'page'}\n",
|
||||
"page_content=\"Properties Instruction Training Datasets Alignment\\n Alpaca-GPT4 OpenOrca Synth. Math-Instruct Orca DPO Pairs Ultrafeedback Cleaned Synth. Math-Alignment\\n Total # Samples 52K 2.91M 126K 12.9K 60.8K 126K\\n Maximum # Samples Used 52K 100K 52K 12.9K 60.8K 20.1K\\n Open Source O O X O O Table 1: Training datasets used for the instruction and alignment tuning stages, respectively. For the instruction\\ntuning process, we utilized the Alpaca-GPT4 (Peng et al., 2023), OpenOrca (Mukherjee et al., 2023), and Synth.\\nMath-Instruct datasets, while for the alignment tuning, we employed the Orca DPO Pairs (Intel, 2023), Ultrafeedback\\nCleaned (Cui et al., 2023; Ivison et al., 2023), and Synth. Math-Alignment datasets. The 'Total # Samples indicates\\nthe total number of samples in the entire dataset. The 'Maximum # Samples Used' indicates the actual maximum\\nnumber of samples that were used in training, which could be lower than the total number of samples in a given\\ndataset. 'Open Source' indicates whether the dataset is open-sourced. pretraining to quickly recover performance. We\\nattribute the success of DUS to reducing such dis-\\ncrepancies in both the depthwise scaling and the\\ncontinued pretraining steps. We also hypothesize\\nthat other methods of depthwise scaling could also\\nwork for DUS, as long as the discrepancy in the\\nscaled model is sufficiently contained before the\\ncontinued pretraining step. Comparison to other up-scaling methods. Un-\\nlike Komatsuzaki et al. (2022), depthwise scaled\\nmodels do not require additional modules like gat-\\ning networks or dynamic expert selection. Conse-\\nquently, scaled models in DUS do not necessitate\\na distinct training framework for optimal training\\nefficiency, nor do they require specialized CUDA\\nkernels for fast inference. A DUS model can seam-\\nlessly integrate into existing training and inference\\nframeworks while maintaining high efficiency. 3 Training Details After DUS, including continued pretraining, we\\nperform fine-tuning of SOLAR 10.7B in two stages:\\n1) instruction tuning and 2) alignment tuning. Instruction tuning. In the instruction tuning\\nstage, the model is trained to follow instructions in\\na QA format (Zhang et al., 2023). We mostly use\\nopen-source datasets but also synthesize a math QA\\ndataset to enhance the model's mathematical capa-\\nbilities. A rundown of how we crafted the dataset is\\nas follows. First, seed math data are collected from\\nthe Math (Hendrycks et al., 2021) dataset only, to\\navoid contamination with commonly used bench-\\nmark datasets such as GSM8K (Cobbe et al., 2021).\\nThen, using a process similar to MetaMath (Yu\\net al., 2023), we rephrase the questions and an-\\nswers of the seed math data. We use the resulting\\nrephrased question-answer pairs as a QA dataset and call it 'Synth. Math-Instruct*. Alignment tuning. In the alignment tuning stage,\\nthe instruction-tuned model is further fine-tuned\\nto be more aligned with human or strong AI\\n(e.g., GPT4 (OpenAI, 2023)) preferences using\\nsDPO (Kim et al., 2024a), an improved version\\nof direct preference optimization (DPO) (Rafailov\\net al., 2023). Similar to the instruction tuning stage,\\nwe use mostly open-source datasets but also syn-\\nthesize a math-focused alignment dataset utilizing\\nthe 'Synth. Math-Instruct' dataset mentioned in the\\ninstruction tuning stage. The alignment data synthesis process is as\\nfollows. We take advantage of the fact that\\nthe rephrased question-answer pairs in Synth.\\nMath-Instruct data are beneficial in enhancing the\\nmodel's mathematical capabilities (see Sec. 4.3.1).\\nThus, we speculate that the rephrased answer to the\\nrephrased question is a better answer than the orig-\\ninal answer, possibly due to the interim rephrasing\\nstep. Consequently, we set the rephrased question\\nas the prompt and use the rephrased answer as the\\nchosen response and the original answer as the re-\\njected response and create the {prompt, chosen,\\nrejected} DPO tuple. We aggregate the tuples from\\nthe rephrased question-answer pairs and call the\\nresulting dataset 'Synth. Math-Alignment*. 4 Results 4.1 Experimental Details Training datasets. We present details regarding\\nour training datasets for the instruction and align-\\nment tuning stages in Tab. 1. We do not always\\nuse the entire dataset and instead subsample a set\\namount. Note that most of our training data is\\nopen-source, and the undisclosed datasets can be\\nsubstituted for open-source alternatives such as the\" metadata={'page': 3, 'type': 'text', 'split': 'page'}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_upstage import UpstageLayoutAnalysisLoader\n",
|
||||
"\n",
|
||||
"file_path = \"/PATH/TO/YOUR/FILE.pdf\"\n",
|
||||
"layzer = UpstageLayoutAnalysisLoader(file_path, split=\"page\")\n",
|
||||
"\n",
|
||||
"# For improved memory efficiency, consider using the lazy_load method to load documents page by page.\n",
|
||||
"docs = layzer.load() # or layzer.lazy_load()\n",
|
||||
"\n",
|
||||
"for doc in docs[:3]:\n",
|
||||
" print(doc)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.14"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -82,7 +82,7 @@
|
||||
")\n",
|
||||
"\n",
|
||||
"query = \"What is the plan for the economy?\"\n",
|
||||
"docs = retriever.get_relevant_documents(query)\n",
|
||||
"docs = retriever.invoke(query)\n",
|
||||
"pretty_print_docs(docs)"
|
||||
]
|
||||
},
|
||||
@@ -162,9 +162,7 @@
|
||||
" base_compressor=compressor, base_retriever=retriever\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"compressed_docs = compression_retriever.get_relevant_documents(\n",
|
||||
" \"What is the plan for the economy?\"\n",
|
||||
")\n",
|
||||
"compressed_docs = compression_retriever.invoke(\"What is the plan for the economy?\")\n",
|
||||
"pretty_print_docs(compressed_docs)"
|
||||
]
|
||||
},
|
||||
|
||||
254
docs/docs/integrations/document_transformers/jina_rerank.ipynb
Normal file
254
docs/docs/integrations/document_transformers/jina_rerank.ipynb
Normal file
@@ -0,0 +1,254 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f6ff09ab-c736-4a18-a717-563b4e29d22d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Jina Reranker"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1288789a-4c30-4fc3-90c7-dd1741a2550b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This notebook shows how to use Jina Reranker for document compression and retrieval."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a0e4d52e-3968-4f8b-9865-a886f27e5feb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain langchain-openai langchain-community langchain-text-splitters langchainhub\n",
|
||||
"\n",
|
||||
"%pip install --upgrade --quiet faiss\n",
|
||||
"\n",
|
||||
"# OR (depending on Python version)\n",
|
||||
"\n",
|
||||
"%pip install --upgrade --quiet faiss_cpu"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "d1fc07a6-8e01-4aa5-8ed4-ca2b0bfca70c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Helper function for printing docs\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def pretty_print_docs(docs):\n",
|
||||
" print(\n",
|
||||
" f\"\\n{'-' * 100}\\n\".join(\n",
|
||||
" [f\"Document {i+1}:\\n\\n\" + d.page_content for i, d in enumerate(docs)]\n",
|
||||
" )\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d8ec4823-fdc1-4339-8a25-da598a1e2a4c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Set up the base vector store retriever"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9db25269-e798-496f-8fb9-2bb280735118",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's start by initializing a simple vector store retriever and storing the 2023 State of the Union speech (in chunks). We can set up the retriever to retrieve a high number (20) of docs."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ce01a2b5-d7f4-4902-9156-9a3a86704f40",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"##### Set the Jina and OpenAI API keys"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "6692d5c5-c84a-4d42-8dd8-5ce90ff56d20",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n",
|
||||
"os.environ[\"JINA_API_KEY\"] = getpass.getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "981159af-fa3c-4f75-adb4-1a4de1950f2f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import TextLoader\n",
|
||||
"from langchain_community.embeddings import JinaEmbeddings\n",
|
||||
"from langchain_community.vectorstores import FAISS\n",
|
||||
"from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
|
||||
"\n",
|
||||
"documents = TextLoader(\n",
|
||||
" \"../../modules/state_of_the_union.txt\",\n",
|
||||
").load()\n",
|
||||
"text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100)\n",
|
||||
"texts = text_splitter.split_documents(documents)\n",
|
||||
"\n",
|
||||
"embedding = JinaEmbeddings(model_name=\"jina-embeddings-v2-base-en\")\n",
|
||||
"retriever = FAISS.from_documents(texts, embedding).as_retriever(search_kwargs={\"k\": 20})\n",
|
||||
"\n",
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"docs = retriever.get_relevant_documents(query)\n",
|
||||
"pretty_print_docs(docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b5a514b7-027a-4dd4-9cfc-63fb4d50aa66",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Doing reranking with JinaRerank"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "bdd9e0ca-d728-42cb-88ad-459fb8a56b33",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now let's wrap our base retriever with a ContextualCompressionRetriever, using Jina Reranker as a compressor."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "3000019e-cc0d-4365-91d0-72247ee4d624",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.retrievers import ContextualCompressionRetriever\n",
|
||||
"from langchain_community.document_compressors import JinaRerank\n",
|
||||
"\n",
|
||||
"compressor = JinaRerank()\n",
|
||||
"compression_retriever = ContextualCompressionRetriever(\n",
|
||||
" base_compressor=compressor, base_retriever=retriever\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"compressed_docs = compression_retriever.get_relevant_documents(\n",
|
||||
" \"What did the president say about Ketanji Jackson Brown\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f314f74c-48a9-4243-8d3c-2b7f820e1e40",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pretty_print_docs(compressed_docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "87164f04-194b-4138-8d94-f179f6f34a31",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## QA reranking with Jina Reranker"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "2b4ab60b-5a26-4cfb-9b58-3dc2d83b772b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"================================\u001b[1m System Message \u001b[0m================================\n",
|
||||
"\n",
|
||||
"Answer any use questions based solely on the context below:\n",
|
||||
"\n",
|
||||
"<context>\n",
|
||||
"\u001b[33;1m\u001b[1;3m{context}\u001b[0m\n",
|
||||
"</context>\n",
|
||||
"\n",
|
||||
"=============================\u001b[1m Messages Placeholder \u001b[0m=============================\n",
|
||||
"\n",
|
||||
"\u001b[33;1m\u001b[1;3m{chat_history}\u001b[0m\n",
|
||||
"\n",
|
||||
"================================\u001b[1m Human Message \u001b[0m=================================\n",
|
||||
"\n",
|
||||
"\u001b[33;1m\u001b[1;3m{input}\u001b[0m\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain import hub\n",
|
||||
"from langchain.chains import create_retrieval_chain\n",
|
||||
"from langchain.chains.combine_documents import create_stuff_documents_chain\n",
|
||||
"\n",
|
||||
"retrieval_qa_chat_prompt = hub.pull(\"langchain-ai/retrieval-qa-chat\")\n",
|
||||
"retrieval_qa_chat_prompt.pretty_print()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "72af3eb3-b644-4b5f-bf5f-f1dc43c96882",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)\n",
|
||||
"combine_docs_chain = create_stuff_documents_chain(llm, retrieval_qa_chat_prompt)\n",
|
||||
"chain = create_retrieval_chain(compression_retriever, combine_docs_chain)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "126401a7-c545-4de0-92dc-e9bc1001a6ba",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain.invoke({\"input\": query})"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "poetry-venv-2",
|
||||
"language": "python",
|
||||
"name": "poetry-venv-2"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -350,7 +350,7 @@
|
||||
"retriever = FAISS.from_documents(texts, embedding).as_retriever(search_kwargs={\"k\": 20})\n",
|
||||
"\n",
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"docs = retriever.get_relevant_documents(query)\n",
|
||||
"docs = retriever.invoke(query)\n",
|
||||
"pretty_print_docs(docs)"
|
||||
]
|
||||
},
|
||||
@@ -388,7 +388,7 @@
|
||||
" base_compressor=ov_compressor, base_retriever=retriever\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"compressed_docs = compression_retriever.get_relevant_documents(\n",
|
||||
"compressed_docs = compression_retriever.invoke(\n",
|
||||
" \"What did the president say about Ketanji Jackson Brown\"\n",
|
||||
")\n",
|
||||
"print([doc.metadata[\"id\"] for doc in compressed_docs])"
|
||||
|
||||
@@ -84,7 +84,13 @@
|
||||
},
|
||||
"source": [
|
||||
"## Set up the base vector store retriever\n",
|
||||
"Let's start by initializing a simple vector store retriever and storing the 2023 State of the Union speech (in chunks). We can set up the retriever to retrieve a high number (20) of docs."
|
||||
"Let's start by initializing a simple vector store retriever and storing the 2023 State of the Union speech (in chunks). We can set up the retriever to retrieve a high number (20) of docs. You can use any of the following Embeddings models: ([source](https://docs.voyageai.com/docs/embeddings)):\n",
|
||||
"\n",
|
||||
"- `voyage-large-2` (default)\n",
|
||||
"- `voyage-code-2`\n",
|
||||
"- `voyage-2`\n",
|
||||
"- `voyage-law-2`\n",
|
||||
"- `voyage-lite-02-instruct`"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -316,11 +322,11 @@
|
||||
"text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100)\n",
|
||||
"texts = text_splitter.split_documents(documents)\n",
|
||||
"retriever = FAISS.from_documents(\n",
|
||||
" texts, VoyageAIEmbeddings(model=\"voyage-2\")\n",
|
||||
" texts, VoyageAIEmbeddings(model=\"voyage-law-2\")\n",
|
||||
").as_retriever(search_kwargs={\"k\": 20})\n",
|
||||
"\n",
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"docs = retriever.get_relevant_documents(query)\n",
|
||||
"docs = retriever.invoke(query)\n",
|
||||
"pretty_print_docs(docs)"
|
||||
]
|
||||
},
|
||||
@@ -382,7 +388,7 @@
|
||||
" base_compressor=compressor, base_retriever=retriever\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"compressed_docs = compression_retriever.get_relevant_documents(\n",
|
||||
"compressed_docs = compression_retriever.invoke(\n",
|
||||
" \"What did the president say about Ketanji Jackson Brown\"\n",
|
||||
")\n",
|
||||
"pretty_print_docs(compressed_docs)"
|
||||
|
||||
689
docs/docs/integrations/graphs/apache_age.ipynb
Normal file
689
docs/docs/integrations/graphs/apache_age.ipynb
Normal file
@@ -0,0 +1,689 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c94240f5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Apache AGE\n",
|
||||
"\n",
|
||||
">[Apache AGE](https://age.apache.org/) is a PostgreSQL extension that provides graph database functionality. AGE is an acronym for A Graph Extension, and is inspired by Bitnine’s fork of PostgreSQL 10, AgensGraph, which is a multi-model database. The goal of the project is to create single storage that can handle both relational and graph model data so that users can use standard ANSI SQL along with openCypher, the Graph query language. The data elements `Apache AGE` stores are nodes, edges connecting them, and attributes of nodes and edges.\n",
|
||||
"\n",
|
||||
">This notebook shows how to use LLMs to provide a natural language interface to a graph database you can query with the `Cypher` query language.\n",
|
||||
"\n",
|
||||
">[Cypher](https://en.wikipedia.org/wiki/Cypher_(query_language)) is a declarative graph query language that allows for expressive and efficient data querying in a property graph.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "dbc0ee68",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setting up\n",
|
||||
"\n",
|
||||
"You will need to have a running `Postgre` instance with the AGE extension installed. One option for testing is to run a docker container using the official AGE docker image.\n",
|
||||
"You can run a local docker container by running the executing the following script:\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"docker run \\\n",
|
||||
" --name age \\\n",
|
||||
" -p 5432:5432 \\\n",
|
||||
" -e POSTGRES_USER=postgresUser \\\n",
|
||||
" -e POSTGRES_PASSWORD=postgresPW \\\n",
|
||||
" -e POSTGRES_DB=postgresDB \\\n",
|
||||
" -d \\\n",
|
||||
" apache/age\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Additional instructions on running in docker can be found [here](https://hub.docker.com/r/apache/age)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "62812aad",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import GraphCypherQAChain\n",
|
||||
"from langchain_community.graphs.age_graph import AGEGraph\n",
|
||||
"from langchain_openai import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "0928915d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"conf = {\n",
|
||||
" \"database\": \"postgresDB\",\n",
|
||||
" \"user\": \"postgresUser\",\n",
|
||||
" \"password\": \"postgresPW\",\n",
|
||||
" \"host\": \"localhost\",\n",
|
||||
" \"port\": 5432,\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"graph = AGEGraph(graph_name=\"age_test\", conf=conf)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "995ea9b9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Seeding the database\n",
|
||||
"\n",
|
||||
"Assuming your database is empty, you can populate it using Cypher query language. The following Cypher statement is idempotent, which means the database information will be the same if you run it one or multiple times."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "fedd26b9",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[]"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"graph.query(\n",
|
||||
" \"\"\"\n",
|
||||
"MERGE (m:Movie {name:\"Top Gun\"})\n",
|
||||
"WITH m\n",
|
||||
"UNWIND [\"Tom Cruise\", \"Val Kilmer\", \"Anthony Edwards\", \"Meg Ryan\"] AS actor\n",
|
||||
"MERGE (a:Actor {name:actor})\n",
|
||||
"MERGE (a)-[:ACTED_IN]->(m)\n",
|
||||
"\"\"\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "58c1a8ea",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Refresh graph schema information\n",
|
||||
"If the schema of database changes, you can refresh the schema information needed to generate Cypher statements."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "4e3de44f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"graph.refresh_schema()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "1fe76ccd",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
" Node properties are the following:\n",
|
||||
" [{'properties': [{'property': 'name', 'type': 'STRING'}], 'labels': 'Actor'}, {'properties': [{'property': 'property_a', 'type': 'STRING'}], 'labels': 'LabelA'}, {'properties': [], 'labels': 'LabelB'}, {'properties': [], 'labels': 'LabelC'}, {'properties': [{'property': 'name', 'type': 'STRING'}], 'labels': 'Movie'}]\n",
|
||||
" Relationship properties are the following:\n",
|
||||
" [{'properties': [], 'type': 'ACTED_IN'}, {'properties': [{'property': 'rel_prop', 'type': 'STRING'}], 'type': 'REL_TYPE'}]\n",
|
||||
" The relationships are the following:\n",
|
||||
" ['(:`Actor`)-[:`ACTED_IN`]->(:`Movie`)', '(:`LabelA`)-[:`REL_TYPE`]->(:`LabelB`)', '(:`LabelA`)-[:`REL_TYPE`]->(:`LabelC`)']\n",
|
||||
" \n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(graph.schema)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "68a3c677",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Querying the graph\n",
|
||||
"\n",
|
||||
"We can now use the graph cypher QA chain to ask question of the graph"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "7476ce98",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = GraphCypherQAChain.from_llm(\n",
|
||||
" ChatOpenAI(temperature=0), graph=graph, verbose=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "ef8ee27b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new GraphCypherQAChain chain...\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Generated Cypher:\n",
|
||||
"\u001b[32;1m\u001b[1;3mMATCH (a:Actor)-[:ACTED_IN]->(m:Movie)\n",
|
||||
"WHERE m.name = 'Top Gun'\n",
|
||||
"RETURN a.name\u001b[0m\n",
|
||||
"Full Context:\n",
|
||||
"\u001b[32;1m\u001b[1;3m[{'name': 'Tom Cruise'}, {'name': 'Val Kilmer'}, {'name': 'Anthony Edwards'}, {'name': 'Meg Ryan'}]\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'query': 'Who played in Top Gun?',\n",
|
||||
" 'result': 'Tom Cruise, Val Kilmer, Anthony Edwards, Meg Ryan played in Top Gun.'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.invoke(\"Who played in Top Gun?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2d28c4df",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Limit the number of results\n",
|
||||
"You can limit the number of results from the Cypher QA Chain using the `top_k` parameter.\n",
|
||||
"The default is 10."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "df230946",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = GraphCypherQAChain.from_llm(\n",
|
||||
" ChatOpenAI(temperature=0), graph=graph, verbose=True, top_k=2\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "3f1600ee",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new GraphCypherQAChain chain...\u001b[0m\n",
|
||||
"Generated Cypher:\n",
|
||||
"\u001b[32;1m\u001b[1;3mMATCH (a:Actor)-[:ACTED_IN]->(m:Movie {name: 'Top Gun'})\n",
|
||||
"RETURN a.name\u001b[0m\n",
|
||||
"Full Context:\n",
|
||||
"\u001b[32;1m\u001b[1;3m[{'name': 'Tom Cruise'}, {'name': 'Val Kilmer'}]\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'query': 'Who played in Top Gun?',\n",
|
||||
" 'result': 'Tom Cruise, Val Kilmer played in Top Gun.'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.invoke(\"Who played in Top Gun?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "88c16206",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Return intermediate results\n",
|
||||
"You can return intermediate steps from the Cypher QA Chain using the `return_intermediate_steps` parameter"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"id": "e412f36b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = GraphCypherQAChain.from_llm(\n",
|
||||
" ChatOpenAI(temperature=0), graph=graph, verbose=True, return_intermediate_steps=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 23,
|
||||
"id": "4f4699dc",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new GraphCypherQAChain chain...\u001b[0m\n",
|
||||
"Generated Cypher:\n",
|
||||
"\u001b[32;1m\u001b[1;3mMATCH (a:Actor)-[:ACTED_IN]->(m:Movie)\n",
|
||||
"WHERE m.name = 'Top Gun'\n",
|
||||
"RETURN a.name\u001b[0m\n",
|
||||
"Full Context:\n",
|
||||
"\u001b[32;1m\u001b[1;3m[{'name': 'Tom Cruise'}, {'name': 'Val Kilmer'}, {'name': 'Anthony Edwards'}, {'name': 'Meg Ryan'}]\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"Intermediate steps: [{'query': \"MATCH (a:Actor)-[:ACTED_IN]->(m:Movie)\\nWHERE m.name = 'Top Gun'\\nRETURN a.name\"}, {'context': [{'name': 'Tom Cruise'}, {'name': 'Val Kilmer'}, {'name': 'Anthony Edwards'}, {'name': 'Meg Ryan'}]}]\n",
|
||||
"Final answer: Tom Cruise, Val Kilmer, Anthony Edwards, Meg Ryan played in Top Gun.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"result = chain(\"Who played in Top Gun?\")\n",
|
||||
"print(f\"Intermediate steps: {result['intermediate_steps']}\")\n",
|
||||
"print(f\"Final answer: {result['result']}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d6e1b054",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Return direct results\n",
|
||||
"You can return direct results from the Cypher QA Chain using the `return_direct` parameter"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "2d3acf10",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = GraphCypherQAChain.from_llm(\n",
|
||||
" ChatOpenAI(temperature=0), graph=graph, verbose=True, return_direct=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "b0a9d143",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new GraphCypherQAChain chain...\u001b[0m\n",
|
||||
"Generated Cypher:\n",
|
||||
"\u001b[32;1m\u001b[1;3mMATCH (a:Actor)-[:ACTED_IN]->(m:Movie {name: 'Top Gun'})\n",
|
||||
"RETURN a.name\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'query': 'Who played in Top Gun?',\n",
|
||||
" 'result': [{'name': 'Tom Cruise'},\n",
|
||||
" {'name': 'Val Kilmer'},\n",
|
||||
" {'name': 'Anthony Edwards'},\n",
|
||||
" {'name': 'Meg Ryan'}]}"
|
||||
]
|
||||
},
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.invoke(\"Who played in Top Gun?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f01dfb72-24ec-4ae7-883a-ee6646889b59",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Add examples in the Cypher generation prompt\n",
|
||||
"You can define the Cypher statement you want the LLM to generate for particular questions"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "59baeb88-adfa-4c26-8334-fcbff3a98efb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.prompts.prompt import PromptTemplate\n",
|
||||
"\n",
|
||||
"CYPHER_GENERATION_TEMPLATE = \"\"\"Task:Generate Cypher statement to query a graph database.\n",
|
||||
"Instructions:\n",
|
||||
"Use only the provided relationship types and properties in the schema.\n",
|
||||
"Do not use any other relationship types or properties that are not provided.\n",
|
||||
"Schema:\n",
|
||||
"{schema}\n",
|
||||
"Note: Do not include any explanations or apologies in your responses.\n",
|
||||
"Do not respond to any questions that might ask anything else than for you to construct a Cypher statement.\n",
|
||||
"Do not include any text except the generated Cypher statement.\n",
|
||||
"Examples: Here are a few examples of generated Cypher statements for particular questions:\n",
|
||||
"# How many people played in Top Gun?\n",
|
||||
"MATCH (m:Movie {{title:\"Top Gun\"}})<-[:ACTED_IN]-()\n",
|
||||
"RETURN count(*) AS numberOfActors\n",
|
||||
"\n",
|
||||
"The question is:\n",
|
||||
"{question}\"\"\"\n",
|
||||
"\n",
|
||||
"CYPHER_GENERATION_PROMPT = PromptTemplate(\n",
|
||||
" input_variables=[\"schema\", \"question\"], template=CYPHER_GENERATION_TEMPLATE\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = GraphCypherQAChain.from_llm(\n",
|
||||
" ChatOpenAI(temperature=0),\n",
|
||||
" graph=graph,\n",
|
||||
" verbose=True,\n",
|
||||
" cypher_prompt=CYPHER_GENERATION_PROMPT,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "47c64027-cf42-493a-9c76-2d10ba753728",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new GraphCypherQAChain chain...\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Generated Cypher:\n",
|
||||
"\u001b[32;1m\u001b[1;3mMATCH (:Movie {name:\"Top Gun\"})<-[:ACTED_IN]-(:Actor)\n",
|
||||
"RETURN count(*) AS numberOfActors\u001b[0m\n",
|
||||
"Full Context:\n",
|
||||
"\u001b[32;1m\u001b[1;3m[{'numberofactors': 4}]\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'query': 'How many people played in Top Gun?',\n",
|
||||
" 'result': \"I don't know the answer.\"}"
|
||||
]
|
||||
},
|
||||
"execution_count": 15,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.invoke(\"How many people played in Top Gun?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3e721cad-aa87-4526-9231-2dfc0e365939",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Use separate LLMs for Cypher and answer generation\n",
|
||||
"You can use the `cypher_llm` and `qa_llm` parameters to define different llms"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "6f9becc2-f579-45bf-9b50-2ce02bde92da",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = GraphCypherQAChain.from_llm(\n",
|
||||
" graph=graph,\n",
|
||||
" cypher_llm=ChatOpenAI(temperature=0, model=\"gpt-3.5-turbo\"),\n",
|
||||
" qa_llm=ChatOpenAI(temperature=0, model=\"gpt-3.5-turbo-16k\"),\n",
|
||||
" verbose=True,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"id": "ff18e3e3-3402-4683-aec4-a19898f23ca1",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new GraphCypherQAChain chain...\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Generated Cypher:\n",
|
||||
"\u001b[32;1m\u001b[1;3mMATCH (a:Actor)-[:ACTED_IN]->(m:Movie)\n",
|
||||
"WHERE m.name = 'Top Gun'\n",
|
||||
"RETURN a.name\u001b[0m\n",
|
||||
"Full Context:\n",
|
||||
"\u001b[32;1m\u001b[1;3m[{'name': 'Tom Cruise'}, {'name': 'Val Kilmer'}, {'name': 'Anthony Edwards'}, {'name': 'Meg Ryan'}]\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'query': 'Who played in Top Gun?',\n",
|
||||
" 'result': 'Tom Cruise, Val Kilmer, Anthony Edwards, and Meg Ryan played in Top Gun.'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 17,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.invoke(\"Who played in Top Gun?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "eefea16b-508f-4552-8942-9d5063ed7d37",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Ignore specified node and relationship types\n",
|
||||
"\n",
|
||||
"You can use `include_types` or `exclude_types` to ignore parts of the graph schema when generating Cypher statements."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"id": "a20fa21e-fb85-41c4-aac0-53fb25e34604",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = GraphCypherQAChain.from_llm(\n",
|
||||
" graph=graph,\n",
|
||||
" cypher_llm=ChatOpenAI(temperature=0, model=\"gpt-3.5-turbo\"),\n",
|
||||
" qa_llm=ChatOpenAI(temperature=0, model=\"gpt-3.5-turbo-16k\"),\n",
|
||||
" verbose=True,\n",
|
||||
" exclude_types=[\"Movie\"],\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"id": "3ad7f6b8-543e-46e4-a3b2-40fa3e66e895",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Node properties are the following:\n",
|
||||
"Actor {name: STRING},LabelA {property_a: STRING},LabelB {},LabelC {}\n",
|
||||
"Relationship properties are the following:\n",
|
||||
"ACTED_IN {},REL_TYPE {rel_prop: STRING}\n",
|
||||
"The relationships are the following:\n",
|
||||
"(:LabelA)-[:REL_TYPE]->(:LabelB),(:LabelA)-[:REL_TYPE]->(:LabelC)\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Inspect graph schema\n",
|
||||
"print(chain.graph_schema)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f0202e88-d700-40ed-aef9-0c969c7bf951",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Validate generated Cypher statements\n",
|
||||
"You can use the `validate_cypher` parameter to validate and correct relationship directions in generated Cypher statements"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
"id": "53665d03-7afd-433c-bdd5-750127bfb152",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = GraphCypherQAChain.from_llm(\n",
|
||||
" llm=ChatOpenAI(temperature=0, model=\"gpt-3.5-turbo\"),\n",
|
||||
" graph=graph,\n",
|
||||
" verbose=True,\n",
|
||||
" validate_cypher=True,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 21,
|
||||
"id": "19e1a591-9c10-4d7b-aa36-a5e1b778a97b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new GraphCypherQAChain chain...\u001b[0m\n",
|
||||
"Generated Cypher:\n",
|
||||
"\u001b[32;1m\u001b[1;3mMATCH (a:Actor)-[:ACTED_IN]->(m:Movie)\n",
|
||||
"WHERE m.name = 'Top Gun'\n",
|
||||
"RETURN a.name\u001b[0m\n",
|
||||
"Full Context:\n",
|
||||
"\u001b[32;1m\u001b[1;3m[{'name': 'Tom Cruise'}, {'name': 'Val Kilmer'}, {'name': 'Anthony Edwards'}, {'name': 'Meg Ryan'}]\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'query': 'Who played in Top Gun?',\n",
|
||||
" 'result': 'Tom Cruise, Val Kilmer, Anthony Edwards, Meg Ryan played in Top Gun.'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 21,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.invoke(\"Who played in Top Gun?\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.19"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -19,26 +19,22 @@
|
||||
"\n",
|
||||
"To complete this tutorial, you will need [Docker](https://www.docker.com/get-started/) and [Python 3.x](https://www.python.org/) installed.\n",
|
||||
"\n",
|
||||
"Ensure you have a running `Memgraph` instance. You can download and run it in a local Docker container by executing the following script:\n",
|
||||
"Ensure you have a running Memgraph instance. To quickly run Memgraph Platform (Memgraph database + MAGE library + Memgraph Lab) for the first time, do the following:\n",
|
||||
"\n",
|
||||
"On Linux/MacOS:\n",
|
||||
"```\n",
|
||||
"docker run \\\n",
|
||||
" -it \\\n",
|
||||
" -p 7687:7687 \\\n",
|
||||
" -p 7444:7444 \\\n",
|
||||
" -p 3000:3000 \\\n",
|
||||
" -e MEMGRAPH=\"--bolt-server-name-for-init=Neo4j/\" \\\n",
|
||||
" -v mg_lib:/var/lib/memgraph memgraph/memgraph-platform\n",
|
||||
"curl https://install.memgraph.com | sh\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"You will need to wait a few seconds for the database to start. If the process is completed successfully, you should see something like this:\n",
|
||||
"On Windows:\n",
|
||||
"```\n",
|
||||
"mgconsole X.X\n",
|
||||
"Connected to 'memgraph://127.0.0.1:7687'\n",
|
||||
"Type :help for shell usage\n",
|
||||
"Quit the shell by typing Ctrl-D(eof) or :quit\n",
|
||||
"memgraph>\n",
|
||||
"iwr https://windows.memgraph.com | iex\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Both commands run a script that downloads a Docker Compose file to your system, builds and starts `memgraph-mage` and `memgraph-lab` Docker services in two separate containers. \n",
|
||||
"\n",
|
||||
"Read more about the installation process on [Memgraph documentation](https://memgraph.com/docs/getting-started/install-memgraph).\n",
|
||||
"\n",
|
||||
"Now you can start playing with `Memgraph`!"
|
||||
]
|
||||
},
|
||||
@@ -89,7 +85,7 @@
|
||||
"id": "95ba37a4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We're utilizing the Python library [GQLAlchemy](https://github.com/memgraph/gqlalchemy) to establish a connection between our Memgraph database and Python script. To execute queries, we can set up a Memgraph instance as follows:"
|
||||
"We're utilizing the Python library [GQLAlchemy](https://github.com/memgraph/gqlalchemy) to establish a connection between our Memgraph database and Python script. You can establish the connection to a running Memgraph instance with the Neo4j driver as well, since it's compatible with Memgraph. To execute queries with GQLAlchemy, we can set up a Memgraph instance as follows:"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
37
docs/docs/integrations/graphs/tigergraph.mdx
Normal file
37
docs/docs/integrations/graphs/tigergraph.mdx
Normal file
@@ -0,0 +1,37 @@
|
||||
# TigerGraph
|
||||
|
||||
>[TigerGraph](https://www.tigergraph.com/tigergraph-db/) is a natively distributed and high-performance graph database.
|
||||
> The storage of data in a graph format of vertices and edges leads to rich relationships,
|
||||
> ideal for grouding LLM responses.
|
||||
|
||||
A big example of the `TigerGraph` and `LangChain` integration [presented here](https://github.com/tigergraph/graph-ml-notebooks/blob/main/applications/large_language_models/TigerGraph_LangChain_Demo.ipynb).
|
||||
|
||||
## Installation and Setup
|
||||
|
||||
Follow instructions [how to connect to the `TigerGraph` database](https://docs.tigergraph.com/pytigergraph/current/getting-started/connection).
|
||||
|
||||
Install the Python SDK:
|
||||
|
||||
```bash
|
||||
pip install pyTigerGraph
|
||||
```
|
||||
|
||||
## Example
|
||||
|
||||
To utilize the `TigerGraph InquiryAI` functionality, you can import `TigerGraph` from `langchain_community.graphs`.
|
||||
|
||||
```python
|
||||
import pyTigerGraph as tg
|
||||
|
||||
conn = tg.TigerGraphConnection(host="DATABASE_HOST_HERE", graphname="GRAPH_NAME_HERE", username="USERNAME_HERE", password="PASSWORD_HERE")
|
||||
|
||||
### ==== CONFIGURE INQUIRYAI HOST ====
|
||||
conn.ai.configureInquiryAIHost("INQUIRYAI_HOST_HERE")
|
||||
|
||||
from langchain_community.graphs import TigerGraph
|
||||
|
||||
graph = TigerGraph(conn)
|
||||
result = graph.query("How many servers are there?")
|
||||
print(result)
|
||||
```
|
||||
|
||||
@@ -147,7 +147,7 @@
|
||||
"\n",
|
||||
"@ray.remote(num_cpus=0.1)\n",
|
||||
"def send_query(llm, prompt):\n",
|
||||
" resp = llm(prompt)\n",
|
||||
" resp = llm.invoke(prompt)\n",
|
||||
" return resp\n",
|
||||
"\n",
|
||||
"\n",
|
||||
|
||||
@@ -96,7 +96,7 @@
|
||||
")\n",
|
||||
"\n",
|
||||
"print(\n",
|
||||
" llm(\n",
|
||||
" llm.invoke(\n",
|
||||
" '<|system|>Enter RP mode. You are Ayumu \"Osaka\" Kasuga.<|user|>Hey Osaka. Tell me about yourself.<|model|>'\n",
|
||||
" )\n",
|
||||
")"
|
||||
|
||||
@@ -45,7 +45,7 @@
|
||||
"# Load the model\n",
|
||||
"llm = BaichuanLLM()\n",
|
||||
"\n",
|
||||
"res = llm(\"What's your name?\")\n",
|
||||
"res = llm.invoke(\"What's your name?\")\n",
|
||||
"print(res)"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -80,7 +80,7 @@
|
||||
"os.environ[\"QIANFAN_SK\"] = \"your_sk\"\n",
|
||||
"\n",
|
||||
"llm = QianfanLLMEndpoint(streaming=True)\n",
|
||||
"res = llm(\"hi\")\n",
|
||||
"res = llm.invoke(\"hi\")\n",
|
||||
"print(res)"
|
||||
]
|
||||
},
|
||||
@@ -185,7 +185,7 @@
|
||||
" model=\"ERNIE-Bot-turbo\",\n",
|
||||
" endpoint=\"eb-instant\",\n",
|
||||
")\n",
|
||||
"res = llm(\"hi\")"
|
||||
"res = llm.invoke(\"hi\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -62,7 +62,7 @@
|
||||
" } \"\"\"\n",
|
||||
"\n",
|
||||
"multi_response_llm = NIBittensorLLM(top_responses=10)\n",
|
||||
"multi_resp = multi_response_llm(\"What is Neural Network Feeding Mechanism?\")\n",
|
||||
"multi_resp = multi_response_llm.invoke(\"What is Neural Network Feeding Mechanism?\")\n",
|
||||
"json_multi_resp = json.loads(multi_resp)\n",
|
||||
"pprint(json_multi_resp)"
|
||||
]
|
||||
|
||||
@@ -62,7 +62,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(llm(\"AI is going to\"))"
|
||||
"print(llm.invoke(\"AI is going to\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -85,7 +85,7 @@
|
||||
" model=\"marella/gpt-2-ggml\", callbacks=[StreamingStdOutCallbackHandler()]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"response = llm(\"AI is going to\")"
|
||||
"response = llm.invoke(\"AI is going to\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -97,7 +97,7 @@
|
||||
],
|
||||
"source": [
|
||||
"print(\n",
|
||||
" llm(\n",
|
||||
" llm.invoke(\n",
|
||||
" \"He presented me with plausible evidence for the existence of unicorns: \",\n",
|
||||
" max_length=256,\n",
|
||||
" sampling_topk=50,\n",
|
||||
|
||||
@@ -32,7 +32,7 @@
|
||||
" model=\"zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base-none\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"print(llm(\"def fib():\"))"
|
||||
"print(llm.invoke(\"def fib():\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -203,7 +203,7 @@
|
||||
"User: Answer the following yes/no question by reasoning step by step. Can a dog drive a car?\n",
|
||||
"Assistant:\n",
|
||||
"\"\"\"\n",
|
||||
"print(llm(prompt))"
|
||||
"print(llm.invoke(prompt))"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -359,7 +359,7 @@
|
||||
"}\n",
|
||||
"message = HumanMessage(content=[text_message, image_message])\n",
|
||||
"\n",
|
||||
"output = llm([message])\n",
|
||||
"output = llm.invoke([message])\n",
|
||||
"print(output.content)"
|
||||
]
|
||||
},
|
||||
@@ -432,7 +432,7 @@
|
||||
"}\n",
|
||||
"message = HumanMessage(content=[text_message, image_message])\n",
|
||||
"\n",
|
||||
"output = llm([message])\n",
|
||||
"output = llm.invoke([message])\n",
|
||||
"print(output.content)"
|
||||
]
|
||||
},
|
||||
@@ -457,7 +457,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"message2 = HumanMessage(content=\"And where the image is taken?\")\n",
|
||||
"output2 = llm([message, output, message2])\n",
|
||||
"output2 = llm.invoke([message, output, message2])\n",
|
||||
"print(output2.content)"
|
||||
]
|
||||
},
|
||||
@@ -486,7 +486,7 @@
|
||||
"}\n",
|
||||
"message = HumanMessage(content=[text_message, image_message])\n",
|
||||
"\n",
|
||||
"output = llm([message])\n",
|
||||
"output = llm.invoke([message])\n",
|
||||
"print(output.content)"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -6,9 +6,9 @@
|
||||
"source": [
|
||||
"# IPEX-LLM\n",
|
||||
"\n",
|
||||
"> [IPEX-LLM](https://github.com/intel-analytics/ipex-llm/) is a low-bit LLM optimization library on Intel XPU (Xeon/Core/Flex/Arc/Max). It can make LLMs run extremely fast and consume much less memory on Intel platforms. It is open sourced under Apache 2.0 License.\n",
|
||||
"> [IPEX-LLM](https://github.com/intel-analytics/ipex-llm/) is a PyTorch library for running LLM on Intel CPU and GPU (e.g., local PC with iGPU, discrete GPU such as Arc, Flex and Max) with very low latency. \n",
|
||||
"\n",
|
||||
"This example goes over how to use LangChain to interact with IPEX-LLM for text generation. \n"
|
||||
"This example goes over how to use LangChain to interact with `ipex-llm` for text generation. \n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -49,7 +49,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Usage"
|
||||
"## Basic Usage"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -58,9 +58,20 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import warnings\n",
|
||||
"\n",
|
||||
"from langchain.chains import LLMChain\n",
|
||||
"from langchain_community.llms import IpexLLM\n",
|
||||
"from langchain_core.prompts import PromptTemplate"
|
||||
"from langchain_core.prompts import PromptTemplate\n",
|
||||
"\n",
|
||||
"warnings.filterwarnings(\"ignore\", category=UserWarning, message=\".*padding_mask.*\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Specify the prompt template for your model. In this example, we use the [vicuna-1.5](https://huggingface.co/lmsys/vicuna-7b-v1.5) model. If you're working with a different model, choose a proper template accordingly."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -77,7 +88,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Load Model: "
|
||||
"Load the model locally using IpexLLM using `IpexLLM.from_model_id`. It will load the model directly in its Huggingface format and convert it automatically to low-bit format for inference."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -88,7 +99,7 @@
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "27c08180714a44c7ab766624d5054163",
|
||||
"model_id": "897501860fe4452b836f816c72d955dd",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
@@ -103,7 +114,7 @@
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"2024-03-27 00:58:43,670 - INFO - Converting the current model to sym_int4 format......\n"
|
||||
"2024-04-24 21:20:12,461 - INFO - Converting the current model to sym_int4 format......\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -130,13 +141,9 @@
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"/opt/anaconda3/envs/shane-langchain2/lib/python3.9/site-packages/langchain_core/_api/deprecation.py:117: LangChainDeprecationWarning: The function `run` was deprecated in LangChain 0.1.0 and will be removed in 0.2.0. Use invoke instead.\n",
|
||||
"/opt/anaconda3/envs/shane-langchain-3.11/lib/python3.11/site-packages/langchain_core/_api/deprecation.py:119: LangChainDeprecationWarning: The class `LLMChain` was deprecated in LangChain 0.1.17 and will be removed in 0.3.0. Use RunnableSequence, e.g., `prompt | llm` instead.\n",
|
||||
" warn_deprecated(\n",
|
||||
"/opt/anaconda3/envs/shane-langchain2/lib/python3.9/site-packages/transformers/generation/utils.py:1369: UserWarning: Using `max_length`'s default (4096) to control the generation length. This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we recommend using `max_new_tokens` to control the maximum length of the generation.\n",
|
||||
" warnings.warn(\n",
|
||||
"/opt/anaconda3/envs/shane-langchain2/lib/python3.9/site-packages/ipex_llm/transformers/models/llama.py:218: UserWarning: Passing `padding_mask` is deprecated and will be removed in v4.37.Please make sure use `attention_mask` instead.`\n",
|
||||
" warnings.warn(\n",
|
||||
"/opt/anaconda3/envs/shane-langchain2/lib/python3.9/site-packages/ipex_llm/transformers/models/llama.py:218: UserWarning: Passing `padding_mask` is deprecated and will be removed in v4.37.Please make sure use `attention_mask` instead.`\n",
|
||||
"/opt/anaconda3/envs/shane-langchain-3.11/lib/python3.11/site-packages/transformers/generation/utils.py:1369: UserWarning: Using `max_length`'s default (4096) to control the generation length. This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we recommend using `max_new_tokens` to control the maximum length of the generation.\n",
|
||||
" warnings.warn(\n"
|
||||
]
|
||||
},
|
||||
@@ -144,10 +151,6 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
|
||||
"To disable this warning, you can either:\n",
|
||||
"\t- Avoid using `tokenizers` before the fork if possible\n",
|
||||
"\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n",
|
||||
"AI stands for \"Artificial Intelligence.\" It refers to the development of computer systems that can perform tasks that typically require human intelligence, such as visual perception, speech recognition, decision-making, and language translation. AI can be achieved through a combination of techniques such as machine learning, natural language processing, computer vision, and robotics. The ultimate goal of AI research is to create machines that can think and learn like humans, and can even exceed human capabilities in certain areas.\n"
|
||||
]
|
||||
}
|
||||
@@ -156,15 +159,99 @@
|
||||
"llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
|
||||
"\n",
|
||||
"question = \"What is AI?\"\n",
|
||||
"output = llm_chain.run(question)"
|
||||
"output = llm_chain.invoke(question)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Save/Load Low-bit Model\n",
|
||||
"Alternatively, you might save the low-bit model to disk once and use `from_model_id_low_bit` instead of `from_model_id` to reload it for later use - even across different machines. It is space-efficient, as the low-bit model demands significantly less disk space than the original model. And `from_model_id_low_bit` is also more efficient than `from_model_id` in terms of speed and memory usage, as it skips the model conversion step."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"To save the low-bit model, use `save_low_bit` as follows."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
"source": [
|
||||
"saved_lowbit_model_path = \"./vicuna-7b-1.5-low-bit\" # path to save low-bit model\n",
|
||||
"llm.model.save_low_bit(saved_lowbit_model_path)\n",
|
||||
"del llm"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Load the model from saved lowbit model path as follows. \n",
|
||||
"> Note that the saved path for the low-bit model only includes the model itself but not the tokenizers. If you wish to have everything in one place, you will need to manually download or copy the tokenizer files from the original model's directory to the location where the low-bit model is saved."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"2024-04-24 21:20:35,874 - INFO - Converting the current model to sym_int4 format......\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_lowbit = IpexLLM.from_model_id_low_bit(\n",
|
||||
" model_id=saved_lowbit_model_path,\n",
|
||||
" tokenizer_id=\"lmsys/vicuna-7b-v1.5\",\n",
|
||||
" # tokenizer_name=saved_lowbit_model_path, # copy the tokenizers to saved path if you want to use it this way\n",
|
||||
" model_kwargs={\"temperature\": 0, \"max_length\": 64, \"trust_remote_code\": True},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Use the loaded model in Chains:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"/opt/anaconda3/envs/shane-langchain-3.11/lib/python3.11/site-packages/transformers/generation/utils.py:1369: UserWarning: Using `max_length`'s default (4096) to control the generation length. This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we recommend using `max_new_tokens` to control the maximum length of the generation.\n",
|
||||
" warnings.warn(\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"AI stands for \"Artificial Intelligence.\" It refers to the development of computer systems that can perform tasks that typically require human intelligence, such as visual perception, speech recognition, decision-making, and language translation. AI can be achieved through a combination of techniques such as machine learning, natural language processing, computer vision, and robotics. The ultimate goal of AI research is to create machines that can think and learn like humans, and can even exceed human capabilities in certain areas.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_chain = LLMChain(prompt=prompt, llm=llm_lowbit)\n",
|
||||
"\n",
|
||||
"question = \"What is AI?\"\n",
|
||||
"output = llm_chain.invoke(question)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
@@ -183,7 +270,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.18"
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -57,7 +57,9 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"response = llm(\"### Instruction:\\nWhat is the first book of the bible?\\n### Response:\")"
|
||||
"response = llm.invoke(\n",
|
||||
" \"### Instruction:\\nWhat is the first book of the bible?\\n### Response:\"\n",
|
||||
")"
|
||||
]
|
||||
}
|
||||
],
|
||||
|
||||
@@ -90,7 +90,7 @@
|
||||
"llm = Konko(model=\"mistralai/mistral-7b-v0.1\", temperature=0.1, max_tokens=128)\n",
|
||||
"\n",
|
||||
"input_ = \"\"\"You are a helpful assistant. Explain Big Bang Theory briefly.\"\"\"\n",
|
||||
"print(llm(input_))"
|
||||
"print(llm.invoke(input_))"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -1020,7 +1020,7 @@
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"\n",
|
||||
"print(llm(\"Why is the Moon always showing the same side?\"))"
|
||||
"print(llm.invoke(\"Why is the Moon always showing the same side?\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -1044,7 +1044,7 @@
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"\n",
|
||||
"print(llm(\"Why is the Moon always showing the same side?\"))"
|
||||
"print(llm.invoke(\"Why is the Moon always showing the same side?\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -1109,7 +1109,7 @@
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"\n",
|
||||
"print(llm(\"Why is the Moon always showing the same side?\"))"
|
||||
"print(llm.invoke(\"Why is the Moon always showing the same side?\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -1133,7 +1133,7 @@
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"\n",
|
||||
"print(llm(\"How come we always see one face of the moon?\"))"
|
||||
"print(llm.invoke(\"How come we always see one face of the moon?\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -1238,7 +1238,7 @@
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"\n",
|
||||
"print(llm(\"Is a true fakery the same as a fake truth?\"))"
|
||||
"print(llm.invoke(\"Is a true fakery the same as a fake truth?\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -1262,7 +1262,7 @@
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"\n",
|
||||
"print(llm(\"Is a true fakery the same as a fake truth?\"))"
|
||||
"print(llm.invoke(\"Is a true fakery the same as a fake truth?\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -1327,7 +1327,7 @@
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"\n",
|
||||
"print(llm(\"Are there truths that are false?\"))"
|
||||
"print(llm.invoke(\"Are there truths that are false?\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -1351,7 +1351,7 @@
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"\n",
|
||||
"print(llm(\"Is is possible that something false can be also true?\"))"
|
||||
"print(llm.invoke(\"Is is possible that something false can be also true?\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -59,21 +59,20 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 1,
|
||||
"id": "6fb585dd",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import LLMChain\n",
|
||||
"from langchain_core.prompts import PromptTemplate\n",
|
||||
"from langchain_openai import OpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 2,
|
||||
"id": "035dea0f",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -89,7 +88,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"execution_count": 3,
|
||||
"id": "3f3458d9",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -113,19 +112,19 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": 4,
|
||||
"id": "a641dbd9",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm_chain = LLMChain(prompt=prompt, llm=llm)"
|
||||
"llm_chain = prompt | llm"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"execution_count": 5,
|
||||
"id": "9f844993",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -134,10 +133,10 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"' Justin Bieber was born in 1994, so the NFL team that won the Super Bowl in 1994 was the Dallas Cowboys.'"
|
||||
"' Justin Bieber was born on March 1, 1994. The Super Bowl is typically played in late January or early February. So, we need to look at the Super Bowl from 1994. In 1994, the Super Bowl was Super Bowl XXVIII, played on January 30, 1994. The winning team of that Super Bowl was the Dallas Cowboys.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -145,7 +144,7 @@
|
||||
"source": [
|
||||
"question = \"What NFL team won the Super Bowl in the year Justin Beiber was born?\"\n",
|
||||
"\n",
|
||||
"llm_chain.run(question)"
|
||||
"llm_chain.invoke(question)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -173,7 +172,7 @@
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.11.1 64-bit",
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
@@ -187,7 +186,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.7"
|
||||
"version": "3.9.6"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
|
||||
@@ -63,12 +63,13 @@
|
||||
"source": [
|
||||
"from langchain_community.llms import Predibase\n",
|
||||
"\n",
|
||||
"# With a fine-tuned adapter hosted at Predibase (adapter_version can be specified; omitting it is equivalent to the most recent version).\n",
|
||||
"# With a fine-tuned adapter hosted at Predibase (adapter_version must be specified).\n",
|
||||
"model = Predibase(\n",
|
||||
" model=\"mistral-7b\",\n",
|
||||
" predibase_api_key=os.environ.get(\"PREDIBASE_API_TOKEN\"),\n",
|
||||
" predibase_sdk_version=None, # optional parameter (defaults to the latest Predibase SDK version if omitted)\n",
|
||||
" adapter_id=\"e2e_nlg\",\n",
|
||||
" adapter_version=1,\n",
|
||||
" predibase_api_key=os.environ.get(\"PREDIBASE_API_TOKEN\"),\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@@ -83,8 +84,9 @@
|
||||
"# With a fine-tuned adapter hosted at HuggingFace (adapter_version does not apply and will be ignored).\n",
|
||||
"model = Predibase(\n",
|
||||
" model=\"mistral-7b\",\n",
|
||||
" adapter_id=\"predibase/e2e_nlg\",\n",
|
||||
" predibase_api_key=os.environ.get(\"PREDIBASE_API_TOKEN\"),\n",
|
||||
" predibase_sdk_version=None, # optional parameter (defaults to the latest Predibase SDK version if omitted)\n",
|
||||
" adapter_id=\"predibase/e2e_nlg\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@@ -94,7 +96,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"response = model(\"Can you recommend me a nice dry wine?\")\n",
|
||||
"response = model.invoke(\"Can you recommend me a nice dry wine?\")\n",
|
||||
"print(response)"
|
||||
]
|
||||
},
|
||||
@@ -122,7 +124,9 @@
|
||||
"from langchain_community.llms import Predibase\n",
|
||||
"\n",
|
||||
"model = Predibase(\n",
|
||||
" model=\"mistral-7b\", predibase_api_key=os.environ.get(\"PREDIBASE_API_TOKEN\")\n",
|
||||
" model=\"mistral-7b\",\n",
|
||||
" predibase_api_key=os.environ.get(\"PREDIBASE_API_TOKEN\"),\n",
|
||||
" predibase_sdk_version=None, # optional parameter (defaults to the latest Predibase SDK version if omitted)\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@@ -136,12 +140,13 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# With a fine-tuned adapter hosted at Predibase (adapter_version can be specified; omitting it is equivalent to the most recent version).\n",
|
||||
"# With a fine-tuned adapter hosted at Predibase (adapter_version must be specified).\n",
|
||||
"model = Predibase(\n",
|
||||
" model=\"mistral-7b\",\n",
|
||||
" predibase_api_key=os.environ.get(\"PREDIBASE_API_TOKEN\"),\n",
|
||||
" predibase_sdk_version=None, # optional parameter (defaults to the latest Predibase SDK version if omitted)\n",
|
||||
" adapter_id=\"e2e_nlg\",\n",
|
||||
" adapter_version=1,\n",
|
||||
" predibase_api_key=os.environ.get(\"PREDIBASE_API_TOKEN\"),\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@@ -154,8 +159,9 @@
|
||||
"# With a fine-tuned adapter hosted at HuggingFace (adapter_version does not apply and will be ignored).\n",
|
||||
"llm = Predibase(\n",
|
||||
" model=\"mistral-7b\",\n",
|
||||
" adapter_id=\"predibase/e2e_nlg\",\n",
|
||||
" predibase_api_key=os.environ.get(\"PREDIBASE_API_TOKEN\"),\n",
|
||||
" predibase_sdk_version=None, # optional parameter (defaults to the latest Predibase SDK version if omitted)\n",
|
||||
" adapter_id=\"predibase/e2e_nlg\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@@ -247,13 +253,14 @@
|
||||
"\n",
|
||||
"model = Predibase(\n",
|
||||
" model=\"my-base-LLM\",\n",
|
||||
" adapter_id=\"my-finetuned-adapter-id\", # Supports both, Predibase-hosted and HuggingFace-hosted model repositories.\n",
|
||||
" # adapter_version=1, # optional (returns the latest, if omitted)\n",
|
||||
" predibase_api_key=os.environ.get(\n",
|
||||
" \"PREDIBASE_API_TOKEN\"\n",
|
||||
" ), # Adapter argument is optional.\n",
|
||||
" predibase_sdk_version=None, # optional parameter (defaults to the latest Predibase SDK version if omitted)\n",
|
||||
" adapter_id=\"my-finetuned-adapter-id\", # Supports both, Predibase-hosted and HuggingFace-hosted adapter repositories.\n",
|
||||
" adapter_version=1, # required for Predibase-hosted adapters (ignored for HuggingFace-hosted adapters)\n",
|
||||
")\n",
|
||||
"# replace my-finetuned-LLM with the name of your model in Predibase"
|
||||
"# replace my-base-LLM with the name of your choice of a serverless base model in Predibase"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -262,7 +269,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# response = model(\"Can you help categorize the following emails into positive, negative, and neutral?\")"
|
||||
"# response = model.invoke(\"Can you help categorize the following emails into positive, negative, and neutral?\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
|
||||
@@ -323,7 +323,7 @@
|
||||
"User: Answer the following yes/no question by reasoning step by step. Can a dog drive a car?\n",
|
||||
"Assistant:\n",
|
||||
"\"\"\"\n",
|
||||
"_ = llm(prompt)"
|
||||
"_ = llm.invoke(prompt)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -376,13 +376,13 @@
|
||||
"Assistant:\n",
|
||||
"\"\"\"\n",
|
||||
"start_time = time.perf_counter()\n",
|
||||
"raw_output = llm(prompt) # raw output, no stop\n",
|
||||
"raw_output = llm.invoke(prompt) # raw output, no stop\n",
|
||||
"end_time = time.perf_counter()\n",
|
||||
"print(f\"Raw output:\\n {raw_output}\")\n",
|
||||
"print(f\"Raw output runtime: {end_time - start_time} seconds\")\n",
|
||||
"\n",
|
||||
"start_time = time.perf_counter()\n",
|
||||
"stopped_output = llm(prompt, stop=[\"\\n\\n\"]) # stop on double newlines\n",
|
||||
"stopped_output = llm.invoke(prompt, stop=[\"\\n\\n\"]) # stop on double newlines\n",
|
||||
"end_time = time.perf_counter()\n",
|
||||
"print(f\"Stopped output:\\n {stopped_output}\")\n",
|
||||
"print(f\"Stopped output runtime: {end_time - start_time} seconds\")"
|
||||
|
||||
@@ -65,7 +65,7 @@
|
||||
"# Load the model\n",
|
||||
"llm = SparkLLM()\n",
|
||||
"\n",
|
||||
"res = llm(\"What's your name?\")\n",
|
||||
"res = llm.invoke(\"What's your name?\")\n",
|
||||
"print(res)"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -2,26 +2,21 @@
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Zep\n",
|
||||
"> Recall, understand, and extract data from chat histories. Power personalized AI experiences.\n",
|
||||
"\n",
|
||||
"## Fast, Scalable Building Blocks for LLM Apps\n",
|
||||
"Zep is an open source platform for productionizing LLM apps. Go from a prototype\n",
|
||||
"built in LangChain or LlamaIndex, or a custom app, to production in minutes without\n",
|
||||
"rewriting code.\n",
|
||||
">[Zep](https://www.getzep.com) is a long-term memory service for AI Assistant apps.\n",
|
||||
"> With Zep, you can provide AI assistants with the ability to recall past conversations, no matter how distant,\n",
|
||||
"> while also reducing hallucinations, latency, and cost.\n",
|
||||
"\n",
|
||||
"Key Features:\n",
|
||||
"> Interested in Zep Cloud? See [Zep Cloud Installation Guide](https://help.getzep.com/sdks) and [Zep Cloud Memory Example](https://help.getzep.com/langchain/examples/messagehistory-example)\n",
|
||||
"\n",
|
||||
"- **Fast!** Zep operates independently of the your chat loop, ensuring a snappy user experience.\n",
|
||||
"- **Chat History Memory, Archival, and Enrichment**, populate your prompts with relevant chat history, sumamries, named entities, intent data, and more.\n",
|
||||
"- **Vector Search over Chat History and Documents** Automatic embedding of documents, chat histories, and summaries. Use Zep's similarity or native MMR Re-ranked search to find the most relevant.\n",
|
||||
"- **Manage Users and their Chat Sessions** Users and their Chat Sessions are first-class citizens in Zep, allowing you to manage user interactions with your bots or agents easily.\n",
|
||||
"- **Records Retention and Privacy Compliance** Comply with corporate and regulatory mandates for records retention while ensuring compliance with privacy regulations such as CCPA and GDPR. Fulfill *Right To Be Forgotten* requests with a single API call\n",
|
||||
"\n",
|
||||
"Zep project: [https://github.com/getzep/zep](https://github.com/getzep/zep)\n",
|
||||
"Docs: [https://docs.getzep.com/](https://docs.getzep.com/)\n",
|
||||
"## Open Source Installation and Setup\n",
|
||||
"\n",
|
||||
"> Zep Open Source project: [https://github.com/getzep/zep](https://github.com/getzep/zep)\n",
|
||||
">\n",
|
||||
"> Zep Open Source Docs: [https://docs.getzep.com/](https://docs.getzep.com/)\n",
|
||||
"\n",
|
||||
"## Example\n",
|
||||
"\n",
|
||||
@@ -34,7 +29,10 @@
|
||||
"2. Running an agent and having message automatically added to the store.\n",
|
||||
"3. Viewing the enriched messages.\n",
|
||||
"4. Vector search over the conversation history."
|
||||
]
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
@@ -259,11 +257,11 @@
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3mThought: Do I need to use a tool? No\n",
|
||||
"AI: Parable of the Sower is a prescient novel that speaks to the challenges facing contemporary society, such as climate change, inequality, and violence. It is a cautionary tale that warns of the dangers of unchecked greed and the need for individuals to take responsibility for their own lives and the lives of those around them.\u001b[0m\n",
|
||||
"\u001B[1m> Entering new chain...\u001B[0m\n",
|
||||
"\u001B[32;1m\u001B[1;3mThought: Do I need to use a tool? No\n",
|
||||
"AI: Parable of the Sower is a prescient novel that speaks to the challenges facing contemporary society, such as climate change, inequality, and violence. It is a cautionary tale that warns of the dangers of unchecked greed and the need for individuals to take responsibility for their own lives and the lives of those around them.\u001B[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
"\u001B[1m> Finished chain.\u001B[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -394,10 +392,7 @@
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
"collapsed": false
|
||||
},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
|
||||
@@ -4,6 +4,9 @@ All functionality related to [Google Cloud Platform](https://cloud.google.com/)
|
||||
|
||||
## LLMs
|
||||
|
||||
We recommend individual developers to start with Gemini API (`langchain-google-genai`) and move to Vertex AI (`langchain-google-vertexai`) when they need access to commercial support and higher rate limits. If you’re already Cloud-friendly or Cloud-native, then you can get started in Vertex AI straight away.
|
||||
Please, find more information [here](https://ai.google.dev/gemini-api/docs/migrate-to-cloud).
|
||||
|
||||
### Google Generative AI
|
||||
|
||||
Access GoogleAI `Gemini` models such as `gemini-pro` and `gemini-pro-vision` through the `GoogleGenerativeAI` class.
|
||||
@@ -160,16 +163,16 @@ from langchain_google_alloydb_pg import AlloyDBEngine, AlloyDBLoader
|
||||
|
||||
> [Google Cloud BigQuery](https://cloud.google.com/bigquery) is a serverless and cost-effective enterprise data warehouse that works across clouds and scales with your data in Google Cloud.
|
||||
|
||||
We need to install `google-cloud-bigquery` python package.
|
||||
We need to install `langchain-google-community` with Big Query dependencies:
|
||||
|
||||
```bash
|
||||
pip install google-cloud-bigquery
|
||||
pip install langchain-google-community[bigquery]
|
||||
```
|
||||
|
||||
See a [usage example](/docs/integrations/document_loaders/google_bigquery).
|
||||
|
||||
```python
|
||||
from langchain_community.document_loaders import BigQueryLoader
|
||||
from langchain_google_community import BigQueryLoader
|
||||
```
|
||||
|
||||
### Bigtable
|
||||
@@ -236,10 +239,10 @@ from langchain_google_cloud_sql_pg import PostgresEngine, PostgresLoader
|
||||
|
||||
>[Cloud Storage](https://en.wikipedia.org/wiki/Google_Cloud_Storage) is a managed service for storing unstructured data in Google Cloud.
|
||||
|
||||
We need to install `google-cloud-storage` python package.
|
||||
We need to install `langchain-google-community` with Google Cloud Storage dependencies.
|
||||
|
||||
```bash
|
||||
pip install google-cloud-storage
|
||||
pip install langchain-google-community[gcs]
|
||||
```
|
||||
|
||||
There are two loaders for the `Google Cloud Storage`: the `Directory` and the `File` loaders.
|
||||
@@ -247,12 +250,12 @@ There are two loaders for the `Google Cloud Storage`: the `Directory` and the `F
|
||||
See a [usage example](/docs/integrations/document_loaders/google_cloud_storage_directory).
|
||||
|
||||
```python
|
||||
from langchain_community.document_loaders import GCSDirectoryLoader
|
||||
from langchain_google_community import GCSDirectoryLoader
|
||||
```
|
||||
See a [usage example](/docs/integrations/document_loaders/google_cloud_storage_file).
|
||||
|
||||
```python
|
||||
from langchain_community.document_loaders import GCSFileLoader
|
||||
from langchain_google_community import GCSFileLoader
|
||||
```
|
||||
|
||||
### El Carro for Oracle Workloads
|
||||
@@ -277,16 +280,16 @@ from langchain_google_el_carro import ElCarroLoader
|
||||
|
||||
Currently, only `Google Docs` are supported.
|
||||
|
||||
We need to install several python packages.
|
||||
We need to install `langchain-google-community` with Google Drive dependencies.
|
||||
|
||||
```bash
|
||||
pip install google-api-python-client google-auth-httplib2 google-auth-oauthlib
|
||||
pip install langchain-google-community[drive]
|
||||
```
|
||||
|
||||
See a [usage example and authorization instructions](/docs/integrations/document_loaders/google_drive).
|
||||
|
||||
```python
|
||||
from langchain_community.document_loaders import GoogleDriveLoader
|
||||
from langchain_google_community import GoogleDriveLoader
|
||||
```
|
||||
|
||||
### Firestore (Native Mode)
|
||||
@@ -356,16 +359,16 @@ from langchain_google_spanner import SpannerLoader
|
||||
|
||||
This document loader transcribes audio files and outputs the text results as Documents.
|
||||
|
||||
First, we need to install the python package.
|
||||
First, we need to install `langchain-google-community` with speech-to-text dependencies.
|
||||
|
||||
```bash
|
||||
pip install google-cloud-speech
|
||||
pip install langchain-google-community[speech]
|
||||
```
|
||||
|
||||
See a [usage example and authorization instructions](/docs/integrations/document_loaders/google_speech_to_text).
|
||||
|
||||
```python
|
||||
from langchain_community.document_loaders import GoogleSpeechToTextLoader
|
||||
from langchain_google_community import SpeechToTextLoader
|
||||
```
|
||||
|
||||
## Document Transformers
|
||||
@@ -383,15 +386,14 @@ We can get it either programmatically or copy from the `Prediction endpoint` sec
|
||||
tab in the Google Cloud Console.
|
||||
|
||||
```bash
|
||||
pip install google-cloud-documentai
|
||||
pip install google-cloud-documentai-toolbox
|
||||
pip install langchain-google-community[docai]
|
||||
```
|
||||
|
||||
See a [usage example](/docs/integrations/document_transformers/google_docai).
|
||||
|
||||
```python
|
||||
from langchain_community.document_loaders.blob_loaders import Blob
|
||||
from langchain_community.document_loaders.parsers import DocAIParser
|
||||
from langchain_core.document_loaders.blob_loaders import Blob
|
||||
from langchain_google_community import DocAIParser
|
||||
```
|
||||
|
||||
### Google Translate
|
||||
@@ -402,18 +404,16 @@ from langchain_community.document_loaders.parsers import DocAIParser
|
||||
|
||||
The `GoogleTranslateTransformer` allows you to translate text and HTML with the [Google Cloud Translation API](https://cloud.google.com/translate).
|
||||
|
||||
To use it, you should have the `google-cloud-translate` python package installed, and a Google Cloud project with the [Translation API enabled](https://cloud.google.com/translate/docs/setup). This transformer uses the [Advanced edition (v3)](https://cloud.google.com/translate/docs/intro-to-v3).
|
||||
|
||||
First, we need to install the python package.
|
||||
First, we need to install the `langchain-google-community` with translate dependencies.
|
||||
|
||||
```bash
|
||||
pip install google-cloud-translate
|
||||
pip install langchain-google-community[translate]
|
||||
```
|
||||
|
||||
See a [usage example and authorization instructions](/docs/integrations/document_transformers/google_translate).
|
||||
|
||||
```python
|
||||
from langchain_community.document_transformers import GoogleTranslateTransformer
|
||||
from langchain_google_community import GoogleTranslateTransformer
|
||||
```
|
||||
|
||||
## Vector Stores
|
||||
@@ -620,7 +620,7 @@ docai_wh_retriever = GoogleDocumentAIWarehouseRetriever(
|
||||
project_number=...
|
||||
)
|
||||
query = ...
|
||||
documents = docai_wh_retriever.get_relevant_documents(
|
||||
documents = docai_wh_retriever.invoke(
|
||||
query, user_ldap=...
|
||||
)
|
||||
```
|
||||
@@ -643,7 +643,7 @@ pip install google-cloud-text-to-speech
|
||||
See a [usage example and authorization instructions](/docs/integrations/tools/google_cloud_texttospeech).
|
||||
|
||||
```python
|
||||
from langchain.tools import GoogleCloudTextToSpeechTool
|
||||
from langchain_google_community import TextToSpeechTool
|
||||
```
|
||||
|
||||
### Google Drive
|
||||
@@ -736,7 +736,7 @@ from langchain_community.utilities.google_scholar import GoogleScholarAPIWrapper
|
||||
`GOOGLE_API_KEY` and `GOOGLE_CSE_ID` respectively.
|
||||
|
||||
```python
|
||||
from langchain_community.utilities import GoogleSearchAPIWrapper
|
||||
from langchain_google_community import GoogleSearchAPIWrapper
|
||||
```
|
||||
|
||||
For a more detailed walkthrough of this wrapper, see [this notebook](/docs/integrations/tools/google_search).
|
||||
@@ -770,16 +770,16 @@ from langchain_community.utilities.google_trends import GoogleTrendsAPIWrapper
|
||||
> [Google Gmail](https://en.wikipedia.org/wiki/Gmail) is a free email service provided by Google.
|
||||
This toolkit works with emails through the `Gmail API`.
|
||||
|
||||
We need to install several python packages.
|
||||
We need to install `langchain-google-community` with required dependencies:
|
||||
|
||||
```bash
|
||||
pip install google-api-python-client google-auth-oauthlib google-auth-httplib2
|
||||
pip install langchain-google-community[gmail]
|
||||
```
|
||||
|
||||
See a [usage example and authorization instructions](/docs/integrations/toolkits/gmail).
|
||||
|
||||
```python
|
||||
from langchain_community.agent_toolkits import GmailToolkit
|
||||
from langchain_google_community import GmailToolkit
|
||||
```
|
||||
|
||||
## Memory
|
||||
@@ -945,16 +945,16 @@ from langchain_google_el_carro import ElCarroChatMessageHistory
|
||||
> [Gmail](https://en.wikipedia.org/wiki/Gmail) is a free email service provided by Google.
|
||||
This loader works with emails through the `Gmail API`.
|
||||
|
||||
We need to install several python packages.
|
||||
We need to install `langchain-google-community` with underlying dependencies.
|
||||
|
||||
```bash
|
||||
pip install google-api-python-client google-auth-oauthlib google-auth-httplib2
|
||||
pip install langchain-google-community[gmail]
|
||||
```
|
||||
|
||||
See a [usage example and authorization instructions](/docs/integrations/chat_loaders/gmail).
|
||||
|
||||
```python
|
||||
from langchain_community.chat_loaders.gmail import GMailLoader
|
||||
from langchain_google_community import GMailLoader
|
||||
```
|
||||
|
||||
## 3rd Party Integrations
|
||||
|
||||
@@ -5,6 +5,13 @@ sidebar_class_name: hidden
|
||||
|
||||
# Providers
|
||||
|
||||
:::info
|
||||
|
||||
If you'd like to write your own integration, see [Extending LangChain](/docs/guides/development/extending_langchain/).
|
||||
If you'd like to contribute an integration, see [Contributing integrations](/docs/contributing/integrations/).
|
||||
|
||||
:::
|
||||
|
||||
LangChain integrates with many providers.
|
||||
|
||||
## Partner Packages
|
||||
@@ -37,7 +44,6 @@ These providers have standalone `langchain-{provider}` packages for improved ver
|
||||
|
||||
## Featured Community Providers
|
||||
|
||||
- [AWS](/docs/integrations/platforms/aws)
|
||||
- [Hugging Face](/docs/integrations/platforms/huggingface)
|
||||
- [Microsoft](/docs/integrations/platforms/microsoft)
|
||||
|
||||
|
||||
28
docs/docs/integrations/providers/browserbase.mdx
Normal file
28
docs/docs/integrations/providers/browserbase.mdx
Normal file
@@ -0,0 +1,28 @@
|
||||
# Browserbase
|
||||
|
||||
>[Browserbase](https://browserbase.com) is a serverless platform for running headless browsers, it offers advanced debugging, session recordings, stealth mode, integrated proxies and captcha solving.
|
||||
|
||||
## Installation and Setup
|
||||
|
||||
- Get an API key from [browserbase.com](https://browserbase.com) and set it in environment variables (`BROWSERBASE_API_KEY`).
|
||||
- Install the [Browserbase SDK](http://github.com/browserbase/python-sdk):
|
||||
|
||||
```python
|
||||
pip install browserbase
|
||||
```
|
||||
|
||||
## Document loader
|
||||
|
||||
See a [usage example](/docs/integrations/document_loaders/browserbase).
|
||||
|
||||
```python
|
||||
from langchain_community.document_loaders import BrowserbaseLoader
|
||||
```
|
||||
|
||||
## Multi-Modal
|
||||
|
||||
See a [usage example](/docs/integrations/document_loaders/browserbase).
|
||||
|
||||
```python
|
||||
from browserbase.helpers.gpt4 import GPT4VImage, GPT4VImageDetail
|
||||
```
|
||||
@@ -83,7 +83,7 @@ from langchain.retrievers import CohereRagRetriever
|
||||
from langchain_core.documents import Document
|
||||
|
||||
rag = CohereRagRetriever(llm=ChatCohere())
|
||||
print(rag.get_relevant_documents("What is cohere ai?"))
|
||||
print(rag.invoke("What is cohere ai?"))
|
||||
```
|
||||
|
||||
Usage of the Cohere [RAG Retriever](/docs/integrations/retrievers/cohere)
|
||||
|
||||
@@ -23,7 +23,7 @@ It provides a unified interface for all models:
|
||||
```python
|
||||
llm = CTransformers(model='/path/to/ggml-gpt-2.bin', model_type='gpt2')
|
||||
|
||||
print(llm('AI is going to'))
|
||||
print(llm.invoke('AI is going to'))
|
||||
```
|
||||
|
||||
If you are getting `illegal instruction` error, try using `lib='avx'` or `lib='basic'`:
|
||||
|
||||
@@ -22,7 +22,7 @@ It provides a unified interface for all models:
|
||||
```python
|
||||
llm = DeepSparse(model='zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base-none')
|
||||
|
||||
print(llm('def fib():'))
|
||||
print(llm.invoke('def fib():'))
|
||||
```
|
||||
|
||||
Additional parameters can be passed using the `config` parameter:
|
||||
|
||||
@@ -83,7 +83,7 @@ def langchain_llm() -> str:
|
||||
temperature=0.2,
|
||||
callbacks=[FlyteCallbackHandler()],
|
||||
)
|
||||
return llm([HumanMessage(content="Tell me a joke")]).content
|
||||
return llm.invoke([HumanMessage(content="Tell me a joke")]).content
|
||||
```
|
||||
|
||||
### Chain
|
||||
|
||||
@@ -27,7 +27,7 @@ from langchain_community.llms import GPT4All
|
||||
model = GPT4All(model="./models/mistral-7b-openorca.Q4_0.gguf", n_threads=8)
|
||||
|
||||
# Generate text
|
||||
response = model("Once upon a time, ")
|
||||
response = model.invoke("Once upon a time, ")
|
||||
```
|
||||
|
||||
You can also customize the generation parameters, such as n_predict, temp, top_p, top_k, and others.
|
||||
|
||||
@@ -29,7 +29,7 @@ openai.api_base = "https://oai.hconeai.com/v1"
|
||||
|
||||
llm = OpenAI(temperature=0.9, headers={"Helicone-Cache-Enabled": "true"})
|
||||
text = "What is a helicone?"
|
||||
print(llm(text))
|
||||
print(llm.invoke(text))
|
||||
```
|
||||
|
||||
[Helicone caching docs](https://docs.helicone.ai/advanced-usage/caching)
|
||||
@@ -47,7 +47,7 @@ llm = OpenAI(temperature=0.9, headers={
|
||||
"Helicone-Property-App": "mobile",
|
||||
})
|
||||
text = "What is a helicone?"
|
||||
print(llm(text))
|
||||
print(llm.invoke(text))
|
||||
```
|
||||
|
||||
[Helicone property docs](https://docs.helicone.ai/advanced-usage/custom-properties)
|
||||
|
||||
@@ -37,3 +37,13 @@ See a [usage example](/docs/integrations/llms/ibm_watsonx).
|
||||
```python
|
||||
from langchain_ibm import WatsonxLLM
|
||||
```
|
||||
|
||||
## Embedding Models
|
||||
|
||||
### WatsonxEmbeddings
|
||||
|
||||
See a [usage example](/docs/integrations/text_embedding/ibm_watsonx).
|
||||
|
||||
```python
|
||||
from langchain_ibm import WatsonxEmbeddings
|
||||
```
|
||||
|
||||
@@ -26,3 +26,19 @@ See [Kinetica Vectorsore API](/docs/integrations/vectorstores/kinetica) for usag
|
||||
from langchain_community.vectorstores import Kinetica
|
||||
```
|
||||
|
||||
## Document Loader
|
||||
|
||||
The Kinetica Document loader can be used to load LangChain Documents from the
|
||||
Kinetica database.
|
||||
|
||||
See [Kinetica Document Loader](/docs/integrations/document_loaders/kinetica) for usage
|
||||
|
||||
```python
|
||||
from langchain_community.document_loaders.kinetica_loader import KineticaLoader
|
||||
```
|
||||
|
||||
## Retriever
|
||||
|
||||
The Kinetica Retriever can return documents given an unstructured query.
|
||||
|
||||
See [Kinetica VectorStore based Retriever](/docs/integrations/retrievers/kinetica) for usage
|
||||
|
||||
@@ -44,7 +44,7 @@ See a usage [example](/docs/integrations/llms/konko).
|
||||
from langchain.llms import Konko
|
||||
llm = Konko(max_tokens=800, model='mistralai/Mistral-7B-v0.1')
|
||||
prompt = "Generate a Product Description for Apple Iphone 15"
|
||||
response = llm(prompt)
|
||||
response = llm.invoke(prompt)
|
||||
```
|
||||
|
||||
## Chat Models
|
||||
|
||||
@@ -22,5 +22,5 @@ from metal_sdk.metal import Metal
|
||||
metal = Metal("API_KEY", "CLIENT_ID", "INDEX_ID");
|
||||
retriever = MetalRetriever(metal, params={"limit": 2})
|
||||
|
||||
docs = retriever.get_relevant_documents("search term")
|
||||
docs = retriever.invoke("search term")
|
||||
```
|
||||
|
||||
@@ -17,9 +17,13 @@ os.environ["PREDIBASE_API_TOKEN"] = "{PREDIBASE_API_TOKEN}"
|
||||
|
||||
from langchain_community.llms import Predibase
|
||||
|
||||
model = Predibase(model="mistral-7b"", predibase_api_key=os.environ.get("PREDIBASE_API_TOKEN"))
|
||||
model = Predibase(
|
||||
model="mistral-7b",
|
||||
predibase_api_key=os.environ.get("PREDIBASE_API_TOKEN"),
|
||||
predibase_sdk_version=None, # optional parameter (defaults to the latest Predibase SDK version if omitted)
|
||||
)
|
||||
|
||||
response = model("Can you recommend me a nice dry wine?")
|
||||
response = model.invoke("Can you recommend me a nice dry wine?")
|
||||
print(response)
|
||||
```
|
||||
|
||||
@@ -31,10 +35,16 @@ os.environ["PREDIBASE_API_TOKEN"] = "{PREDIBASE_API_TOKEN}"
|
||||
|
||||
from langchain_community.llms import Predibase
|
||||
|
||||
# The fine-tuned adapter is hosted at Predibase (adapter_version can be specified; omitting it is equivalent to the most recent version).
|
||||
model = Predibase(model="mistral-7b"", adapter_id="e2e_nlg", adapter_version=1, predibase_api_key=os.environ.get("PREDIBASE_API_TOKEN"))
|
||||
# The fine-tuned adapter is hosted at Predibase (adapter_version must be specified).
|
||||
model = Predibase(
|
||||
model="mistral-7b",
|
||||
predibase_api_key=os.environ.get("PREDIBASE_API_TOKEN"),
|
||||
predibase_sdk_version=None, # optional parameter (defaults to the latest Predibase SDK version if omitted)
|
||||
adapter_id="e2e_nlg",
|
||||
adapter_version=1,
|
||||
)
|
||||
|
||||
response = model("Can you recommend me a nice dry wine?")
|
||||
response = model.invoke("Can you recommend me a nice dry wine?")
|
||||
print(response)
|
||||
```
|
||||
|
||||
@@ -47,8 +57,13 @@ os.environ["PREDIBASE_API_TOKEN"] = "{PREDIBASE_API_TOKEN}"
|
||||
from langchain_community.llms import Predibase
|
||||
|
||||
# The fine-tuned adapter is hosted at HuggingFace (adapter_version does not apply and will be ignored).
|
||||
model = Predibase(model="mistral-7b"", adapter_id="predibase/e2e_nlg", predibase_api_key=os.environ.get("PREDIBASE_API_TOKEN"))
|
||||
model = Predibase(
|
||||
model="mistral-7b",
|
||||
predibase_api_key=os.environ.get("PREDIBASE_API_TOKEN"),
|
||||
predibase_sdk_version=None, # optional parameter (defaults to the latest Predibase SDK version if omitted)
|
||||
adapter_id="predibase/e2e_nlg",
|
||||
)
|
||||
|
||||
response = model("Can you recommend me a nice dry wine?")
|
||||
response = model.invoke("Can you recommend me a nice dry wine?")
|
||||
print(response)
|
||||
```
|
||||
|
||||
@@ -199,7 +199,7 @@
|
||||
" base_compressor=RAG.as_langchain_document_compressor(), base_retriever=retriever\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"compressed_docs = compression_retriever.get_relevant_documents(\n",
|
||||
"compressed_docs = compression_retriever.invoke(\n",
|
||||
" \"What animation studio did Miyazaki found\"\n",
|
||||
")"
|
||||
]
|
||||
|
||||
@@ -44,7 +44,7 @@ def generate_prompt(instruction, input=None):
|
||||
|
||||
|
||||
model = RWKV(model="./models/RWKV-4-Raven-3B-v7-Eng-20230404-ctx4096.pth", strategy="cpu fp32", tokens_path="./rwkv/20B_tokenizer.json")
|
||||
response = model(generate_prompt("Once upon a time, "))
|
||||
response = model.invoke(generate_prompt("Once upon a time, "))
|
||||
```
|
||||
## Model File
|
||||
|
||||
|
||||
@@ -1,15 +1,13 @@
|
||||
# TigerGraph
|
||||
|
||||
What is `TigerGraph`?
|
||||
|
||||
**TigerGraph in a nutshell:**
|
||||
|
||||
- `TigerGraph` is a natively distributed and high-performance graph database.
|
||||
- The storage of data in a graph format of vertices and edges leads to rich relationships, ideal for grouding LLM responses.
|
||||
- Get started quickly with `TigerGraph` by visiting [their website](https://tigergraph.com/).
|
||||
>[TigerGraph](https://www.tigergraph.com/tigergraph-db/) is a natively distributed and high-performance graph database.
|
||||
> The storage of data in a graph format of vertices and edges leads to rich relationships,
|
||||
> ideal for grouding LLM responses.
|
||||
|
||||
## Installation and Setup
|
||||
|
||||
Follow instructions [how to connect to the `TigerGraph` database](https://docs.tigergraph.com/pytigergraph/current/getting-started/connection).
|
||||
|
||||
Install the Python SDK:
|
||||
|
||||
```bash
|
||||
@@ -18,22 +16,10 @@ pip install pyTigerGraph
|
||||
|
||||
## Graph store
|
||||
|
||||
### TigerGraph Store
|
||||
### TigerGraph
|
||||
|
||||
To utilize the `TigerGraph InquiryAI` functionality, you can import `TigerGraph` from `langchain_community.graphs`.
|
||||
See a [usage example](/docs/integrations/graphs/tigergraph).
|
||||
|
||||
```python
|
||||
import pyTigerGraph as tg
|
||||
|
||||
conn = tg.TigerGraphConnection(host="DATABASE_HOST_HERE", graphname="GRAPH_NAME_HERE", username="USERNAME_HERE", password="PASSWORD_HERE")
|
||||
|
||||
### ==== CONFIGURE INQUIRYAI HOST ====
|
||||
conn.ai.configureInquiryAIHost("INQUIRYAI_HOST_HERE")
|
||||
|
||||
from langchain_community.graphs import TigerGraph
|
||||
|
||||
graph = TigerGraph(conn)
|
||||
result = graph.query("How many servers are there?")
|
||||
print(result)
|
||||
```
|
||||
|
||||
|
||||
@@ -15,7 +15,9 @@
|
||||
"source": [
|
||||
"## Solar LLM\n",
|
||||
"\n",
|
||||
"**Solar Mini Chat** is a fast yet powerful advanced large language model focusing on English and Korean. It has been specifically fine-tuned for multi-turn chat purposes, showing enhanced performance across a wide range of natural language processing tasks, like multi-turn conversation or tasks that require an understanding of long contexts, such as RAG (Retrieval-Augmented Generation), compared to other models of a similar size. This fine-tuning equips it with the ability to handle longer conversations more effectively, making it particularly adept for interactive applications."
|
||||
"**Solar Mini Chat** is a fast yet powerful advanced large language model focusing on English and Korean. It has been specifically fine-tuned for multi-turn chat purposes, showing enhanced performance across a wide range of natural language processing tasks, like multi-turn conversation or tasks that require an understanding of long contexts, such as RAG (Retrieval-Augmented Generation), compared to other models of a similar size. This fine-tuning equips it with the ability to handle longer conversations more effectively, making it particularly adept for interactive applications.\n",
|
||||
"\n",
|
||||
"Other than Solar, Upstage also offers features for real-world RAG (retrieval-augmented generation), such as **Groundedness Check** and **Layout Analysis**. "
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -35,7 +37,9 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Get an [access token](https://console.upstage.ai) and set it as an environment variable (`UPSTAGE_API_KEY`)"
|
||||
"Get [API Keys](https://console.upstage.ai) and set environment variables `UPSTAGE_API_KEY` and `UPSTAGE_DOCUMENT_AI_API_KEY`.\n",
|
||||
"\n",
|
||||
"> As of April 2024, you need separate API Keys for Solar and Document AI(Layout Analysis). The API Keys will be consolidated soon (hopefully in May) and you'll need just one key for all features."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -48,6 +52,8 @@
|
||||
"| --- | --- | --- | --- |\n",
|
||||
"| Chat | Build assistants using Solar Mini Chat | `from langchain_upstage import ChatUpstage` | [Go](../../chat/upstage) |\n",
|
||||
"| Text Embedding | Embed strings to vectors | `from langchain_upstage import UpstageEmbeddings` | [Go](../../text_embedding/upstage) |\n",
|
||||
"| Groundedness Check | Verify groundedness of assistant's response | `from langchain_upstage import UpstageGroundednessCheck` | [Go](../../tools/upstage_groundedness_check) |\n",
|
||||
"| Layout Analysis | Serialize documents with tables and figures | `from langchain_upstage import UpstageLayoutAnalysisLoader` | [Go](../../document_loaders/upstage) |\n",
|
||||
"\n",
|
||||
"See [documentations](https://developers.upstage.ai/) for more details about the features."
|
||||
]
|
||||
@@ -69,7 +75,8 @@
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"UPSTAGE_API_KEY\"] = \"YOUR_API_KEY\""
|
||||
"os.environ[\"UPSTAGE_API_KEY\"] = \"YOUR_API_KEY\"\n",
|
||||
"os.environ[\"UPSTAGE_DOCUMENT_AI_API_KEY\"] = \"YOUR_DOCUMENT_AI_API_KEY\""
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -120,6 +127,62 @@
|
||||
"query_result = embeddings.embed_query(\"What does Sam do?\")\n",
|
||||
"print(query_result)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"source": [
|
||||
"### Groundedness Check"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_upstage import UpstageGroundednessCheck\n",
|
||||
"\n",
|
||||
"groundedness_check = UpstageGroundednessCheck()\n",
|
||||
"\n",
|
||||
"request_input = {\n",
|
||||
" \"context\": \"Mauna Kea is an inactive volcano on the island of Hawaii. Its peak is 4,207.3 m above sea level, making it the highest point in Hawaii and second-highest peak of an island on Earth.\",\n",
|
||||
" \"answer\": \"Mauna Kea is 5,207.3 meters tall.\",\n",
|
||||
"}\n",
|
||||
"response = groundedness_check.invoke(request_input)\n",
|
||||
"print(response)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"source": [
|
||||
"### Layout Analysis"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_upstage import UpstageLayoutAnalysisLoader\n",
|
||||
"\n",
|
||||
"file_path = \"/PATH/TO/YOUR/FILE.pdf\"\n",
|
||||
"layzer = UpstageLayoutAnalysisLoader(file_path, split=\"page\")\n",
|
||||
"\n",
|
||||
"# For improved memory efficiency, consider using the lazy_load method to load documents page by page.\n",
|
||||
"docs = layzer.load() # or layzer.lazy_load()\n",
|
||||
"\n",
|
||||
"for doc in docs[:3]:\n",
|
||||
" print(doc)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
|
||||
@@ -154,9 +154,7 @@
|
||||
"openai_api_key = os.environ[\"OPENAI_API_KEY\"]\n",
|
||||
"llm = OpenAI(openai_api_key=openai_api_key, temperature=0)\n",
|
||||
"retriever = vectara.as_retriever()\n",
|
||||
"d = retriever.get_relevant_documents(\n",
|
||||
" \"What did the president say about Ketanji Brown Jackson\", k=2\n",
|
||||
")\n",
|
||||
"d = retriever.invoke(\"What did the president say about Ketanji Brown Jackson\", k=2)\n",
|
||||
"print(d)"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -1,23 +1,40 @@
|
||||
# Zep
|
||||
> Recall, understand, and extract data from chat histories. Power personalized AI experiences.
|
||||
|
||||
>[Zep](http://www.getzep.com) is an open source platform for productionizing LLM apps. Go from a prototype
|
||||
built in LangChain or LlamaIndex, or a custom app, to production in minutes without
|
||||
rewriting code.
|
||||
>[Zep](https://www.getzep.com) is a long-term memory service for AI Assistant apps.
|
||||
> With Zep, you can provide AI assistants with the ability to recall past conversations, no matter how distant,
|
||||
> while also reducing hallucinations, latency, and cost.
|
||||
|
||||
>Key Features:
|
||||
## How Zep works
|
||||
|
||||
Zep persists and recalls chat histories, and automatically generates summaries and other artifacts from these chat histories.
|
||||
It also embeds messages and summaries, enabling you to search Zep for relevant context from past conversations.
|
||||
Zep does all of this asynchronously, ensuring these operations don't impact your user's chat experience.
|
||||
Data is persisted to database, allowing you to scale out when growth demands.
|
||||
|
||||
Zep also provides a simple, easy to use abstraction for document vector search called Document Collections.
|
||||
This is designed to complement Zep's core memory features, but is not designed to be a general purpose vector database.
|
||||
|
||||
Zep allows you to be more intentional about constructing your prompt:
|
||||
- automatically adding a few recent messages, with the number customized for your app;
|
||||
- a summary of recent conversations prior to the messages above;
|
||||
- and/or contextually relevant summaries or messages surfaced from the entire chat session.
|
||||
- and/or relevant Business data from Zep Document Collections.
|
||||
|
||||
## What is Zep Cloud?
|
||||
[Zep Cloud](https://www.getzep.com) is a managed service with Zep Open Source at its core.
|
||||
In addition to Zep Open Source's memory management features, Zep Cloud offers:
|
||||
- **Fact Extraction**: Automatically build fact tables from conversations, without having to define a data schema upfront.
|
||||
- **Dialog Classification**: Instantly and accurately classify chat dialog. Understand user intent and emotion, segment users, and more. Route chains based on semantic context, and trigger events.
|
||||
- **Structured Data Extraction**: Quickly extract business data from chat conversations using a schema you define. Understand what your Assistant should ask for next in order to complete its task.
|
||||
|
||||
> Interested in Zep Cloud? See [Zep Cloud Installation Guide](https://help.getzep.com/sdks), [Zep Cloud Message History Example](https://help.getzep.com/langchain/examples/messagehistory-example), [Zep Cloud Vector Store Example](https://help.getzep.com/langchain/examples/vectorstore-example)
|
||||
|
||||
## Open Source Installation and Setup
|
||||
|
||||
> Zep Open Source project: [https://github.com/getzep/zep](https://github.com/getzep/zep)
|
||||
>
|
||||
>- **Fast!** Zep operates independently of the your chat loop, ensuring a snappy user experience.
|
||||
>- **Chat History Memory, Archival, and Enrichment**, populate your prompts with relevant chat history, sumamries, named entities, intent data, and more.
|
||||
>- **Vector Search over Chat History and Documents** Automatic embedding of documents, chat histories, and summaries. Use Zep's similarity or native MMR Re-ranked search to find the most relevant.
|
||||
>- **Manage Users and their Chat Sessions** Users and their Chat Sessions are first-class citizens in Zep, allowing you to manage user interactions with your bots or agents easily.
|
||||
>- **Records Retention and Privacy Compliance** Comply with corporate and regulatory mandates for records retention while ensuring compliance with privacy regulations such as CCPA and GDPR. Fulfill *Right To Be Forgotten* requests with a single API call
|
||||
|
||||
>Zep project: [https://github.com/getzep/zep](https://github.com/getzep/zep)
|
||||
>
|
||||
>Docs: [https://docs.getzep.com/](https://docs.getzep.com/)
|
||||
|
||||
|
||||
## Installation and Setup
|
||||
> Zep Open Source Docs: [https://docs.getzep.com/](https://docs.getzep.com/)
|
||||
|
||||
1. Install the Zep service. See the [Zep Quick Start Guide](https://docs.getzep.com/deployment/quickstart/).
|
||||
|
||||
|
||||
@@ -69,7 +69,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"retriever.get_relevant_documents(\"what is langchain\")"
|
||||
"retriever.invoke(\"what is langchain\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
|
||||
@@ -83,7 +83,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query = \"Can AI-driven music therapy contribute to the rehabilitation of patients with disorders of consciousness?\"\n",
|
||||
"documents = retriever.get_relevant_documents(query=query)"
|
||||
"documents = retriever.invoke(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -108,7 +108,7 @@
|
||||
"]\n",
|
||||
"\n",
|
||||
"# Retrieve documents with filters and size params\n",
|
||||
"documents = retriever.get_relevant_documents(query=query, size=5, filters=filters)"
|
||||
"documents = retriever.invoke(query, size=5, filters=filters)"
|
||||
]
|
||||
}
|
||||
],
|
||||
|
||||
@@ -97,7 +97,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"docs = retriever.get_relevant_documents(query=\"1605.08386\")"
|
||||
"docs = retriever.invoke(\"1605.08386\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -162,7 +162,7 @@
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdin",
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
" ········\n"
|
||||
|
||||
@@ -117,7 +117,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"retriever.get_relevant_documents(\"what is langchain?\")"
|
||||
"retriever.invoke(\"what is langchain?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -263,7 +263,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"retriever.get_relevant_documents(\"What is Azure OpenAI?\")"
|
||||
"retriever.invoke(\"What is Azure OpenAI?\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user