Mirror of https://github.com/hwchase17/langchain.git, synced 2026-02-08 02:00:06 +00:00

Compare commits: `langchain-` ... `cc/api_cha`

54 commits
| Author | SHA1 | Date |
|---|---|---|
|  | 02dd572763 |  |
|  | 0fdbaf4a8d |  |
|  | 871bf5a841 |  |
|  | 8b7cffc363 |  |
|  | 58dd69f7f2 |  |
|  | dfbd12b384 |  |
|  | 38d30e285a |  |
|  | 89bcca3542 |  |
|  | cd563fb628 |  |
|  | 256bad3251 |  |
|  | 5fdbdd6bec |  |
|  | 221486687a |  |
|  | d6631919f4 |  |
|  | 235eb38d3e |  |
|  | 7dd6b32991 |  |
|  | 4b1b7959a2 |  |
|  | afee851645 |  |
|  | a73e2222d4 |  |
|  | e160b669c8 |  |
|  | d59c656ea5 |  |
|  | 2394807033 |  |
|  | acfce30017 |  |
|  | 8f3c052db1 |  |
|  | 29a3b3a711 |  |
|  | 20fe4deea0 |  |
|  | 3a55f4bfe9 |  |
|  | fea9ff3831 |  |
|  | b55f6105c6 |  |
|  | 4585eaef1b |  |
|  | f337f3ed36 |  |
|  | 22175738ac |  |
|  | 12c3454fd9 |  |
|  | e271965d1e |  |
|  | b9bea36dd4 |  |
|  | da06d4d7af |  |
|  | 5f73c836a6 |  |
|  | 597be7d501 |  |
|  | 379803751e |  |
|  | ad18afc3ec |  |
|  | 464a525a5a |  |
|  | 0f45ac4088 |  |
|  | ac41c97d21 |  |
|  | aaf788b7cb |  |
|  | 47ae06698f |  |
|  | 03881c6743 |  |
|  | 2d6b0bf3e3 |  |
|  | ee3955c68c |  |
|  | 325068bb53 |  |
|  | bff6ca78a2 |  |
|  | 6878bc39b5 |  |
|  | 55e66aa40c |  |
|  | 9b7db08184 |  |
|  | 8691a5a37f |  |
|  | 4919d5d6df |  |
.github/workflows/_release.yml (vendored, 1 change)

```diff
@@ -290,6 +290,7 @@ jobs:
           VOYAGE_API_KEY: ${{ secrets.VOYAGE_API_KEY }}
           UPSTAGE_API_KEY: ${{ secrets.UPSTAGE_API_KEY }}
           FIREWORKS_API_KEY: ${{ secrets.FIREWORKS_API_KEY }}
+          UNSTRUCTURED_API_KEY: ${{ secrets.UNSTRUCTURED_API_KEY }}
         run: make integration_tests
         working-directory: ${{ inputs.working-directory }}
```
```diff
@@ -38,6 +38,8 @@ generate-files:
 	$(PYTHON) scripts/model_feat_table.py $(INTERMEDIATE_DIR)
 	$(PYTHON) scripts/tool_feat_table.py $(INTERMEDIATE_DIR)
 	$(PYTHON) scripts/document_loader_feat_table.py $(INTERMEDIATE_DIR)
 	$(PYTHON) scripts/copy_templates.py $(INTERMEDIATE_DIR)
```
Binary file not shown.
```diff
@@ -267,9 +267,9 @@
 "We first instantiate a chat model that supports [tool calling](/docs/how_to/tool_calling/):\n",
 "\n",
 "```{=mdx}\n",
-"<ChatModelTabs\n",
-"  customVarName=\"llm\"\n",
-"/>\n",
+"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
+"\n",
+"<ChatModelTabs customVarName=\"llm\" />\n",
 "```"
 ]
 },

@@ -541,7 +541,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.11.9"
+"version": "3.10.4"
 }
 },
 "nbformat": 4,
```
```diff
@@ -270,7 +270,7 @@
 "source": [
 "### StructuredTool\n",
 "\n",
-"The `StrurcturedTool.from_function` class method provides a bit more configurability than the `@tool` decorator, without requiring much additional code."
+"The `StructuredTool.from_function` class method provides a bit more configurability than the `@tool` decorator, without requiring much additional code."
 ]
 },
 {
```
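For context on the hunk above: `StructuredTool.from_function` is the API whose docstring the typo fix touches. Below is a minimal sketch of how it is used, assuming `langchain-core` is installed; the `multiply` function is an illustrative stand-in, not taken from the diff.

```python
# Minimal sketch of StructuredTool.from_function (illustrative; not from the diff).
from langchain_core.tools import StructuredTool


def multiply(a: int, b: int) -> int:
    """Multiply two integers."""
    return a * b


# from_function makes explicit what the @tool decorator infers:
# the tool's name and description.
calculator = StructuredTool.from_function(
    func=multiply,
    name="multiply",
    description="Multiply two integers.",
)

print(calculator.invoke({"a": 6, "b": 7}))  # -> 42
```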
```diff
@@ -81,7 +81,6 @@ These are the core building blocks you can use when building applications.
 - [How to: stream a response back](/docs/how_to/chat_streaming)
 - [How to: track token usage](/docs/how_to/chat_token_usage_tracking)
 - [How to: track response metadata across providers](/docs/how_to/response_metadata)
 - [How to: let your end users choose their model](/docs/how_to/chat_models_universal_init/)
 - [How to: use chat model to call tools](/docs/how_to/tool_calling)
 - [How to: stream tool calls](/docs/how_to/tool_streaming)
 - [How to: few shot prompt tool behavior](/docs/how_to/tools_few_shot)
```
```diff
@@ -15,7 +15,23 @@
 },
 {
 "cell_type": "code",
-"execution_count": 1,
+"execution_count": null,
+"id": "25b0b0fa",
+"metadata": {},
+"outputs": [],
+"source": [
+"%pip install -qU langchain_openai langchain_community\n",
+"\n",
+"import os\n",
+"from getpass import getpass\n",
+"\n",
+"os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
+"# Please manually enter OpenAI Key"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 2,
 "id": "0aa6d335",
 "metadata": {},
 "outputs": [],

@@ -23,13 +39,14 @@
 "from langchain.globals import set_llm_cache\n",
 "from langchain_openai import OpenAI\n",
 "\n",
-"# To make the caching really obvious, lets use a slower model.\n",
-"llm = OpenAI(model_name=\"gpt-3.5-turbo-instruct\", n=2, best_of=2)"
+"# To make the caching really obvious, lets use a slower and older model.\n",
+"# Caching supports newer chat models as well.\n",
+"llm = OpenAI(model=\"gpt-3.5-turbo-instruct\", n=2, best_of=2)"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 12,
+"execution_count": 3,
 "id": "f168ff0d",
 "metadata": {},
 "outputs": [

@@ -37,17 +54,17 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"CPU times: user 13.7 ms, sys: 6.54 ms, total: 20.2 ms\n",
-"Wall time: 330 ms\n"
+"CPU times: user 546 ms, sys: 379 ms, total: 925 ms\n",
+"Wall time: 1.11 s\n"
 ]
 },
 {
 "data": {
 "text/plain": [
-"\"\\n\\nWhy couldn't the bicycle stand up by itself? Because it was two-tired!\""
+"\"\\nWhy don't scientists trust atoms?\\n\\nBecause they make up everything!\""
 ]
 },
-"execution_count": 12,
+"execution_count": 3,
 "metadata": {},
 "output_type": "execute_result"
 }

@@ -59,12 +76,12 @@
 "set_llm_cache(InMemoryCache())\n",
 "\n",
 "# The first time, it is not yet in cache, so it should take longer\n",
-"llm.predict(\"Tell me a joke\")"
+"llm.invoke(\"Tell me a joke\")"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 13,
+"execution_count": 4,
 "id": "ce7620fb",
 "metadata": {},
 "outputs": [

@@ -72,17 +89,17 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"CPU times: user 436 µs, sys: 921 µs, total: 1.36 ms\n",
-"Wall time: 1.36 ms\n"
+"CPU times: user 192 µs, sys: 77 µs, total: 269 µs\n",
+"Wall time: 270 µs\n"
 ]
 },
 {
 "data": {
 "text/plain": [
-"\"\\n\\nWhy couldn't the bicycle stand up by itself? Because it was two-tired!\""
+"\"\\nWhy don't scientists trust atoms?\\n\\nBecause they make up everything!\""
 ]
 },
-"execution_count": 13,
+"execution_count": 4,
 "metadata": {},
 "output_type": "execute_result"
 }

@@ -90,7 +107,7 @@
 "source": [
 "%%time\n",
 "# The second time it is, so it goes faster\n",
-"llm.predict(\"Tell me a joke\")"
+"llm.invoke(\"Tell me a joke\")"
 ]
 },
 {

@@ -103,7 +120,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 8,
+"execution_count": 5,
 "id": "2e65de83",
 "metadata": {},
 "outputs": [],

@@ -113,7 +130,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 9,
+"execution_count": 6,
 "id": "0be83715",
 "metadata": {},
 "outputs": [],

@@ -126,7 +143,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 10,
+"execution_count": 7,
 "id": "9b427ce7",
 "metadata": {},
 "outputs": [

@@ -134,17 +151,17 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"CPU times: user 29.3 ms, sys: 17.3 ms, total: 46.7 ms\n",
-"Wall time: 364 ms\n"
+"CPU times: user 10.6 ms, sys: 4.21 ms, total: 14.8 ms\n",
+"Wall time: 851 ms\n"
 ]
 },
 {
 "data": {
 "text/plain": [
-"'\\n\\nWhy did the tomato turn red?\\n\\nBecause it saw the salad dressing!'"
+"\"\\n\\nWhy don't scientists trust atoms?\\n\\nBecause they make up everything!\""
 ]
 },
-"execution_count": 10,
+"execution_count": 7,
 "metadata": {},
 "output_type": "execute_result"
 }

@@ -152,12 +169,12 @@
 "source": [
 "%%time\n",
 "# The first time, it is not yet in cache, so it should take longer\n",
-"llm.predict(\"Tell me a joke\")"
+"llm.invoke(\"Tell me a joke\")"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 11,
+"execution_count": 8,
 "id": "87f52611",
 "metadata": {},
 "outputs": [

@@ -165,17 +182,17 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"CPU times: user 4.58 ms, sys: 2.23 ms, total: 6.8 ms\n",
-"Wall time: 4.68 ms\n"
+"CPU times: user 59.7 ms, sys: 63.6 ms, total: 123 ms\n",
+"Wall time: 134 ms\n"
 ]
 },
 {
 "data": {
 "text/plain": [
-"'\\n\\nWhy did the tomato turn red?\\n\\nBecause it saw the salad dressing!'"
+"\"\\n\\nWhy don't scientists trust atoms?\\n\\nBecause they make up everything!\""
 ]
 },
-"execution_count": 11,
+"execution_count": 8,
 "metadata": {},
 "output_type": "execute_result"
 }

@@ -183,7 +200,7 @@
 "source": [
 "%%time\n",
 "# The second time it is, so it goes faster\n",
-"llm.predict(\"Tell me a joke\")"
+"llm.invoke(\"Tell me a joke\")"
 ]
 },
 {

@@ -211,7 +228,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.10.1"
+"version": "3.10.5"
 }
 },
 "nbformat": 4,
```
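Condensing the hunks above: the caching notebook now installs its dependencies explicitly and calls `llm.invoke` instead of the deprecated `llm.predict`. A runnable sketch of the flow the notebook demonstrates, assuming `langchain`, `langchain-openai`, and `langchain-community` are installed and `OPENAI_API_KEY` is set:

```python
# Sketch of the cached-LLM flow shown in the diff above (assumes OPENAI_API_KEY is set).
from langchain.globals import set_llm_cache
from langchain_community.cache import InMemoryCache, SQLiteCache
from langchain_openai import OpenAI

# A slower, older completion model makes the cache speedup obvious;
# caching works with newer chat models as well.
llm = OpenAI(model="gpt-3.5-turbo-instruct", n=2, best_of=2)

# In-memory cache: the first call hits the API, the repeat is served locally.
set_llm_cache(InMemoryCache())
llm.invoke("Tell me a joke")  # slow: not yet cached
llm.invoke("Tell me a joke")  # fast: answered from the cache

# SQLite cache: same behavior, but persisted to disk across sessions.
set_llm_cache(SQLiteCache(database_path=".langchain.db"))
llm.invoke("Tell me a joke")  # slow once per database
llm.invoke("Tell me a joke")  # fast thereafter
```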
```diff
@@ -284,17 +284,17 @@
 },
 {
 "cell_type": "code",
-"execution_count": 8,
+"execution_count": 1,
 "id": "173e1a9c-2a18-4669-b0de-136f39197786",
 "metadata": {},
 "outputs": [
 {
 "data": {
 "text/plain": [
-"\"Arr, matey! I be sailin' the high seas with me crew, searchin' for buried treasure and adventure! How be ye doin' on this fine day?\""
+"\"Arrr, I be doin' well, me heartie! Just sailin' the high seas in search of treasure and adventure. How be ye?\""
 ]
 },
-"execution_count": 8,
+"execution_count": 1,
 "metadata": {},
 "output_type": "execute_result"
 }

@@ -316,14 +316,20 @@
 "\n",
 "history = InMemoryChatMessageHistory()\n",
 "\n",
+"\n",
+"def get_history():\n",
+"    return history\n",
+"\n",
+"\n",
 "chain = prompt | ChatOpenAI() | StrOutputParser()\n",
 "\n",
-"wrapped_chain = RunnableWithMessageHistory(chain, lambda x: history)\n",
+"wrapped_chain = RunnableWithMessageHistory(\n",
+"    chain,\n",
+"    get_history,\n",
+"    history_messages_key=\"chat_history\",\n",
+")\n",
 "\n",
-"wrapped_chain.invoke(\n",
-"    {\"input\": \"how are you?\"},\n",
-"    config={\"configurable\": {\"session_id\": \"42\"}},\n",
-")"
+"wrapped_chain.invoke({\"input\": \"how are you?\"})"
 ]
 },
 {

@@ -340,17 +346,17 @@
 },
 {
 "cell_type": "code",
-"execution_count": 7,
+"execution_count": 2,
 "id": "4e05994f-1fbc-4699-bf2e-62cb0e4deeb8",
 "metadata": {},
 "outputs": [
 {
 "data": {
 "text/plain": [
-"AIMessage(content=\"Ahoy there! What be ye wantin' from this old pirate?\", response_metadata={'token_usage': {'completion_tokens': 15, 'prompt_tokens': 29, 'total_tokens': 44}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-1846d5f5-0dda-43b6-bb49-864e541f9c29-0', usage_metadata={'input_tokens': 29, 'output_tokens': 15, 'total_tokens': 44})"
+"'Ahoy matey! What can this old pirate do for ye today?'"
 ]
 },
-"execution_count": 7,
+"execution_count": 2,
 "metadata": {},
 "output_type": "execute_result"
 }

@@ -370,9 +376,16 @@
 "\n",
 "chain = prompt | ChatOpenAI() | StrOutputParser()\n",
 "\n",
-"wrapped_chain = RunnableWithMessageHistory(chain, get_session_history)\n",
+"wrapped_chain = RunnableWithMessageHistory(\n",
+"    chain,\n",
+"    get_session_history,\n",
+"    history_messages_key=\"chat_history\",\n",
+")\n",
 "\n",
-"wrapped_chain.invoke(\"Hello!\", config={\"configurable\": {\"session_id\": \"abc123\"}})"
+"wrapped_chain.invoke(\n",
+"    {\"input\": \"Hello!\"},\n",
+"    config={\"configurable\": {\"session_id\": \"abc123\"}},\n",
+")"
 ]
 },
 {

@@ -790,7 +803,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.10.5"
+"version": "3.10.4"
 }
 },
 "nbformat": 4,
```
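The two hunks above make the same refactor: `RunnableWithMessageHistory` is now constructed with a named history getter and an explicit `history_messages_key`. A self-contained sketch of the session-keyed variant follows; the prompt template is an assumed stand-in, since the diff does not show the notebook's actual prompt.

```python
# Minimal sketch of RunnableWithMessageHistory with a session-keyed store,
# assuming langchain-core and langchain-openai are installed.
from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_openai import ChatOpenAI

# Assumed prompt: the diff does not show the notebook's actual template.
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful assistant."),
        MessagesPlaceholder(variable_name="chat_history"),
        ("human", "{input}"),
    ]
)

store = {}  # maps session_id -> InMemoryChatMessageHistory


def get_session_history(session_id: str) -> InMemoryChatMessageHistory:
    if session_id not in store:
        store[session_id] = InMemoryChatMessageHistory()
    return store[session_id]


chain = prompt | ChatOpenAI() | StrOutputParser()

wrapped_chain = RunnableWithMessageHistory(
    chain,
    get_session_history,
    input_messages_key="input",
    history_messages_key="chat_history",
)

# The session_id in the config selects which history the wrapper reads/writes.
wrapped_chain.invoke(
    {"input": "Hello!"},
    config={"configurable": {"session_id": "abc123"}},
)
```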
```diff
@@ -302,9 +302,6 @@
 "\n",
 "NVIDIA also supports multimodal inputs, meaning you can provide both images and text for the model to reason over. An example model supporting multimodal inputs is `nvidia/neva-22b`.\n",
 "\n",
-"\n",
-"These models accept LangChain's standard image formats, and accept `labels`, similar to the Steering LLMs above. In addition to `creativity`, `complexity`, and `verbosity`, these models support a `quality` toggle.\n",
-"\n",
 "Below is an example use:"
 ]
 },

@@ -447,92 +444,6 @@
 "llm.invoke(f'What\\'s in this image?\\n<img src=\"{base64_with_mime_type}\" />')"
 ]
 },
-{
-"cell_type": "markdown",
-"id": "3e61d868",
-"metadata": {},
-"source": [
-"#### **Advanced Use Case:** Forcing Payload \n",
-"\n",
-"You may notice that some newer models may have strong parameter expectations that the LangChain connector may not support by default. For example, we cannot invoke the [Kosmos](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/ai-foundation/models/kosmos-2) model at the time of this notebook's latest release due to the lack of a streaming argument on the server side: "
-]
-},
-{
-"cell_type": "code",
-"execution_count": null,
-"id": "d143e0d6",
-"metadata": {},
-"outputs": [],
-"source": [
-"from langchain_nvidia_ai_endpoints import ChatNVIDIA\n",
-"\n",
-"kosmos = ChatNVIDIA(model=\"microsoft/kosmos-2\")\n",
-"\n",
-"from langchain_core.messages import HumanMessage\n",
-"\n",
-"# kosmos.invoke(\n",
-"#     [\n",
-"#         HumanMessage(\n",
-"#             content=[\n",
-"#                 {\"type\": \"text\", \"text\": \"Describe this image:\"},\n",
-"#                 {\"type\": \"image_url\", \"image_url\": {\"url\": image_url}},\n",
-"#             ]\n",
-"#         )\n",
-"#     ]\n",
-"# )\n",
-"\n",
-"# Exception: [422] Unprocessable Entity\n",
-"# body -> stream\n",
-"#   Extra inputs are not permitted (type=extra_forbidden)\n",
-"# RequestID: 35538c9a-4b45-4616-8b75-7ef816fccf38"
-]
-},
-{
-"cell_type": "markdown",
-"id": "1e230b70",
-"metadata": {},
-"source": [
-"For a simple use case like this, we can actually try to force the payload argument of our underlying client by specifying the `payload_fn` function as follows: "
-]
-},
-{
-"cell_type": "code",
-"execution_count": null,
-"id": "0925b2b1",
-"metadata": {},
-"outputs": [],
-"source": [
-"def drop_streaming_key(d):\n",
-"    \"\"\"Takes in payload dictionary, outputs new payload dictionary\"\"\"\n",
-"    if \"stream\" in d:\n",
-"        d.pop(\"stream\")\n",
-"    return d\n",
-"\n",
-"\n",
-"## Override the payload passthrough. Default is to pass through the payload as is.\n",
-"kosmos = ChatNVIDIA(model=\"microsoft/kosmos-2\")\n",
-"kosmos.client.payload_fn = drop_streaming_key\n",
-"\n",
-"kosmos.invoke(\n",
-"    [\n",
-"        HumanMessage(\n",
-"            content=[\n",
-"                {\"type\": \"text\", \"text\": \"Describe this image:\"},\n",
-"                {\"type\": \"image_url\", \"image_url\": {\"url\": image_url}},\n",
-"            ]\n",
-"        )\n",
-"    ]\n",
-")"
-]
-},
-{
-"cell_type": "markdown",
-"id": "fe6e1758",
-"metadata": {},
-"source": [
-"For more advanced or custom use-cases (i.e. supporting the diffusion models), you may be interested in leveraging the `NVEModel` client as a requests backbone. The `NVIDIAEmbeddings` class is a good source of inspiration for this. "
-]
-},
 {
 "cell_type": "markdown",
 "id": "137662a6",

@@ -540,7 +451,7 @@
 "id": "137662a6"
 },
 "source": [
-"## Example usage within RunnableWithMessageHistory "
+"## Example usage within a RunnableWithMessageHistory"
 ]
 },
 {

@@ -630,14 +541,14 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"id": "uHIMZxVSVNBC",
+"id": "LyD1xVKmVSs4",
 "metadata": {
 "colab": {
 "base_uri": "https://localhost:8080/",
-"height": 284
+"height": 350
 },
-"id": "uHIMZxVSVNBC",
-"outputId": "79acc89d-a820-4f2c-bac2-afe99da95580"
+"id": "LyD1xVKmVSs4",
+"outputId": "a1714513-a8fd-4d14-f974-233e39d5c4f5"
 },
 "outputs": [],
 "source": [

@@ -646,6 +557,79 @@
 "    config=config,\n",
 ")"
 ]
 },
+{
+"cell_type": "markdown",
+"id": "f3cbbba0",
+"metadata": {},
+"source": [
+"## Tool calling\n",
+"\n",
+"Starting in v0.2, `ChatNVIDIA` supports [bind_tools](https://api.python.langchain.com/en/latest/language_models/langchain_core.language_models.chat_models.BaseChatModel.html#langchain_core.language_models.chat_models.BaseChatModel.bind_tools).\n",
+"\n",
+"`ChatNVIDIA` provides integration with the variety of models on [build.nvidia.com](https://build.nvidia.com) as well as local NIMs. Not all these models are trained for tool calling. Be sure to select a model that does have tool calling for your experimention and applications."
+]
+},
+{
+"cell_type": "markdown",
+"id": "6f7b535e",
+"metadata": {},
+"source": [
+"You can get a list of models that are known to support tool calling with,"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"id": "e36c8911",
+"metadata": {},
+"outputs": [],
+"source": [
+"tool_models = [\n",
+"    model for model in ChatNVIDIA.get_available_models() if model.supports_tools\n",
+"]\n",
+"tool_models"
+]
+},
+{
+"cell_type": "markdown",
+"id": "b01d75a7",
+"metadata": {},
+"source": [
+"With a tool capable model,"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"id": "bd54f174",
+"metadata": {},
+"outputs": [],
+"source": [
+"from langchain_core.pydantic_v1 import Field\n",
+"from langchain_core.tools import tool\n",
+"\n",
+"\n",
+"@tool\n",
+"def get_current_weather(\n",
+"    location: str = Field(..., description=\"The location to get the weather for.\"),\n",
+"):\n",
+"    \"\"\"Get the current weather for a location.\"\"\"\n",
+"    ...\n",
+"\n",
+"\n",
+"llm = ChatNVIDIA(model=tool_models[0].id).bind_tools(tools=[get_current_weather])\n",
+"response = llm.invoke(\"What is the weather in Boston?\")\n",
+"response.tool_calls"
+]
+},
+{
+"cell_type": "markdown",
+"id": "e08df68c",
+"metadata": {},
+"source": [
+"See [How to use chat models to call tools](https://python.langchain.com/v0.2/docs/how_to/tool_calling/) for additional examples."
+]
+}
 ],
 "metadata": {

@@ -667,7 +651,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.10.2"
+"version": "3.10.13"
 }
 },
 "nbformat": 4,
```
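The tool-calling cells added above compress to the following flow. This sketch restates the diff's own code with a stub tool body; it assumes `langchain-nvidia-ai-endpoints` v0.2+ and an `NVIDIA_API_KEY` in the environment.

```python
# Sketch of the ChatNVIDIA tool-calling flow added in the diff above.
from langchain_core.tools import tool
from langchain_nvidia_ai_endpoints import ChatNVIDIA


@tool
def get_current_weather(location: str) -> str:
    """Get the current weather for a location."""
    return f"Sunny in {location}"  # stub body for illustration


# Only some catalog models are trained for tool calling; filter for them.
tool_models = [m for m in ChatNVIDIA.get_available_models() if m.supports_tools]

llm = ChatNVIDIA(model=tool_models[0].id).bind_tools(tools=[get_current_weather])
response = llm.invoke("What is the weather in Boston?")
print(response.tool_calls)  # structured call(s) targeting get_current_weather
```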
```diff
@@ -35,7 +35,7 @@
 "### Model features\n",
 "| [Tool calling](/docs/how_to/tool_calling/) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
 "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
-"| ❌ | ❌ | ✅ | ❌ | ❌ | ❌ | ✅ | ✅ | ❌ | ❌ | \n",
+"| ✅ | ❌ | ✅ | ❌ | ❌ | ❌ | ✅ | ✅ | ❌ | ❌ | \n",
 "\n",
 "## Setup\n",
 "\n",
```
```diff
@@ -82,9 +82,9 @@
 "outputs": [],
 "source": [
 "# By default it will use the model which was deployed through the platform\n",
-"# in my case it will is \"claude-3-haiku\"\n",
+"# in my case it will is \"gpt-4o\"\n",
 "\n",
-"chat = ChatPremAI(project_id=8)"
+"chat = ChatPremAI(project_id=1234, model_name=\"gpt-4o\")"
 ]
 },
 {

@@ -107,7 +107,7 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"I am an artificial intelligence created by Anthropic. I'm here to help with a wide variety of tasks, from research and analysis to creative projects and open-ended conversation. I have general knowledge and capabilities, but I'm not a real person - I'm an AI assistant. Please let me know if you have any other questions!\n"
+"I am an AI language model created by OpenAI, designed to assist with answering questions and providing information based on the context provided. How can I help you today?\n"
 ]
 }
 ],

@@ -133,7 +133,7 @@
 {
 "data": {
 "text/plain": [
-"AIMessage(content=\"I am an artificial intelligence created by Anthropic. My purpose is to assist and converse with humans in a friendly and helpful way. I have a broad knowledge base that I can use to provide information, answer questions, and engage in discussions on a wide range of topics. Please let me know if you have any other questions - I'm here to help!\")"
+"AIMessage(content=\"I'm your friendly assistant! How can I help you today?\", response_metadata={'document_chunks': [{'repository_id': 1985, 'document_id': 1306, 'chunk_id': 173899, 'document_name': '[D] Difference between sparse and dense informati…', 'similarity_score': 0.3209080100059509, 'content': \"with the difference or anywhere\\nwhere I can read about it?\\n\\n\\n 17 9\\n\\n\\n u/ScotiabankCanada • Promoted\\n\\n\\n Accelerate your study permit process\\n with Scotiabank's Student GIC\\n Program. We're here to help you tur…\\n\\n\\n startright.scotiabank.com Learn More\\n\\n\\n Add a Comment\\n\\n\\nSort by: Best\\n\\n\\n DinosParkour • 1y ago\\n\\n\\n Dense Retrieval (DR) m\"}]}, id='run-510bbd0e-3f8f-4095-9b1f-c2d29fd89719-0')"
 ]
 },
 "execution_count": 5,

@@ -160,10 +160,18 @@
 "execution_count": 6,
 "metadata": {},
 "outputs": [
+{
+"name": "stderr",
+"output_type": "stream",
+"text": [
+"/home/anindya/prem/langchain/libs/community/langchain_community/chat_models/premai.py:355: UserWarning: WARNING: Parameter top_p is not supported in kwargs.\n",
+"  warnings.warn(f\"WARNING: Parameter {key} is not supported in kwargs.\")\n"
+]
+},
 {
 "data": {
 "text/plain": [
-"AIMessage(content='I am an artificial intelligence created by Anthropic')"
+"AIMessage(content=\"Hello! I'm your friendly assistant. How can I\", response_metadata={'document_chunks': [{'repository_id': 1985, 'document_id': 1306, 'chunk_id': 173899, 'document_name': '[D] Difference between sparse and dense informati…', 'similarity_score': 0.3209080100059509, 'content': \"with the difference or anywhere\\nwhere I can read about it?\\n\\n\\n 17 9\\n\\n\\n u/ScotiabankCanada • Promoted\\n\\n\\n Accelerate your study permit process\\n with Scotiabank's Student GIC\\n Program. We're here to help you tur…\\n\\n\\n startright.scotiabank.com Learn More\\n\\n\\n Add a Comment\\n\\n\\nSort by: Best\\n\\n\\n DinosParkour • 1y ago\\n\\n\\n Dense Retrieval (DR) m\"}]}, id='run-c4b06b98-4161-4cca-8495-fd2fc98fa8f8-0')"
 ]
 },
 "execution_count": 6,

@@ -195,13 +203,13 @@
 },
 {
 "cell_type": "code",
-"execution_count": null,
+"execution_count": 7,
 "metadata": {},
 "outputs": [],
 "source": [
-"query = \"what is the diameter of individual Galaxy\"\n",
+"query = \"Which models are used for dense retrieval\"\n",
 "repository_ids = [\n",
 "    1991,\n",
 "    1985,\n",
 "]\n",
 "repositories = dict(ids=repository_ids, similarity_threshold=0.3, limit=3)"
 ]

@@ -219,9 +227,34 @@
 },
 {
 "cell_type": "code",
-"execution_count": null,
+"execution_count": 8,
 "metadata": {},
-"outputs": [],
+"outputs": [
+{
+"name": "stdout",
+"output_type": "stream",
+"text": [
+"Dense retrieval models typically include:\n",
+"\n",
+"1. **BERT-based Models**: Such as DPR (Dense Passage Retrieval) which uses BERT for encoding queries and passages.\n",
+"2. **ColBERT**: A model that combines BERT with late interaction mechanisms.\n",
+"3. **ANCE (Approximate Nearest Neighbor Negative Contrastive Estimation)**: Uses BERT and focuses on efficient retrieval.\n",
+"4. **TCT-ColBERT**: A variant of ColBERT that uses a two-tower\n",
+"{\n",
+"    \"document_chunks\": [\n",
+"        {\n",
+"            \"repository_id\": 1985,\n",
+"            \"document_id\": 1306,\n",
+"            \"chunk_id\": 173899,\n",
+"            \"document_name\": \"[D] Difference between sparse and dense informati\\u2026\",\n",
+"            \"similarity_score\": 0.3209080100059509,\n",
+"            \"content\": \"with the difference or anywhere\\nwhere I can read about it?\\n\\n\\n 17 9\\n\\n\\n u/ScotiabankCanada \\u2022 Promoted\\n\\n\\n Accelerate your study permit process\\n with Scotiabank's Student GIC\\n Program. We're here to help you tur\\u2026\\n\\n\\n startright.scotiabank.com Learn More\\n\\n\\n Add a Comment\\n\\n\\nSort by: Best\\n\\n\\n DinosParkour \\u2022 1y ago\\n\\n\\n Dense Retrieval (DR) m\"\n",
+"        }\n",
+"    ]\n",
+"}\n"
+]
+}
+],
 "source": [
 "import json\n",
 "\n",

@@ -262,7 +295,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": null,
+"execution_count": 9,
 "metadata": {},
 "outputs": [],
 "source": [

@@ -288,7 +321,7 @@
 "outputs": [],
 "source": [
 "template_id = \"78069ce8-xxxxx-xxxxx-xxxx-xxx\"\n",
-"response = chat.invoke([human_message], template_id=template_id)\n",
+"response = chat.invoke([human_messages], template_id=template_id)\n",
 "print(response.content)"
 ]
 },

@@ -310,14 +343,14 @@
 },
 {
 "cell_type": "code",
-"execution_count": 7,
+"execution_count": 17,
 "metadata": {},
 "outputs": [
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"Hello! As an AI language model, I don't have feelings or a physical state, but I'm functioning properly and ready to assist you with any questions or tasks you might have. How can I help you today?"
+"It looks like your message got cut off. If you need information about Dense Retrieval (DR) or any other topic, please provide more details or clarify your question."
 ]
 }
 ],

@@ -338,14 +371,14 @@
 },
 {
 "cell_type": "code",
-"execution_count": 8,
+"execution_count": 18,
 "metadata": {},
 "outputs": [
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"Hello! As an AI language model, I don't have feelings or a physical form, but I'm functioning properly and ready to assist you. How can I help you today?"
+"Woof! 🐾 How can I help you today? Want to play fetch or maybe go for a walk 🐶🦴"
 ]
 }
 ],

@@ -365,6 +398,275 @@
 "    sys.stdout.write(chunk.content)\n",
 "    sys.stdout.flush()"
 ]
 },
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"### Tool/Function Calling\n",
+"\n",
+"LangChain PremAI supports tool/function calling. Tool/function calling allows a model to respond to a given prompt by generating output that matches a user-defined schema. \n",
+"\n",
+"- You can learn all about tool calling in details [in our documentation here](https://docs.premai.io/get-started/function-calling).\n",
+"- You can learn more about langchain tool calling in [this part of the docs](https://python.langchain.com/v0.1/docs/modules/model_io/chat/function_calling).\n",
+"\n",
+"**NOTE:**\n",
+"The current version of LangChain ChatPremAI do not support function/tool calling with streaming support. Streaming support along with function calling will come soon. \n",
+"\n",
+"#### Passing tools to model\n",
+"\n",
+"In order to pass tools and let the LLM choose the tool it needs to call, we need to pass a tool schema. A tool schema is the function definition along with proper docstring on what does the function do, what each argument of the function is etc. Below are some simple arithmetic functions with their schema. \n",
+"\n",
+"**NOTE:** When defining function/tool schema, do not forget to add information around the function arguments, otherwise it would throw error."
+]
+},
+{
+"cell_type": "code",
+"execution_count": 19,
+"metadata": {},
+"outputs": [],
+"source": [
+"from langchain_core.pydantic_v1 import BaseModel, Field\n",
+"from langchain_core.tools import tool\n",
+"\n",
+"\n",
+"# Define the schema for function arguments\n",
+"class OperationInput(BaseModel):\n",
+"    a: int = Field(description=\"First number\")\n",
+"    b: int = Field(description=\"Second number\")\n",
+"\n",
+"\n",
+"# Now define the function where schema for argument will be OperationInput\n",
+"@tool(\"add\", args_schema=OperationInput, return_direct=True)\n",
+"def add(a: int, b: int) -> int:\n",
+"    \"\"\"Adds a and b.\n",
+"\n",
+"    Args:\n",
+"        a: first int\n",
+"        b: second int\n",
+"    \"\"\"\n",
+"    return a + b\n",
+"\n",
+"\n",
+"@tool(\"multiply\", args_schema=OperationInput, return_direct=True)\n",
+"def multiply(a: int, b: int) -> int:\n",
+"    \"\"\"Multiplies a and b.\n",
+"\n",
+"    Args:\n",
+"        a: first int\n",
+"        b: second int\n",
+"    \"\"\"\n",
+"    return a * b"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"#### Binding tool schemas with our LLM\n",
+"\n",
+"We will now use the `bind_tools` method to convert our above functions to a \"tool\" and binding it with the model. This means we are going to pass these tool informations everytime we invoke the model. "
+]
+},
+{
+"cell_type": "code",
+"execution_count": 20,
+"metadata": {},
+"outputs": [],
+"source": [
+"tools = [add, multiply]\n",
+"llm_with_tools = chat.bind_tools(tools)"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"After this, we get the response from the model which is now binded with the tools. "
+]
+},
+{
+"cell_type": "code",
+"execution_count": 21,
+"metadata": {},
+"outputs": [],
+"source": [
+"query = \"What is 3 * 12? Also, what is 11 + 49?\"\n",
+"\n",
+"messages = [HumanMessage(query)]\n",
+"ai_msg = llm_with_tools.invoke(messages)"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"As we can see, when our chat model is binded with tools, then based on the given prompt, it calls the correct set of the tools and sequentially. "
+]
+},
+{
+"cell_type": "code",
+"execution_count": 27,
+"metadata": {},
+"outputs": [
+{
+"data": {
+"text/plain": [
+"[{'name': 'multiply',\n",
+"  'args': {'a': 3, 'b': 12},\n",
+"  'id': 'call_A9FL20u12lz6TpOLaiS6rFa8'},\n",
+" {'name': 'add',\n",
+"  'args': {'a': 11, 'b': 49},\n",
+"  'id': 'call_MPKYGLHbf39csJIyb5BZ9xIk'}]"
+]
+},
+"execution_count": 27,
+"metadata": {},
+"output_type": "execute_result"
+}
+],
+"source": [
+"ai_msg.tool_calls"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"We append this message shown above to the LLM which acts as a context and makes the LLM aware that what all functions it has called. "
+]
+},
+{
+"cell_type": "code",
+"execution_count": 23,
+"metadata": {},
+"outputs": [],
+"source": [
+"messages.append(ai_msg)"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"Since tool calling happens into two phases, where:\n",
+"\n",
+"1. in our first call, we gathered all the tools that the LLM decided to tool, so that it can get the result as an added context to give more accurate and hallucination free result. \n",
+"\n",
+"2. in our second call, we will parse those set of tools decided by LLM and run them (in our case it will be the functions we defined, with the LLM's extracted arguments) and pass this result to the LLM"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 24,
+"metadata": {},
+"outputs": [],
+"source": [
+"from langchain_core.messages import ToolMessage\n",
+"\n",
+"for tool_call in ai_msg.tool_calls:\n",
+"    selected_tool = {\"add\": add, \"multiply\": multiply}[tool_call[\"name\"].lower()]\n",
+"    tool_output = selected_tool.invoke(tool_call[\"args\"])\n",
+"    messages.append(ToolMessage(tool_output, tool_call_id=tool_call[\"id\"]))"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"Finally, we call the LLM (binded with the tools) with the function response added in it's context. "
+]
+},
+{
+"cell_type": "code",
+"execution_count": 28,
+"metadata": {},
+"outputs": [
+{
+"name": "stdout",
+"output_type": "stream",
+"text": [
+"The final answers are:\n",
+"\n",
+"- 3 * 12 = 36\n",
+"- 11 + 49 = 60\n"
+]
+}
+],
+"source": [
+"response = llm_with_tools.invoke(messages)\n",
+"print(response.content)"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"### Defining tool schemas: Pydantic class\n",
+"\n",
+"Above we have shown how to define schema using `tool` decorator, however we can equivalently define the schema using Pydantic. Pydantic is useful when your tool inputs are more complex:"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 29,
+"metadata": {},
+"outputs": [],
+"source": [
+"from langchain_core.output_parsers.openai_tools import PydanticToolsParser\n",
+"\n",
+"\n",
+"class add(BaseModel):\n",
+"    \"\"\"Add two integers together.\"\"\"\n",
+"\n",
+"    a: int = Field(..., description=\"First integer\")\n",
+"    b: int = Field(..., description=\"Second integer\")\n",
+"\n",
+"\n",
+"class multiply(BaseModel):\n",
+"    \"\"\"Multiply two integers together.\"\"\"\n",
+"\n",
+"    a: int = Field(..., description=\"First integer\")\n",
+"    b: int = Field(..., description=\"Second integer\")\n",
+"\n",
+"\n",
+"tools = [add, multiply]"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"Now, we can bind them to chat models and directly get the result:"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 30,
+"metadata": {},
+"outputs": [
+{
+"data": {
+"text/plain": [
+"[multiply(a=3, b=12), add(a=11, b=49)]"
+]
+},
+"execution_count": 30,
+"metadata": {},
+"output_type": "execute_result"
+}
+],
+"source": [
+"chain = llm_with_tools | PydanticToolsParser(tools=[multiply, add])\n",
+"chain.invoke(query)"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"Now, as done above, we parse this and run this functions and call the LLM once again to get the result."
+]
+}
 ],
 "metadata": {

@@ -383,7 +685,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.11.7"
+"version": "3.9.19"
 }
 },
 "nbformat": 4,
```
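The long addition above walks through PremAI's two-phase tool-calling loop. Its generic shape, extracted for reference; `chat`, `add`, and `multiply` are assumed to be defined exactly as in the added notebook cells.

```python
# Generic two-phase tool-calling loop, as walked through in the diff above.
# Assumes `chat` (a ChatPremAI instance) and the `add`/`multiply` tools are
# already defined as in the added cells.
from langchain_core.messages import HumanMessage, ToolMessage

llm_with_tools = chat.bind_tools([add, multiply])
messages = [HumanMessage("What is 3 * 12? Also, what is 11 + 49?")]

# Phase 1: the model chooses which tools to call and with what arguments.
ai_msg = llm_with_tools.invoke(messages)
messages.append(ai_msg)

# Phase 2: run each requested tool locally and feed the results back as context.
for tool_call in ai_msg.tool_calls:
    selected_tool = {"add": add, "multiply": multiply}[tool_call["name"].lower()]
    tool_output = selected_tool.invoke(tool_call["args"])
    messages.append(ToolMessage(tool_output, tool_call_id=tool_call["id"]))

# Final call: the model answers with the tool results in its context.
print(llm_with_tools.invoke(messages).content)
```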
File diff suppressed because one or more lines are too long

188  docs/docs/integrations/document_loaders/scrapingant.ipynb  (new file)
File diff suppressed because one or more lines are too long
@@ -5,7 +5,7 @@
|
||||
"id": "20deed05",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Unstructured File\n",
|
||||
"# Unstructured\n",
|
||||
"\n",
|
||||
"This notebook covers how to use `Unstructured` package to load files of many types. `Unstructured` currently supports loading of text files, powerpoints, html, pdfs, images, and more.\n",
|
||||
"\n",
|
||||
@@ -14,79 +14,69 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": null,
|
||||
"id": "2886982e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m24.0\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.1.1\u001b[0m\n",
|
||||
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n",
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# # Install package\n",
|
||||
"%pip install --upgrade --quiet \"unstructured[all-docs]\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "54d62efd",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# # Install other dependencies\n",
|
||||
"# # https://github.com/Unstructured-IO/unstructured/blob/main/docs/source/installing.rst\n",
|
||||
"# !brew install libmagic\n",
|
||||
"# !brew install poppler\n",
|
||||
"# !brew install tesseract\n",
|
||||
"# # If parsing xml / html documents:\n",
|
||||
"# !brew install libxml2\n",
|
||||
"# !brew install libxslt"
|
||||
"# Install package, compatible with API partitioning\n",
|
||||
"%pip install --upgrade --quiet \"langchain-unstructured\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "af6a64f5",
|
||||
"cell_type": "markdown",
|
||||
"id": "e75e2a6d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# import nltk\n",
|
||||
"# nltk.download('punkt')"
|
||||
"### Local Partitioning (Optional)\n",
|
||||
"\n",
|
||||
"By default, `langchain-unstructured` installs a smaller footprint that requires\n",
|
||||
"offloading of the partitioning logic to the Unstructured API.\n",
|
||||
"\n",
|
||||
"If you would like to run the partitioning logic locally, you will need to install\n",
|
||||
"a combination of system dependencies, as outlined in the \n",
|
||||
"[Unstructured documentation here](https://docs.unstructured.io/open-source/installation/full-installation).\n",
|
||||
"\n",
|
||||
"For example, on Macs you can install the required dependencies with:\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"# base dependencies\n",
|
||||
"brew install libmagic poppler tesseract\n",
|
||||
"\n",
|
||||
"# If parsing xml / html documents:\n",
|
||||
"brew install libxml2 libxslt\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"You can install the required `pip` dependencies with:\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"pip install \"langchain-unstructured[local]\"\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a9c1c775",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Quickstart\n",
|
||||
"\n",
|
||||
"To simply load a file as a document, you can use the LangChain `DocumentLoader.load` \n",
|
||||
"interface:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": null,
|
||||
"id": "79d3e549",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans.\\n\\nLast year COVID-19 kept us apart. This year we are finally together again.\\n\\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans.\\n\\nWith a duty to one another to the American people to the Constit'"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import UnstructuredFileLoader\n",
|
||||
"from langchain_unstructured import UnstructuredLoader\n",
|
||||
"\n",
|
||||
"loader = UnstructuredFileLoader(\"./example_data/state_of_the_union.txt\")\n",
|
||||
"loader = UnstructuredLoader(\"./example_data/state_of_the_union.txt\")\n",
|
||||
"\n",
|
||||
"docs = loader.load()\n",
|
||||
"\n",
|
||||
"docs[0].page_content[:400]"
|
||||
"docs = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -99,113 +89,31 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 5,
|
||||
"id": "092d9a0b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'1/22/23, 6:30 PM - User 1: Hi! Im interested in your bag. Im offering $50. Let me know if you are interested. Thanks!\\n\\n1/22/23, 8:24 PM - User 2: Goodmorning! $50 is too low.\\n\\n1/23/23, 2:59 AM - User 1: How much do you want?\\n\\n1/23/23, 3:00 AM - User 2: Online is at least $100\\n\\n1/23/23, 3:01 AM - User 2: Here is $129\\n\\n1/23/23, 3:01 AM - User 2: <Media omitted>\\n\\n1/23/23, 3:01 AM - User 1: Im not int'"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"whatsapp_chat.txt : 1/22/23, 6:30 PM - User 1: Hi! Im interested in your bag. Im offering $50. Let me know if you are in\n",
|
||||
"state_of_the_union.txt : May God bless you all. May God protect our troops.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"files = [\"./example_data/whatsapp_chat.txt\", \"./example_data/layout-parser-paper.pdf\"]\n",
|
||||
"file_paths = [\n",
|
||||
" \"./example_data/whatsapp_chat.txt\",\n",
|
||||
" \"./example_data/state_of_the_union.txt\",\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"loader = UnstructuredFileLoader(files)\n",
|
||||
"loader = UnstructuredLoader(file_paths)\n",
|
||||
"\n",
|
||||
"docs = loader.load()\n",
|
||||
"\n",
|
||||
"docs[0].page_content[:400]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7874d01d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Retain Elements\n",
|
||||
"\n",
|
||||
"Under the hood, Unstructured creates different \"elements\" for different chunks of text. By default we combine those together, but you can easily keep that separation by specifying `mode=\"elements\"`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "ff5b616d",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans.', metadata={'source': './example_data/state_of_the_union.txt', 'file_directory': './example_data', 'filename': 'state_of_the_union.txt', 'last_modified': '2024-07-01T11:18:22', 'languages': ['eng'], 'filetype': 'text/plain', 'category': 'NarrativeText'}),\n",
|
||||
" Document(page_content='Last year COVID-19 kept us apart. This year we are finally together again.', metadata={'source': './example_data/state_of_the_union.txt', 'file_directory': './example_data', 'filename': 'state_of_the_union.txt', 'last_modified': '2024-07-01T11:18:22', 'languages': ['eng'], 'filetype': 'text/plain', 'category': 'NarrativeText'}),\n",
|
||||
" Document(page_content='Tonight, we meet as Democrats Republicans and Independents. But most importantly as Americans.', metadata={'source': './example_data/state_of_the_union.txt', 'file_directory': './example_data', 'filename': 'state_of_the_union.txt', 'last_modified': '2024-07-01T11:18:22', 'languages': ['eng'], 'filetype': 'text/plain', 'category': 'NarrativeText'}),\n",
|
||||
" Document(page_content='With a duty to one another to the American people to the Constitution.', metadata={'source': './example_data/state_of_the_union.txt', 'file_directory': './example_data', 'filename': 'state_of_the_union.txt', 'last_modified': '2024-07-01T11:18:22', 'languages': ['eng'], 'filetype': 'text/plain', 'category': 'UncategorizedText'}),\n",
|
||||
" Document(page_content='And with an unwavering resolve that freedom will always triumph over tyranny.', metadata={'source': './example_data/state_of_the_union.txt', 'file_directory': './example_data', 'filename': 'state_of_the_union.txt', 'last_modified': '2024-07-01T11:18:22', 'languages': ['eng'], 'filetype': 'text/plain', 'category': 'NarrativeText'})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"loader = UnstructuredFileLoader(\n",
|
||||
" \"./example_data/state_of_the_union.txt\", mode=\"elements\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"docs = loader.load()\n",
|
||||
"\n",
|
||||
"docs[:5]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "672733fd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Define a Partitioning Strategy\n",
|
||||
"\n",
|
||||
"Unstructured document loader allow users to pass in a `strategy` parameter that lets `unstructured` know how to partition the document. Currently supported strategies are `\"hi_res\"` (the default) and `\"fast\"`. Hi res partitioning strategies are more accurate, but take longer to process. Fast strategies partition the document more quickly, but trade-off accuracy. Not all document types have separate hi res and fast partitioning strategies. For those document types, the `strategy` kwarg is ignored. In some cases, the high res strategy will fallback to fast if there is a dependency missing (i.e. a model for document partitioning). You can see how to apply a strategy to an `UnstructuredFileLoader` below."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "767238a4",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='2 v 8 4 3 5 1 . 3 0 1 2 : v i X r a', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((16.34, 393.9), (16.34, 560.0), (36.34, 560.0), (36.34, 393.9)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'parent_id': '89565df026a24279aaea20dc08cedbec', 'filetype': 'application/pdf', 'category': 'UncategorizedText'}),\n",
|
||||
" Document(page_content='LayoutParser: A Unified Toolkit for Deep Learning Based Document Image Analysis', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((157.62199999999999, 114.23496279999995), (157.62199999999999, 146.5141628), (457.7358962799999, 146.5141628), (457.7358962799999, 114.23496279999995)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'filetype': 'application/pdf', 'category': 'Title'}),\n",
|
||||
" Document(page_content='Zejiang Shen1 ((cid:0)), Ruochen Zhang2, Melissa Dell3, Benjamin Charles Germain Lee4, Jacob Carlson3, and Weining Li5', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((134.809, 168.64029940800003), (134.809, 192.2517444), (480.5464199080001, 192.2517444), (480.5464199080001, 168.64029940800003)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'UncategorizedText'}),\n",
|
||||
" Document(page_content='1 Allen Institute for AI shannons@allenai.org 2 Brown University ruochen zhang@brown.edu 3 Harvard University {melissadell,jacob carlson}@fas.harvard.edu 4 University of Washington bcgl@cs.washington.edu 5 University of Waterloo w422li@uwaterloo.ca', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((207.23000000000002, 202.57205439999996), (207.23000000000002, 311.8195408), (408.12676, 311.8195408), (408.12676, 202.57205439999996)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'UncategorizedText'}),\n",
|
||||
" Document(page_content='Abstract. Recent advances in document image analysis (DIA) have been primarily driven by the application of neural networks. Ideally, research outcomes could be easily deployed in production and extended for further investigation. However, various factors like loosely organized codebases and sophisticated model configurations complicate the easy reuse of im- portant innovations by a wide audience. Though there have been on-going efforts to improve reusability and simplify deep learning (DL) model development in disciplines like natural language processing and computer vision, none of them are optimized for challenges in the domain of DIA. This represents a major gap in the existing toolkit, as DIA is central to academic research across a wide range of disciplines in the social sciences and humanities. This paper introduces LayoutParser, an open-source library for streamlining the usage of DL in DIA research and applica- tions. The core LayoutParser library comes with a set of simple and intuitive interfaces for applying and customizing DL models for layout de- tection, character recognition, and many other document processing tasks. To promote extensibility, LayoutParser also incorporates a community platform for sharing both pre-trained models and full document digiti- zation pipelines. We demonstrate that LayoutParser is helpful for both lightweight and large-scale digitization pipelines in real-word use cases. The library is publicly available at https://layout-parser.github.io.', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((162.779, 338.45008160000003), (162.779, 566.8455408), (454.0372021523199, 566.8455408), (454.0372021523199, 338.45008160000003)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'links': [{'text': ':// layout - parser . github . io', 'url': 'https://layout-parser.github.io', 'start_index': 1477}], 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'NarrativeText'})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import UnstructuredFileLoader\n",
|
||||
"\n",
|
||||
"loader = UnstructuredFileLoader(\n",
|
||||
" \"./example_data/layout-parser-paper.pdf\", strategy=\"fast\", mode=\"elements\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"docs = loader.load()\n",
|
||||
"\n",
|
||||
"docs[5:10]"
|
||||
"print(docs[0].metadata.get(\"filename\"), \": \", docs[0].page_content[:100])\n",
|
||||
"print(docs[-1].metadata.get(\"filename\"), \": \", docs[-1].page_content[:100])"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -215,37 +123,52 @@
|
||||
"source": [
|
||||
"## PDF Example\n",
|
||||
"\n",
|
||||
"Processing PDF documents works exactly the same way. Unstructured detects the file type and extracts the same types of elements. Modes of operation are \n",
|
||||
"- `single` all the text from all elements are combined into one (default)\n",
|
||||
"- `elements` maintain individual elements\n",
|
||||
"- `paged` texts from each page are only combined"
|
||||
"Processing PDF documents works exactly the same way. Unstructured detects the file type and extracts the same types of elements."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "672733fd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Define a Partitioning Strategy\n",
|
||||
"\n",
|
||||
"Unstructured document loader allow users to pass in a `strategy` parameter that lets Unstructured\n",
|
||||
"know how to partition pdf and other OCR'd documents. Currently supported strategies are `\"auto\"`,\n",
|
||||
"`\"hi_res\"`, `\"ocr_only\"`, and `\"fast\"`. Learn more about the different strategies\n",
|
||||
"[here](https://docs.unstructured.io/open-source/core-functionality/partitioning#partition-pdf). \n",
|
||||
"\n",
|
||||
"Not all document types have separate hi res and fast partitioning strategies. For those document types, the `strategy` kwarg is\n",
|
||||
"ignored. In some cases, the high res strategy will fallback to fast if there is a dependency missing\n",
|
||||
"(i.e. a model for document partitioning). You can see how to apply a strategy to an\n",
|
||||
"`UnstructuredLoader` below."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "686e5eb4",
|
||||
"execution_count": 6,
|
||||
"id": "60685353",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='2 v 8 4 3 5 1 . 3 0 1 2 : v i X r a', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((16.34, 393.9), (16.34, 560.0), (36.34, 560.0), (36.34, 393.9)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'parent_id': '89565df026a24279aaea20dc08cedbec', 'filetype': 'application/pdf', 'category': 'UncategorizedText'}),\n",
" Document(page_content='LayoutParser: A Unified Toolkit for Deep Learning Based Document Image Analysis', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((157.62199999999999, 114.23496279999995), (157.62199999999999, 146.5141628), (457.7358962799999, 146.5141628), (457.7358962799999, 114.23496279999995)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'filetype': 'application/pdf', 'category': 'Title'}),\n",
" Document(page_content='Zejiang Shen1 ((cid:0)), Ruochen Zhang2, Melissa Dell3, Benjamin Charles Germain Lee4, Jacob Carlson3, and Weining Li5', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((134.809, 168.64029940800003), (134.809, 192.2517444), (480.5464199080001, 192.2517444), (480.5464199080001, 168.64029940800003)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'UncategorizedText'}),\n",
" Document(page_content='1 Allen Institute for AI shannons@allenai.org 2 Brown University ruochen zhang@brown.edu 3 Harvard University {melissadell,jacob carlson}@fas.harvard.edu 4 University of Washington bcgl@cs.washington.edu 5 University of Waterloo w422li@uwaterloo.ca', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((207.23000000000002, 202.57205439999996), (207.23000000000002, 311.8195408), (408.12676, 311.8195408), (408.12676, 202.57205439999996)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'UncategorizedText'}),\n",
" Document(page_content='Abstract. Recent advances in document image analysis (DIA) have been primarily driven by the application of neural networks. Ideally, research outcomes could be easily deployed in production and extended for further investigation. However, various factors like loosely organized codebases and sophisticated model configurations complicate the easy reuse of im- portant innovations by a wide audience. Though there have been on-going efforts to improve reusability and simplify deep learning (DL) model development in disciplines like natural language processing and computer vision, none of them are optimized for challenges in the domain of DIA. This represents a major gap in the existing toolkit, as DIA is central to academic research across a wide range of disciplines in the social sciences and humanities. This paper introduces LayoutParser, an open-source library for streamlining the usage of DL in DIA research and applica- tions. The core LayoutParser library comes with a set of simple and intuitive interfaces for applying and customizing DL models for layout de- tection, character recognition, and many other document processing tasks. To promote extensibility, LayoutParser also incorporates a community platform for sharing both pre-trained models and full document digiti- zation pipelines. We demonstrate that LayoutParser is helpful for both lightweight and large-scale digitization pipelines in real-word use cases. The library is publicly available at https://layout-parser.github.io.', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((162.779, 338.45008160000003), (162.779, 566.8455408), (454.0372021523199, 566.8455408), (454.0372021523199, 338.45008160000003)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'links': [{'text': ':// layout - parser . github . io', 'url': 'https://layout-parser.github.io', 'start_index': 1477}], 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'NarrativeText'})]"
"[Document(metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((16.34, 393.9), (16.34, 560.0), (36.34, 560.0), (36.34, 393.9)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2024-02-27T15:49:27', 'page_number': 1, 'parent_id': '89565df026a24279aaea20dc08cedbec', 'filetype': 'application/pdf', 'category': 'UncategorizedText', 'element_id': 'e9fa370aef7ee5c05744eb7bb7d9981b'}, page_content='2 v 8 4 3 5 1 . 3 0 1 2 : v i X r a'),\n",
" Document(metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((157.62199999999999, 114.23496279999995), (157.62199999999999, 146.5141628), (457.7358962799999, 146.5141628), (457.7358962799999, 114.23496279999995)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2024-02-27T15:49:27', 'page_number': 1, 'filetype': 'application/pdf', 'category': 'Title', 'element_id': 'bde0b230a1aa488e3ce837d33015181b'}, page_content='LayoutParser: A Unified Toolkit for Deep Learning Based Document Image Analysis'),\n",
" Document(metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((134.809, 168.64029940800003), (134.809, 192.2517444), (480.5464199080001, 192.2517444), (480.5464199080001, 168.64029940800003)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2024-02-27T15:49:27', 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'UncategorizedText', 'element_id': '54700f902899f0c8c90488fa8d825bce'}, page_content='Zejiang Shen1 ((cid:0)), Ruochen Zhang2, Melissa Dell3, Benjamin Charles Germain Lee4, Jacob Carlson3, and Weining Li5'),\n",
" Document(metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((207.23000000000002, 202.57205439999996), (207.23000000000002, 311.8195408), (408.12676, 311.8195408), (408.12676, 202.57205439999996)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2024-02-27T15:49:27', 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'UncategorizedText', 'element_id': 'b650f5867bad9bb4e30384282c79bcfe'}, page_content='1 Allen Institute for AI shannons@allenai.org 2 Brown University ruochen zhang@brown.edu 3 Harvard University {melissadell,jacob carlson}@fas.harvard.edu 4 University of Washington bcgl@cs.washington.edu 5 University of Waterloo w422li@uwaterloo.ca'),\n",
" Document(metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((162.779, 338.45008160000003), (162.779, 566.8455408), (454.0372021523199, 566.8455408), (454.0372021523199, 338.45008160000003)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2024-02-27T15:49:27', 'links': [{'text': ':// layout - parser . github . io', 'url': 'https://layout-parser.github.io', 'start_index': 1477}], 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'NarrativeText', 'element_id': 'cfc957c94fe63c8fd7c7f4bcb56e75a7'}, page_content='Abstract. Recent advances in document image analysis (DIA) have been primarily driven by the application of neural networks. Ideally, research outcomes could be easily deployed in production and extended for further investigation. However, various factors like loosely organized codebases and sophisticated model configurations complicate the easy reuse of im- portant innovations by a wide audience. Though there have been on-going efforts to improve reusability and simplify deep learning (DL) model development in disciplines like natural language processing and computer vision, none of them are optimized for challenges in the domain of DIA. This represents a major gap in the existing toolkit, as DIA is central to academic research across a wide range of disciplines in the social sciences and humanities. This paper introduces LayoutParser, an open-source library for streamlining the usage of DL in DIA research and applica- tions. The core LayoutParser library comes with a set of simple and intuitive interfaces for applying and customizing DL models for layout de- tection, character recognition, and many other document processing tasks. To promote extensibility, LayoutParser also incorporates a community platform for sharing both pre-trained models and full document digiti- zation pipelines. We demonstrate that LayoutParser is helpful for both lightweight and large-scale digitization pipelines in real-word use cases. The library is publicly available at https://layout-parser.github.io.')]"
]
},
"execution_count": 12,
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"loader = UnstructuredFileLoader(\n",
" \"./example_data/layout-parser-paper.pdf\", mode=\"elements\"\n",
")\n",
"from langchain_unstructured import UnstructuredLoader\n",
"\n",
"loader = UnstructuredLoader(\"./example_data/layout-parser-paper.pdf\", strategy=\"fast\")\n",
"\n",
"docs = loader.load()\n",
"\n",
@@ -257,37 +180,39 @@
"id": "1cf27fc8",
"metadata": {},
"source": [
"If you need to post process the `unstructured` elements after extraction, you can pass in a list of `str` -> `str` functions to the `post_processors` kwarg when you instantiate the `UnstructuredFileLoader`. This applies to other Unstructured loaders as well. Below is an example."
"## Post Processing\n",
"\n",
"If you need to post-process the `unstructured` elements after extraction, you can pass in a list of\n",
"`str` -> `str` functions to the `post_processors` kwarg when you instantiate the `UnstructuredLoader`. This applies to other Unstructured loaders as well. Below is an example."
]
},
{
"cell_type": "code",
"execution_count": 14,
"execution_count": 7,
"id": "112e5538",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[Document(page_content='2 v 8 4 3 5 1 . 3 0 1 2 : v i X r a', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((16.34, 393.9), (16.34, 560.0), (36.34, 560.0), (36.34, 393.9)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'parent_id': '89565df026a24279aaea20dc08cedbec', 'filetype': 'application/pdf', 'category': 'UncategorizedText'}),\n",
" Document(page_content='LayoutParser: A Unified Toolkit for Deep Learning Based Document Image Analysis', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((157.62199999999999, 114.23496279999995), (157.62199999999999, 146.5141628), (457.7358962799999, 146.5141628), (457.7358962799999, 114.23496279999995)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'filetype': 'application/pdf', 'category': 'Title'}),\n",
" Document(page_content='Zejiang Shen1 ((cid:0)), Ruochen Zhang2, Melissa Dell3, Benjamin Charles Germain Lee4, Jacob Carlson3, and Weining Li5', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((134.809, 168.64029940800003), (134.809, 192.2517444), (480.5464199080001, 192.2517444), (480.5464199080001, 168.64029940800003)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'UncategorizedText'}),\n",
" Document(page_content='1 Allen Institute for AI shannons@allenai.org 2 Brown University ruochen zhang@brown.edu 3 Harvard University {melissadell,jacob carlson}@fas.harvard.edu 4 University of Washington bcgl@cs.washington.edu 5 University of Waterloo w422li@uwaterloo.ca', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((207.23000000000002, 202.57205439999996), (207.23000000000002, 311.8195408), (408.12676, 311.8195408), (408.12676, 202.57205439999996)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'UncategorizedText'}),\n",
" Document(page_content='Abstract. Recent advances in document image analysis (DIA) have been primarily driven by the application of neural networks. Ideally, research outcomes could be easily deployed in production and extended for further investigation. However, various factors like loosely organized codebases and sophisticated model configurations complicate the easy reuse of im- portant innovations by a wide audience. Though there have been on-going efforts to improve reusability and simplify deep learning (DL) model development in disciplines like natural language processing and computer vision, none of them are optimized for challenges in the domain of DIA. This represents a major gap in the existing toolkit, as DIA is central to academic research across a wide range of disciplines in the social sciences and humanities. This paper introduces LayoutParser, an open-source library for streamlining the usage of DL in DIA research and applica- tions. The core LayoutParser library comes with a set of simple and intuitive interfaces for applying and customizing DL models for layout de- tection, character recognition, and many other document processing tasks. To promote extensibility, LayoutParser also incorporates a community platform for sharing both pre-trained models and full document digiti- zation pipelines. We demonstrate that LayoutParser is helpful for both lightweight and large-scale digitization pipelines in real-word use cases. The library is publicly available at https://layout-parser.github.io.', metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((162.779, 338.45008160000003), (162.779, 566.8455408), (454.0372021523199, 566.8455408), (454.0372021523199, 338.45008160000003)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2023-12-19T13:42:18', 'links': [{'text': ':// layout - parser . github . io', 'url': 'https://layout-parser.github.io', 'start_index': 1477}], 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'NarrativeText'})]"
"[Document(metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((16.34, 393.9), (16.34, 560.0), (36.34, 560.0), (36.34, 393.9)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2024-02-27T15:49:27', 'page_number': 1, 'parent_id': '89565df026a24279aaea20dc08cedbec', 'filetype': 'application/pdf', 'category': 'UncategorizedText', 'element_id': 'e9fa370aef7ee5c05744eb7bb7d9981b'}, page_content='2 v 8 4 3 5 1 . 3 0 1 2 : v i X r a'),\n",
" Document(metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((157.62199999999999, 114.23496279999995), (157.62199999999999, 146.5141628), (457.7358962799999, 146.5141628), (457.7358962799999, 114.23496279999995)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2024-02-27T15:49:27', 'page_number': 1, 'filetype': 'application/pdf', 'category': 'Title', 'element_id': 'bde0b230a1aa488e3ce837d33015181b'}, page_content='LayoutParser: A Unified Toolkit for Deep Learning Based Document Image Analysis'),\n",
" Document(metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((134.809, 168.64029940800003), (134.809, 192.2517444), (480.5464199080001, 192.2517444), (480.5464199080001, 168.64029940800003)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2024-02-27T15:49:27', 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'UncategorizedText', 'element_id': '54700f902899f0c8c90488fa8d825bce'}, page_content='Zejiang Shen1 ((cid:0)), Ruochen Zhang2, Melissa Dell3, Benjamin Charles Germain Lee4, Jacob Carlson3, and Weining Li5'),\n",
" Document(metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((207.23000000000002, 202.57205439999996), (207.23000000000002, 311.8195408), (408.12676, 311.8195408), (408.12676, 202.57205439999996)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2024-02-27T15:49:27', 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'UncategorizedText', 'element_id': 'b650f5867bad9bb4e30384282c79bcfe'}, page_content='1 Allen Institute for AI shannons@allenai.org 2 Brown University ruochen zhang@brown.edu 3 Harvard University {melissadell,jacob carlson}@fas.harvard.edu 4 University of Washington bcgl@cs.washington.edu 5 University of Waterloo w422li@uwaterloo.ca'),\n",
" Document(metadata={'source': './example_data/layout-parser-paper.pdf', 'coordinates': {'points': ((162.779, 338.45008160000003), (162.779, 566.8455408), (454.0372021523199, 566.8455408), (454.0372021523199, 338.45008160000003)), 'system': 'PixelSpace', 'layout_width': 612, 'layout_height': 792}, 'file_directory': './example_data', 'filename': 'layout-parser-paper.pdf', 'languages': ['eng'], 'last_modified': '2024-02-27T15:49:27', 'links': [{'text': ':// layout - parser . github . io', 'url': 'https://layout-parser.github.io', 'start_index': 1477}], 'page_number': 1, 'parent_id': 'bde0b230a1aa488e3ce837d33015181b', 'filetype': 'application/pdf', 'category': 'NarrativeText', 'element_id': 'cfc957c94fe63c8fd7c7f4bcb56e75a7'}, page_content='Abstract. Recent advances in document image analysis (DIA) have been primarily driven by the application of neural networks. Ideally, research outcomes could be easily deployed in production and extended for further investigation. However, various factors like loosely organized codebases and sophisticated model configurations complicate the easy reuse of im- portant innovations by a wide audience. Though there have been on-going efforts to improve reusability and simplify deep learning (DL) model development in disciplines like natural language processing and computer vision, none of them are optimized for challenges in the domain of DIA. This represents a major gap in the existing toolkit, as DIA is central to academic research across a wide range of disciplines in the social sciences and humanities. This paper introduces LayoutParser, an open-source library for streamlining the usage of DL in DIA research and applica- tions. The core LayoutParser library comes with a set of simple and intuitive interfaces for applying and customizing DL models for layout de- tection, character recognition, and many other document processing tasks. To promote extensibility, LayoutParser also incorporates a community platform for sharing both pre-trained models and full document digiti- zation pipelines. We demonstrate that LayoutParser is helpful for both lightweight and large-scale digitization pipelines in real-word use cases. The library is publicly available at https://layout-parser.github.io.')]"
]
},
"execution_count": 14,
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_community.document_loaders import UnstructuredFileLoader\n",
"from langchain_unstructured import UnstructuredLoader\n",
"from unstructured.cleaners.core import clean_extra_whitespace\n",
"\n",
"loader = UnstructuredFileLoader(\n",
"loader = UnstructuredLoader(\n",
" \"./example_data/layout-parser-paper.pdf\",\n",
" mode=\"elements\",\n",
" post_processors=[clean_extra_whitespace],\n",
")\n",
"\n",
@@ -303,34 +228,70 @@
"source": [
"## Unstructured API\n",
"\n",
"If you want to get up and running with less set up, you can simply run `pip install unstructured` and use `UnstructuredAPIFileLoader` or `UnstructuredAPIFileIOLoader`. That will process your document using the hosted Unstructured API. You can generate a free Unstructured API key [here](https://www.unstructured.io/api-key/). The [Unstructured documentation](https://unstructured-io.github.io/unstructured/) page will have instructions on how to generate an API key once they’re available. Check out the instructions [here](https://github.com/Unstructured-IO/unstructured-api#dizzy-instructions-for-using-the-docker-image) if you’d like to self-host the Unstructured API or run it locally."
"If you want to get up and running with smaller packages and get the most up-to-date partitioning, you can `pip install\n",
"unstructured-client` and `pip install langchain-unstructured`. For\n",
"more information about the `UnstructuredLoader`, refer to the\n",
"[Unstructured provider page](https://python.langchain.com/v0.1/docs/integrations/document_loaders/unstructured_file/).\n",
"\n",
"The loader will process your document using the hosted Unstructured serverless API when you pass in\n",
"your `api_key` and set `partition_via_api=True`. You can generate a free\n",
"Unstructured API key [here](https://unstructured.io/api-key/).\n",
"\n",
"Check out the instructions [here](https://github.com/Unstructured-IO/unstructured-api#dizzy-instructions-for-using-the-docker-image)\n",
"if you’d like to self-host the Unstructured API or run it locally."
]
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": null,
"id": "6e5fde16",
"metadata": {},
"outputs": [],
"source": [
"# Install package\n",
"%pip install \"langchain-unstructured\"\n",
"%pip install \"unstructured-client\"\n",
"\n",
"# Set API key\n",
"import os\n",
"\n",
"os.environ[\"UNSTRUCTURED_API_KEY\"] = \"FAKE_API_KEY\""
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "386eb63c",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO: Preparing to split document for partition.\n",
"INFO: Given file doesn't have '.pdf' extension, so splitting is not enabled.\n",
"INFO: Partitioning without split.\n",
"INFO: Successfully partitioned the document.\n"
]
},
{
"data": {
"text/plain": [
"Document(page_content='Lorem ipsum dolor sit amet.', metadata={'source': 'example_data/fake.docx'})"
"Document(metadata={'source': 'example_data/fake.docx', 'category_depth': 0, 'filename': 'fake.docx', 'languages': ['por', 'cat'], 'filetype': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', 'category': 'Title', 'element_id': '56d531394823d81787d77a04462ed096'}, page_content='Lorem ipsum dolor sit amet.')"
]
},
"execution_count": 4,
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_community.document_loaders import UnstructuredAPIFileLoader\n",
"from langchain_unstructured import UnstructuredLoader\n",
"\n",
"filenames = [\"example_data/fake.docx\", \"example_data/fake-email.eml\"]\n",
"\n",
"loader = UnstructuredAPIFileLoader(\n",
" file_path=filenames[0],\n",
" api_key=\"FAKE_API_KEY\",\n",
"loader = UnstructuredLoader(\n",
" file_path=\"example_data/fake.docx\",\n",
" api_key=os.getenv(\"UNSTRUCTURED_API_KEY\"),\n",
" partition_via_api=True,\n",
")\n",
"\n",
"docs = loader.load()\n",
@@ -342,43 +303,197 @@
"id": "94158999",
"metadata": {},
"source": [
"You can also batch multiple files through the Unstructured API in a single API using `UnstructuredAPIFileLoader`."
"You can also batch multiple files through the Unstructured API in a single API call using `UnstructuredLoader`."
]
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 10,
"id": "a3d7c846",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Document(page_content='Lorem ipsum dolor sit amet.\\n\\nThis is a test email to use for unit tests.\\n\\nImportant points:\\n\\nRoses are red\\n\\nViolets are blue', metadata={'source': ['example_data/fake.docx', 'example_data/fake-email.eml']})"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
"name": "stderr",
"output_type": "stream",
"text": [
"INFO: Preparing to split document for partition.\n",
"INFO: Given file doesn't have '.pdf' extension, so splitting is not enabled.\n",
"INFO: Partitioning without split.\n",
"INFO: Successfully partitioned the document.\n",
"INFO: Preparing to split document for partition.\n",
"INFO: Given file doesn't have '.pdf' extension, so splitting is not enabled.\n",
"INFO: Partitioning without split.\n",
"INFO: Successfully partitioned the document.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"fake.docx : Lorem ipsum dolor sit amet.\n",
"fake-email.eml : Violets are blue\n"
]
}
],
"source": [
"loader = UnstructuredAPIFileLoader(\n",
" file_path=filenames,\n",
" api_key=\"FAKE_API_KEY\",\n",
"loader = UnstructuredLoader(\n",
" file_path=[\"example_data/fake.docx\", \"example_data/fake-email.eml\"],\n",
" api_key=os.getenv(\"UNSTRUCTURED_API_KEY\"),\n",
" partition_via_api=True,\n",
")\n",
"\n",
"docs = loader.load()\n",
"docs[0]"
"\n",
"print(docs[0].metadata[\"filename\"], \": \", docs[0].page_content[:100])\n",
"print(docs[-1].metadata[\"filename\"], \": \", docs[-1].page_content[:100])"
]
},
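When batching many files you may not want every element in memory at once. Like other LangChain document loaders, `UnstructuredLoader` also exposes `lazy_load()`, which yields `Document`s one at a time. A minimal sketch, reusing the loader configuration from the cell above (assumes `os` is already imported):

```python
loader = UnstructuredLoader(
    file_path=["example_data/fake.docx", "example_data/fake-email.eml"],
    api_key=os.getenv("UNSTRUCTURED_API_KEY"),
    partition_via_api=True,
)

# Stream elements instead of materializing the whole list up front.
for doc in loader.lazy_load():
    print(doc.metadata.get("filename"), ":", doc.page_content[:50])
```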
{
"cell_type": "markdown",
"id": "a324a0db",
"metadata": {},
"source": [
"### Unstructured SDK Client\n",
"\n",
"Partitioning with the Unstructured API relies on the [Unstructured SDK\n",
"Client](https://docs.unstructured.io/api-reference/api-services/sdk).\n",
"\n",
"Below is an example showing how you can customize some features of the client: use your own\n",
"`requests.Session()`, pass in an alternative `server_url`, or configure a `RetryConfig` object for more control over how failed requests are handled."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0e510495",
"execution_count": 11,
"id": "58e55264",
"metadata": {},
"outputs": [],
"source": []
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO: Preparing to split document for partition.\n",
"INFO: Concurrency level set to 5\n",
"INFO: Splitting pages 1 to 16 (16 total)\n",
"INFO: Determined optimal split size of 4 pages.\n",
"INFO: Partitioning 4 files with 4 page(s) each.\n",
"INFO: Partitioning set #1 (pages 1-4).\n",
"INFO: Partitioning set #2 (pages 5-8).\n",
"INFO: Partitioning set #3 (pages 9-12).\n",
"INFO: Partitioning set #4 (pages 13-16).\n",
"INFO: HTTP Request: POST https://api.unstructuredapp.io/general/v0/general \"HTTP/1.1 200 OK\"\n",
"INFO: HTTP Request: POST https://api.unstructuredapp.io/general/v0/general \"HTTP/1.1 200 OK\"\n",
"INFO: HTTP Request: POST https://api.unstructuredapp.io/general/v0/general \"HTTP/1.1 200 OK\"\n",
"INFO: Successfully partitioned set #1, elements added to the final result.\n",
"INFO: Successfully partitioned set #2, elements added to the final result.\n",
"INFO: Successfully partitioned set #3, elements added to the final result.\n",
"INFO: Successfully partitioned set #4, elements added to the final result.\n",
"INFO: Successfully partitioned the document.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"layout-parser-paper.pdf : LayoutParser: A Unified Toolkit for Deep Learning Based Document Image Analysis\n"
]
}
],
"source": [
"import requests\n",
"from langchain_unstructured import UnstructuredLoader\n",
"from unstructured_client import UnstructuredClient\n",
"from unstructured_client.utils import BackoffStrategy, RetryConfig\n",
"\n",
"client = UnstructuredClient(\n",
" api_key_auth=os.getenv(\n",
" \"UNSTRUCTURED_API_KEY\"\n",
" ), # Note: the client API param is \"api_key_auth\" instead of \"api_key\"\n",
" client=requests.Session(),\n",
" server_url=\"https://api.unstructuredapp.io/general/v0/general\",\n",
" retry_config=RetryConfig(\n",
" strategy=\"backoff\",\n",
" retry_connection_errors=True,\n",
" backoff=BackoffStrategy(\n",
" initial_interval=500,\n",
" max_interval=60000,\n",
" exponent=1.5,\n",
" max_elapsed_time=900000,\n",
" ),\n",
" ),\n",
")\n",
"\n",
"loader = UnstructuredLoader(\n",
" \"./example_data/layout-parser-paper.pdf\",\n",
" partition_via_api=True,\n",
" client=client,\n",
")\n",
"\n",
"docs = loader.load()\n",
"\n",
"print(docs[0].metadata[\"filename\"], \": \", docs[0].page_content[:100])"
]
},
{
"cell_type": "markdown",
"id": "c66fbeb3",
"metadata": {},
"source": [
"## Chunking\n",
"\n",
"The `UnstructuredLoader` does not support `mode` as a parameter for grouping text like the older\n",
"loader `UnstructuredFileLoader` and others did. It instead supports \"chunking\". Chunking in\n",
"unstructured differs from other chunking mechanisms you may be familiar with, which form chunks based\n",
"on plain-text features, such as character sequences like \"\\n\\n\" or \"\\n\" that might indicate a paragraph\n",
"boundary or list-item boundary. Instead, all documents are split using specific knowledge about each\n",
"document format to partition the document into semantic units (document elements), and we only need to\n",
"resort to text-splitting when a single element exceeds the desired maximum chunk size. In general,\n",
"chunking combines consecutive elements to form chunks as large as possible without exceeding the\n",
"maximum chunk size. Chunking produces a sequence of `CompositeElement`, `Table`, or `TableChunk` elements.\n",
"Each \"chunk\" is an instance of one of these three types.\n",
"\n",
"See this [page](https://docs.unstructured.io/open-source/core-functionality/chunking) for more\n",
"details about chunking options, but to reproduce the same behavior as `mode=\"single\"`, you can set\n",
"`chunking_strategy=\"basic\"`, `max_characters=<some-really-big-number>`, and `include_orig_elements=False`."
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "e9f1c20d",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING: Partitioning locally even though api_key is defined since partition_via_api=False.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Number of LangChain documents: 1\n",
"Length of text in the document: 42772\n"
]
}
],
"source": [
"from langchain_unstructured import UnstructuredLoader\n",
"\n",
"loader = UnstructuredLoader(\n",
" \"./example_data/layout-parser-paper.pdf\",\n",
" chunking_strategy=\"basic\",\n",
" max_characters=1000000,\n",
" include_orig_elements=False,\n",
")\n",
"\n",
"docs = loader.load()\n",
"\n",
"print(\"Number of LangChain documents:\", len(docs))\n",
"print(\"Length of text in the document:\", len(docs[0].page_content))"
]
}
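Beyond reproducing `mode="single"`, the chunking options linked above can also group elements by section. A minimal sketch, assuming the `"by_title"` strategy and its size parameters behave as described in the Unstructured chunking docs:

```python
from langchain_unstructured import UnstructuredLoader

loader = UnstructuredLoader(
    "./example_data/layout-parser-paper.pdf",
    chunking_strategy="by_title",  # start a new chunk at each Title element
    max_characters=2000,  # hard cap on chunk size
    new_after_n_chars=1500,  # soft cap: prefer to break after this many chars
)

docs = loader.load()
print("Number of chunks:", len(docs))
```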
],
"metadata": {
@@ -397,7 +512,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
"version": "3.10.13"
}
},
"nbformat": 4,

@@ -7,12 +7,29 @@
"source": [
"# Model caches\n",
"\n",
"This notebook covers how to cache results of individual LLM calls using different caches."
"This notebook covers how to cache results of individual LLM calls using different caches.\n",
"\n",
"First, let's install some dependencies:"
]
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": null,
"id": "88486f6f",
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain-openai langchain-community\n",
"\n",
"import os\n",
"from getpass import getpass\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass()"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "10ad9224",
"metadata": {
"ExecuteTime": {
@@ -25,8 +42,9 @@
"from langchain.globals import set_llm_cache\n",
"from langchain_openai import OpenAI\n",
"\n",
"# To make the caching really obvious, lets use a slower model.\n",
"llm = OpenAI(model_name=\"gpt-3.5-turbo-instruct\", n=2, best_of=2)"
"# To make the caching really obvious, let's use a slower and older model.\n",
"# Caching supports newer chat models as well.\n",
"llm = OpenAI(model=\"gpt-3.5-turbo-instruct\", n=2, best_of=2)"
]
},
|
||||
{
|
||||
@@ -41,7 +59,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"execution_count": 3,
|
||||
"id": "426ff912",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -53,7 +71,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 4,
|
||||
"id": "64005d1f",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -61,45 +79,14 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"CPU times: user 52.2 ms, sys: 15.2 ms, total: 67.4 ms\n",
|
||||
"Wall time: 1.19 s\n"
|
||||
"CPU times: user 7.57 ms, sys: 8.22 ms, total: 15.8 ms\n",
|
||||
"Wall time: 649 ms\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"\\n\\nWhy couldn't the bicycle stand up by itself? Because it was...two tired!\""
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"# The first time, it is not yet in cache, so it should take longer\n",
|
||||
"llm(\"Tell me a joke\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "c8a1cb2b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"CPU times: user 191 µs, sys: 11 µs, total: 202 µs\n",
|
||||
"Wall time: 205 µs\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"\\n\\nWhy couldn't the bicycle stand up by itself? Because it was...two tired!\""
|
||||
"\"\\n\\nWhy couldn't the bicycle stand up by itself? Because it was two-tired!\""
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
@@ -107,10 +94,41 @@
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"# The first time, it is not yet in cache, so it should take longer\n",
|
||||
"llm.invoke(\"Tell me a joke\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "c8a1cb2b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"CPU times: user 551 µs, sys: 221 µs, total: 772 µs\n",
|
||||
"Wall time: 1.23 ms\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"\\n\\nWhy couldn't the bicycle stand up by itself? Because it was two-tired!\""
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"# The second time it is, so it goes faster\n",
|
||||
"llm(\"Tell me a joke\")"
|
||||
"llm.invoke(\"Tell me a joke\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -125,7 +143,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 6,
|
||||
"id": "aefd9d2f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -135,7 +153,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": 7,
|
||||
"id": "5f036236",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -148,7 +166,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"execution_count": 8,
|
||||
"id": "fa18e3af",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -156,47 +174,14 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"CPU times: user 33.2 ms, sys: 18.1 ms, total: 51.2 ms\n",
|
||||
"Wall time: 667 ms\n"
|
||||
"CPU times: user 12.6 ms, sys: 3.51 ms, total: 16.1 ms\n",
|
||||
"Wall time: 486 ms\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'\\n\\nWhy did the chicken cross the road?\\n\\nTo get to the other side.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"# The first time, it is not yet in cache, so it should take longer\n",
|
||||
"llm(\"Tell me a joke\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "5bf2f6fd",
|
||||
"metadata": {
|
||||
"scrolled": true
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"CPU times: user 4.86 ms, sys: 1.97 ms, total: 6.83 ms\n",
|
||||
"Wall time: 5.79 ms\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'\\n\\nWhy did the chicken cross the road?\\n\\nTo get to the other side.'"
|
||||
"\"\\n\\nWhy couldn't the bicycle stand up by itself? Because it was two-tired!\""
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
@@ -204,10 +189,43 @@
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"# The first time, it is not yet in cache, so it should take longer\n",
|
||||
"llm.invoke(\"Tell me a joke\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "5bf2f6fd",
|
||||
"metadata": {
|
||||
"scrolled": true
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"CPU times: user 52.6 ms, sys: 57.7 ms, total: 110 ms\n",
|
||||
"Wall time: 113 ms\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"\\n\\nWhy couldn't the bicycle stand up by itself? Because it was two-tired!\""
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"# The second time it is, so it goes faster\n",
|
||||
"llm(\"Tell me a joke\")"
|
||||
"llm.invoke(\"Tell me a joke\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -227,6 +245,16 @@
|
||||
"Use [Upstash Redis](https://upstash.com) to cache prompts and responses with a serverless HTTP API."
|
||||
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9bd81e8e",
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU upstash_redis"
]
},
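After installing the client, wiring Upstash into the LLM cache looks roughly like the sketch below; the REST URL and token are placeholders for your own Upstash credentials:

```python
import os

from langchain.globals import set_llm_cache
from langchain_community.cache import UpstashRedisCache
from upstash_redis import Redis

# Placeholders: set these from your Upstash console.
URL = os.environ["UPSTASH_REDIS_REST_URL"]
TOKEN = os.environ["UPSTASH_REDIS_REST_TOKEN"]

set_llm_cache(UpstashRedisCache(redis_=Redis(url=URL, token=TOKEN)))
```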
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
@@ -272,7 +300,7 @@
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"# The first time, it is not yet in cache, so it should take longer\n",
|
||||
"llm(\"Tell me a joke\")"
|
||||
"llm.invoke(\"Tell me a joke\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -303,7 +331,7 @@
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"# The second time it is, so it goes faster\n",
|
||||
"llm(\"Tell me a joke\")"
|
||||
"llm.invoke(\"Tell me a joke\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -326,6 +354,16 @@
|
||||
"Use [Redis](/docs/integrations/providers/redis) to cache prompts and responses."
|
||||
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d104226b",
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU redis"
]
},
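With the client installed, an exact-match Redis cache is set up roughly as follows, assuming a Redis server reachable on localhost:

```python
from langchain.globals import set_llm_cache
from langchain_community.cache import RedisCache
from redis import Redis

# Assumes a local Redis server; point this at your own instance as needed.
set_llm_cache(RedisCache(redis_=Redis()))
```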
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
@@ -369,7 +407,7 @@
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"# The first time, it is not yet in cache, so it should take longer\n",
|
||||
"llm(\"Tell me a joke\")"
|
||||
"llm.invoke(\"Tell me a joke\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -400,7 +438,7 @@
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"# The second time it is, so it goes faster\n",
|
||||
"llm(\"Tell me a joke\")"
|
||||
"llm.invoke(\"Tell me a joke\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -414,7 +452,17 @@
|
||||
},
|
||||
{
"cell_type": "code",
"execution_count": 15,
"execution_count": null,
"id": "77b3e4e0",
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU redis"
]
},
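The semantic variant additionally needs an embedding model to compare prompts. A minimal sketch using OpenAI embeddings; the `redis_url` is a placeholder for your own server:

```python
from langchain.globals import set_llm_cache
from langchain_community.cache import RedisSemanticCache
from langchain_openai import OpenAIEmbeddings

set_llm_cache(
    RedisSemanticCache(
        redis_url="redis://localhost:6379",  # placeholder
        embedding=OpenAIEmbeddings(),
    )
)
```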
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "64df3099",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -455,7 +503,7 @@
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"# The first time, it is not yet in cache, so it should take longer\n",
|
||||
"llm(\"Tell me a joke\")"
|
||||
"llm.invoke(\"Tell me a joke\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -487,7 +535,7 @@
|
||||
"%%time\n",
|
||||
"# The second time, while not a direct hit, the question is semantically similar to the original question,\n",
|
||||
"# so it uses the cached result!\n",
|
||||
"llm(\"Tell me one joke\")"
|
||||
"llm.invoke(\"Tell me one joke\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -505,6 +553,16 @@
|
||||
"Let's first start with an example of exact match"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7fe96cea",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU gptcache"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
@@ -563,7 +621,7 @@
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"# The first time, it is not yet in cache, so it should take longer\n",
|
||||
"llm(\"Tell me a joke\")"
|
||||
"llm.invoke(\"Tell me a joke\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -594,7 +652,7 @@
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"# The second time it is, so it goes faster\n",
|
||||
"llm(\"Tell me a joke\")"
|
||||
"llm.invoke(\"Tell me a joke\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -659,7 +717,7 @@
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"# The first time, it is not yet in cache, so it should take longer\n",
|
||||
"llm(\"Tell me a joke\")"
|
||||
"llm.invoke(\"Tell me a joke\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -690,7 +748,7 @@
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"# This is an exact match, so it finds it in the cache\n",
|
||||
"llm(\"Tell me a joke\")"
|
||||
"llm.invoke(\"Tell me a joke\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -721,7 +779,7 @@
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"# This is not an exact match, but semantically within distance so it hits!\n",
|
||||
"llm(\"Tell me joke\")"
|
||||
"llm.invoke(\"Tell me joke\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -744,7 +802,11 @@
"### `MongoDBCache`\n",
"An abstraction to store a simple cache in MongoDB. This does not use Semantic Caching, nor does it require an index to be made on the collection before generation.\n",
"\n",
"To import this cache:\n",
"To import this cache, first install the required dependency:\n",
"\n",
"```bash\n",
"pip install -qU langchain-mongodb\n",
"```\n",
"\n",
"```python\n",
"from langchain_mongodb.cache import MongoDBCache\n",
@@ -822,7 +884,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet momento"
|
||||
"%pip install -qU momento"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -877,7 +939,7 @@
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"# The first time, it is not yet in cache, so it should take longer\n",
|
||||
"llm(\"Tell me a joke\")"
|
||||
"llm.invoke(\"Tell me a joke\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -909,7 +971,7 @@
|
||||
"%%time\n",
|
||||
"# The second time it is, so it goes faster\n",
|
||||
"# When run in the same region as the cache, latencies are single digit ms\n",
|
||||
"llm(\"Tell me a joke\")"
|
||||
"llm.invoke(\"Tell me a joke\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -1015,7 +1077,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet \"cassio>=0.1.4\""
|
||||
"%pip install -qU \"cassio>=0.1.4\""
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -1350,6 +1412,8 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install -qU langchain_astradb\n",
|
||||
"\n",
|
||||
"import getpass\n",
|
||||
"\n",
|
||||
"ASTRA_DB_API_ENDPOINT = input(\"ASTRA_DB_API_ENDPOINT = \")\n",
|
||||
@@ -1633,7 +1697,7 @@
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"# The first time, it is not yet in cache, so it should take longer\n",
|
||||
"llm(\"Tell me a joke\")"
|
||||
"llm.invoke(\"Tell me a joke\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -1669,7 +1733,7 @@
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"# The first time, it is not yet in cache, so it should take longer\n",
|
||||
"llm(\"Tell me a joke\")"
|
||||
"llm.invoke(\"Tell me a joke\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -1690,7 +1754,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -U langchain-elasticsearch"
|
||||
"%pip install -qU langchain-elasticsearch"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -1823,7 +1887,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OpenAI(model_name=\"gpt-3.5-turbo-instruct\", n=2, best_of=2, cache=False)"
|
||||
"llm = OpenAI(model=\"gpt-3.5-turbo-instruct\", n=2, best_of=2, cache=False)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -1853,7 +1917,7 @@
|
||||
],
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"llm(\"Tell me a joke\")"
|
||||
"llm.invoke(\"Tell me a joke\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -1883,7 +1947,7 @@
|
||||
],
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"llm(\"Tell me a joke\")"
|
||||
"llm.invoke(\"Tell me a joke\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -1901,18 +1965,18 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"execution_count": 10,
|
||||
"id": "9afa3f7a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OpenAI(model_name=\"gpt-3.5-turbo-instruct\")\n",
|
||||
"no_cache_llm = OpenAI(model_name=\"gpt-3.5-turbo-instruct\", cache=False)"
|
||||
"llm = OpenAI(model=\"gpt-3.5-turbo-instruct\")\n",
|
||||
"no_cache_llm = OpenAI(model=\"gpt-3.5-turbo-instruct\", cache=False)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"execution_count": 11,
|
||||
"id": "98a78e8e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -1924,19 +1988,19 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"execution_count": 14,
|
||||
"id": "2bfb099b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"with open(\"../../how_to/state_of_the_union.txt\") as f:\n",
|
||||
"with open(\"../how_to/state_of_the_union.txt\") as f:\n",
|
||||
" state_of_the_union = f.read()\n",
|
||||
"texts = text_splitter.split_text(state_of_the_union)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"execution_count": 15,
|
||||
"id": "f78b7f51",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -1949,7 +2013,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
"execution_count": 16,
|
||||
"id": "a2a30822",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -1959,7 +2023,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 21,
|
||||
"execution_count": 17,
|
||||
"id": "a545b743",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -1967,24 +2031,27 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"CPU times: user 452 ms, sys: 60.3 ms, total: 512 ms\n",
|
||||
"Wall time: 5.09 s\n"
|
||||
"CPU times: user 176 ms, sys: 23.2 ms, total: 199 ms\n",
|
||||
"Wall time: 4.42 s\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'\\n\\nPresident Biden is discussing the American Rescue Plan and the Bipartisan Infrastructure Law, which will create jobs and help Americans. He also talks about his vision for America, which includes investing in education and infrastructure. In response to Russian aggression in Ukraine, the United States is joining with European allies to impose sanctions and isolate Russia. American forces are being mobilized to protect NATO countries in the event that Putin decides to keep moving west. The Ukrainians are bravely fighting back, but the next few weeks will be hard for them. Putin will pay a high price for his actions in the long run. Americans should not be alarmed, as the United States is taking action to protect its interests and allies.'"
|
||||
"{'input_documents': [Document(page_content='Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \\n\\nLast year COVID-19 kept us apart. This year we are finally together again. \\n\\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \\n\\nWith a duty to one another to the American people to the Constitution. \\n\\nAnd with an unwavering resolve that freedom will always triumph over tyranny. \\n\\nSix days ago, Russia’s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. \\n\\nHe thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. \\n\\nHe met the Ukrainian people. \\n\\nFrom President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world. \\n\\nGroups of citizens blocking tanks with their bodies. Everyone from students to retirees teachers turned soldiers defending their homeland. \\n\\nIn this struggle as President Zelenskyy said in his speech to the European Parliament “Light will win over darkness.” The Ukrainian Ambassador to the United States is here tonight. \\n\\nLet each of us here tonight in this Chamber send an unmistakable signal to Ukraine and to the world. \\n\\nPlease rise if you are able and show that, Yes, we the United States of America stand with the Ukrainian people. \\n\\nThroughout our history we’ve learned this lesson when dictators do not pay a price for their aggression they cause more chaos. \\n\\nThey keep moving. \\n\\nAnd the costs and the threats to America and the world keep rising. \\n\\nThat’s why the NATO Alliance was created to secure peace and stability in Europe after World War 2. \\n\\nThe United States is a member along with 29 other nations. \\n\\nIt matters. American diplomacy matters. American resolve matters. \\n\\nPutin’s latest attack on Ukraine was premeditated and unprovoked. \\n\\nHe rejected repeated efforts at diplomacy. \\n\\nHe thought the West and NATO wouldn’t respond. And he thought he could divide us at home. Putin was wrong. We were ready. Here is what we did. \\n\\nWe prepared extensively and carefully. \\n\\nWe spent months building a coalition of other freedom-loving nations from Europe and the Americas to Asia and Africa to confront Putin. \\n\\nI spent countless hours unifying our European allies. We shared with the world in advance what we knew Putin was planning and precisely how he would try to falsely justify his aggression. \\n\\nWe countered Russia’s lies with truth. \\n\\nAnd now that he has acted the free world is holding him accountable. \\n\\nAlong with twenty-seven members of the European Union including France, Germany, Italy, as well as countries like the United Kingdom, Canada, Japan, Korea, Australia, New Zealand, and many others, even Switzerland. \\n\\nWe are inflicting pain on Russia and supporting the people of Ukraine. Putin is now isolated from the world more than ever. \\n\\nTogether with our allies –we are right now enforcing powerful economic sanctions. \\n\\nWe are cutting off Russia’s largest banks from the international financial system. \\n\\nPreventing Russia’s central bank from defending the Russian Ruble making Putin’s $630 Billion “war fund” worthless. 
\\n\\nWe are choking off Russia’s access to technology that will sap its economic strength and weaken its military for years to come. \\n\\nTonight I say to the Russian oligarchs and corrupt leaders who have bilked billions of dollars off this violent regime no more. \\n\\nThe U.S. Department of Justice is assembling a dedicated task force to go after the crimes of Russian oligarchs. \\n\\nWe are joining with our European allies to find and seize your yachts your luxury apartments your private jets. We are coming for your ill-begotten gains.'),\n",
|
||||
" Document(page_content='We are joining with our European allies to find and seize your yachts your luxury apartments your private jets. We are coming for your ill-begotten gains. \\n\\nAnd tonight I am announcing that we will join our allies in closing off American air space to all Russian flights – further isolating Russia – and adding an additional squeeze –on their economy. The Ruble has lost 30% of its value. \\n\\nThe Russian stock market has lost 40% of its value and trading remains suspended. Russia’s economy is reeling and Putin alone is to blame. \\n\\nTogether with our allies we are providing support to the Ukrainians in their fight for freedom. Military assistance. Economic assistance. Humanitarian assistance. \\n\\nWe are giving more than $1 Billion in direct assistance to Ukraine. \\n\\nAnd we will continue to aid the Ukrainian people as they defend their country and to help ease their suffering. \\n\\nLet me be clear, our forces are not engaged and will not engage in conflict with Russian forces in Ukraine. \\n\\nOur forces are not going to Europe to fight in Ukraine, but to defend our NATO Allies – in the event that Putin decides to keep moving west. \\n\\nFor that purpose we’ve mobilized American ground forces, air squadrons, and ship deployments to protect NATO countries including Poland, Romania, Latvia, Lithuania, and Estonia. \\n\\nAs I have made crystal clear the United States and our Allies will defend every inch of territory of NATO countries with the full force of our collective power. \\n\\nAnd we remain clear-eyed. The Ukrainians are fighting back with pure courage. But the next few days weeks, months, will be hard on them. \\n\\nPutin has unleashed violence and chaos. But while he may make gains on the battlefield – he will pay a continuing high price over the long run. \\n\\nAnd a proud Ukrainian people, who have known 30 years of independence, have repeatedly shown that they will not tolerate anyone who tries to take their country backwards. \\n\\nTo all Americans, I will be honest with you, as I’ve always promised. A Russian dictator, invading a foreign country, has costs around the world. \\n\\nAnd I’m taking robust action to make sure the pain of our sanctions is targeted at Russia’s economy. And I will use every tool at our disposal to protect American businesses and consumers. \\n\\nTonight, I can announce that the United States has worked with 30 other countries to release 60 Million barrels of oil from reserves around the world. \\n\\nAmerica will lead that effort, releasing 30 Million barrels from our own Strategic Petroleum Reserve. And we stand ready to do more if necessary, unified with our allies. \\n\\nThese steps will help blunt gas prices here at home. And I know the news about what’s happening can seem alarming. \\n\\nBut I want you to know that we are going to be okay. \\n\\nWhen the history of this era is written Putin’s war on Ukraine will have left Russia weaker and the rest of the world stronger. \\n\\nWhile it shouldn’t have taken something so terrible for people around the world to see what’s at stake now everyone sees it clearly. \\n\\nWe see the unity among leaders of nations and a more unified Europe a more unified West. And we see unity among the people who are gathering in cities in large crowds around the world even in Russia to demonstrate their support for Ukraine. \\n\\nIn the battle between democracy and autocracy, democracies are rising to the moment, and the world is clearly choosing the side of peace and security. 
\\n\\nThis is a real test. It’s going to take time. So let us continue to draw inspiration from the iron will of the Ukrainian people. \\n\\nTo our fellow Ukrainian Americans who forge a deep bond that connects our two nations we stand with you. \\n\\nPutin may circle Kyiv with tanks, but he will never gain the hearts and souls of the Ukrainian people. \\n\\nHe will never extinguish their love of freedom. He will never weaken the resolve of the free world. \\n\\nWe meet tonight in an America that has lived through two of the hardest years this nation has ever faced.'),\n",
" Document(page_content='We meet tonight in an America that has lived through two of the hardest years this nation has ever faced. \\n\\nThe pandemic has been punishing. \\n\\nAnd so many families are living paycheck to paycheck, struggling to keep up with the rising cost of food, gas, housing, and so much more. \\n\\nI understand. \\n\\nI remember when my Dad had to leave our home in Scranton, Pennsylvania to find work. I grew up in a family where if the price of food went up, you felt it. \\n\\nThat’s why one of the first things I did as President was fight to pass the American Rescue Plan. \\n\\nBecause people were hurting. We needed to act, and we did. \\n\\nFew pieces of legislation have done more in a critical moment in our history to lift us out of crisis. \\n\\nIt fueled our efforts to vaccinate the nation and combat COVID-19. It delivered immediate economic relief for tens of millions of Americans. \\n\\nHelped put food on their table, keep a roof over their heads, and cut the cost of health insurance. \\n\\nAnd as my Dad used to say, it gave people a little breathing room. \\n\\nAnd unlike the $2 Trillion tax cut passed in the previous administration that benefitted the top 1% of Americans, the American Rescue Plan helped working people—and left no one behind. \\n\\nAnd it worked. It created jobs. Lots of jobs. \\n\\nIn fact—our economy created over 6.5 Million new jobs just last year, more jobs created in one year \\nthan ever before in the history of America. \\n\\nOur economy grew at a rate of 5.7% last year, the strongest growth in nearly 40 years, the first step in bringing fundamental change to an economy that hasn’t worked for the working people of this nation for too long. \\n\\nFor the past 40 years we were told that if we gave tax breaks to those at the very top, the benefits would trickle down to everyone else. \\n\\nBut that trickle-down theory led to weaker economic growth, lower wages, bigger deficits, and the widest gap between those at the top and everyone else in nearly a century. \\n\\nVice President Harris and I ran for office with a new economic vision for America. \\n\\nInvest in America. Educate Americans. Grow the workforce. Build the economy from the bottom up \\nand the middle out, not from the top down. \\n\\nBecause we know that when the middle class grows, the poor have a ladder up and the wealthy do very well. \\n\\nAmerica used to have the best roads, bridges, and airports on Earth. \\n\\nNow our infrastructure is ranked 13th in the world. \\n\\nWe won’t be able to compete for the jobs of the 21st Century if we don’t fix that. \\n\\nThat’s why it was so important to pass the Bipartisan Infrastructure Law—the most sweeping investment to rebuild America in history. \\n\\nThis was a bipartisan effort, and I want to thank the members of both parties who worked to make it happen. \\n\\nWe’re done talking about infrastructure weeks. \\n\\nWe’re going to have an infrastructure decade. \\n\\nIt is going to transform America and put us on a path to win the economic competition of the 21st Century that we face with the rest of the world—particularly with China. \\n\\nAs I’ve told Xi Jinping, it is never a good bet to bet against the American people. \\n\\nWe’ll create good jobs for millions of Americans, modernizing roads, airports, ports, and waterways all across America. \\n\\nAnd we’ll do it all to withstand the devastating effects of the climate crisis and promote environmental justice. 
\\n\\nWe’ll build a national network of 500,000 electric vehicle charging stations, begin to replace poisonous lead pipes—so every child—and every American—has clean water to drink at home and at school, provide affordable high-speed internet for every American—urban, suburban, rural, and tribal communities. \\n\\n4,000 projects have already been announced. \\n\\nAnd tonight, I’m announcing that this year we will start fixing over 65,000 miles of highway and 1,500 bridges in disrepair. \\n\\nWhen we use taxpayer dollars to rebuild America – we are going to Buy American: buy American products to support American jobs.')],\n",
" 'output_text': \" The speaker addresses the unity and strength of Americans and discusses the recent conflict with Russia and actions taken by the US and its allies. They announce closures of airspace, support for Ukraine, and measures to target corrupt Russian leaders. President Biden reflects on past hardships and highlights efforts to pass the American Rescue Plan. He criticizes the previous administration's policies and shares plans for the economy, including investing in America, education, rebuilding infrastructure, and supporting American jobs. \"}"
]
},
"execution_count": 21,
"execution_count": 17,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"%%time\n",
"chain.run(docs)"
"chain.invoke(docs)"
]
},
{
@@ -1997,7 +2064,7 @@
},
{
"cell_type": "code",
"execution_count": 22,
"execution_count": 19,
"id": "39cbb282",
"metadata": {},
"outputs": [
@@ -2005,32 +2072,43 @@
"name": "stdout",
"output_type": "stream",
"text": [
"CPU times: user 11.5 ms, sys: 4.33 ms, total: 15.8 ms\n",
"Wall time: 1.04 s\n"
"CPU times: user 7 ms, sys: 1.94 ms, total: 8.94 ms\n",
"Wall time: 1.06 s\n"
]
},
{
"data": {
"text/plain": [
"'\\n\\nPresident Biden is discussing the American Rescue Plan and the Bipartisan Infrastructure Law, which will create jobs and help Americans. He also talks about his vision for America, which includes investing in education and infrastructure.'"
"{'input_documents': [Document(page_content='Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \\n\\nLast year COVID-19 kept us apart. This year we are finally together again. \\n\\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \\n\\nWith a duty to one another to the American people to the Constitution. \\n\\nAnd with an unwavering resolve that freedom will always triumph over tyranny. \\n\\nSix days ago, Russia’s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. \\n\\nHe thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. \\n\\nHe met the Ukrainian people. \\n\\nFrom President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world. \\n\\nGroups of citizens blocking tanks with their bodies. Everyone from students to retirees teachers turned soldiers defending their homeland. \\n\\nIn this struggle as President Zelenskyy said in his speech to the European Parliament “Light will win over darkness.” The Ukrainian Ambassador to the United States is here tonight. \\n\\nLet each of us here tonight in this Chamber send an unmistakable signal to Ukraine and to the world. \\n\\nPlease rise if you are able and show that, Yes, we the United States of America stand with the Ukrainian people. \\n\\nThroughout our history we’ve learned this lesson when dictators do not pay a price for their aggression they cause more chaos. \\n\\nThey keep moving. \\n\\nAnd the costs and the threats to America and the world keep rising. \\n\\nThat’s why the NATO Alliance was created to secure peace and stability in Europe after World War 2. \\n\\nThe United States is a member along with 29 other nations. \\n\\nIt matters. American diplomacy matters. American resolve matters. \\n\\nPutin’s latest attack on Ukraine was premeditated and unprovoked. \\n\\nHe rejected repeated efforts at diplomacy. \\n\\nHe thought the West and NATO wouldn’t respond. And he thought he could divide us at home. Putin was wrong. We were ready. Here is what we did. \\n\\nWe prepared extensively and carefully. \\n\\nWe spent months building a coalition of other freedom-loving nations from Europe and the Americas to Asia and Africa to confront Putin. \\n\\nI spent countless hours unifying our European allies. We shared with the world in advance what we knew Putin was planning and precisely how he would try to falsely justify his aggression. \\n\\nWe countered Russia’s lies with truth. \\n\\nAnd now that he has acted the free world is holding him accountable. \\n\\nAlong with twenty-seven members of the European Union including France, Germany, Italy, as well as countries like the United Kingdom, Canada, Japan, Korea, Australia, New Zealand, and many others, even Switzerland. \\n\\nWe are inflicting pain on Russia and supporting the people of Ukraine. Putin is now isolated from the world more than ever. \\n\\nTogether with our allies –we are right now enforcing powerful economic sanctions. \\n\\nWe are cutting off Russia’s largest banks from the international financial system. \\n\\nPreventing Russia’s central bank from defending the Russian Ruble making Putin’s $630 Billion “war fund” worthless. 
\\n\\nWe are choking off Russia’s access to technology that will sap its economic strength and weaken its military for years to come. \\n\\nTonight I say to the Russian oligarchs and corrupt leaders who have bilked billions of dollars off this violent regime no more. \\n\\nThe U.S. Department of Justice is assembling a dedicated task force to go after the crimes of Russian oligarchs. \\n\\nWe are joining with our European allies to find and seize your yachts your luxury apartments your private jets. We are coming for your ill-begotten gains.'),\n",
" Document(page_content='We are joining with our European allies to find and seize your yachts your luxury apartments your private jets. We are coming for your ill-begotten gains. \\n\\nAnd tonight I am announcing that we will join our allies in closing off American air space to all Russian flights – further isolating Russia – and adding an additional squeeze –on their economy. The Ruble has lost 30% of its value. \\n\\nThe Russian stock market has lost 40% of its value and trading remains suspended. Russia’s economy is reeling and Putin alone is to blame. \\n\\nTogether with our allies we are providing support to the Ukrainians in their fight for freedom. Military assistance. Economic assistance. Humanitarian assistance. \\n\\nWe are giving more than $1 Billion in direct assistance to Ukraine. \\n\\nAnd we will continue to aid the Ukrainian people as they defend their country and to help ease their suffering. \\n\\nLet me be clear, our forces are not engaged and will not engage in conflict with Russian forces in Ukraine. \\n\\nOur forces are not going to Europe to fight in Ukraine, but to defend our NATO Allies – in the event that Putin decides to keep moving west. \\n\\nFor that purpose we’ve mobilized American ground forces, air squadrons, and ship deployments to protect NATO countries including Poland, Romania, Latvia, Lithuania, and Estonia. \\n\\nAs I have made crystal clear the United States and our Allies will defend every inch of territory of NATO countries with the full force of our collective power. \\n\\nAnd we remain clear-eyed. The Ukrainians are fighting back with pure courage. But the next few days weeks, months, will be hard on them. \\n\\nPutin has unleashed violence and chaos. But while he may make gains on the battlefield – he will pay a continuing high price over the long run. \\n\\nAnd a proud Ukrainian people, who have known 30 years of independence, have repeatedly shown that they will not tolerate anyone who tries to take their country backwards. \\n\\nTo all Americans, I will be honest with you, as I’ve always promised. A Russian dictator, invading a foreign country, has costs around the world. \\n\\nAnd I’m taking robust action to make sure the pain of our sanctions is targeted at Russia’s economy. And I will use every tool at our disposal to protect American businesses and consumers. \\n\\nTonight, I can announce that the United States has worked with 30 other countries to release 60 Million barrels of oil from reserves around the world. \\n\\nAmerica will lead that effort, releasing 30 Million barrels from our own Strategic Petroleum Reserve. And we stand ready to do more if necessary, unified with our allies. \\n\\nThese steps will help blunt gas prices here at home. And I know the news about what’s happening can seem alarming. \\n\\nBut I want you to know that we are going to be okay. \\n\\nWhen the history of this era is written Putin’s war on Ukraine will have left Russia weaker and the rest of the world stronger. \\n\\nWhile it shouldn’t have taken something so terrible for people around the world to see what’s at stake now everyone sees it clearly. \\n\\nWe see the unity among leaders of nations and a more unified Europe a more unified West. And we see unity among the people who are gathering in cities in large crowds around the world even in Russia to demonstrate their support for Ukraine. \\n\\nIn the battle between democracy and autocracy, democracies are rising to the moment, and the world is clearly choosing the side of peace and security. 
\\n\\nThis is a real test. It’s going to take time. So let us continue to draw inspiration from the iron will of the Ukrainian people. \\n\\nTo our fellow Ukrainian Americans who forge a deep bond that connects our two nations we stand with you. \\n\\nPutin may circle Kyiv with tanks, but he will never gain the hearts and souls of the Ukrainian people. \\n\\nHe will never extinguish their love of freedom. He will never weaken the resolve of the free world. \\n\\nWe meet tonight in an America that has lived through two of the hardest years this nation has ever faced.'),\n",
" Document(page_content='We meet tonight in an America that has lived through two of the hardest years this nation has ever faced. \\n\\nThe pandemic has been punishing. \\n\\nAnd so many families are living paycheck to paycheck, struggling to keep up with the rising cost of food, gas, housing, and so much more. \\n\\nI understand. \\n\\nI remember when my Dad had to leave our home in Scranton, Pennsylvania to find work. I grew up in a family where if the price of food went up, you felt it. \\n\\nThat’s why one of the first things I did as President was fight to pass the American Rescue Plan. \\n\\nBecause people were hurting. We needed to act, and we did. \\n\\nFew pieces of legislation have done more in a critical moment in our history to lift us out of crisis. \\n\\nIt fueled our efforts to vaccinate the nation and combat COVID-19. It delivered immediate economic relief for tens of millions of Americans. \\n\\nHelped put food on their table, keep a roof over their heads, and cut the cost of health insurance. \\n\\nAnd as my Dad used to say, it gave people a little breathing room. \\n\\nAnd unlike the $2 Trillion tax cut passed in the previous administration that benefitted the top 1% of Americans, the American Rescue Plan helped working people—and left no one behind. \\n\\nAnd it worked. It created jobs. Lots of jobs. \\n\\nIn fact—our economy created over 6.5 Million new jobs just last year, more jobs created in one year \\nthan ever before in the history of America. \\n\\nOur economy grew at a rate of 5.7% last year, the strongest growth in nearly 40 years, the first step in bringing fundamental change to an economy that hasn’t worked for the working people of this nation for too long. \\n\\nFor the past 40 years we were told that if we gave tax breaks to those at the very top, the benefits would trickle down to everyone else. \\n\\nBut that trickle-down theory led to weaker economic growth, lower wages, bigger deficits, and the widest gap between those at the top and everyone else in nearly a century. \\n\\nVice President Harris and I ran for office with a new economic vision for America. \\n\\nInvest in America. Educate Americans. Grow the workforce. Build the economy from the bottom up \\nand the middle out, not from the top down. \\n\\nBecause we know that when the middle class grows, the poor have a ladder up and the wealthy do very well. \\n\\nAmerica used to have the best roads, bridges, and airports on Earth. \\n\\nNow our infrastructure is ranked 13th in the world. \\n\\nWe won’t be able to compete for the jobs of the 21st Century if we don’t fix that. \\n\\nThat’s why it was so important to pass the Bipartisan Infrastructure Law—the most sweeping investment to rebuild America in history. \\n\\nThis was a bipartisan effort, and I want to thank the members of both parties who worked to make it happen. \\n\\nWe’re done talking about infrastructure weeks. \\n\\nWe’re going to have an infrastructure decade. \\n\\nIt is going to transform America and put us on a path to win the economic competition of the 21st Century that we face with the rest of the world—particularly with China. \\n\\nAs I’ve told Xi Jinping, it is never a good bet to bet against the American people. \\n\\nWe’ll create good jobs for millions of Americans, modernizing roads, airports, ports, and waterways all across America. \\n\\nAnd we’ll do it all to withstand the devastating effects of the climate crisis and promote environmental justice. 
\\n\\nWe’ll build a national network of 500,000 electric vehicle charging stations, begin to replace poisonous lead pipes—so every child—and every American—has clean water to drink at home and at school, provide affordable high-speed internet for every American—urban, suburban, rural, and tribal communities. \\n\\n4,000 projects have already been announced. \\n\\nAnd tonight, I’m announcing that this year we will start fixing over 65,000 miles of highway and 1,500 bridges in disrepair. \\n\\nWhen we use taxpayer dollars to rebuild America – we are going to Buy American: buy American products to support American jobs.')],\n",
" 'output_text': '\\n\\nThe speaker addresses the unity of Americans and discusses the conflict with Russia and support for Ukraine. The US and allies are taking action against Russia and targeting corrupt leaders. There is also support and assurance for the American people. President Biden reflects on recent hardships and highlights efforts to pass the American Rescue Plan. He also shares plans for economic growth and investment in America. '}"
]
},
"execution_count": 22,
"execution_count": 19,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"%%time\n",
"chain.run(docs)"
"chain.invoke(docs)"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 20,
"id": "9df0dab8",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"rm: sqlite.db: No such file or directory\n"
]
}
],
"source": [
"!rm .langchain.db sqlite.db"
]
@@ -2105,7 +2183,7 @@
"source": [
"%%time\n",
"# The first time, it is not yet in cache, so it should take longer\n",
"llm(\"Tell me a joke\")"
"llm.invoke(\"Tell me a joke\")"
]
},
{
@@ -2142,7 +2220,7 @@
"%%time\n",
"# The second time, while not a direct hit, the question is semantically similar to the original question,\n",
"# so it uses the cached result!\n",
"llm(\"Tell me one joke\")"
"llm.invoke(\"Tell me one joke\")"
]
},
{
@@ -2192,6 +2270,16 @@
"The standard cache that looks for an exact match of the user prompt."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ac0a2276",
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain_couchbase couchbase"
]
},
{
"cell_type": "code",
"execution_count": 2,
@@ -2578,14 +2666,6 @@
"| langchain_couchbase.cache | [CouchbaseCache](https://api.python.langchain.com/en/latest/cache/langchain_couchbase.cache.CouchbaseCache.html) |\n",
"| langchain_couchbase.cache | [CouchbaseSemanticCache](https://api.python.langchain.com/en/latest/cache/langchain_couchbase.cache.CouchbaseSemanticCache.html) |\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "19067f14-c69a-4156-9504-af43a0713669",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
@@ -2604,7 +2684,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.13"
"version": "3.10.5"
}
},
"nbformat": 4,
|
||||
@@ -194,12 +194,37 @@
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e4a1e0f1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For certain requirements, there is an option to pass the IBM's [`APIClient`](https://ibm.github.io/watsonx-ai-python-sdk/base.html#apiclient) object into the `WatsonxLLM` class."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "4b28afc1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from ibm_watsonx_ai import APIClient\n",
|
||||
"\n",
|
||||
"api_client = APIClient(...)\n",
|
||||
"\n",
|
||||
"watsonx_llm = WatsonxLLM(\n",
|
||||
" model_id=\"ibm/granite-13b-instruct-v2\",\n",
|
||||
" watsonx_client=api_client,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7c4a632b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can also pass the IBM's [`ModelInference`](https://ibm.github.io/watsonx-ai-python-sdk/fm_model_inference.html) object into `WatsonxLLM` class."
|
||||
"You can also pass the IBM's [`ModelInference`](https://ibm.github.io/watsonx-ai-python-sdk/fm_model_inference.html) object into the `WatsonxLLM` class."
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
"source": [
|
||||
"# TiDB\n",
|
||||
"\n",
|
||||
"> [TiDB Cloud](https://tidbcloud.com/), is a comprehensive Database-as-a-Service (DBaaS) solution, that provides dedicated and serverless options. TiDB Serverless is now integrating a built-in vector search into the MySQL landscape. With this enhancement, you can seamlessly develop AI applications using TiDB Serverless without the need for a new database or additional technical stacks. Be among the first to experience it by joining the waitlist for the private beta at https://tidb.cloud/ai.\n",
|
||||
"> [TiDB Cloud](https://pingcap.com/ai), is a comprehensive Database-as-a-Service (DBaaS) solution, that provides dedicated and serverless options. TiDB Serverless is now integrating a built-in vector search into the MySQL landscape. With this enhancement, you can seamlessly develop AI applications using TiDB Serverless without the need for a new database or additional technical stacks. Create a free TiDB Serverless cluster and start using the vector search feature today!\n",
|
||||
"\n",
|
||||
"This notebook introduces how to use TiDB to store chat message history. "
|
||||
]
|
||||
|
||||
@@ -40,6 +40,7 @@ These providers have standalone `langchain-{provider}` packages for improved ver
|
||||
- [Qdrant](/docs/integrations/providers/qdrant)
|
||||
- [Robocorp](/docs/integrations/providers/robocorp)
|
||||
- [Together AI](/docs/integrations/providers/together)
|
||||
- [Unstructured](/docs/integrations/providers/unstructured)
|
||||
- [Upstage](/docs/integrations/providers/upstage)
|
||||
- [Voyage AI](/docs/integrations/providers/voyageai)
|
||||
|
||||
|
||||
@@ -68,3 +68,18 @@ Learn more in the [example notebook](/docs/integrations/document_loaders/cassand
|
||||
|
||||
> Apache Cassandra, Cassandra and Apache are either registered trademarks or trademarks of
|
||||
> the [Apache Software Foundation](http://www.apache.org/) in the United States and/or other countries.
|
||||
|
||||
## Toolkit
|
||||
|
||||
The `Cassandra Database toolkit` enables AI engineers to efficiently integrate agents
|
||||
with Cassandra data.
|
||||
|
||||
```python
from langchain_community.agent_toolkits.cassandra_database.toolkit import (
    CassandraDatabaseToolkit,
)
```
|
||||
|
||||
Learn more in the [example notebook](/docs/integrations/toolkits/cassandra_database).
|
||||
|
||||
|
||||
|
||||
@@ -38,7 +38,7 @@ import getpass
|
||||
if "PREMAI_API_KEY" not in os.environ:
|
||||
os.environ["PREMAI_API_KEY"] = getpass.getpass("PremAI API Key:")
|
||||
|
||||
chat = ChatPremAI(project_id=8)
|
||||
chat = ChatPremAI(project_id=1234, model_name="gpt-4o")
|
||||
```
|
||||
|
||||
### Chat Completions
|
||||
@@ -50,7 +50,8 @@ The first one will give us a static result. Whereas the second one will stream t
|
||||
```python
human_message = HumanMessage(content="Who are you?")

chat.invoke([human_message])
response = chat.invoke([human_message])
print(response.content)
```
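The streaming variant mentioned above can be sketched as follows; this is an illustrative example (not from the original page), reusing the same `chat` object:

```python
# Stream tokens as they arrive instead of waiting for the full response.
for chunk in chat.stream([human_message]):
    print(chunk.content, end="", flush=True)
```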
|
||||
|
||||
You can provide a system prompt like this:
|
||||
@@ -84,8 +85,8 @@ Repositories are also supported in langchain premai. Here is how you can do it.
|
||||
|
||||
```python

query = "what is the diameter of individual Galaxy"
repository_ids = [1991, ]
query = "Which models are used for dense retrieval"
repository_ids = [1985,]
repositories = dict(
    ids=repository_ids,
    similarity_threshold=0.3,
|
||||
@@ -100,6 +101,8 @@ First we start by defining our repository with some repository ids. Make sure th
|
||||
Now, we connect the repository with our chat object to invoke RAG-based generations.
|
||||
|
||||
```python
import json

response = chat.invoke(query, max_tokens=100, repositories=repositories)

print(response.content)
|
||||
@@ -109,25 +112,22 @@ print(json.dumps(response.response_metadata, indent=4))
|
||||
This is what an output looks like.
|
||||
|
||||
```bash
The diameters of individual galaxies range from 80,000-150,000 light-years.
Dense retrieval models typically include:

1. **BERT-based Models**: Such as DPR (Dense Passage Retrieval) which uses BERT for encoding queries and passages.
2. **ColBERT**: A model that combines BERT with late interaction mechanisms.
3. **ANCE (Approximate Nearest Neighbor Negative Contrastive Estimation)**: Uses BERT and focuses on efficient retrieval.
4. **TCT-ColBERT**: A variant of ColBERT that uses a two-tower
{
    "document_chunks": [
        {
            "repository_id": 19xx,
            "document_id": 13xx,
            "chunk_id": 173xxx,
            "document_name": "Kegy 202 Chapter 2",
            "similarity_score": 0.586126983165741,
            "content": "n thousands\n of light-years. The diameters of individual\n galaxies range from 80,000-150,000 light\n "
        },
        {
            "repository_id": 19xx,
            "document_id": 13xx,
            "chunk_id": 173xxx,
            "document_name": "Kegy 202 Chapter 2",
            "similarity_score": 0.4815782308578491,
            "content": " for development of galaxies. A galaxy contains\n a large number of stars. Galaxies spread over\n vast distances that are measured in thousands\n "
        },
        {
            "repository_id": 1985,
            "document_id": 1306,
            "chunk_id": 173899,
            "document_name": "[D] Difference between sparse and dense informati\u2026",
            "similarity_score": 0.3209080100059509,
            "content": "with the difference or anywhere\nwhere I can read about it?\n\n\n 17 9\n\n\n u/ScotiabankCanada \u2022 Promoted\n\n\n Accelerate your study permit process\n with Scotiabank's Student GIC\n Program. We're here to help you tur\u2026\n\n\n startright.scotiabank.com Learn More\n\n\n Add a Comment\n\n\nSort by: Best\n\n\n DinosParkour \u2022 1y ago\n\n\n Dense Retrieval (DR) m"
        }
    ]
}
```
|
||||
@@ -264,4 +264,164 @@ doc_result[:5]
|
||||
0.0008162345038726926,
|
||||
-0.004556538071483374,
|
||||
0.02918623760342598,
|
||||
-0.02547479420900345]
|
||||
-0.02547479420900345]
|
||||
|
||||
## Tool/Function Calling
|
||||
|
||||
LangChain PremAI supports tool/function calling. Tool/function calling allows a model to respond to a given prompt by generating output that matches a user-defined schema.
|
||||
|
||||
- You can learn all about tool calling in detail [in our documentation here](https://docs.premai.io/get-started/function-calling).
- You can learn more about LangChain tool calling in [this part of the docs](https://python.langchain.com/v0.1/docs/modules/model_io/chat/function_calling).
|
||||
|
||||
**NOTE:**
|
||||
|
||||
> The current version of LangChain ChatPremAI does not support function/tool calling with streaming. Streaming support along with function calling is coming soon.
|
||||
|
||||
### Passing tools to model
|
||||
|
||||
In order to pass tools and let the LLM choose the tool it needs to call, we need to pass a tool schema. A tool schema is the function definition along with a proper docstring describing what the function does, what each of its arguments is, and so on. Below are some simple arithmetic functions with their schemas.
|
||||
|
||||
**NOTE:**
|
||||
> When defining a function/tool schema, do not forget to add information about the function arguments; otherwise it will throw an error.
|
||||
|
||||
```python
from langchain_core.tools import tool
from langchain_core.pydantic_v1 import BaseModel, Field


# Define the schema for function arguments
class OperationInput(BaseModel):
    a: int = Field(description="First number")
    b: int = Field(description="Second number")


# Now define the function where schema for argument will be OperationInput
@tool("add", args_schema=OperationInput, return_direct=True)
def add(a: int, b: int) -> int:
    """Adds a and b.

    Args:
        a: first int
        b: second int
    """
    return a + b


@tool("multiply", args_schema=OperationInput, return_direct=True)
def multiply(a: int, b: int) -> int:
    """Multiplies a and b.

    Args:
        a: first int
        b: second int
    """
    return a * b
```
|
||||
|
||||
### Binding tool schemas with our LLM
|
||||
|
||||
We will now use the `bind_tools` method to convert the functions above into "tools" and bind them to the model. This means this tool information is passed along every time we invoke the model.
|
||||
|
||||
```python
tools = [add, multiply]
llm_with_tools = chat.bind_tools(tools)
```
|
||||
|
||||
After this, we get a response from the model, which is now bound to the tools.
|
||||
|
||||
```python
query = "What is 3 * 12? Also, what is 11 + 49?"

messages = [HumanMessage(query)]
ai_msg = llm_with_tools.invoke(messages)
```
|
||||
|
||||
As we can see, when our chat model is bound to tools, it calls the correct set of tools, in sequence, based on the given prompt.
|
||||
|
||||
```python
ai_msg.tool_calls
```
|
||||
**Output**
|
||||
|
||||
```python
[{'name': 'multiply',
  'args': {'a': 3, 'b': 12},
  'id': 'call_A9FL20u12lz6TpOLaiS6rFa8'},
 {'name': 'add',
  'args': {'a': 11, 'b': 49},
  'id': 'call_MPKYGLHbf39csJIyb5BZ9xIk'}]
```
|
||||
|
||||
We append the message shown above to our message list, where it acts as context and makes the LLM aware of the functions it has called.
|
||||
|
||||
```python
messages.append(ai_msg)
```
|
||||
|
||||
Tool calling happens in two phases:
|
||||
|
||||
1. In our first call, we gathered all the tools that the LLM decided to call, so that their results can serve as added context for a more accurate, hallucination-free answer.
|
||||
|
||||
2. In our second call, we parse the set of tools the LLM decided on, run them (in our case, the functions we defined, with the LLM's extracted arguments), and pass the results back to the LLM:
|
||||
|
||||
```python
from langchain_core.messages import ToolMessage

for tool_call in ai_msg.tool_calls:
    selected_tool = {"add": add, "multiply": multiply}[tool_call["name"].lower()]
    tool_output = selected_tool.invoke(tool_call["args"])
    messages.append(ToolMessage(tool_output, tool_call_id=tool_call["id"]))
```
|
||||
|
||||
Finally, we call the LLM (bound to the tools) with the function responses added to its context.
|
||||
|
||||
```python
response = llm_with_tools.invoke(messages)
print(response.content)
```
|
||||
**Output**
|
||||
|
||||
```txt
The final answers are:

- 3 * 12 = 36
- 11 + 49 = 60
```
|
||||
|
||||
### Defining tool schemas: Pydantic class (optional)
|
||||
|
||||
Above we showed how to define schemas using the `tool` decorator; however, we can equivalently define a schema using Pydantic. Pydantic is useful when your tool inputs are more complex:
|
||||
|
||||
```python
from langchain_core.output_parsers.openai_tools import PydanticToolsParser


class add(BaseModel):
    """Add two integers together."""

    a: int = Field(..., description="First integer")
    b: int = Field(..., description="Second integer")


class multiply(BaseModel):
    """Multiply two integers together."""

    a: int = Field(..., description="First integer")
    b: int = Field(..., description="Second integer")


tools = [add, multiply]
```
|
||||
|
||||
Now, we can bind them to chat models and directly get the result:
|
||||
|
||||
```python
chain = llm_with_tools | PydanticToolsParser(tools=[multiply, add])
chain.invoke(query)
```
|
||||
|
||||
**Output**
|
||||
|
||||
```txt
[multiply(a=3, b=12), add(a=11, b=49)]
```
|
||||
|
||||
Now, as done above, we parse this, run the functions, and call the LLM once again to get the result, as sketched below.
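The page stops here; as a hedged sketch (not from the original docs), the final round trip could look like the following. Since Pydantic schemas carry no function body, we compute the results ourselves before handing them back:

```python
from langchain_core.messages import HumanMessage, ToolMessage

messages = [HumanMessage(query)]
ai_msg = llm_with_tools.invoke(messages)
messages.append(ai_msg)

for tool_call in ai_msg.tool_calls:
    args = tool_call["args"]
    # No @tool function to run here, so evaluate the operation by name.
    result = args["a"] + args["b"] if tool_call["name"] == "add" else args["a"] * args["b"]
    messages.append(ToolMessage(str(result), tool_call_id=tool_call["id"]))

print(llm_with_tools.invoke(messages).content)
```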
|
||||
@@ -20,3 +20,16 @@ from langchain_community.vectorstores import SKLearnVectorStore
|
||||
```
|
||||
|
||||
For a more detailed walkthrough of the SKLearnVectorStore wrapper, see [this notebook](/docs/integrations/vectorstores/sklearn).
|
||||
|
||||
|
||||
## Retriever
|
||||
|
||||
`Support vector machines (SVMs)` are supervised learning
methods used for classification, regression and outlier detection.
|
||||
|
||||
See a [usage example](/docs/integrations/retrievers/svm).
|
||||
|
||||
```python
from langchain_community.retrievers import SVMRetriever
```
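As a rough sketch (the texts and embedding model below are illustrative, not part of this page), the retriever is typically built from raw texts plus an embeddings object:

```python
from langchain_community.retrievers import SVMRetriever
from langchain_openai import OpenAIEmbeddings  # any embeddings class works here

# Fit an in-memory SVM retriever over a few illustrative texts.
retriever = SVMRetriever.from_texts(
    ["foo", "bar", "world", "hello", "foo bar"],
    OpenAIEmbeddings(),
)
docs = retriever.invoke("foo")
```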
|
||||
|
||||
|
||||
@@ -7,7 +7,6 @@
|
||||
There isn't any special setup for it.
|
||||
|
||||
|
||||
|
||||
## Document loader
|
||||
|
||||
See a [usage example](/docs/integrations/document_loaders/slack).
|
||||
@@ -16,6 +15,14 @@ See a [usage example](/docs/integrations/document_loaders/slack).
|
||||
from langchain_community.document_loaders import SlackDirectoryLoader
|
||||
```
|
||||
|
||||
## Toolkit
|
||||
|
||||
See a [usage example](/docs/integrations/toolkits/slack).
|
||||
|
||||
```python
from langchain_community.agent_toolkits import SlackToolkit
```
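A minimal sketch of instantiating it (assuming a `SLACK_USER_TOKEN` environment variable is set for the underlying Slack SDK client):

```python
from langchain_community.agent_toolkits import SlackToolkit

toolkit = SlackToolkit()
tools = toolkit.get_tools()  # individual Slack tools to hand to an agent
```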
|
||||
|
||||
## Chat loader
|
||||
|
||||
See a [usage example](/docs/integrations/chat_loaders/slack).
|
||||
|
||||
@@ -7,8 +7,8 @@ This page covers how to use the `Snowflake` ecosystem within `LangChain`.
|
||||
|
||||
## Embedding models
|
||||
|
||||
Snowflake offers their open weight `arctic` line of embedding models for free
|
||||
on [Hugging Face](https://huggingface.co/Snowflake/snowflake-arctic-embed-l).
|
||||
Snowflake offers their open-weight `arctic` line of embedding models for free
|
||||
on [Hugging Face](https://huggingface.co/Snowflake/snowflake-arctic-embed-m-v1.5). The most recent model, snowflake-arctic-embed-m-v1.5, features [matryoshka embedding](https://arxiv.org/abs/2205.13147), which allows for effective vector truncation.
|
||||
You can use these models via the
|
||||
[HuggingFaceEmbeddings](/docs/integrations/text_embedding/huggingfacehub) connector:
|
||||
|
||||
@@ -19,7 +19,7 @@ pip install langchain-community sentence-transformers
|
||||
```python
from langchain_huggingface import HuggingFaceEmbeddings

model = HuggingFaceEmbeddings(model_name="snowflake/arctic-embed-l")
model = HuggingFaceEmbeddings(model_name="snowflake/arctic-embed-m-v1.5")
```
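Because the v1.5 model is matryoshka-trained, the leading dimensions of each vector remain useful on their own. A hedged sketch of truncating embeddings (the 256-dimension cutoff here is illustrative; re-normalizing afterwards is common practice):

```python
from langchain_huggingface import HuggingFaceEmbeddings

model = HuggingFaceEmbeddings(model_name="snowflake/arctic-embed-m-v1.5")

full = model.embed_query("Who makes the arctic embedding models?")
truncated = full[:256]  # keep only the leading dimensions for cheaper storage/search
```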
|
||||
|
||||
## Document loader
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
# TiDB
|
||||
|
||||
> [TiDB Cloud](https://tidbcloud.com/), is a comprehensive Database-as-a-Service (DBaaS) solution,
|
||||
> [TiDB Cloud](https://pingcap.com/ai), is a comprehensive Database-as-a-Service (DBaaS) solution,
|
||||
> that provides dedicated and serverless options. `TiDB Serverless` is now integrating
|
||||
> a built-in vector search into the MySQL landscape. With this enhancement, you can seamlessly
|
||||
> develop AI applications using `TiDB Serverless` without the need for a new database or additional
|
||||
> technical stacks. Be among the first to experience it by joining the [waitlist for the private beta](https://tidb.cloud/ai).
|
||||
> technical stacks. Create a free TiDB Serverless cluster and start using the vector search feature today.
|
||||
|
||||
|
||||
## Installation and Setup
|
||||
|
||||
@@ -8,11 +8,21 @@ ecosystem within LangChain.
|
||||
|
||||
## Installation and Setup
|
||||
|
||||
If you are using a loader that runs locally, use the following steps to get `unstructured` and
|
||||
its dependencies running locally.
|
||||
If you are using a loader that runs locally, use the following steps to get `unstructured` and its
|
||||
dependencies running.
|
||||
|
||||
- Install the Python SDK with `pip install unstructured`.
|
||||
- You can install document specific dependencies with extras, i.e. `pip install "unstructured[docx]"`.
|
||||
- For the smallest installation footprint and to take advantage of features not available in the
|
||||
open-source `unstructured` package, install the Python SDK with `pip install unstructured-client`
|
||||
along with `pip install langchain-unstructured` to use the `UnstructuredLoader` and partition
|
||||
remotely against the Unstructured API. This loader lives
in a LangChain partner repo instead of the `langchain-community` repo and you will need an
`api_key`, which you can generate for free [here](https://unstructured.io/api-key/).
|
||||
- Unstructured's documentation for the SDK can be found here:
|
||||
https://docs.unstructured.io/api-reference/api-services/sdk
|
||||
|
||||
- To run everything locally, install the open-source python package with `pip install unstructured`
|
||||
along with `pip install langchain-community` and use the same `UnstructuredLoader` as mentioned above.
|
||||
- You can install document specific dependencies with extras, e.g. `pip install "unstructured[docx]"`.
|
||||
- To install the dependencies for all document types, use `pip install "unstructured[all-docs]"`.
|
||||
- Install the following system dependencies if they are not already available on your system with e.g. `brew install` for Mac.
|
||||
Depending on what document types you're parsing, you may not need all of these.
|
||||
@@ -22,16 +32,11 @@ its dependencies running locally.
|
||||
- `qpdf` (PDFs)
|
||||
- `libreoffice` (MS Office docs)
|
||||
- `pandoc` (EPUBs)
|
||||
- When running locally, Unstructured also recommends using Docker [by following this
|
||||
guide](https://docs.unstructured.io/open-source/installation/docker-installation) to ensure all
|
||||
system dependencies are installed correctly.
|
||||
|
||||
When running locally, Unstructured also recommends using Docker [by following this guide](https://docs.unstructured.io/open-source/installation/docker-installation)
|
||||
to ensure all system dependencies are installed correctly.
|
||||
|
||||
If you want to get up and running with less set up, you can
|
||||
simply run `pip install unstructured` and use `UnstructuredAPIFileLoader` or
|
||||
`UnstructuredAPIFileIOLoader`. That will process your document using the hosted Unstructured API.
|
||||
|
||||
|
||||
The `Unstructured API` requires API keys to make requests.
|
||||
The Unstructured API requires API keys to make requests.
|
||||
You can request an API key [here](https://unstructured.io/api-key-hosted) and start using it today!
|
||||
Check out the README [here](https://github.com/Unstructured-IO/unstructured-api) to get started making API calls.
|
||||
We'd love to hear your feedback, let us know how it goes in our [community slack](https://join.slack.com/t/unstructuredw-kbe4326/shared_invite/zt-1x7cgo0pg-PTptXWylzPQF9xZolzCnwQ).
|
||||
@@ -42,30 +47,21 @@ Check out the instructions
|
||||
|
||||
## Data Loaders
|
||||
|
||||
The primary usage of the `Unstructured` is in data loaders.
|
||||
The primary usage of `Unstructured` is in data loaders.
|
||||
|
||||
### UnstructuredAPIFileIOLoader
|
||||
### UnstructuredLoader
|
||||
|
||||
See a [usage example](/docs/integrations/document_loaders/unstructured_file#unstructured-api).
|
||||
See a [usage example](/docs/integrations/document_loaders/unstructured_file) to see how you can use
|
||||
this loader for both partitioning locally and remotely with the serverless Unstructured API.
|
||||
|
||||
```python
|
||||
from langchain_community.document_loaders import UnstructuredAPIFileIOLoader
|
||||
```
|
||||
|
||||
### UnstructuredAPIFileLoader
|
||||
|
||||
See a [usage example](/docs/integrations/document_loaders/unstructured_file#unstructured-api).
|
||||
|
||||
```python
from langchain_community.document_loaders import UnstructuredAPIFileLoader
from langchain_unstructured import UnstructuredLoader
```
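A hedged sketch of both modes (the file name is illustrative; `partition_via_api` and `api_key` are the loader's remote-partitioning options as of this writing):

```python
from langchain_unstructured import UnstructuredLoader

# Local partitioning with the open-source `unstructured` package.
docs = UnstructuredLoader("example.pdf").load()

# Remote partitioning against the serverless Unstructured API.
docs = UnstructuredLoader(
    "example.pdf",
    partition_via_api=True,
    api_key="...",  # generate a free key at https://unstructured.io/api-key/
).load()
```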
|
||||
|
||||
### UnstructuredCHMLoader
|
||||
|
||||
`CHM` means `Microsoft Compiled HTML Help`.
|
||||
|
||||
See a usage example in the API documentation.
|
||||
|
||||
```python
|
||||
from langchain_community.document_loaders import UnstructuredCHMLoader
|
||||
```
|
||||
@@ -119,15 +115,6 @@ See a [usage example](/docs/integrations/document_loaders/google_drive#passing-i
|
||||
from langchain_community.document_loaders import UnstructuredFileIOLoader
|
||||
```
|
||||
|
||||
### UnstructuredFileLoader
|
||||
|
||||
See a [usage example](/docs/integrations/document_loaders/unstructured_file).
|
||||
|
||||
|
||||
```python
|
||||
from langchain_community.document_loaders import UnstructuredFileLoader
|
||||
```
|
||||
|
||||
### UnstructuredHTMLLoader
|
||||
|
||||
See a [usage example](/docs/how_to/document_loader_html).
|
||||
|
||||
135
docs/docs/integrations/retrievers/nanopq.ipynb
Normal file
135
docs/docs/integrations/retrievers/nanopq.ipynb
Normal file
@@ -0,0 +1,135 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "661d5123-8ed2-4504-a846-7df0984e79f9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# NanoPQ (Product Quantization)\n",
|
||||
"\n",
|
||||
">[Product Quantization algorithm (k-NN)](https://towardsdatascience.com/similarity-search-product-quantization-b2a1a6397701) in brief is a quantization algorithm that helps in compression of database vectors which helps in semantic search when large datasets are involved. In a nutshell, the embedding is split into M subspaces which further goes through clustering. Upon clustering the vectors the centroid vector gets mapped to the vectors present in the each of the clusters of the subspace. \n",
|
||||
"\n",
|
||||
"This notebook goes over how to use a retriever that under the hood uses a Product Quantization which has been implemented by the [nanopq](https://github.com/matsui528/nanopq) package."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "68794637-c13b-4145-944f-3b0c2f1258f9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-community langchain-openai nanopq"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "39ecbf50-4623-4ee6-9c8e-fea5da21767e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.embeddings.spacy_embeddings import SpacyEmbeddings\n",
|
||||
"from langchain_community.retrievers import NanoPQRetriever"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c1ce742a-5085-408a-a2c2-4bae0f605880",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create New Retriever with Texts"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "6c80020e-bc9e-49e8-8f93-5f75fd823738",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"retriever = NanoPQRetriever.from_texts(\n",
|
||||
" [\"Great world\", \"great words\", \"world\", \"planets of the world\"],\n",
|
||||
" SpacyEmbeddings(model_name=\"en_core_web_sm\"),\n",
|
||||
" clusters=2,\n",
|
||||
" subspace=2,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "743c26c1-0072-4e46-b41b-c28b3f1737c8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Use Retriever\n",
|
||||
"\n",
|
||||
"We can now use the retriever!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "f496de2d-9b8f-4f8b-a30f-279ef199259a",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"M: 2, Ks: 2, metric : <class 'numpy.uint8'>, code_dtype: l2\n",
|
||||
"iter: 20, seed: 123\n",
|
||||
"Training the subspace: 0 / 2\n",
|
||||
"Training the subspace: 1 / 2\n",
|
||||
"Encoding the subspace: 0 / 2\n",
|
||||
"Encoding the subspace: 1 / 2\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='world'),\n",
|
||||
" Document(page_content='Great world'),\n",
|
||||
" Document(page_content='great words'),\n",
|
||||
" Document(page_content='planets of the world')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"retriever.invoke(\"earth\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "617202a7-e3a6-49a8-b807-4b4d771159d5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.11"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -156,6 +156,29 @@
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For certain requirements, there is an option to pass the IBM's [`APIClient`](https://ibm.github.io/watsonx-ai-python-sdk/base.html#apiclient) object into the `WatsonxEmbeddings` class."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from ibm_watsonx_ai import APIClient\n",
|
||||
"\n",
|
||||
"api_client = APIClient(...)\n",
|
||||
"\n",
|
||||
"watsonx_llm = WatsonxEmbeddings(\n",
|
||||
" model_id=\"ibm/slate-125m-english-rtrvr\",\n",
|
||||
" watsonx_client=api_client,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
|
||||
155
docs/docs/integrations/text_embedding/pinecone.ipynb
Normal file
155
docs/docs/integrations/text_embedding/pinecone.ipynb
Normal file
@@ -0,0 +1,155 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# Pinecone Embeddings\n",
|
||||
"\n",
|
||||
"Pinecone's inference API can be accessed via `PineconeEmbeddings`. Providing text embeddings via the Pinecone service. We start by installing prerequisite libraries:"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"id": "f4b5d823fee826c2"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install -qU \"langchain-pinecone>=0.2.0\" "
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"id": "3bc5d3a5ed7f5ce3",
|
||||
"execution_count": null
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Next, we [sign up / log in to Pinecone](https://app.pinecone.io) to get our API key:"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"id": "62a77d25c3fd8bd5"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"from getpass import getpass\n",
|
||||
"\n",
|
||||
"os.environ[\"PINECONE_API_KEY\"] = os.getenv(\"PINECONE_API_KEY\") or getpass(\n",
|
||||
" \"Enter your Pinecone API key: \"\n",
|
||||
")"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"id": "8162dbcbcf7d3d55",
|
||||
"execution_count": null
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Check the document for available [models](https://docs.pinecone.io/models/overview). Now we initialize our embedding model like so:"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"id": "98d860a0a2d8b907"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_pinecone import PineconeEmbeddings\n",
|
||||
"\n",
|
||||
"embeddings = PineconeEmbeddings(model=\"multilingual-e5-large\")"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"id": "2b3adb72786a5275",
|
||||
"execution_count": null
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"From here we can create embeddings either sync or async, let's start with sync! We embed a single text as a query embedding (ie what we search with in RAG) using `embed_query`:"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"id": "11e24da855517230"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"docs = [\n",
|
||||
" \"Apple is a popular fruit known for its sweetness and crisp texture.\",\n",
|
||||
" \"The tech company Apple is known for its innovative products like the iPhone.\",\n",
|
||||
" \"Many people enjoy eating apples as a healthy snack.\",\n",
|
||||
" \"Apple Inc. has revolutionized the tech industry with its sleek designs and user-friendly interfaces.\",\n",
|
||||
" \"An apple a day keeps the doctor away, as the saying goes.\",\n",
|
||||
"]"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"id": "2da515e2a61ef7e9",
|
||||
"execution_count": null
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"doc_embeds = embeddings.embed_documents(docs)\n",
|
||||
"doc_embeds"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"id": "2897e0d570c90b2f",
|
||||
"execution_count": null
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query = \"Tell me about the tech company known as Apple\"\n",
|
||||
"query_embed = embeddings.embed_query(query)\n",
|
||||
"query_embed"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"id": "510784963c0e17a",
|
||||
"execution_count": null
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 2
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython2",
|
||||
"version": "2.7.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -8,71 +8,61 @@
|
||||
"# Sentence Transformers on Hugging Face\n",
|
||||
"\n",
|
||||
">[Hugging Face sentence-transformers](https://huggingface.co/sentence-transformers) is a Python framework for state-of-the-art sentence, text and image embeddings.\n",
|
||||
">One of the embedding models is used in the `HuggingFaceEmbeddings` class.\n",
|
||||
">We have also added an alias for `SentenceTransformerEmbeddings` for users who are more familiar with directly using that package.\n",
|
||||
">You can use these embedding models from the `HuggingFaceEmbeddings` class.\n",
|
||||
"\n",
|
||||
"`sentence_transformers` package models are originating from [Sentence-BERT](https://arxiv.org/abs/1908.10084)"
|
||||
":::caution\n",
|
||||
"\n",
|
||||
"Running sentence-transformers locally can be affected by your operating system and other global factors. It is recommended for experienced users only.\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"You'll need to install the `langchain_huggingface` package as a dependency:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": null,
|
||||
"id": "06c9f47d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-huggingface"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8fb16f74",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Usage"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "ff9be586",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.0.1\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m23.1.1\u001b[0m\n",
|
||||
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n"
|
||||
"[-0.038338568061590195, 0.12346471101045609, -0.028642969205975533, 0.05365273356437683, 0.008845377...\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet sentence_transformers > /dev/null"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "861521a9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_huggingface import HuggingFaceEmbeddings"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "ff9be586",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_huggingface import HuggingFaceEmbeddings\n",
|
||||
"\n",
|
||||
"embeddings = HuggingFaceEmbeddings(model_name=\"all-MiniLM-L6-v2\")\n",
|
||||
"# Equivalent to SentenceTransformerEmbeddings(model_name=\"all-MiniLM-L6-v2\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "d0a98ae9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"text = \"This is a test document.\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "5d6c682b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query_result = embeddings.embed_query(text)"
|
||||
"\n",
|
||||
"text = \"This is a test document.\"\n",
|
||||
"query_result = embeddings.embed_query(text)\n",
|
||||
"\n",
|
||||
"# show only the first 100 characters of the stringified vector\n",
|
||||
"print(str(query_result)[:100] + \"...\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -80,18 +70,39 @@
|
||||
"execution_count": 6,
|
||||
"id": "bb5e74c0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[[-0.038338497281074524, 0.12346471846103668, -0.028642890974879265, 0.05365274101495743, 0.00884535...\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"doc_result = embeddings.embed_documents([text, \"This is not a test document.\"])"
|
||||
"doc_result = embeddings.embed_documents([text, \"This is not a test document.\"])\n",
|
||||
"print(str(doc_result)[:100] + \"...\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1e6525cb",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Troubleshooting\n",
|
||||
"\n",
|
||||
"If you are having issues with the `accelerate` package not being found or failing to import, installing/upgrading it may help:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "aaad49f8",
|
||||
"id": "bbae70f7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
"source": [
|
||||
"%pip install -qU accelerate"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
@@ -110,7 +121,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.12"
|
||||
"version": "3.10.5"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
|
||||
@@ -6,23 +6,28 @@
|
||||
"source": [
|
||||
"# Cassandra Database\n",
|
||||
"\n",
|
||||
"Apache Cassandra® is a widely used database for storing transactional application data. The introduction of functions and tooling in Large Language Models has opened up some exciting use cases for existing data in Generative AI applications. The Cassandra Database toolkit enables AI engineers to efficiently integrate Agents with Cassandra data, offering the following features: \n",
|
||||
" - Fast data access through optimized queries. Most queries should run in single-digit ms or less. \n",
|
||||
" - Schema introspection to enhance LLM reasoning capabilities \n",
|
||||
" - Compatibility with various Cassandra deployments, including Apache Cassandra®, DataStax Enterprise™, and DataStax Astra™ \n",
|
||||
" - Currently, the toolkit is limited to SELECT queries and schema introspection operations. (Safety first)\n",
|
||||
">`Apache Cassandra®` is a widely used database for storing transactional application data. The introduction of functions and >tooling in Large Language Models has opened up some exciting use cases for existing data in Generative AI applications. \n",
|
||||
"\n",
|
||||
">The `Cassandra Database` toolkit enables AI engineers to integrate agents with Cassandra data efficiently, offering \n",
|
||||
">the following features: \n",
|
||||
"> - Fast data access through optimized queries. Most queries should run in single-digit ms or less.\n",
|
||||
"> - Schema introspection to enhance LLM reasoning capabilities\n",
|
||||
"> - Compatibility with various Cassandra deployments, including Apache Cassandra®, DataStax Enterprise™, and DataStax Astra™\n",
|
||||
"> - Currently, the toolkit is limited to SELECT queries and schema introspection operations. (Safety first)\n",
|
||||
"\n",
|
||||
"For more information on creating a Cassandra DB agent see the [CQL agent cookbook](https://github.com/langchain-ai/langchain/blob/master/cookbook/cql_agent.ipynb)\n",
|
||||
"\n",
|
||||
"## Quick Start\n",
|
||||
" - Install the cassio library\n",
|
||||
" - Install the `cassio` library\n",
|
||||
" - Set environment variables for the Cassandra database you are connecting to\n",
|
||||
" - Initialize CassandraDatabase\n",
|
||||
" - Pass the tools to your agent with toolkit.get_tools()\n",
|
||||
" - Initialize `CassandraDatabase`\n",
|
||||
" - Pass the tools to your agent with `toolkit.get_tools()`\n",
|
||||
" - Sit back and watch it do all your work for you\n",
|
||||
"\n",
|
||||
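"A minimal sketch of these steps (hedged: the class paths and the `cassio.init(auto=True)` call are assumptions based on the `langchain_community` integration, with connection settings read from environment variables):\n",
"\n",
"```python\n",
"import cassio\n",
"\n",
"from langchain_community.agent_toolkits.cassandra_database.toolkit import (\n",
"    CassandraDatabaseToolkit,\n",
")\n",
"from langchain_community.utilities.cassandra_database import CassandraDatabase\n",
"\n",
"cassio.init(auto=True)  # pick up Cassandra/Astra settings from the environment\n",
"\n",
"db = CassandraDatabase()\n",
"toolkit = CassandraDatabaseToolkit(db=db)\n",
"tools = toolkit.get_tools()  # SELECT-only query and schema-introspection tools\n",
"```\n",
"\n",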
"## Theory of Operation\n",
|
||||
"Cassandra Query Language (CQL) is the primary *human-centric* way of interacting with a Cassandra database. While offering some flexibility when generating queries, it requires knowledge of Cassandra data modeling best practices. LLM function calling gives an agent the ability to reason and then choose a tool to satisfy the request. Agents using LLMs should reason using Cassandra-specific logic when choosing the appropriate toolkit or chain of toolkits. This reduces the randomness introduced when LLMs are forced to provide a top-down solution. Do you want an LLM to have complete unfettered access to your database? Yeah. Probably not. To accomplish this, we provide a prompt for use when constructing questions for the agent: \n",
|
||||
"\n",
|
||||
"```json\n",
|
||||
"`Cassandra Query Language (CQL)` is the primary *human-centric* way of interacting with a Cassandra database. While offering some flexibility when generating queries, it requires knowledge of Cassandra data modeling best practices. LLM function calling gives an agent the ability to reason and then choose a tool to satisfy the request. Agents using LLMs should reason using Cassandra-specific logic when choosing the appropriate toolkit or chain of toolkits. This reduces the randomness introduced when LLMs are forced to provide a top-down solution. Do you want an LLM to have complete unfettered access to your database? Yeah. Probably not. To accomplish this, we provide a prompt for use when constructing questions for the agent: \n",
|
||||
"\n",
|
||||
"You are an Apache Cassandra expert query analysis bot with the following features \n",
|
||||
"and rules:\n",
|
||||
" - You will take a question from the end user about finding specific \n",
|
||||
@@ -38,6 +43,7 @@
|
||||
"\n",
|
||||
"The following is an example of a query path in JSON format:\n",
|
||||
"\n",
|
||||
"```json\n",
|
||||
" {\n",
|
||||
" \"query_paths\": [\n",
|
||||
" {\n",
|
||||
@@ -448,13 +454,6 @@
|
||||
"\n",
|
||||
"print(response[\"output\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For a deepdive on creating a Cassandra DB agent see the [CQL agent cookbook](https://github.com/langchain-ai/langchain/blob/master/cookbook/cql_agent.ipynb)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
@@ -473,7 +472,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.10.12"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -7,58 +7,44 @@
|
||||
"source": [
|
||||
"# DuckDuckGo Search\n",
|
||||
"\n",
|
||||
"This notebook goes over how to use the duck-duck-go search component."
|
||||
"This guide shows over how to use the DuckDuckGo search component.\n",
|
||||
"\n",
|
||||
"## Usage"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"execution_count": null,
|
||||
"id": "21e46d4d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet duckduckgo-search langchain-community"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "ac4910f8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.tools import DuckDuckGoSearchRun"
|
||||
"%pip install -qU duckduckgo-search langchain-community"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "84b8f773",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"search = DuckDuckGoSearchRun()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "068991a6",
|
||||
"id": "ac4910f8",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"Life After the Presidency How Tall is Obama? Books and Grammy Hobbies Movies About Obama Quotes 1961-present Who Is Barack Obama? Barack Obama was the 44 th president of the United States... facts you never knew about Barack Obama is that his immediate family spread out across three continents. Barack, who led America from 2009 to 2017, comes from a large family of seven living half-siblings. His father, Barack Obama Sr., met his mother, Ann Dunham, in 1960 and married her a year after. With a tear running from his eye, President Barack Obama recalls the 20 first-graders killed in 2012 at Sandy Hook Elementary School, while speaking in the East Room of the White House in ... Former first Lady Rosalynn Carter was laid to rest at her family's home in Plains, Ga. on Nov. 29 following three days of memorials across her home state. She passed away on Nov. 19, aged 96 ... Here are 28 of President Obama's biggest accomplishments as President of the United States. 1 - Rescued the country from the Great Recession, cutting the unemployment rate from 10% to 4.7% over ...\""
|
||||
"'When Ann Dunham and Barack Obama Sr. tied the knot, they kept the news to themselves. \"Nobody was invited,\" Neil Abercrombie, a college friend of Obama Sr., told Time in 2008. The wedding came as ... As the head of the government of the United States, the president is arguably the most powerful government official in the world. The president is elected to a four-year term via an electoral college system. Since the Twenty-second Amendment was adopted in 1951, the American presidency has been Most common names of U.S. presidents 1789-2021. Published by. Aaron O\\'Neill , Jul 4, 2024. The most common first name for a U.S. president is James, followed by John and then William. Six U.S ... Obama\\'s personal charisma, stirring oratory, and his campaign promise to bring change to the established political system resonated with many Democrats, especially young and minority voters. On January 3, 2008, Obama won a surprise victory in the first major nominating contest, the Iowa caucus, over Sen. Hillary Clinton, who was the overwhelming favorite to win the nomination. Former President Barack Obama released a letter about President Biden\\'s decision to drop out of the 2024 presidential race. Notably, Obama did not name or endorse Vice President Kamala Harris.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"search.run(\"Obama's first name?\")"
|
||||
"from langchain_community.tools import DuckDuckGoSearchRun\n",
|
||||
"\n",
|
||||
"search = DuckDuckGoSearchRun()\n",
|
||||
"\n",
|
||||
"search.invoke(\"Obama's first name?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -71,43 +57,27 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 3,
|
||||
"id": "95635444",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.tools import DuckDuckGoSearchResults"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "0133d103",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"search = DuckDuckGoSearchResults()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "439efc06",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'[snippet: 1:12. Former President Barack Obama, in a CNN interview that aired Thursday night, said he does not believe President Joe Biden will face a serious primary challenge during his 2024 reelection ..., title: Five takeaways from Barack Obama\\'s CNN interview on Biden ... - Yahoo, link: https://www.usatoday.com/story/news/politics/2023/06/23/five-takeaways-from-barack-obama-cnn-interview/70349112007/], [snippet: Democratic institutions in the United States and around the world have grown \"creaky,\" former President Barack Obama warned in an exclusive CNN interview Thursday, and it remains incumbent on ..., title: Obama warns democratic institutions are \\'creaky\\' but Trump ... - CNN, link: https://www.cnn.com/2023/06/22/politics/barack-obama-interview-cnntv/index.html], [snippet: Barack Obama was the 44 th president of the United States and the first Black commander-in-chief. He served two terms, from 2009 until 2017. The son of parents from Kenya and Kansas, Obama was ..., title: Barack Obama: Biography, 44th U.S. President, Politician, link: https://www.biography.com/political-figures/barack-obama], [snippet: Aug. 2, 2023, 5:00 PM PDT. By Mike Memoli and Kristen Welker. WASHINGTON — During a trip to the White House in June, former President Barack Obama made it clear to his former running mate that ..., title: Obama privately told Biden he would do whatever it takes to help in 2024, link: https://www.nbcnews.com/politics/white-house/obama-privately-told-biden-whatever-takes-help-2024-rcna97865], [snippet: Natalie Bookey-Baker, a vice president at the Obama Foundation who worked for then-first lady Michelle Obama in the White House, said about 2,500 alumni are expected. They are veterans of Obama ..., title: Barack Obama team reunion this week in Chicago; 2,500 alumni expected ..., link: https://chicago.suntimes.com/politics/2023/10/29/23937504/barack-obama-michelle-obama-david-axelrod-pod-save-america-jon-batiste-jen-psaki-reunion-obamaworld]'"
|
||||
"\"[snippet: Why Obama Hasn't Endorsed Harris. The former president has positioned himself as an impartial elder statesman above intraparty machinations and was neutral during the 2020 Democratic primaries., title: Why Obama Hasn't Endorsed Harris - The New York Times, link: https://www.nytimes.com/2024/07/21/us/politics/why-obama-hasnt-endorsed-harris.html], [snippet: Former President Barack Obama released a letter about President Biden's decision to drop out of the 2024 presidential race. Notably, Obama did not name or endorse Vice President Kamala Harris., title: Read Obama's full statement on Biden dropping out - CBS News, link: https://www.cbsnews.com/news/barack-obama-biden-dropping-out-2024-presidential-race-full-statement/], [snippet: USA TODAY. 0:04. 0:48. Former President Barack Obama recently said, in private, that President Joe Biden's chances at a successful presidential run in 2024 have declined and that he needs to ..., title: Did Obama tell Biden to quit? What the former president said - USA TODAY, link: https://www.usatoday.com/story/news/politics/elections/2024/07/18/did-obama-tell-biden-to-quit-what-the-former-president-said/74458139007/], [snippet: Many of the marquee names in Democratic politics began quickly lining up behind Vice President Kamala Harris on Sunday, but one towering presence in the party held back: Barack Obama. The former president has not yet endorsed Harris; in fact, he did not mention her once in an affectionate — if tautly written — tribute to President Joe Biden that was posted on Medium shortly after Biden ..., title: Why Obama Hasn't Endorsed Harris - Yahoo News, link: https://news.yahoo.com/news/why-obama-hasn-t-endorsed-114259092.html]\""
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"search.run(\"Obama\")"
|
||||
"from langchain_community.tools import DuckDuckGoSearchResults\n",
|
||||
"\n",
|
||||
"search = DuckDuckGoSearchResults()\n",
|
||||
"\n",
|
||||
"search.invoke(\"Obama\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -115,38 +85,30 @@
|
||||
"id": "e17ccfe7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can also just search for news articles. Use the keyword ``backend=\"news\"``"
|
||||
"You can also just search for news articles. Use the keyword `backend=\"news\"`"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": 4,
|
||||
"id": "21afe28d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"search = DuckDuckGoSearchResults(backend=\"news\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "2a4beeb9",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'[snippet: 1:12. Former President Barack Obama, in a CNN interview that aired Thursday night, said he does not believe President Joe Biden will face a serious primary challenge during his 2024 reelection ..., title: Five takeaways from Barack Obama\\'s CNN interview on Biden ... - Yahoo, link: https://www.usatoday.com/story/news/politics/2023/06/23/five-takeaways-from-barack-obama-cnn-interview/70349112007/], [snippet: Democratic institutions in the United States and around the world have grown \"creaky,\" former President Barack Obama warned in an exclusive CNN interview Thursday, and it remains incumbent on ..., title: Obama warns democratic institutions are \\'creaky\\' but Trump ... - CNN, link: https://www.cnn.com/2023/06/22/politics/barack-obama-interview-cnntv/index.html], [snippet: Barack Obama was the 44 th president of the United States and the first Black commander-in-chief. He served two terms, from 2009 until 2017. The son of parents from Kenya and Kansas, Obama was ..., title: Barack Obama: Biography, 44th U.S. President, Politician, link: https://www.biography.com/political-figures/barack-obama], [snippet: Natalie Bookey-Baker, a vice president at the Obama Foundation who worked for then-first lady Michelle Obama in the White House, said about 2,500 alumni are expected. They are veterans of Obama ..., title: Barack Obama team reunion this week in Chicago; 2,500 alumni expected ..., link: https://chicago.suntimes.com/politics/2023/10/29/23937504/barack-obama-michelle-obama-david-axelrod-pod-save-america-jon-batiste-jen-psaki-reunion-obamaworld], [snippet: Aug. 2, 2023, 5:00 PM PDT. By Mike Memoli and Kristen Welker. WASHINGTON — During a trip to the White House in June, former President Barack Obama made it clear to his former running mate that ..., title: Obama privately told Biden he would do whatever it takes to help in 2024, link: https://www.nbcnews.com/politics/white-house/obama-privately-told-biden-whatever-takes-help-2024-rcna97865]'"
|
||||
"'[snippet: Users on X have been widely comparing the boost of support felt for Kamala Harris\\' campaign to Barack Obama\\'s in 2008., title: Surging Support For Kamala Harris Compared To Obama-Era Energy, link: https://www.msn.com/en-us/news/politics/surging-support-for-kamala-harris-compared-to-obama-era-energy/ar-BB1qzdC0, date: 2024-07-24T18:27:01+00:00, source: Newsweek on MSN.com], [snippet: Harris tried to emulate Obama\\'s coalition in 2020 and failed. She may have a better shot at reaching young, Black, and Latino voters this time around., title: Harris May Follow Obama\\'s Path to the White House After All, link: https://www.msn.com/en-us/news/politics/harris-may-follow-obama-s-path-to-the-white-house-after-all/ar-BB1qv9d4, date: 2024-07-23T22:42:00+00:00, source: Intelligencer on MSN.com], [snippet: The Republican presidential candidate said in an interview on Fox News that he \"wouldn\\'t be worried\" about Michelle Obama running., title: Donald Trump Responds to Michelle Obama Threat, link: https://www.msn.com/en-us/news/politics/donald-trump-responds-to-michelle-obama-threat/ar-BB1qqtu5, date: 2024-07-22T18:26:00+00:00, source: Newsweek on MSN.com], [snippet: H eading into the weekend at his vacation home in Rehoboth Beach, Del., President Biden was reportedly stewing over Barack Obama\\'s role in the orchestrated campaign to force him, title: Opinion | Barack Obama Strikes Again, link: https://www.msn.com/en-us/news/politics/opinion-barack-obama-strikes-again/ar-BB1qrfiy, date: 2024-07-22T21:28:00+00:00, source: The Wall Street Journal on MSN.com]'"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"search.run(\"Obama\")"
|
||||
"search = DuckDuckGoSearchResults(backend=\"news\")\n",
|
||||
"\n",
|
||||
"search.invoke(\"Obama\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -154,59 +116,45 @@
|
||||
"id": "5f7c0129",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can also directly pass a custom ``DuckDuckGoSearchAPIWrapper`` to ``DuckDuckGoSearchResults``. Therefore, you have much more control over the search results."
|
||||
"You can also directly pass a custom `DuckDuckGoSearchAPIWrapper` to `DuckDuckGoSearchResults` to provide more control over the search results."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"execution_count": 5,
|
||||
"id": "c7ab3b55",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.utilities import DuckDuckGoSearchAPIWrapper\n",
|
||||
"\n",
|
||||
"wrapper = DuckDuckGoSearchAPIWrapper(region=\"de-de\", time=\"d\", max_results=2)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "adce16e1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"search = DuckDuckGoSearchResults(api_wrapper=wrapper, source=\"news\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "b7e77c54",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'[snippet: When Obama left office in January 2017, a CNN poll showed him with a 60% approval rating, landing him near the top of the list of presidential approval ratings upon leaving office., title: Opinion: The real reason Trump is attacking Obamacare | CNN, link: https://www.cnn.com/2023/12/04/opinions/trump-obamacare-obama-repeal-health-care-obeidallah/index.html], [snippet: Buchempfehlung von Barack Obama. Der gut zweistündige Netflix-Film basiert auf dem gleichnamigen Roman \"Leave the World Behind\" des hochgelobten US-Autors Rumaan Alam. 2020 landete er damit unter den Finalisten des \"National Book Awards\". In Deutschland ist das Buch, das auch Barack Obama auf seiner einflussreichen Lese-Empfehlungsliste hatte ..., title: Neu bei Netflix \"Leave The World Behind\": Kritik zum ... - Prisma, link: https://www.prisma.de/news/filme/Neu-bei-Netflix-Leave-The-World-Behind-Kritik-zum-ungewoehnlichen-Endzeit-Film-mit-Julia-Roberts,46563944]'"
|
||||
"'[snippet: So war das zum Beispiel bei Barack Obama, als er sich für Joe Biden als Kandidat für die Vizepräsidentschaft entschied. USA-Expertin Rachel Tausendfreund erklärt, wie sich Kamala Harris als ..., title: Interview: „Kamala Harris sieht die Welt eher wie Barack Obama - nicht ..., link: https://www.handelsblatt.com/politik/interview-kamala-harris-sieht-die-welt-eher-wie-barack-obama-nicht-wie-joe-biden/100054923.html], [snippet: Disput um Klimapolitik Der seltsame Moment, als Kamala Harris sogar Obama vor Gericht zerrte. Teilen. 0. Chip Somodevilla/Getty Images US-Vizepräsidentin Kamala Harris (links) und Ex-Präsident ..., title: Der seltsame Moment, als Kamala Harris sogar Obama vor Gericht zerrte, link: https://www.focus.de/earth/analyse/disput-um-klimapolitik-der-seltsame-moment-als-kamala-harris-sogar-obama-vor-gericht-zerrte_id_260165157.html], [snippet: Kamala Harris «Auf den Spuren von Obama»: Harris\\' erste Rede überzeugt Experten. Kamala Harris hat ihre erste Rede als Präsidentschaftskandidatin gehalten. Zwei Experten sind sich einig: Sie ..., title: Kamala Harris\\' erste Wahlkampfrede überzeugt Experten, link: https://www.20min.ch/story/kamala-harris-auf-den-spuren-von-obama-harris-erste-rede-ueberzeugt-experten-103154550], [snippet: Harris hat ihre erste Rede als Präsidentschaftskandidatin gehalten. Experten sind sich einig: Sie hat das Potenzial, ein Feuer zu entfachen wie Obama., title: \"Auf Spuren von Obama\": Harris-Rede überzeugt Experten, link: https://www.heute.at/s/auf-spuren-von-obama-harris-rede-ueberzeugt-experten-120049557]'"
|
||||
]
|
||||
},
|
||||
"execution_count": 12,
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"search.run(\"Obama\")"
|
||||
"from langchain_community.utilities import DuckDuckGoSearchAPIWrapper\n",
|
||||
"\n",
|
||||
"wrapper = DuckDuckGoSearchAPIWrapper(region=\"de-de\", time=\"d\", max_results=2)\n",
|
||||
"\n",
|
||||
"search = DuckDuckGoSearchResults(api_wrapper=wrapper, source=\"news\")\n",
|
||||
"\n",
|
||||
"search.invoke(\"Obama\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b133e3c1",
|
||||
"cell_type": "markdown",
|
||||
"id": "f3df6b8b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
"source": [
|
||||
"## Related\n",
|
||||
"\n",
|
||||
"- [How to use a chat model to call tools](https://python.langchain.com/v0.2/docs/how_to/tool_calling/)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
@@ -225,7 +173,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.12"
|
||||
"version": "3.10.5"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
|
||||
@@ -45,21 +45,10 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": null,
|
||||
"id": "377bc723",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Document(page_content='Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \\n\\nTonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \\n\\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \\n\\nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.', metadata={'source': 'state_of_the_union.txt'})"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import TextLoader\n",
|
||||
"from langchain_community.vectorstores import ScaNN\n",
|
||||
@@ -95,15 +84,15 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"execution_count": null,
|
||||
"id": "fc27ad51",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import RetrievalQA\n",
|
||||
"from langchain_community.chat_models import google_palm\n",
|
||||
"from langchain_community.chat_models.google_palm import ChatGooglePalm\n",
|
||||
"\n",
|
||||
"palm_client = google_palm.ChatGooglePalm(google_api_key=\"YOUR_GOOGLE_PALM_API_KEY\")\n",
|
||||
"palm_client = ChatGooglePalm(google_api_key=\"YOUR_GOOGLE_PALM_API_KEY\")\n",
|
||||
"\n",
|
||||
"qa = RetrievalQA.from_chain_type(\n",
|
||||
" llm=palm_client,\n",
|
||||
|
||||
@@ -88,9 +88,10 @@ CHAT_MODEL_FEAT_TABLE = {
|
||||
"link": "/docs/integrations/chat/huggingface/",
|
||||
},
|
||||
"ChatOllama": {
|
||||
"tool_calling": True,
|
||||
"local": True,
|
||||
"json_mode": True,
|
||||
"package": "langchain-community",
|
||||
"package": "langchain-ollama",
|
||||
"link": "/docs/integrations/chat/ollama/",
|
||||
},
|
||||
"vLLM Chat (via ChatOpenAI)": {
|
||||
|
||||
181
docs/scripts/tool_feat_table.py
Normal file
@@ -0,0 +1,181 @@
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
SEARCH_TOOL_FEAT_TABLE = {
|
||||
"Exa Search": {
|
||||
"pricing": "1000 free searches/month",
|
||||
"available_data": "URL, Author, Title, Published Date",
|
||||
"link": "/docs/integrations/tools/exa_search",
|
||||
},
|
||||
"Bing Search": {
|
||||
"pricing": "Paid",
|
||||
"available_data": "URL, Snippet, Title",
|
||||
"link": "/docs/integrations/tools/bing_search",
|
||||
},
|
||||
"DuckDuckgoSearch": {
|
||||
"pricing": "Free",
|
||||
"available_data": "URL, Snippet, Title",
|
||||
"link": "/docs/integrations/tools/ddg",
|
||||
},
|
||||
"Brave Search": {
|
||||
"pricing": "Free",
|
||||
"available_data": "URL, Snippet, Title",
|
||||
"link": "/docs/integrations/tools/brave_search",
|
||||
},
|
||||
"Google Search": {
|
||||
"pricing": "Paid",
|
||||
"available_data": "URL, Snippet, Title",
|
||||
"link": "/docs/integrations/tools/google_search",
|
||||
},
|
||||
"Google Serper": {
|
||||
"pricing": "Free",
|
||||
"available_data": "URL, Snippet, Title, Search Rank, Site Links",
|
||||
"link": "/docs/integrations/tools/google_serper",
|
||||
},
|
||||
"Mojeek Search": {
|
||||
"pricing": "Paid",
|
||||
"available_data": "URL, Snippet, Title",
|
||||
"link": "/docs/integrations/tools/mojeek_search",
|
||||
},
|
||||
"SearxNG Search": {
|
||||
"pricing": "Free",
|
||||
"available_data": "URL, Snippet, Title, Category",
|
||||
"link": "/docs/integrations/tools/searx_search",
|
||||
},
|
||||
"You.com Search": {
|
||||
"pricing": "Free for 60 days",
|
||||
"available_data": "URL, Title, Page Content",
|
||||
"link": "/docs/integrations/tools/you",
|
||||
},
|
||||
"SearchApi": {
|
||||
"pricing": "100 Free Searches on Sign Up",
|
||||
"available_data": "URL, Snippet, Title, Search Rank, Site Links, Authors",
|
||||
"link": "/docs/integrations/tools/searchapi",
|
||||
},
|
||||
"SerpAPI": {
|
||||
"pricing": "100 Free Searches/Month",
|
||||
"available_data": "Answer",
|
||||
"link": "/docs/integrations/tools/serpapi",
|
||||
},
|
||||
}
|
||||
|
||||
CODE_INTERPRETER_TOOL_FEAT_TABLE = {
|
||||
"Bearly Code Interpreter": {
|
||||
"langauges": "Python",
|
||||
"sandbox_lifetime": "Resets on Execution",
|
||||
"upload": True,
|
||||
"return_results": "Text",
|
||||
"link": "/docs/integrations/tools/bearly",
|
||||
},
|
||||
"Riza Code Interpreter": {
|
||||
"langauges": "Python, JavaScript, PHP, Ruby",
|
||||
"sandbox_lifetime": "Resets on Execution",
|
||||
"upload": False,
|
||||
"return_results": "Text",
|
||||
"link": "/docs/integrations/tools/riza",
|
||||
},
|
||||
"E2B Data Analysis": {
|
||||
"langauges": "Python. In beta: JavaScript, R, Java",
|
||||
"sandbox_lifetime": "24 Hours",
|
||||
"upload": True,
|
||||
"return_results": "Text, Images, Videos",
|
||||
"link": "/docs/integrations/tools/e2b_data_analysis",
|
||||
},
|
||||
"Azure Container Apps dynamic sessions": {
|
||||
"langauges": "Python",
|
||||
"sandbox_lifetime": "1 Hour",
|
||||
"upload": True,
|
||||
"return_results": "Text, Images",
|
||||
"link": "/docs/integrations/tools/azure_dynamic_sessions",
|
||||
},
|
||||
}
|
||||
|
||||
TOOLS_TEMPLATE = """\
|
||||
---
|
||||
sidebar_position: 0
|
||||
sidebar_class_name: hidden
|
||||
keywords: [compatibility]
|
||||
custom_edit_url:
|
||||
hide_table_of_contents: true
|
||||
---
|
||||
|
||||
# Tools
|
||||
|
||||
## Search Tools
|
||||
|
||||
The following table shows tools that execute online searches in some shape or form:
|
||||
|
||||
{search_table}
|
||||
|
||||
## Code Interpreter Tools
|
||||
|
||||
The following table shows tools that can be used as code interpreters:
|
||||
|
||||
{code_interpreter_table}
|
||||
|
||||
"""
|
||||
|
||||
|
||||
def get_search_tools_table() -> str:
|
||||
"""Get the table of search tools."""
|
||||
header = ["tool", "pricing", "available_data"]
|
||||
title = ["Tool", "Free/Paid", "Return Data"]
|
||||
rows = [title, [":-"] + [":-:"] * (len(title) - 1)]
|
||||
for search_tool, feats in sorted(SEARCH_TOOL_FEAT_TABLE.items()):
|
||||
# Fields are in the order of the header
|
||||
row = [
|
||||
f"[{search_tool}]({feats['link']})",
|
||||
]
|
||||
for h in header[1:]:
|
||||
row.append(feats.get(h))
|
||||
rows.append(row)
|
||||
return "\n".join(["|".join(row) for row in rows])
|
||||
|
||||
|
||||
def get_code_interpreter_table() -> str:
|
||||
"""Get the table of search tools."""
|
||||
header = [
|
||||
"tool",
|
||||
"langauges",
|
||||
"sandbox_lifetime",
|
||||
"upload",
|
||||
"return_results",
|
||||
]
|
||||
title = [
|
||||
"Tool",
|
||||
"Supported Languages",
|
||||
"Sandbox Lifetime",
|
||||
"Supports File Uploads",
|
||||
"Return Types",
|
||||
]
|
||||
rows = [title, [":-"] + [":-:"] * (len(title) - 1)]
|
||||
for search_tool, feats in sorted(CODE_INTERPRETER_TOOL_FEAT_TABLE.items()):
|
||||
# Fields are in the order of the header
|
||||
row = [
|
||||
f"[{search_tool}]({feats['link']})",
|
||||
]
|
||||
for h in header[1:]:
|
||||
value = feats.get(h)
|
||||
if h == "upload":
|
||||
if value is True:
|
||||
row.append("✅")
|
||||
else:
|
||||
row.append("❌")
|
||||
else:
|
||||
row.append(value)
|
||||
rows.append(row)
|
||||
return "\n".join(["|".join(row) for row in rows])
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
output_dir = Path(sys.argv[1])
|
||||
output_integrations_dir = output_dir / "integrations"
|
||||
output_integrations_dir_tools = output_integrations_dir / "tools"
|
||||
output_integrations_dir_tools.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
tools_page = TOOLS_TEMPLATE.format(
|
||||
search_table=get_search_tools_table(),
|
||||
code_interpreter_table=get_code_interpreter_table(),
|
||||
)
|
||||
with open(output_integrations_dir / "tools" / "index.mdx", "w") as f:
|
||||
f.write(tools_page)
|
||||
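A quick, hedged sanity check of what the generated search table looks like (this assumes the script is importable as `tool_feat_table` from `docs/scripts`):

```python
from tool_feat_table import get_search_tools_table

# The first two emitted lines are the markdown header and alignment rows.
print("\n".join(get_search_tools_table().splitlines()[:2]))
# Tool|Free/Paid|Return Data
# :-|:-:|:-:
```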
@@ -243,8 +243,8 @@ module.exports = {
|
||||
},
|
||||
],
|
||||
link: {
|
||||
type: "generated-index",
|
||||
slug: "integrations/tools",
|
||||
type: "doc",
|
||||
id: "integrations/tools/index",
|
||||
},
|
||||
},
|
||||
{
|
||||
|
||||
@@ -52,17 +52,17 @@ export default function ChatModelTabs(props) {
|
||||
customVarName,
|
||||
} = props;
|
||||
|
||||
const openAIParamsOrDefault = openaiParams ?? `model="gpt-3.5-turbo-0125"`;
|
||||
const openAIParamsOrDefault = openaiParams ?? `model="gpt-4o-mini"`;
|
||||
const anthropicParamsOrDefault =
|
||||
anthropicParams ?? `model="claude-3-sonnet-20240229"`;
|
||||
const cohereParamsOrDefault = cohereParams ?? `model="command-r"`;
|
||||
anthropicParams ?? `model="claude-3-5-sonnet-20240620"`;
|
||||
const cohereParamsOrDefault = cohereParams ?? `model="command-r-plus"`;
|
||||
const fireworksParamsOrDefault =
|
||||
fireworksParams ??
|
||||
`model="accounts/fireworks/models/mixtral-8x7b-instruct"`;
|
||||
`model="accounts/fireworks/models/llama-v3p1-70b-instruct"`;
|
||||
const groqParamsOrDefault = groqParams ?? `model="llama3-8b-8192"`;
|
||||
const mistralParamsOrDefault =
|
||||
mistralParams ?? `model="mistral-large-latest"`;
|
||||
const googleParamsOrDefault = googleParams ?? `model="gemini-pro"`;
|
||||
const googleParamsOrDefault = googleParams ?? `model="gemini-1.5-flash"`;
|
||||
const togetherParamsOrDefault =
|
||||
togetherParams ??
|
||||
`\n base_url="https://api.together.xyz/v1",\n api_key=os.environ["TOGETHER_API_KEY"],\n model="mistralai/Mixtral-8x7B-Instruct-v0.1",\n`;
|
||||
|
||||
@@ -61,6 +61,10 @@
|
||||
{
|
||||
"source": "/cookbook(/?)",
|
||||
"destination": "/v0.1/docs/cookbook/"
|
||||
},
|
||||
{
|
||||
"source": "/docs/integrations/toolkits/document_comparison_toolkit(/?)",
|
||||
"destination": "/docs/tutorials/rag/"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
@@ -78,8 +78,7 @@ build-backend = "poetry.core.masonry.api"
|
||||
# section of the configuration file raise errors.
|
||||
#
|
||||
# https://github.com/tophat/syrupy
|
||||
# --snapshot-warn-unused Prints a warning on unused snapshots rather than fail the test suite.
|
||||
addopts = "--snapshot-warn-unused --strict-markers --strict-config --durations=5"
|
||||
addopts = "--strict-markers --strict-config --durations=5"
|
||||
# Registering custom markers.
|
||||
# https://docs.pytest.org/en/7.1.x/example/markers.html#registering-markers
|
||||
markers = [
|
||||
|
||||
1520
libs/cli/poetry.lock
generated
File diff suppressed because it is too large
@@ -1,6 +1,6 @@
|
||||
[tool.poetry]
|
||||
name = "langchain-cli"
|
||||
version = "0.0.25"
|
||||
version = "0.0.26"
|
||||
description = "CLI for interacting with LangChain"
|
||||
authors = ["Erick Friis <erick@langchain.dev>"]
|
||||
readme = "README.md"
|
||||
|
||||
@@ -89,3 +89,4 @@ upstash-ratelimit>=1.1.0,<2
|
||||
vdms==0.0.20
|
||||
xata>=1.0.0a7,<2
|
||||
xmltodict>=0.13.0,<0.14
|
||||
nanopq==0.2.1
|
||||
|
||||
@@ -100,12 +100,14 @@ MODEL_COST_PER_1K_TOKENS = {
|
||||
"gpt-3.5-turbo-0613-finetuned": 0.003,
|
||||
"gpt-3.5-turbo-1106-finetuned": 0.003,
|
||||
"gpt-3.5-turbo-0125-finetuned": 0.003,
|
||||
"gpt-4o-mini-2024-07-18-finetuned": 0.0003,
|
||||
# Fine Tuned output
|
||||
"babbage-002-finetuned-completion": 0.0016,
|
||||
"davinci-002-finetuned-completion": 0.012,
|
||||
"gpt-3.5-turbo-0613-finetuned-completion": 0.006,
|
||||
"gpt-3.5-turbo-1106-finetuned-completion": 0.006,
|
||||
"gpt-3.5-turbo-0125-finetuned-completion": 0.006,
|
||||
"gpt-4o-mini-2024-07-18-finetuned-completion": 0.0012,
|
||||
# Azure Fine Tuned input
|
||||
"babbage-002-azure-finetuned": 0.0004,
|
||||
"davinci-002-azure-finetuned": 0.002,
|
||||
|
||||
@@ -3,19 +3,29 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from typing import Any, Dict, List, NamedTuple, Optional, cast
|
||||
from typing import Any, Dict, List, Literal, NamedTuple, Optional, cast
|
||||
|
||||
import aiohttp
|
||||
from langchain.chains.api.openapi.requests_chain import APIRequesterChain
|
||||
from langchain.chains.api.openapi.response_chain import APIResponderChain
|
||||
from langchain.chains.base import Chain
|
||||
from langchain.chains.llm import LLMChain
|
||||
from langchain_core.callbacks import CallbackManagerForChainRun, Callbacks
|
||||
from langchain_core.language_models import BaseLanguageModel
|
||||
from langchain_core.language_models import BaseChatModel, BaseLanguageModel
|
||||
from langchain_core.messages import (
|
||||
AIMessage,
|
||||
BaseMessage,
|
||||
HumanMessage,
|
||||
SystemMessage,
|
||||
)
|
||||
from langchain_core.pydantic_v1 import BaseModel, Field
|
||||
from langchain_core.runnables import Runnable, chain
|
||||
from langchain_core.tools import BaseTool
|
||||
from requests import Response
|
||||
|
||||
from langchain_community.agent_toolkits.openapi.toolkit import RequestsToolkit
|
||||
from langchain_community.tools.openapi.utils.api_models import APIOperation
|
||||
from langchain_community.utilities.requests import Requests
|
||||
from langchain_community.utilities.requests import Requests, TextRequestsWrapper
|
||||
|
||||
|
||||
class _ParamMapping(NamedTuple):
|
||||
@@ -228,3 +238,84 @@ class OpenAPIEndpointChain(Chain, BaseModel):
|
||||
callbacks=callbacks,
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
|
||||
def _prepend_system_message(query: str, system_message: str) -> List[BaseMessage]:
|
||||
return [SystemMessage(system_message), HumanMessage(query)]
|
||||
|
||||
|
||||
def _invoke_llm(messages: List[BaseMessage], llm: Runnable) -> List[BaseMessage]:
|
||||
return messages + [llm.invoke(messages)]
|
||||
|
||||
|
||||
def _execute_tools(
|
||||
messages: List[BaseMessage], tool_name_to_tool: Dict[str, BaseTool]
|
||||
) -> List[BaseMessage]:
|
||||
"""Execute tool and return result as a string."""
|
||||
output_messages = []
|
||||
ai_message = next(
|
||||
message for message in messages[::-1] if isinstance(message, AIMessage)
|
||||
)
|
||||
for tool_call in ai_message.tool_calls:
|
||||
selected_tool = tool_name_to_tool[tool_call["name"]]
|
||||
tool_msg = selected_tool.invoke(tool_call)
|
||||
output_messages.append(tool_msg)
|
||||
return messages + output_messages
|
||||
|
||||
|
||||
def create_openapi_endpoint_chain(
|
||||
llm: BaseChatModel,
|
||||
api_spec: str,
|
||||
system_message: Optional[str] = None,
|
||||
allow_dangerous_requests: bool = False,
|
||||
headers: Optional[Dict[str, str]] = None,
|
||||
aiosession: Optional[aiohttp.ClientSession] = None,
|
||||
auth: Optional[Any] = None,
|
||||
response_content_type: Literal["text", "json"] = "text",
|
||||
verify: bool = True,
|
||||
supported_tools: Optional[List[str]] = None,
|
||||
) -> Runnable:
|
||||
requests_wrapper = TextRequestsWrapper(
|
||||
headers=headers,
|
||||
aiosession=aiosession,
|
||||
auth=auth,
|
||||
response_content_type=response_content_type,
|
||||
verify=verify,
|
||||
)
|
||||
toolkit = RequestsToolkit(
|
||||
requests_wrapper=requests_wrapper,
|
||||
allow_dangerous_requests=allow_dangerous_requests,
|
||||
)
|
||||
if supported_tools is None:
|
||||
supported_tools = [
|
||||
"requests_get",
|
||||
"requests_post",
|
||||
"requests_patch",
|
||||
"requests_put",
|
||||
"requests_delete",
|
||||
]
|
||||
tools = [tool for tool in toolkit.get_tools() if tool.name in supported_tools]
|
||||
llm_with_tools = llm.bind_tools(tools)
|
||||
tool_name_to_tool = {tool.name: tool for tool in tools}
|
||||
if system_message is None:
|
||||
system_message = """
|
||||
You have access to an API to help answer user queries.
|
||||
|
||||
Here is documentation on the API:
|
||||
{api_spec}
|
||||
"""
|
||||
system_message = system_message.format(api_spec=api_spec)
|
||||
|
||||
@chain
|
||||
def prepend_system_message(query: str) -> List[BaseMessage]:
|
||||
return _prepend_system_message(query, system_message)
|
||||
|
||||
@chain
|
||||
def invoke_llm(messages: List[BaseMessage]) -> List[BaseMessage]:
|
||||
return _invoke_llm(messages, llm_with_tools)
|
||||
|
||||
@chain
|
||||
def execute_tools(messages: List[BaseMessage]) -> List[BaseMessage]:
|
||||
return _execute_tools(messages, tool_name_to_tool)
|
||||
|
||||
return prepend_system_message | invoke_llm | execute_tools | llm
|
||||
|
||||
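A hedged usage sketch for `create_openapi_endpoint_chain` (the chat model and spec file are placeholders; any tool-calling chat model should work):

```python
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4o-mini")

with open("openapi.yaml") as f:  # placeholder OpenAPI spec
    api_spec = f.read()

chain = create_openapi_endpoint_chain(
    llm,
    api_spec=api_spec,
    allow_dangerous_requests=True,  # must be opted into before the request tools run
)

# The chain takes a raw query string and returns the model's final message.
print(chain.invoke("List the pets available in the store.").content)
```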
@@ -28,7 +28,7 @@ class ElasticsearchChatMessageHistory(BaseChatMessageHistory):
|
||||
es_password: Password to use when connecting to Elasticsearch.
|
||||
es_api_key: API key to use when connecting to Elasticsearch.
|
||||
es_connection: Optional pre-existing Elasticsearch connection.
|
||||
esnsure_ascii: Used to escape ASCII symbols in json.dumps. Defaults to True.
|
||||
ensure_ascii: Used to escape ASCII symbols in json.dumps. Defaults to True.
|
||||
index: Name of the index to use.
|
||||
session_id: Arbitrary key that is used to store the messages
|
||||
of a single chat session.
|
||||
@@ -45,11 +45,11 @@ class ElasticsearchChatMessageHistory(BaseChatMessageHistory):
|
||||
es_user: Optional[str] = None,
|
||||
es_api_key: Optional[str] = None,
|
||||
es_password: Optional[str] = None,
|
||||
esnsure_ascii: Optional[bool] = True,
|
||||
ensure_ascii: Optional[bool] = True,
|
||||
):
|
||||
self.index: str = index
|
||||
self.session_id: str = session_id
|
||||
self.ensure_ascii = esnsure_ascii
|
||||
self.ensure_ascii = ensure_ascii
|
||||
|
||||
# Initialize Elasticsearch client from passed client arg or connection info
|
||||
if es_connection is not None:
|
||||
|
||||
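A short, hedged example of the corrected `ensure_ascii` parameter (the URL and index name are placeholders):

```python
from langchain_community.chat_message_histories import (
    ElasticsearchChatMessageHistory,
)

history = ElasticsearchChatMessageHistory(
    es_url="http://localhost:9200",
    index="chat-history",
    session_id="session-1",
    ensure_ascii=False,  # keep non-ASCII characters unescaped in the stored JSON
)
history.add_user_message("Bonjour, ça va ?")
```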
@@ -187,7 +187,7 @@ class SQLChatMessageHistory(BaseChatMessageHistory):
|
||||
since="0.2.2",
|
||||
removal="0.3.0",
|
||||
name="connection_string",
|
||||
alternative="Use connection instead",
|
||||
alternative="connection",
|
||||
)
|
||||
_warned_once_already = True
|
||||
connection = connection_string
|
||||
|
||||
@@ -51,6 +51,7 @@ if TYPE_CHECKING:
|
||||
from langchain_community.chat_models.deepinfra import (
|
||||
ChatDeepInfra,
|
||||
)
|
||||
from langchain_community.chat_models.edenai import ChatEdenAI
|
||||
from langchain_community.chat_models.ernie import (
|
||||
ErnieBotChat,
|
||||
)
|
||||
@@ -182,6 +183,7 @@ __all__ = [
|
||||
"ChatOctoAI",
|
||||
"ChatDatabricks",
|
||||
"ChatDeepInfra",
|
||||
"ChatEdenAI",
|
||||
"ChatEverlyAI",
|
||||
"ChatFireworks",
|
||||
"ChatFriendli",
|
||||
@@ -237,6 +239,7 @@ _module_lookup = {
|
||||
"ChatDatabricks": "langchain_community.chat_models.databricks",
|
||||
"ChatDeepInfra": "langchain_community.chat_models.deepinfra",
|
||||
"ChatEverlyAI": "langchain_community.chat_models.everlyai",
|
||||
"ChatEdenAI": "langchain_community.chat_models.edenai",
|
||||
"ChatFireworks": "langchain_community.chat_models.fireworks",
|
||||
"ChatFriendli": "langchain_community.chat_models.friendli",
|
||||
"ChatGooglePalm": "langchain_community.chat_models.google_palm",
|
||||
|
||||
@@ -122,8 +122,8 @@ def _format_edenai_messages(messages: List[BaseMessage]) -> Dict[str, Any]:
|
||||
system = None
|
||||
formatted_messages = []
|
||||
|
||||
human_messages = filter(lambda msg: isinstance(msg, HumanMessage), messages)
|
||||
last_human_message = list(human_messages)[-1] if human_messages else ""
|
||||
human_messages = list(filter(lambda msg: isinstance(msg, HumanMessage), messages))
|
||||
last_human_message = human_messages[-1] if human_messages else ""
|
||||
|
||||
tool_results, other_messages = _extract_edenai_tool_results_from_messages(messages)
|
||||
for i, message in enumerate(other_messages):
|
||||
|
||||
@@ -12,6 +12,7 @@ from typing import (
|
||||
Iterator,
|
||||
List,
|
||||
Optional,
|
||||
Sequence,
|
||||
Tuple,
|
||||
Type,
|
||||
Union,
|
||||
@@ -20,6 +21,7 @@ from typing import (
|
||||
from langchain_core.callbacks import (
|
||||
CallbackManagerForLLMRun,
|
||||
)
|
||||
from langchain_core.language_models import LanguageModelInput
|
||||
from langchain_core.language_models.chat_models import BaseChatModel
|
||||
from langchain_core.language_models.llms import create_base_retry_decorator
|
||||
from langchain_core.messages import (
|
||||
@@ -33,6 +35,7 @@ from langchain_core.messages import (
|
||||
HumanMessageChunk,
|
||||
SystemMessage,
|
||||
SystemMessageChunk,
|
||||
ToolMessage,
|
||||
)
|
||||
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
|
||||
from langchain_core.pydantic_v1 import (
|
||||
@@ -41,7 +44,10 @@ from langchain_core.pydantic_v1 import (
|
||||
Field,
|
||||
SecretStr,
|
||||
)
|
||||
from langchain_core.runnables import Runnable
|
||||
from langchain_core.tools import BaseTool
|
||||
from langchain_core.utils import get_from_dict_or_env, pre_init
|
||||
from langchain_core.utils.function_calling import convert_to_openai_tool
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from premai.api.chat_completions.v1_chat_completions_create import (
|
||||
@@ -51,6 +57,19 @@ if TYPE_CHECKING:
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
TOOL_PROMPT_HEADER = """
|
||||
Given the set of tools you used and the response, provide the final answer\n
|
||||
"""
|
||||
|
||||
INTERMEDIATE_TOOL_RESULT_TEMPLATE = """
|
||||
{json}
|
||||
"""
|
||||
|
||||
SINGLE_TOOL_PROMPT_TEMPLATE = """
|
||||
tool id: {tool_id}
|
||||
tool_response: {tool_response}
|
||||
"""
|
||||
|
||||
|
||||
class ChatPremAPIError(Exception):
|
||||
"""Error with the `PremAI` API."""
|
||||
@@ -91,8 +110,22 @@ def _response_to_result(
|
||||
raise ChatPremAPIError(f"ChatResponse must have a content: {content}")
|
||||
|
||||
if role == "assistant":
|
||||
tool_calls = choice.message["tool_calls"]
|
||||
if tool_calls is None:
|
||||
tools = []
|
||||
else:
|
||||
tools = [
|
||||
{
|
||||
"id": tool_call["id"],
|
||||
"name": tool_call["function"]["name"],
|
||||
"args": tool_call["function"]["arguments"],
|
||||
}
|
||||
for tool_call in tool_calls
|
||||
]
|
||||
generations.append(
|
||||
ChatGeneration(text=content, message=AIMessage(content=content))
|
||||
ChatGeneration(
|
||||
text=content, message=AIMessage(content=content, tool_calls=tools)
|
||||
)
|
||||
)
|
||||
elif role == "user":
|
||||
generations.append(
|
||||
@@ -156,41 +189,65 @@ def _messages_to_prompt_dict(
|
||||
system_prompt: Optional[str] = None
|
||||
examples_and_messages: List[Dict[str, Any]] = []
|
||||
|
||||
if template_id is not None:
|
||||
params: Dict[str, str] = {}
|
||||
for input_msg in input_messages:
|
||||
if isinstance(input_msg, SystemMessage):
|
||||
system_prompt = str(input_msg.content)
|
||||
for input_msg in input_messages:
|
||||
if isinstance(input_msg, SystemMessage):
|
||||
system_prompt = str(input_msg.content)
|
||||
|
||||
elif isinstance(input_msg, HumanMessage):
|
||||
if template_id is None:
|
||||
examples_and_messages.append(
|
||||
{"role": "user", "content": str(input_msg.content)}
|
||||
)
|
||||
else:
|
||||
params: Dict[str, str] = {}
|
||||
if input_msg.id is None or input_msg.id == "":
raise ValueError(
"When using a prompt template there should be an id "
"associated with each HumanMessage"
)
|
||||
params[str(input_msg.id)] = str(input_msg.content)
|
||||
|
||||
examples_and_messages.append(
|
||||
{"role": "user", "template_id": template_id, "params": params}
|
||||
)
|
||||
|
||||
for input_msg in input_messages:
|
||||
if isinstance(input_msg, AIMessage):
|
||||
examples_and_messages.append(
|
||||
{"role": "assistant", "content": str(input_msg.content)}
|
||||
{"role": "user", "template_id": template_id, "params": params}
|
||||
)
|
||||
else:
|
||||
for input_msg in input_messages:
|
||||
if isinstance(input_msg, SystemMessage):
|
||||
system_prompt = str(input_msg.content)
|
||||
elif isinstance(input_msg, HumanMessage):
|
||||
examples_and_messages.append(
|
||||
{"role": "user", "content": str(input_msg.content)}
|
||||
)
|
||||
elif isinstance(input_msg, AIMessage):
|
||||
elif isinstance(input_msg, AIMessage):
|
||||
if input_msg.tool_calls is None or len(input_msg.tool_calls) == 0:
|
||||
examples_and_messages.append(
|
||||
{"role": "assistant", "content": str(input_msg.content)}
|
||||
)
|
||||
else:
|
||||
raise ChatPremAPIError("No such role explicitly exists")
|
||||
ai_msg_to_json = {
|
||||
"id": input_msg.id,
|
||||
"content": input_msg.content,
|
||||
"response_metadata": input_msg.response_metadata,
|
||||
"tool_calls": input_msg.tool_calls,
|
||||
}
|
||||
examples_and_messages.append(
|
||||
{
|
||||
"role": "assistant",
|
||||
"content": INTERMEDIATE_TOOL_RESULT_TEMPLATE.format(
|
||||
json=ai_msg_to_json,
|
||||
),
|
||||
}
|
||||
)
|
||||
elif isinstance(input_msg, ToolMessage):
|
||||
pass
|
||||
|
||||
else:
|
||||
raise ChatPremAPIError("No such role explicitly exists")
|
||||
|
||||
# do a separate search for tool calls
|
||||
tool_prompt = ""
|
||||
for input_msg in input_messages:
|
||||
if isinstance(input_msg, ToolMessage):
|
||||
tool_id = input_msg.tool_call_id
|
||||
tool_result = input_msg.content
|
||||
tool_prompt += SINGLE_TOOL_PROMPT_TEMPLATE.format(
|
||||
tool_id=tool_id, tool_response=tool_result
|
||||
)
|
||||
if tool_prompt != "":
|
||||
prompt = TOOL_PROMPT_HEADER
|
||||
prompt += tool_prompt
|
||||
examples_and_messages.append({"role": "user", "content": prompt})
|
||||
|
||||
return system_prompt, examples_and_messages
|
||||
|
||||
|
||||
@@ -289,7 +346,6 @@ class ChatPremAI(BaseChatModel, BaseModel):
|
||||
def _get_all_kwargs(self, **kwargs: Any) -> Dict[str, Any]:
|
||||
kwargs_to_ignore = [
|
||||
"top_p",
|
||||
"tools",
|
||||
"frequency_penalty",
|
||||
"presence_penalty",
|
||||
"logit_bias",
|
||||
@@ -392,6 +448,14 @@ class ChatPremAI(BaseChatModel, BaseModel):
|
||||
except Exception as _:
|
||||
continue
|
||||
|
||||
def bind_tools(
|
||||
self,
|
||||
tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]],
|
||||
**kwargs: Any,
|
||||
) -> Runnable[LanguageModelInput, BaseMessage]:
|
||||
formatted_tools = [convert_to_openai_tool(tool) for tool in tools]
|
||||
return super().bind(tools=formatted_tools, **kwargs)
|
||||
|
||||
|
||||
def create_prem_retry_decorator(
|
||||
llm: ChatPremAI,
|
||||
|
||||
@@ -411,6 +411,9 @@ if TYPE_CHECKING:
|
||||
from langchain_community.document_loaders.scrapfly import (
|
||||
ScrapflyLoader,
|
||||
)
|
||||
from langchain_community.document_loaders.scrapingant import (
|
||||
ScrapingAntLoader,
|
||||
)
|
||||
from langchain_community.document_loaders.sharepoint import (
|
||||
SharePointLoader,
|
||||
)
|
||||
@@ -666,6 +669,7 @@ _module_lookup = {
|
||||
"S3DirectoryLoader": "langchain_community.document_loaders.s3_directory",
|
||||
"S3FileLoader": "langchain_community.document_loaders.s3_file",
|
||||
"ScrapflyLoader": "langchain_community.document_loaders.scrapfly",
|
||||
"ScrapingAntLoader": "langchain_community.document_loaders.scrapingant",
|
||||
"SQLDatabaseLoader": "langchain_community.document_loaders.sql_database",
|
||||
"SRTLoader": "langchain_community.document_loaders.srt",
|
||||
"SeleniumURLLoader": "langchain_community.document_loaders.url_selenium",
|
||||
@@ -870,6 +874,7 @@ __all__ = [
|
||||
"S3DirectoryLoader",
|
||||
"S3FileLoader",
|
||||
"ScrapflyLoader",
|
||||
"ScrapingAntLoader",
|
||||
"SQLDatabaseLoader",
|
||||
"SRTLoader",
|
||||
"SeleniumURLLoader",
|
||||
|
||||
@@ -0,0 +1,66 @@
|
||||
"""ScrapingAnt Web Extractor."""
|
||||
|
||||
import logging
|
||||
from typing import Iterator, List, Optional
|
||||
|
||||
from langchain_core.document_loaders import BaseLoader
|
||||
from langchain_core.documents import Document
|
||||
from langchain_core.utils import get_from_env
|
||||
|
||||
logger = logging.getLogger(__file__)
|
||||
|
||||
|
||||
class ScrapingAntLoader(BaseLoader):
|
||||
"""Turn an url to LLM accessible markdown with `ScrapingAnt`.
|
||||
|
||||
For further details, visit: https://docs.scrapingant.com/python-client
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
urls: List[str],
|
||||
*,
|
||||
api_key: Optional[str] = None,
|
||||
scrape_config: Optional[dict] = None,
|
||||
continue_on_failure: bool = True,
|
||||
) -> None:
|
||||
"""Initialize client.
|
||||
|
||||
Args:
|
||||
urls: List of URLs to scrape.
|
||||
api_key: The ScrapingAnt API key. If not specified, the
SCRAPINGANT_API_KEY environment variable must be set.
|
||||
scrape_config: The scraping config from ScrapingAntClient.markdown_request
|
||||
continue_on_failure: Whether to continue if scraping a URL fails.
|
||||
"""
|
||||
try:
|
||||
from scrapingant_client import ScrapingAntClient
|
||||
except ImportError:
|
||||
raise ImportError(
|
||||
"`scrapingant-client` package not found,"
|
||||
" run `pip install scrapingant-client`"
|
||||
)
|
||||
if not urls:
|
||||
raise ValueError("URLs must be provided.")
|
||||
api_key = api_key or get_from_env("api_key", "SCRAPINGANT_API_KEY")
|
||||
self.client = ScrapingAntClient(token=api_key)
|
||||
self.urls = urls
|
||||
self.scrape_config = scrape_config
|
||||
self.continue_on_failure = continue_on_failure
|
||||
|
||||
def lazy_load(self) -> Iterator[Document]:
|
||||
"""Fetch data from ScrapingAnt."""
|
||||
|
||||
scrape_config = self.scrape_config if self.scrape_config is not None else {}
|
||||
for url in self.urls:
|
||||
try:
|
||||
result = self.client.markdown_request(url=url, **scrape_config)
|
||||
yield Document(
|
||||
page_content=result.markdown,
|
||||
metadata={"url": result.url},
|
||||
)
|
||||
except Exception as e:
|
||||
if self.continue_on_failure:
|
||||
logger.error(f"Error fetching data from {url}, exception: {e}")
|
||||
else:
|
||||
raise e
|
||||
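A minimal usage sketch for the new loader (the URL and API key are placeholders):

```python
from langchain_community.document_loaders import ScrapingAntLoader

loader = ScrapingAntLoader(
    ["https://example.com"],
    api_key="<your-scrapingant-api-key>",  # or set SCRAPINGANT_API_KEY
    continue_on_failure=True,
)
docs = loader.load()  # Documents with markdown page_content and a "url" metadata key
```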
@@ -1,14 +1,23 @@
"""Loader that uses unstructured to load files."""

import collections
from __future__ import annotations

import logging
import os
from abc import ABC, abstractmethod
from pathlib import Path
from typing import IO, Any, Callable, Dict, Iterator, List, Optional, Sequence, Union
from typing import IO, Any, Callable, Iterator, List, Optional, Sequence, Union

from langchain_core._api.deprecation import deprecated
from langchain_core.documents import Document
from typing_extensions import TypeAlias

from langchain_community.document_loaders.base import BaseLoader

Element: TypeAlias = Any

logger = logging.getLogger(__file__)


def satisfies_min_unstructured_version(min_version: str) -> bool:
    """Check if the installed `Unstructured` version exceeds the minimum version
@@ -41,8 +50,8 @@ class UnstructuredBaseLoader(BaseLoader, ABC):

    def __init__(
        self,
        mode: str = "single",
        post_processors: Optional[List[Callable]] = None,
        mode: str = "single",  # deprecated
        post_processors: Optional[List[Callable[[str], str]]] = None,
        **unstructured_kwargs: Any,
    ):
        """Initialize with file path."""
@@ -53,32 +62,41 @@ class UnstructuredBaseLoader(BaseLoader, ABC):
                "unstructured package not found, please install it with "
                "`pip install unstructured`"
            )

        # `single` - elements are combined into one (default)
        # `elements` - maintain individual elements
        # `paged` - elements are combined by page
        _valid_modes = {"single", "elements", "paged"}
        if mode not in _valid_modes:
            raise ValueError(
                f"Got {mode} for `mode`, but should be one of `{_valid_modes}`"
            )
        self.mode = mode

        if not satisfies_min_unstructured_version("0.5.4"):
            if "strategy" in unstructured_kwargs:
                unstructured_kwargs.pop("strategy")

        self._check_if_both_mode_and_chunking_strategy_are_by_page(
            mode, unstructured_kwargs
        )
        self.mode = mode
        self.unstructured_kwargs = unstructured_kwargs
        self.post_processors = post_processors or []

    @abstractmethod
    def _get_elements(self) -> List:
    def _get_elements(self) -> List[Element]:
        """Get elements."""

    @abstractmethod
    def _get_metadata(self) -> dict:
        """Get metadata."""
    def _get_metadata(self) -> dict[str, Any]:
        """Get file_path metadata if available."""

    def _post_process_elements(self, elements: list) -> list:
        """Applies post processing functions to extracted unstructured elements.
        Post processing functions are str -> str callables are passed
        in using the post_processors kwarg when the loader is instantiated."""
    def _post_process_elements(self, elements: List[Element]) -> List[Element]:
        """Apply post processing functions to extracted unstructured elements.

        Post processing functions are str -> str callables passed
        in using the post_processors kwarg when the loader is instantiated.
        """
        for element in elements:
            for post_processor in self.post_processors:
                element.apply(post_processor)
@@ -97,18 +115,25 @@ class UnstructuredBaseLoader(BaseLoader, ABC):
                    metadata.update(element.metadata.to_dict())
                if hasattr(element, "category"):
                    metadata["category"] = element.category
                if element.to_dict().get("element_id"):
                    metadata["element_id"] = element.to_dict().get("element_id")
                yield Document(page_content=str(element), metadata=metadata)
        elif self.mode == "paged":
            text_dict: Dict[int, str] = {}
            meta_dict: Dict[int, Dict] = {}
            logger.warning(
                "`mode='paged'` is deprecated in favor of the 'by_page' chunking"
                " strategy. Learn more about chunking here:"
                " https://docs.unstructured.io/open-source/core-functionality/chunking"
            )
            text_dict: dict[int, str] = {}
            meta_dict: dict[int, dict[str, Any]] = {}

            for idx, element in enumerate(elements):
            for element in elements:
                metadata = self._get_metadata()
                if hasattr(element, "metadata"):
                    metadata.update(element.metadata.to_dict())
                page_number = metadata.get("page_number", 1)

                # Check if this page_number already exists in docs_dict
                # Check if this page_number already exists in text_dict
                if page_number not in text_dict:
                    # If not, create new entry with initial text and metadata
                    text_dict[page_number] = str(element) + "\n\n"
@@ -128,18 +153,37 @@ class UnstructuredBaseLoader(BaseLoader, ABC):
        else:
            raise ValueError(f"mode of {self.mode} not supported.")

    def _check_if_both_mode_and_chunking_strategy_are_by_page(
        self, mode: str, unstructured_kwargs: dict[str, Any]
    ) -> None:
        if (
            mode == "paged"
            and unstructured_kwargs.get("chunking_strategy") == "by_page"
        ):
            raise ValueError(
                "Only one of `chunking_strategy='by_page'` or `mode='paged'` may be"
                " set. `chunking_strategy` is preferred."
            )


@deprecated(
    since="0.2.8",
    removal="0.4.0",
    alternative_import="langchain_unstructured.UnstructuredLoader",
)
class UnstructuredFileLoader(UnstructuredBaseLoader):
    """Load files using `Unstructured`.

    The file loader uses the
    unstructured partition function and will automatically detect the file
    type. You can run the loader in one of two modes: "single" and "elements".
    If you use "single" mode, the document will be returned as a single
    langchain Document object. If you use "elements" mode, the unstructured
    library will split the document into elements such as Title and NarrativeText.
    You can pass in additional unstructured kwargs after mode to apply
    different unstructured settings.
    The file loader uses the unstructured partition function and will automatically
    detect the file type. You can run the loader in different modes: "single",
    "elements", and "paged". The default "single" mode will return a single langchain
    Document object. If you use "elements" mode, the unstructured library will split
    the document into elements such as Title and NarrativeText and return those as
    individual langchain Document objects. In addition to these post-processing modes
    (which are specific to the LangChain Loaders), Unstructured has its own "chunking"
    parameters for post-processing elements into more useful chunks for use cases such
    as Retrieval Augmented Generation (RAG). You can pass in additional unstructured
    kwargs to configure different unstructured settings.

    Examples
    --------
@@ -152,24 +196,27 @@ class UnstructuredFileLoader(UnstructuredBaseLoader):

    References
    ----------
    https://unstructured-io.github.io/unstructured/bricks.html#partition
    https://docs.unstructured.io/open-source/core-functionality/partitioning
    https://docs.unstructured.io/open-source/core-functionality/chunking
    """

    def __init__(
        self,
        file_path: Union[str, List[str], Path, List[Path], None],
        file_path: Union[str, List[str], Path, List[Path]],
        *,
        mode: str = "single",
        **unstructured_kwargs: Any,
    ):
        """Initialize with file path."""
        self.file_path = file_path

        super().__init__(mode=mode, **unstructured_kwargs)

    def _get_elements(self) -> List:
    def _get_elements(self) -> List[Element]:
        from unstructured.partition.auto import partition

        if isinstance(self.file_path, list):
            elements = []
            elements: List[Element] = []
            for file in self.file_path:
                if isinstance(file, Path):
                    file = str(file)
@@ -180,35 +227,33 @@ class UnstructuredFileLoader(UnstructuredBaseLoader):
            self.file_path = str(self.file_path)
            return partition(filename=self.file_path, **self.unstructured_kwargs)

    def _get_metadata(self) -> dict:
    def _get_metadata(self) -> dict[str, Any]:
        return {"source": self.file_path}


def get_elements_from_api(
    file_path: Union[str, List[str], Path, List[Path], None] = None,
    file: Union[IO, Sequence[IO], None] = None,
    api_url: str = "https://api.unstructured.io/general/v0/general",
    file: Union[IO[bytes], Sequence[IO[bytes]], None] = None,
    api_url: str = "https://api.unstructuredapp.io/general/v0/general",
    api_key: str = "",
    **unstructured_kwargs: Any,
) -> List:
) -> List[Element]:
    """Retrieve a list of elements from the `Unstructured API`."""
    if is_list := isinstance(file_path, list):
        file_path = [str(path) for path in file_path]
    if isinstance(file, collections.abc.Sequence) or is_list:
    if isinstance(file, Sequence) or is_list:
        from unstructured.partition.api import partition_multiple_via_api

        _doc_elements = partition_multiple_via_api(
            filenames=file_path,
            files=file,
            filenames=file_path,  # type: ignore
            files=file,  # type: ignore
            api_key=api_key,
            api_url=api_url,
            **unstructured_kwargs,
        )

        elements = []
        for _elements in _doc_elements:
            elements.extend(_elements)

        return elements
    else:
        from unstructured.partition.api import partition_via_api
@@ -222,59 +267,69 @@ def get_elements_from_api(
        )


class UnstructuredAPIFileLoader(UnstructuredFileLoader):
@deprecated(
    since="0.2.8",
    removal="0.4.0",
    alternative_import="langchain_unstructured.UnstructuredLoader",
)
class UnstructuredAPIFileLoader(UnstructuredBaseLoader):
    """Load files using `Unstructured` API.

    By default, the loader makes a call to the hosted Unstructured API.
    If you are running the unstructured API locally, you can change the
    API rule by passing in the url parameter when you initialize the loader.
    The hosted Unstructured API requires an API key. See
    https://www.unstructured.io/api-key/ if you need to generate a key.
    By default, the loader makes a call to the hosted Unstructured API. If you are
    running the unstructured API locally, you can change the API rule by passing in the
    url parameter when you initialize the loader. The hosted Unstructured API requires
    an API key. See the links below to learn more about our API offerings and get an
    API key.

    You can run the loader in one of two modes: "single" and "elements".
    If you use "single" mode, the document will be returned as a single
    langchain Document object. If you use "elements" mode, the unstructured
    library will split the document into elements such as Title and NarrativeText.
    You can pass in additional unstructured kwargs after mode to apply
    different unstructured settings.
    You can run the loader in different modes: "single", "elements", and "paged". The
    default "single" mode will return a single langchain Document object. If you use
    "elements" mode, the unstructured library will split the document into elements
    such as Title and NarrativeText and return those as individual langchain Document
    objects. In addition to these post-processing modes (which are specific to the
    LangChain Loaders), Unstructured has its own "chunking" parameters for
    post-processing elements into more useful chunks for use cases such as Retrieval
    Augmented Generation (RAG). You can pass in additional unstructured kwargs to
    configure different unstructured settings.

    Examples
    ```python
    from langchain_community.document_loaders import UnstructuredAPIFileLoader

    loader = UnstructuredFileAPILoader(
    loader = UnstructuredAPIFileLoader(
        "example.pdf", mode="elements", strategy="fast", api_key="MY_API_KEY",
    )
    docs = loader.load()

    References
    ----------
    https://unstructured-io.github.io/unstructured/bricks.html#partition
    https://www.unstructured.io/api-key/
    https://github.com/Unstructured-IO/unstructured-api
    https://docs.unstructured.io/api-reference/api-services/sdk
    https://docs.unstructured.io/api-reference/api-services/overview
    https://docs.unstructured.io/open-source/core-functionality/partitioning
    https://docs.unstructured.io/open-source/core-functionality/chunking
    """

    def __init__(
        self,
        file_path: Union[str, List[str], None] = "",
        file_path: Union[str, List[str]],
        *,
        mode: str = "single",
        url: str = "https://api.unstructured.io/general/v0/general",
        url: str = "https://api.unstructuredapp.io/general/v0/general",
        api_key: str = "",
        **unstructured_kwargs: Any,
    ):
        """Initialize with file path."""

        validate_unstructured_version(min_unstructured_version="0.10.15")

        self.file_path = file_path
        self.url = url
        self.api_key = api_key
        self.api_key = os.getenv("UNSTRUCTURED_API_KEY") or api_key

        super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
        super().__init__(mode=mode, **unstructured_kwargs)

    def _get_metadata(self) -> dict:
    def _get_metadata(self) -> dict[str, Any]:
        return {"source": self.file_path}

    def _get_elements(self) -> List:
    def _get_elements(self) -> List[Element]:
        return get_elements_from_api(
            file_path=self.file_path,
            api_key=self.api_key,
@@ -282,18 +337,36 @@ class UnstructuredAPIFileLoader(UnstructuredFileLoader):
            **self.unstructured_kwargs,
        )

    def _post_process_elements(self, elements: List[Element]) -> List[Element]:
        """Apply post processing functions to extracted unstructured elements.

        Post processing functions are str -> str callables passed
        in using the post_processors kwarg when the loader is instantiated.
        """
        for element in elements:
            for post_processor in self.post_processors:
                element.apply(post_processor)
        return elements


@deprecated(
    since="0.2.8",
    removal="0.4.0",
    alternative_import="langchain_unstructured.UnstructuredLoader",
)
class UnstructuredFileIOLoader(UnstructuredBaseLoader):
    """Load files using `Unstructured`.
    """Load file-like objects opened in read mode using `Unstructured`.

    The file loader
    uses the unstructured partition function and will automatically detect the file
    type. You can run the loader in one of two modes: "single" and "elements".
    If you use "single" mode, the document will be returned as a single
    langchain Document object. If you use "elements" mode, the unstructured
    library will split the document into elements such as Title and NarrativeText.
    You can pass in additional unstructured kwargs after mode to apply
    different unstructured settings.
    The file loader uses the unstructured partition function and will automatically
    detect the file type. You can run the loader in different modes: "single",
    "elements", and "paged". The default "single" mode will return a single langchain
    Document object. If you use "elements" mode, the unstructured library will split
    the document into elements such as Title and NarrativeText and return those as
    individual langchain Document objects. In addition to these post-processing modes
    (which are specific to the LangChain Loaders), Unstructured has its own "chunking"
    parameters for post-processing elements into more useful chunks for use cases
    such as Retrieval Augmented Generation (RAG). You can pass in additional
    unstructured kwargs to configure different unstructured settings.

    Examples
    --------
@@ -308,12 +381,14 @@ class UnstructuredFileIOLoader(UnstructuredBaseLoader):

    References
    ----------
    https://unstructured-io.github.io/unstructured/bricks.html#partition
    https://docs.unstructured.io/open-source/core-functionality/partitioning
    https://docs.unstructured.io/open-source/core-functionality/chunking
    """

    def __init__(
        self,
        file: Union[IO, Sequence[IO]],
        file: IO[bytes],
        *,
        mode: str = "single",
        **unstructured_kwargs: Any,
    ):
@@ -321,72 +396,114 @@ class UnstructuredFileIOLoader(UnstructuredBaseLoader):
        self.file = file
        super().__init__(mode=mode, **unstructured_kwargs)

    def _get_elements(self) -> List:
    def _get_elements(self) -> List[Element]:
        from unstructured.partition.auto import partition

        return partition(file=self.file, **self.unstructured_kwargs)

    def _get_metadata(self) -> dict:
    def _get_metadata(self) -> dict[str, Any]:
        return {}

    def _post_process_elements(self, elements: List[Element]) -> List[Element]:
        """Apply post processing functions to extracted unstructured elements.

        Post processing functions are str -> str callables passed
        in using the post_processors kwarg when the loader is instantiated.
        """
        for element in elements:
            for post_processor in self.post_processors:
                element.apply(post_processor)
        return elements


class UnstructuredAPIFileIOLoader(UnstructuredFileIOLoader):
    """Load files using `Unstructured` API.

    By default, the loader makes a call to the hosted Unstructured API.
    If you are running the unstructured API locally, you can change the
    API rule by passing in the url parameter when you initialize the loader.
    The hosted Unstructured API requires an API key. See
    https://www.unstructured.io/api-key/ if you need to generate a key.

    You can run the loader in one of two modes: "single" and "elements".
    If you use "single" mode, the document will be returned as a single
    langchain Document object. If you use "elements" mode, the unstructured
    library will split the document into elements such as Title and NarrativeText.
    You can pass in additional unstructured kwargs after mode to apply
    different unstructured settings.
@deprecated(
    since="0.2.8",
    removal="0.4.0",
    alternative_import="langchain_unstructured.UnstructuredLoader",
)
class UnstructuredAPIFileIOLoader(UnstructuredBaseLoader):
    """Send file-like objects with `unstructured-client` sdk to the Unstructured API.

    By default, the loader makes a call to the hosted Unstructured API. If you are
    running the unstructured API locally, you can change the API rule by passing in the
    url parameter when you initialize the loader. The hosted Unstructured API requires
    an API key. See the links below to learn more about our API offerings and get an
    API key.

    You can run the loader in different modes: "single", "elements", and "paged". The
    default "single" mode will return a single langchain Document object. If you use
    "elements" mode, the unstructured library will split the document into elements
    such as Title and NarrativeText and return those as individual langchain Document
    objects. In addition to these post-processing modes (which are specific to the
    LangChain Loaders), Unstructured has its own "chunking" parameters for
    post-processing elements into more useful chunks for use cases such as Retrieval
    Augmented Generation (RAG). You can pass in additional unstructured kwargs to
    configure different unstructured settings.

    Examples
    --------
    from langchain_community.document_loaders import UnstructuredAPIFileLoader

    with open("example.pdf", "rb") as f:
        loader = UnstructuredFileAPILoader(
        loader = UnstructuredAPIFileIOLoader(
            f, mode="elements", strategy="fast", api_key="MY_API_KEY",
        )
        docs = loader.load()

    References
    ----------
    https://unstructured-io.github.io/unstructured/bricks.html#partition
    https://www.unstructured.io/api-key/
    https://github.com/Unstructured-IO/unstructured-api
    https://docs.unstructured.io/api-reference/api-services/sdk
    https://docs.unstructured.io/api-reference/api-services/overview
    https://docs.unstructured.io/open-source/core-functionality/partitioning
    https://docs.unstructured.io/open-source/core-functionality/chunking
    """

    def __init__(
        self,
        file: Union[IO, Sequence[IO]],
        file: Union[IO[bytes], Sequence[IO[bytes]]],
        *,
        mode: str = "single",
        url: str = "https://api.unstructured.io/general/v0/general",
        url: str = "https://api.unstructuredapp.io/general/v0/general",
        api_key: str = "",
        **unstructured_kwargs: Any,
    ):
        """Initialize with file path."""

        if isinstance(file, collections.abc.Sequence):
        if isinstance(file, Sequence):
            validate_unstructured_version(min_unstructured_version="0.6.3")
        if file:
            validate_unstructured_version(min_unstructured_version="0.6.2")
            validate_unstructured_version(min_unstructured_version="0.6.2")

        self.file = file
        self.url = url
        self.api_key = api_key
        self.api_key = os.getenv("UNSTRUCTURED_API_KEY") or api_key

        super().__init__(file=file, mode=mode, **unstructured_kwargs)
        super().__init__(mode=mode, **unstructured_kwargs)

    def _get_elements(self) -> List:
        return get_elements_from_api(
            file=self.file,
            api_key=self.api_key,
            api_url=self.url,
            **self.unstructured_kwargs,
        )
    def _get_elements(self) -> List[Element]:
        if self.unstructured_kwargs.get("metadata_filename"):
            return get_elements_from_api(
                file=self.file,
                file_path=self.unstructured_kwargs.pop("metadata_filename"),
                api_key=self.api_key,
                api_url=self.url,
                **self.unstructured_kwargs,
            )
        else:
            raise ValueError(
                "If partitioning a file via api,"
                " metadata_filename must be specified as well.",
            )

    def _get_metadata(self) -> dict[str, Any]:
        return {}

    def _post_process_elements(self, elements: List[Element]) -> List[Element]:
        """Apply post processing functions to extracted unstructured elements.

        Post processing functions are str -> str callables passed
        in using the post_processors kwarg when the loader is instantiated.
        """
        for element in elements:
            for post_processor in self.post_processors:
                element.apply(post_processor)
        return elements
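
The deprecation warning above points users from `mode="paged"` toward Unstructured's own chunking. A minimal sketch of the suggested replacement, assuming a local `example.pdf` (the filename is illustrative; `chunking_strategy` travels through `unstructured_kwargs` to the partition call, availability of `"by_page"` depends on your unstructured version or API plan, and combining it with `mode="paged"` now raises a `ValueError`):

```python
from langchain_community.document_loaders import UnstructuredFileLoader

loader = UnstructuredFileLoader(
    "example.pdf",                # hypothetical input file
    mode="elements",              # keep per-element Documents
    chunking_strategy="by_page",  # let unstructured group elements by page
)
docs = loader.load()
```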
@@ -65,10 +65,10 @@ class AzureOpenAIEmbeddings(OpenAIEmbeddings):
            or os.getenv("AZURE_OPENAI_API_KEY")
            or os.getenv("OPENAI_API_KEY")
        )
        values["openai_api_base"] = values["openai_api_base"] or os.getenv(
        values["openai_api_base"] = values.get("openai_api_base") or os.getenv(
            "OPENAI_API_BASE"
        )
        values["openai_api_version"] = values["openai_api_version"] or os.getenv(
        values["openai_api_version"] = values.get("openai_api_version") or os.getenv(
            "OPENAI_API_VERSION", default="2023-05-15"
        )
        values["openai_api_type"] = get_from_dict_or_env(
@@ -25,19 +25,34 @@ BAICHUAN_API_URL: str = "http://api.baichuan-ai.com/v1/embeddings"
class BaichuanTextEmbeddings(BaseModel, Embeddings):
    """Baichuan Text Embedding models.

    To use, you should set the environment variable ``BAICHUAN_API_KEY`` to
    your API key or pass it as a named parameter to the constructor.
    Setup:
        To use, you should set the environment variable ``BAICHUAN_API_KEY`` to
        your API key or pass it as a named parameter to the constructor.

    Example:
        .. code-block:: bash

            export BAICHUAN_API_KEY="your-api-key"

    Instantiate:
        .. code-block:: python

            from langchain_community.embeddings import BaichuanTextEmbeddings

            baichuan = BaichuanTextEmbeddings(baichuan_api_key="my-api-key")
    """
            embeddings = BaichuanTextEmbeddings()

    Embed:
        .. code-block:: python

            # embed the documents
            vectors = embeddings.embed_documents([text1, text2, ...])

            # embed the query
            vectors = embeddings.embed_query(text)
    """  # noqa: E501

    session: Any  #: :meta private:
    model_name: str = Field(default="Baichuan-Text-Embedding", alias="model")
    """The model used to embed the documents."""
    baichuan_api_key: Optional[SecretStr] = Field(default=None, alias="api_key")
    """Automatically inferred from env var `BAICHUAN_API_KEY` if not provided."""
    chunk_size: int = 16
@@ -1,5 +1,18 @@
from __future__ import annotations

import json
from typing import Any, AsyncIterator, Dict, Iterator, List, Mapping, Optional, Union
from typing import (
    Any,
    AsyncIterator,
    Callable,
    Dict,
    Iterator,
    List,
    Mapping,
    Optional,
    Tuple,
    Union,
)

import aiohttp
import requests
@@ -132,6 +145,10 @@ class _OllamaCommon(BaseLanguageModel):
    tokens for authentication.
    """

    auth: Union[Callable, Tuple, None] = None
    """Additional auth tuple or callable to enable Basic/Digest/Custom HTTP Auth.
    Expects the same format, type and values as requests.request auth parameter."""

    @property
    def _default_params(self) -> Dict[str, Any]:
        """Get the default parameters for calling Ollama."""
@@ -237,6 +254,7 @@ class _OllamaCommon(BaseLanguageModel):
                "Content-Type": "application/json",
                **(self.headers if isinstance(self.headers, dict) else {}),
            },
            auth=self.auth,
            json=request_payload,
            stream=True,
            timeout=self.timeout,
@@ -300,6 +318,7 @@ class _OllamaCommon(BaseLanguageModel):
                "Content-Type": "application/json",
                **(self.headers if isinstance(self.headers, dict) else {}),
            },
            auth=self.auth,
            json=request_payload,
            timeout=self.timeout,
        ) as response:
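
A minimal sketch of the new `auth` field, assuming an Ollama server behind HTTP Basic auth (host and credentials are illustrative; the value is passed straight through to the underlying request, so a tuple or a callable both work):

```python
from langchain_community.llms import Ollama

llm = Ollama(
    base_url="https://ollama.internal.example.com",  # hypothetical proxied server
    model="llama3",
    auth=("ollama-user", "s3cret"),  # same format as requests.request's auth param
)
print(llm.invoke("Say hello in one word."))
```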
@@ -92,6 +92,7 @@ if TYPE_CHECKING:
    from langchain_community.retrievers.milvus import (
        MilvusRetriever,
    )
    from langchain_community.retrievers.nanopq import NanoPQRetriever
    from langchain_community.retrievers.outline import (
        OutlineRetriever,
    )
@@ -171,6 +172,7 @@ _module_lookup = {
    "LlamaIndexRetriever": "langchain_community.retrievers.llama_index",
    "MetalRetriever": "langchain_community.retrievers.metal",
    "MilvusRetriever": "langchain_community.retrievers.milvus",
    "NanoPQRetriever": "langchain_community.retrievers.nanopq",
    "OutlineRetriever": "langchain_community.retrievers.outline",
    "PineconeHybridSearchRetriever": "langchain_community.retrievers.pinecone_hybrid_search",  # noqa: E501
    "PubMedRetriever": "langchain_community.retrievers.pubmed",
@@ -226,6 +228,7 @@ __all__ = [
    "LlamaIndexRetriever",
    "MetalRetriever",
    "MilvusRetriever",
    "NanoPQRetriever",
    "NeuralDBRetriever",
    "OutlineRetriever",
    "PineconeHybridSearchRetriever",
125  libs/community/langchain_community/retrievers/nanopq.py  Normal file
@@ -0,0 +1,125 @@
from __future__ import annotations

import concurrent.futures
from typing import Any, Iterable, List, Optional

import numpy as np
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.retrievers import BaseRetriever


def create_index(contexts: List[str], embeddings: Embeddings) -> np.ndarray:
    """
    Create an index of embeddings for a list of contexts.

    Args:
        contexts: List of contexts to embed.
        embeddings: Embeddings model to use.

    Returns:
        Index of embeddings.
    """
    with concurrent.futures.ThreadPoolExecutor() as executor:
        return np.array(list(executor.map(embeddings.embed_query, contexts)))


class NanoPQRetriever(BaseRetriever):
    """`NanoPQ` retriever."""

    embeddings: Embeddings
    """Embeddings model to use."""
    index: Any
    """Index of embeddings."""
    texts: List[str]
    """List of texts to index."""
    metadatas: Optional[List[dict]] = None
    """List of metadatas corresponding with each text."""
    k: int = 4
    """Number of results to return."""
    relevancy_threshold: Optional[float] = None
    """Threshold for relevancy."""
    subspace: int = 4
    """Number of subspaces to be created; the embedding shape should be a multiple of it."""
    clusters: int = 128
    """Number of clusters to be created."""

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    @classmethod
    def from_texts(
        cls,
        texts: List[str],
        embeddings: Embeddings,
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> NanoPQRetriever:
        index = create_index(texts, embeddings)
        return cls(
            embeddings=embeddings,
            index=index,
            texts=texts,
            metadatas=metadatas,
            **kwargs,
        )

    @classmethod
    def from_documents(
        cls,
        documents: Iterable[Document],
        embeddings: Embeddings,
        **kwargs: Any,
    ) -> NanoPQRetriever:
        texts, metadatas = zip(*((d.page_content, d.metadata) for d in documents))
        return cls.from_texts(
            texts=texts, embeddings=embeddings, metadatas=metadatas, **kwargs
        )

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        try:
            from nanopq import PQ
        except ImportError:
            raise ImportError(
                "Could not import nanopq, please install with `pip install nanopq`."
            )

        query_embeds = np.array(self.embeddings.embed_query(query))
        try:
            pq = PQ(M=self.subspace, Ks=self.clusters, verbose=True).fit(
                self.index.astype("float32")
            )
        except AssertionError:
            error_message = (
                "Received params: training_sample={training_sample}, "
                "n_cluster={n_clusters}, subspace={subspace}, "
                "embedding_shape={embedding_shape}. Issue with the combination. "
                "Please trace back to find the exact error"
            ).format(
                training_sample=self.index.shape[0],
                n_clusters=self.clusters,
                subspace=self.subspace,
                embedding_shape=self.index.shape[1],
            )
            raise RuntimeError(error_message)

        index_code = pq.encode(vecs=self.index.astype("float32"))
        dt = pq.dtable(query=query_embeds.astype("float32"))
        dists = dt.adist(codes=index_code)

        sorted_ix = np.argsort(dists)

        top_k_results = [
            Document(
                page_content=self.texts[row],
                metadata=self.metadatas[row] if self.metadatas else {},
            )
            for row in sorted_ix[0 : self.k]
        ]

        return top_k_results
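
A minimal usage sketch of the retriever registered above; `FakeEmbeddings` stands in for any `Embeddings` implementation, and the parameter values are illustrative. Note that nanopq requires the embedding width to be divisible by `subspace` and at least `clusters` training vectors:

```python
from langchain_community.embeddings import FakeEmbeddings
from langchain_community.retrievers import NanoPQRetriever

retriever = NanoPQRetriever.from_texts(
    texts=["hello world", "goodbye world", "hello langchain", "product quantization"],
    embeddings=FakeEmbeddings(size=128),  # 128 is divisible by subspace=4
    subspace=4,   # product-quantization subspaces
    clusters=2,   # must not exceed the number of indexed texts
    k=2,
)
docs = retriever.invoke("hello")
```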
@@ -3,17 +3,21 @@ from __future__ import annotations
import json
import logging
import time
from typing import List, Optional
from typing import List, Optional, Type

import requests
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.pydantic_v1 import validator
from langchain_core.pydantic_v1 import BaseModel, Field, HttpUrl, validator

from langchain_community.tools.edenai.edenai_base_tool import EdenaiTool

logger = logging.getLogger(__name__)


class SpeechToTextInput(BaseModel):
    query: HttpUrl = Field(description="url of the audio to analyze")


class EdenAiSpeechToTextTool(EdenaiTool):
    """Tool that queries the Eden AI Speech To Text API.

@@ -23,7 +27,6 @@ class EdenAiSpeechToTextTool(EdenaiTool):
    To use, you should have
    the environment variable ``EDENAI_API_KEY`` set with your API token.
    You can find your token here: https://app.edenai.run/admin/account/settings

    """

    edenai_api_key: Optional[str] = None
@@ -34,6 +37,7 @@ class EdenAiSpeechToTextTool(EdenaiTool):
        "Useful for when you have to convert audio to text."
        "Input should be a url to an audio file."
    )
    args_schema: Type[BaseModel] = SpeechToTextInput
    is_async: bool = True

    language: Optional[str] = "en"

@@ -1,17 +1,21 @@
from __future__ import annotations

import logging
from typing import Dict, List, Literal, Optional
from typing import Dict, List, Literal, Optional, Type

import requests
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.pydantic_v1 import Field, root_validator, validator
from langchain_core.pydantic_v1 import BaseModel, Field, root_validator, validator

from langchain_community.tools.edenai.edenai_base_tool import EdenaiTool

logger = logging.getLogger(__name__)


class TextToSpeechInput(BaseModel):
    query: str = Field(description="text to generate audio from")


class EdenAiTextToSpeechTool(EdenaiTool):
    """Tool that queries the Eden AI Text to speech API.
    for api reference check edenai documentation:
@@ -30,6 +34,7 @@ class EdenAiTextToSpeechTool(EdenaiTool):
        """the output is a string representing the URL of the audio file,
        or the path to the downloaded wav file """
    )
    args_schema: Type[BaseModel] = TextToSpeechInput

    language: Optional[str] = "en"
    """

@@ -1,15 +1,20 @@
from __future__ import annotations

import logging
from typing import Optional
from typing import Optional, Type

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.pydantic_v1 import BaseModel, Field, HttpUrl

from langchain_community.tools.edenai.edenai_base_tool import EdenaiTool

logger = logging.getLogger(__name__)


class ExplicitImageInput(BaseModel):
    query: HttpUrl = Field(description="url of the image to analyze")


class EdenAiExplicitImageTool(EdenaiTool):
    """Tool that queries the Eden AI Explicit image detection.

@@ -33,6 +38,7 @@ class EdenAiExplicitImageTool(EdenaiTool):
        pornography, violence, gore content, etc."""
        "Input should be the string url of the image ."
    )
    args_schema: Type[BaseModel] = ExplicitImageInput

    combine_available: bool = True
    feature: str = "image"

@@ -1,15 +1,20 @@
from __future__ import annotations

import logging
from typing import Optional
from typing import Optional, Type

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.pydantic_v1 import BaseModel, Field, HttpUrl

from langchain_community.tools.edenai.edenai_base_tool import EdenaiTool

logger = logging.getLogger(__name__)


class ObjectDetectionInput(BaseModel):
    query: HttpUrl = Field(description="url of the image to analyze")


class EdenAiObjectDetectionTool(EdenaiTool):
    """Tool that queries the Eden AI Object detection API.

@@ -30,6 +35,7 @@ class EdenAiObjectDetectionTool(EdenaiTool):
        (with bounding boxes) objects in an image """
        "Input should be the string url of the image to identify."
    )
    args_schema: Type[BaseModel] = ObjectDetectionInput

    show_positions: bool = False

@@ -1,15 +1,20 @@
from __future__ import annotations

import logging
from typing import Optional
from typing import Optional, Type

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.pydantic_v1 import BaseModel, Field, HttpUrl

from langchain_community.tools.edenai.edenai_base_tool import EdenaiTool

logger = logging.getLogger(__name__)


class IDParsingInput(BaseModel):
    query: HttpUrl = Field(description="url of the document to parse")


class EdenAiParsingIDTool(EdenaiTool):
    """Tool that queries the Eden AI Identity parsing API.

@@ -29,6 +34,7 @@ class EdenAiParsingIDTool(EdenaiTool):
        "Useful for when you have to extract information from an ID Document "
        "Input should be the string url of the document to parse."
    )
    args_schema: Type[BaseModel] = IDParsingInput

    feature: str = "ocr"
    subfeature: str = "identity_parser"

@@ -1,15 +1,20 @@
from __future__ import annotations

import logging
from typing import Optional
from typing import Optional, Type

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.pydantic_v1 import BaseModel, Field, HttpUrl

from langchain_community.tools.edenai.edenai_base_tool import EdenaiTool

logger = logging.getLogger(__name__)


class InvoiceParsingInput(BaseModel):
    query: HttpUrl = Field(description="url of the document to parse")


class EdenAiParsingInvoiceTool(EdenaiTool):
    """Tool that queries the Eden AI Invoice parsing API.

@@ -23,7 +28,6 @@ class EdenAiParsingInvoiceTool(EdenaiTool):
    """

    name: str = "edenai_invoice_parsing"

    description: str = (
        "A wrapper around edenai Services invoice parsing. "
        """Useful for when you have to extract information from
@@ -33,6 +37,7 @@ class EdenAiParsingInvoiceTool(EdenaiTool):
        in a structured format to automate the invoice processing """
        "Input should be the string url of the document to parse."
    )
    args_schema: Type[BaseModel] = InvoiceParsingInput

    language: Optional[str] = None
    """

@@ -1,15 +1,20 @@
from __future__ import annotations

import logging
from typing import Optional
from typing import Optional, Type

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.pydantic_v1 import BaseModel, Field

from langchain_community.tools.edenai.edenai_base_tool import EdenaiTool

logger = logging.getLogger(__name__)


class TextModerationInput(BaseModel):
    query: str = Field(description="Text to moderate")


class EdenAiTextModerationTool(EdenaiTool):
    """Tool that queries the Eden AI Explicit text detection.

@@ -23,7 +28,6 @@ class EdenAiTextModerationTool(EdenaiTool):
    """

    name: str = "edenai_explicit_content_detection_text"

    description: str = (
        "A wrapper around edenai Services explicit content detection for text. "
        """Useful for when you have to scan text for offensive,
@@ -44,6 +48,7 @@ class EdenAiTextModerationTool(EdenaiTool):
        """
        "Input should be a string."
    )
    args_schema: Type[BaseModel] = TextModerationInput

    language: str
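
These hunks give every Eden AI tool an explicit `args_schema`, so tool-calling models see a typed, described input instead of a bare string. A rough sketch of the effect, assuming `EDENAI_API_KEY` is set and that the provider name is valid for your account (both assumptions):

```python
from langchain_community.tools.edenai import EdenAiTextModerationTool

tool = EdenAiTextModerationTool(providers=["openai"], language="en")

# The schema now carries the field description declared in TextModerationInput.
print(tool.args)  # e.g. {'query': {'description': 'Text to moderate', ...}}
result = tool.invoke({"query": "some text to screen"})
```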
@@ -84,7 +84,7 @@ class Requests(BaseModel):
            url,
            headers=self.headers,
            auth=self.auth,
            verify=self.verify,
            verify_ssl=self.verify,
            **kwargs,
        ) as response:
            yield response
@@ -94,7 +94,7 @@ class Requests(BaseModel):
            url,
            headers=self.headers,
            auth=self.auth,
            verify=self.verify,
            verify_ssl=self.verify,
            **kwargs,
        ) as response:
            yield response
@@ -306,6 +306,27 @@ class AzureCosmosDBVectorSearch(VectorStore):
        }
        return command

    def create_filter_index(
        self,
        property_to_filter: str,
        index_name: str,
    ) -> dict[str, Any]:
        command = {
            "createIndexes": self._collection.name,
            "indexes": [
                {
                    "key": {property_to_filter: 1},
                    "name": index_name,
                }
            ],
        }
        # retrieve the database object
        current_database = self._collection.database

        # invoke the command from the database object
        create_index_responses: dict[str, Any] = current_database.command(command)
        return create_index_responses

    def add_texts(
        self,
        texts: Iterable[str],
@@ -345,7 +366,7 @@ class AzureCosmosDBVectorSearch(VectorStore):
        # Embed and create the documents
        embeddings = self._embedding.embed_documents(texts)
        to_insert = [
            {self._text_key: t, self._embedding_key: embedding, **m}
            {self._text_key: t, self._embedding_key: embedding, "metadata": m}
            for t, m, embedding in zip(texts, metadatas, embeddings)
        ]
        # insert the documents in Cosmos DB
@@ -397,8 +418,10 @@ class AzureCosmosDBVectorSearch(VectorStore):
        embeddings: List[float],
        k: int = 4,
        kind: CosmosDBVectorSearchType = CosmosDBVectorSearchType.VECTOR_IVF,
        pre_filter: Optional[Dict] = None,
        ef_search: int = 40,
        score_threshold: float = 0.0,
        with_embedding: bool = False,
    ) -> List[Tuple[Document, float]]:
        """Returns a list of documents with their scores

@@ -422,9 +445,11 @@ class AzureCosmosDBVectorSearch(VectorStore):
        """
        pipeline: List[dict[str, Any]] = []
        if kind == CosmosDBVectorSearchType.VECTOR_IVF:
            pipeline = self._get_pipeline_vector_ivf(embeddings, k)
            pipeline = self._get_pipeline_vector_ivf(embeddings, k, pre_filter)
        elif kind == CosmosDBVectorSearchType.VECTOR_HNSW:
            pipeline = self._get_pipeline_vector_hnsw(embeddings, k, ef_search)
            pipeline = self._get_pipeline_vector_hnsw(
                embeddings, k, ef_search, pre_filter
            )

        cursor = self._collection.aggregate(pipeline)

@@ -433,28 +458,32 @@ class AzureCosmosDBVectorSearch(VectorStore):
            score = res.pop("similarityScore")
            if score < score_threshold:
                continue
            document_object_field = (
                res.pop("document")
                if kind == CosmosDBVectorSearchType.VECTOR_IVF
                else res
            )
            document_object_field = res.pop("document")
            text = document_object_field.pop(self._text_key)
            docs.append(
                (Document(page_content=text, metadata=document_object_field), score)
            )
            metadata = document_object_field.pop("metadata")
            if with_embedding:
                metadata[self._embedding_key] = document_object_field.pop(
                    self._embedding_key
                )

            docs.append((Document(page_content=text, metadata=metadata), score))
        return docs

    def _get_pipeline_vector_ivf(
        self, embeddings: List[float], k: int = 4
        self, embeddings: List[float], k: int = 4, pre_filter: Optional[Dict] = None
    ) -> List[dict[str, Any]]:
        params = {
            "vector": embeddings,
            "path": self._embedding_key,
            "k": k,
        }
        if pre_filter:
            params["filter"] = pre_filter

        pipeline: List[dict[str, Any]] = [
            {
                "$search": {
                    "cosmosSearch": {
                        "vector": embeddings,
                        "path": self._embedding_key,
                        "k": k,
                    },
                    "cosmosSearch": params,
                    "returnStoredSource": True,
                }
            },
@@ -468,17 +497,25 @@ class AzureCosmosDBVectorSearch(VectorStore):
        return pipeline

    def _get_pipeline_vector_hnsw(
        self, embeddings: List[float], k: int = 4, ef_search: int = 40
        self,
        embeddings: List[float],
        k: int = 4,
        ef_search: int = 40,
        pre_filter: Optional[Dict] = None,
    ) -> List[dict[str, Any]]:
        params = {
            "vector": embeddings,
            "path": self._embedding_key,
            "k": k,
            "efSearch": ef_search,
        }
        if pre_filter:
            params["filter"] = pre_filter

        pipeline: List[dict[str, Any]] = [
            {
                "$search": {
                    "cosmosSearch": {
                        "vector": embeddings,
                        "path": self._embedding_key,
                        "k": k,
                        "efSearch": ef_search,
                    },
                    "cosmosSearch": params,
                }
            },
            {
@@ -495,16 +532,20 @@ class AzureCosmosDBVectorSearch(VectorStore):
        query: str,
        k: int = 4,
        kind: CosmosDBVectorSearchType = CosmosDBVectorSearchType.VECTOR_IVF,
        pre_filter: Optional[Dict] = None,
        ef_search: int = 40,
        score_threshold: float = 0.0,
        with_embedding: bool = False,
    ) -> List[Tuple[Document, float]]:
        embeddings = self._embedding.embed_query(query)
        docs = self._similarity_search_with_score(
            embeddings=embeddings,
            k=k,
            kind=kind,
            pre_filter=pre_filter,
            ef_search=ef_search,
            score_threshold=score_threshold,
            with_embedding=with_embedding,
        )
        return docs

@@ -513,16 +554,20 @@ class AzureCosmosDBVectorSearch(VectorStore):
        query: str,
        k: int = 4,
        kind: CosmosDBVectorSearchType = CosmosDBVectorSearchType.VECTOR_IVF,
        pre_filter: Optional[Dict] = None,
        ef_search: int = 40,
        score_threshold: float = 0.0,
        with_embedding: bool = False,
        **kwargs: Any,
    ) -> List[Document]:
        docs_and_scores = self.similarity_search_with_score(
            query,
            k=k,
            kind=kind,
            pre_filter=pre_filter,
            ef_search=ef_search,
            score_threshold=score_threshold,
            with_embedding=with_embedding,
        )
        return [doc for doc, _ in docs_and_scores]

@@ -533,8 +578,10 @@ class AzureCosmosDBVectorSearch(VectorStore):
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        kind: CosmosDBVectorSearchType = CosmosDBVectorSearchType.VECTOR_IVF,
        pre_filter: Optional[Dict] = None,
        ef_search: int = 40,
        score_threshold: float = 0.0,
        with_embedding: bool = False,
        **kwargs: Any,
    ) -> List[Document]:
        # Retrieves the docs with similarity scores
@@ -543,8 +590,10 @@ class AzureCosmosDBVectorSearch(VectorStore):
            embedding,
            k=fetch_k,
            kind=kind,
            pre_filter=pre_filter,
            ef_search=ef_search,
            score_threshold=score_threshold,
            with_embedding=with_embedding,
        )

        # Re-ranks the docs using MMR
@@ -564,8 +613,10 @@ class AzureCosmosDBVectorSearch(VectorStore):
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        kind: CosmosDBVectorSearchType = CosmosDBVectorSearchType.VECTOR_IVF,
        pre_filter: Optional[Dict] = None,
        ef_search: int = 40,
        score_threshold: float = 0.0,
        with_embedding: bool = False,
        **kwargs: Any,
    ) -> List[Document]:
        # compute the embeddings vector from the query string
@@ -577,8 +628,10 @@ class AzureCosmosDBVectorSearch(VectorStore):
            fetch_k=fetch_k,
            lambda_mult=lambda_mult,
            kind=kind,
            pre_filter=pre_filter,
            ef_search=ef_search,
            score_threshold=score_threshold,
            with_embedding=with_embedding,
        )
        return docs
|
||||
text_key = "text"
|
||||
|
||||
to_insert = [
|
||||
{"id": str(uuid.uuid4()), text_key: t, self._embedding_key: embedding, **m}
|
||||
{
|
||||
"id": str(uuid.uuid4()),
|
||||
text_key: t,
|
||||
self._embedding_key: embedding,
|
||||
"metadata": m,
|
||||
}
|
||||
for t, m, embedding in zip(texts, metadatas, embeddings)
|
||||
]
|
||||
# insert the documents in CosmosDB No Sql
|
||||
@@ -184,6 +189,7 @@ class AzureCosmosDBNoSqlVectorSearch(VectorStore):
|
||||
cosmos_database_properties: Dict[str, Any],
|
||||
database_name: str = "vectorSearchDB",
|
||||
container_name: str = "vectorSearchContainer",
|
||||
create_container: bool = True,
|
||||
**kwargs: Any,
|
||||
) -> AzureCosmosDBNoSqlVectorSearch:
|
||||
if kwargs:
|
||||
@@ -204,6 +210,7 @@ class AzureCosmosDBNoSqlVectorSearch(VectorStore):
|
||||
cosmos_database_properties=cosmos_database_properties,
|
||||
database_name=database_name,
|
||||
container_name=container_name,
|
||||
create_container=create_container,
|
||||
)
|
||||
|
||||
@classmethod
|
||||
@@ -257,41 +264,83 @@ class AzureCosmosDBNoSqlVectorSearch(VectorStore):
|
||||
self,
|
||||
embeddings: List[float],
|
||||
k: int = 4,
|
||||
pre_filter: Optional[Dict] = None,
|
||||
with_embedding: bool = False,
|
||||
) -> List[Tuple[Document, float]]:
|
||||
query = (
|
||||
"SELECT TOP {} c.id, c.{}, c.text, VectorDistance(c.{}, {}) AS "
|
||||
"SimilarityScore FROM c ORDER BY VectorDistance(c.{}, {})".format(
|
||||
k,
|
||||
self._embedding_key,
|
||||
self._embedding_key,
|
||||
embeddings,
|
||||
self._embedding_key,
|
||||
embeddings,
|
||||
)
|
||||
query = "SELECT "
|
||||
|
||||
# If limit_offset_clause is not specified, add TOP clause
|
||||
if pre_filter is None or pre_filter.get("limit_offset_clause") is None:
|
||||
query += "TOP @limit "
|
||||
|
||||
query += (
|
||||
"c.id, c.{}, c.text, c.metadata, "
|
||||
"VectorDistance(c.@embeddingKey, @embeddings) AS SimilarityScore FROM c"
|
||||
)
|
||||
|
||||
# Add where_clause if specified
|
||||
if pre_filter is not None and pre_filter.get("where_clause") is not None:
|
||||
query += " {}".format(pre_filter["where_clause"])
|
||||
|
||||
query += " ORDER BY VectorDistance(c.@embeddingKey, @embeddings)"
|
||||
|
||||
# Add limit_offset_clause if specified
|
||||
if pre_filter is not None and pre_filter.get("limit_offset_clause") is not None:
|
||||
query += " {}".format(pre_filter["limit_offset_clause"])
|
||||
parameters = [
|
||||
{"name": "@limit", "value": k},
|
||||
{"name": "@embeddingKey", "value": self._embedding_key},
|
||||
{"name": "@embeddings", "value": embeddings},
|
||||
]
|
||||
|
||||
docs_and_scores = []
|
||||
|
||||
items = list(
|
||||
self._container.query_items(query=query, enable_cross_partition_query=True)
|
||||
self._container.query_items(
|
||||
query=query, parameters=parameters, enable_cross_partition_query=True
|
||||
)
|
||||
)
|
||||
for item in items:
|
||||
text = item["text"]
|
||||
metadata = item["metadata"]
|
||||
score = item["SimilarityScore"]
|
||||
docs_and_scores.append((Document(page_content=text, metadata=item), score))
|
||||
if with_embedding:
|
||||
metadata[self._embedding_key] = item[self._embedding_key]
|
||||
docs_and_scores.append(
|
||||
(Document(page_content=text, metadata=metadata), score)
|
||||
)
|
||||
return docs_and_scores
|
||||
|
||||
def similarity_search_with_score(
|
||||
self,
|
||||
query: str,
|
||||
k: int = 4,
|
||||
pre_filter: Optional[Dict] = None,
|
||||
with_embedding: bool = False,
|
||||
) -> List[Tuple[Document, float]]:
|
||||
embeddings = self._embedding.embed_query(query)
|
||||
docs_and_scores = self._similarity_search_with_score(embeddings=embeddings, k=k)
|
||||
docs_and_scores = self._similarity_search_with_score(
|
||||
embeddings=embeddings,
|
||||
k=k,
|
||||
pre_filter=pre_filter,
|
||||
with_embedding=with_embedding,
|
||||
)
|
||||
return docs_and_scores
|
||||
|
||||
def similarity_search(
|
||||
self, query: str, k: int = 4, **kwargs: Any
|
||||
self,
|
||||
query: str,
|
||||
k: int = 4,
|
||||
pre_filter: Optional[Dict] = None,
|
||||
with_embedding: bool = False,
|
||||
**kwargs: Any,
|
||||
) -> List[Document]:
|
||||
docs_and_scores = self.similarity_search_with_score(query, k=k)
|
||||
docs_and_scores = self.similarity_search_with_score(
|
||||
query,
|
||||
k=k,
|
||||
pre_filter=pre_filter,
|
||||
with_embedding=with_embedding,
|
||||
)
|
||||
|
||||
return [doc for doc, _ in docs_and_scores]
|
||||
|
||||
@@ -304,7 +353,18 @@ class AzureCosmosDBNoSqlVectorSearch(VectorStore):
|
||||
**kwargs: Any,
|
||||
) -> List[Document]:
|
||||
# Retrieves the docs with similarity scores
|
||||
docs = self._similarity_search_with_score(embeddings=embedding, k=fetch_k)
|
||||
pre_filter = {}
|
||||
with_embedding = False
|
||||
if kwargs["pre_filter"]:
|
||||
pre_filter = kwargs["pre_filter"]
|
||||
if kwargs["with_embedding"]:
|
||||
with_embedding = kwargs["with_embedding"]
|
||||
docs = self._similarity_search_with_score(
|
||||
embeddings=embedding,
|
||||
k=fetch_k,
|
||||
pre_filter=pre_filter,
|
||||
with_embedding=with_embedding,
|
||||
)
|
||||
|
||||
# Re-ranks the docs using MMR
|
||||
mmr_doc_indexes = maximal_marginal_relevance(
|
||||
@@ -326,6 +386,12 @@ class AzureCosmosDBNoSqlVectorSearch(VectorStore):
|
||||
**kwargs: Any,
|
||||
) -> List[Document]:
|
||||
# compute the embeddings vector from the query string
|
||||
pre_filter = {}
|
||||
with_embedding = False
|
||||
if kwargs["pre_filter"]:
|
||||
pre_filter = kwargs["pre_filter"]
|
||||
if kwargs["with_embedding"]:
|
||||
with_embedding = kwargs["with_embedding"]
|
||||
embeddings = self._embedding.embed_query(query)
|
||||
|
||||
docs = self.max_marginal_relevance_search_by_vector(
|
||||
@@ -333,5 +399,7 @@ class AzureCosmosDBNoSqlVectorSearch(VectorStore):
|
||||
k=k,
|
||||
fetch_k=fetch_k,
|
||||
lambda_mult=lambda_mult,
|
||||
pre_filter=pre_filter,
|
||||
with_embedding=with_embedding,
|
||||
)
|
||||
return docs
|
||||
|
||||
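
On the NoSQL store, by contrast, `pre_filter` is a dict of raw SQL clause fragments rather than a Mongo-style filter. A minimal sketch, assuming a populated `AzureCosmosDBNoSqlVectorSearch` instance named `vectorstore` (the clause text and metadata field are illustrative):

```python
# where_clause is spliced into the generated SELECT before ORDER BY;
# limit_offset_clause replaces the default "TOP @limit" projection.
docs = vectorstore.similarity_search(
    "release notes for 0.2.10",
    k=4,
    pre_filter={
        "where_clause": "WHERE c.metadata.category = 'changelog'",
        "limit_offset_clause": "OFFSET 0 LIMIT 4",
    },
)
```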
8  libs/community/poetry.lock  generated
@@ -2117,7 +2117,7 @@ files = [

[[package]]
name = "langchain"
version = "0.2.9"
version = "0.2.10"
description = "Building applications with LLMs through composability"
optional = false
python-versions = ">=3.8.1,<4.0"
@@ -2127,7 +2127,7 @@ develop = true
[package.dependencies]
aiohttp = "^3.8.3"
async-timeout = {version = "^4.0.0", markers = "python_version < \"3.11\""}
langchain-core = "^0.2.20"
langchain-core = "^0.2.22"
langchain-text-splitters = "^0.2.0"
langsmith = "^0.1.17"
numpy = [
@@ -2146,7 +2146,7 @@ url = "../langchain"

[[package]]
name = "langchain-core"
version = "0.2.22"
version = "0.2.23"
description = "Building applications with LLMs through composability"
optional = false
python-versions = ">=3.8.1,<4.0"
@@ -5759,4 +5759,4 @@ test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools",
[metadata]
lock-version = "2.0"
python-versions = ">=3.8.1,<4.0"
content-hash = "7755e46a5239dc88c52f49ac337d573090728fc8a3b1f3465fcd4a9f18c5aaec"
content-hash = "14d60e1f61fa9c0ba69cb4e227e4af3de395a8dd4a53b121fe488e7b9f75ea66"
|
||||
|
||||
[tool.poetry]
|
||||
name = "langchain-community"
|
||||
version = "0.2.9"
|
||||
version = "0.2.10"
|
||||
description = "Community contributed LangChain integrations."
|
||||
authors = []
|
||||
license = "MIT"
|
||||
readme = "README.md"
|
||||
repository = "https://github.com/langchain-ai/langchain"
|
||||
|
||||
[tool.poetry.urls]
|
||||
"Source Code" = "https://github.com/langchain-ai/langchain/tree/master/libs/community"
|
||||
"Release Notes" = "https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain-community%3D%3D0%22&expanded=true"
|
||||
|
||||
[tool.ruff]
|
||||
exclude = [ "tests/examples/non-utf8-encoding.py", "tests/integration_tests/examples/non-utf8-encoding.py",]
|
||||
|
||||
@@ -28,9 +24,13 @@ skip = ".git,*.pdf,*.svg,*.pdf,*.yaml,*.ipynb,poetry.lock,*.min.js,*.css,package
|
||||
ignore-regex = ".*(Stati Uniti|Tense=Pres).*"
|
||||
ignore-words-list = "momento,collison,ned,foor,reworkd,parth,whats,aapply,mysogyny,unsecure,damon,crate,aadd,symbl,precesses,accademia,nin,cann"
|
||||
|
||||
[tool.poetry.urls]
|
||||
"Source Code" = "https://github.com/langchain-ai/langchain/tree/master/libs/community"
|
||||
"Release Notes" = "https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain-community%3D%3D0%22&expanded=true"
|
||||
|
||||
[tool.poetry.dependencies]
|
||||
python = ">=3.8.1,<4.0"
|
||||
langchain-core = "^0.2.22"
|
||||
langchain-core = "^0.2.23"
|
||||
langchain = "^0.2.9"
|
||||
SQLAlchemy = ">=1.4,<3"
|
||||
requests = "^2"
|
||||
|
||||
@@ -13,6 +13,30 @@ fi

repository_path="$1"

# Check that we are not using features that cannot be captured via init.
# pre-init is a custom decorator that we introduced to capture the same semantics
# as @root_validator(pre=False, skip_on_failure=False) available in pydantic 1.
count=$(git grep -E '(@root_validator)|(@validator)|(@pre_init)' -- "*.py" | wc -l)
# PRs that increase the current count will not be accepted.
# PRs that decrease the count and update the code in the
# repository accordingly are welcome!
current_count=336

if [ "$count" -gt "$current_count" ]; then
  echo "The PR seems to be introducing new usage of @root_validator and/or @field_validator."
  echo "git grep -E '(@root_validator)|(@validator)' | wc -l returned $count"
  echo "whereas the expected count should be equal or less than $current_count"
  echo "Please update the code to instead use __init__"
  echo "For examples, please see: "
  echo "https://gist.github.com/eyurtsev/d1dcba10c2f35626e302f1b98a0f5a3c "
  echo "This linter is here to make sure that it's easier to upgrade pydantic in the future."
  exit 1
elif [ "$count" -lt "$current_count" ]; then
  echo "Please update the $current_count variable in ./scripts/check_pydantic.sh to $count"
  exit 1
fi


# Search for lines matching the pattern within the specified repository
result=$(git -C "$repository_path" grep -En '^import pydantic|^from pydantic')
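Note: the linter above points contributors at replacing validator decorators with plain `__init__` logic. A hypothetical before/after sketch of that migration (the model and field names are made up; only the pattern comes from the script's guidance):

from langchain_core.pydantic_v1 import BaseModel, root_validator

class Before(BaseModel):
    host: str
    url: str = ""

    @root_validator(pre=True)
    def build_url(cls, values: dict) -> dict:
        values["url"] = f"https://{values['host']}"
        return values

class After(BaseModel):
    host: str
    url: str = ""

    def __init__(self, **kwargs: object) -> None:
        # Compute derived fields up front instead of in a validator.
        kwargs.setdefault("url", f"https://{kwargs.get('host', '')}")
        super().__init__(**kwargs)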
@@ -0,0 +1,73 @@
from typing import Any, Dict, Union

import pytest
import requests
import yaml
from langchain_core.messages import AIMessage

from langchain_community.chains.openapi.chain import create_openapi_endpoint_chain


def _get_schema(response_json: Union[dict, list]) -> dict:
    if isinstance(response_json, list):
        response_json = response_json[0] if response_json else {}
    return {key: type(value).__name__ for key, value in response_json.items()}


def _get_api_spec() -> str:
    base_url = "https://jsonplaceholder.typicode.com"
    endpoints = [
        "/posts",
        "/comments",
    ]
    common_query_parameters = [
        {
            "name": "_limit",
            "in": "query",
            "required": False,
            "schema": {"type": "integer", "example": 2},
            "description": "Limit the number of results",
        }
    ]
    openapi_spec: Dict[str, Any] = {
        "openapi": "3.0.0",
        "info": {"title": "JSONPlaceholder API", "version": "1.0.0"},
        "servers": [{"url": base_url}],
        "paths": {},
    }
    # Iterate over the endpoints to construct the paths
    for endpoint in endpoints:
        response = requests.get(base_url + endpoint)
        if response.status_code == 200:
            schema = _get_schema(response.json())
            openapi_spec["paths"][endpoint] = {
                "get": {
                    "summary": f"Get {endpoint[1:]}",
                    "parameters": common_query_parameters,
                    "responses": {
                        "200": {
                            "description": "Successful response",
                            "content": {
                                "application/json": {
                                    "schema": {"type": "object", "properties": schema}
                                }
                            },
                        }
                    },
                }
            }
    return yaml.dump(openapi_spec, sort_keys=False)


@pytest.mark.requires("langchain_openai")
def test_create_openapi_endpoint_chain() -> None:
    from langchain_openai import ChatOpenAI

    llm = ChatOpenAI(model="gpt-3.5-turbo-0125")
    api_spec = _get_api_spec()

    chain = create_openapi_endpoint_chain(llm, api_spec, allow_dangerous_requests=True)

    result = chain.invoke("What are the titles of the top two posts?")
    assert isinstance(result, AIMessage)
    assert "sunt aut facere" in result.content and "qui est esse" in result.content
@@ -25,7 +25,7 @@ model_name = os.getenv("OPENAI_EMBEDDINGS_MODEL_NAME", "text-embedding-ada-002")

INDEX_NAME = "langchain-test-index"
INDEX_NAME_VECTOR_HNSW = "langchain-test-index-hnsw"
NAMESPACE = "langchain_test_db.langchain_test_collection"
CONNECTION_STRING: str = os.environ.get("MONGODB_VCORE_URI", "")
CONNECTION_STRING: str = "mongodb+srv://akataria:Basket24ball@akataria-vector-search-testing.mongocluster.cosmos.azure.com/?tls=true&authMechanism=SCRAM-SHA-256&retrywrites=false&maxIdleTimeMS=120000"
DB_NAME, COLLECTION_NAME = NAMESPACE.split(".")

num_lists = 3
@@ -104,6 +104,7 @@ class TestAzureCosmosDBNoSqlVectorSearch:
            ),
            indexing_policy=get_vector_indexing_policy("flat"),
            cosmos_container_properties={"partition_key": partition_key},
            cosmos_database_properties={},
        )
        sleep(1)  # waits for Cosmos DB to save contents to the collection

@@ -139,6 +140,7 @@ class TestAzureCosmosDBNoSqlVectorSearch:
            ),
            indexing_policy=get_vector_indexing_policy("flat"),
            cosmos_container_properties={"partition_key": partition_key},
            cosmos_database_properties={},
        )
        sleep(1)  # waits for Cosmos DB to save contents to the collection

@@ -154,3 +156,60 @@ class TestAzureCosmosDBNoSqlVectorSearch:
        assert output2
        assert output2[0].page_content != "Dogs are tough."
        safe_delete_database(cosmos_client)

    def test_from_documents_cosine_distance_with_filtering(
        self,
        cosmos_client: Any,
        partition_key: Any,
        azure_openai_embeddings: OpenAIEmbeddings,
    ) -> None:
        """Test end to end construction and search."""
        documents = [
            Document(page_content="Dogs are tough.", metadata={"a": 1}),
            Document(page_content="Cats have fluff.", metadata={"a": 1}),
            Document(page_content="What is a sandwich?", metadata={"c": 1}),
            Document(page_content="That fence is purple.", metadata={"d": 1, "e": 2}),
        ]

        store = AzureCosmosDBNoSqlVectorSearch.from_documents(
            documents,
            azure_openai_embeddings,
            cosmos_client=cosmos_client,
            database_name=database_name,
            container_name=container_name,
            vector_embedding_policy=get_vector_embedding_policy(
                "cosine", "float32", 400
            ),
            indexing_policy=get_vector_indexing_policy("flat"),
            cosmos_container_properties={"partition_key": partition_key},
            cosmos_database_properties={},
        )
        sleep(1)  # waits for Cosmos DB to save contents to the collection

        output = store.similarity_search("Dogs", k=4)
        assert len(output) == 4
        assert output[0].page_content == "Dogs are tough."
        assert output[0].metadata["a"] == 1

        pre_filter = {
            "where_clause": "WHERE c.metadata.a=1",
        }
        output = store.similarity_search(
            "Dogs", k=4, pre_filter=pre_filter, with_embedding=True
        )

        assert len(output) == 2
        assert output[0].page_content == "Dogs are tough."
        assert output[0].metadata["a"] == 1

        pre_filter = {
            "where_clause": "WHERE c.metadata.a=1",
            "limit_offset_clause": "OFFSET 0 LIMIT 1",
        }

        output = store.similarity_search("Dogs", k=4, pre_filter=pre_filter)

        assert len(output) == 1
        assert output[0].page_content == "Dogs are tough."
        assert output[0].metadata["a"] == 1
        safe_delete_database(cosmos_client)
@@ -11,6 +11,7 @@ EXPECTED_ALL = [
    "ChatDatabricks",
    "ChatDeepInfra",
    "ChatEverlyAI",
    "ChatEdenAI",
    "ChatFireworks",
    "ChatFriendli",
    "ChatGooglePalm",
@@ -3,12 +3,16 @@

from typing import cast

import pytest
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, ToolMessage
from langchain_core.pydantic_v1 import SecretStr
from pytest import CaptureFixture

from langchain_community.chat_models import ChatPremAI
from langchain_community.chat_models.premai import _messages_to_prompt_dict
from langchain_community.chat_models.premai import (
    SINGLE_TOOL_PROMPT_TEMPLATE,
    TOOL_PROMPT_HEADER,
    _messages_to_prompt_dict,
)


@pytest.mark.requires("premai")
@@ -36,13 +40,20 @@ def test_messages_to_prompt_dict_with_valid_messages() -> None:
            AIMessage(content="AI message #1"),
            HumanMessage(content="User message #2"),
            AIMessage(content="AI message #2"),
            ToolMessage(content="Tool Message #1", tool_call_id="test_tool"),
            AIMessage(content="AI message #3"),
        ]
    )
    expected_tool_message = SINGLE_TOOL_PROMPT_TEMPLATE.format(
        tool_id="test_tool", tool_response="Tool Message #1"
    )
    expected = [
        {"role": "user", "content": "User message #1"},
        {"role": "assistant", "content": "AI message #1"},
        {"role": "user", "content": "User message #2"},
        {"role": "assistant", "content": "AI message #2"},
        {"role": "assistant", "content": "AI message #3"},
        {"role": "user", "content": TOOL_PROMPT_HEADER + expected_tool_message},
    ]

    assert system_message == "System Prompt"
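Note: a hypothetical illustration of how the new ToolMessage handling flattens a tool result into a plain user turn. The real template strings live in langchain_community.chat_models.premai; the two constants below are stand-ins, not the actual values:

TOOL_PROMPT_HEADER = "Tool results:\n"  # placeholder value
SINGLE_TOOL_PROMPT_TEMPLATE = "tool id: {tool_id}\nresponse: {tool_response}\n"  # placeholder value

tool_block = SINGLE_TOOL_PROMPT_TEMPLATE.format(
    tool_id="test_tool", tool_response="Tool Message #1"
)
# The flattened tool output is sent back to the model as a user message,
# appended after the assistant turns, as the expected list above shows.
message = {"role": "user", "content": TOOL_PROMPT_HEADER + tool_block}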
@@ -142,6 +142,7 @@ EXPECTED_ALL = [
    "S3DirectoryLoader",
    "S3FileLoader",
    "ScrapflyLoader",
    "ScrapingAntLoader",
    "SQLDatabaseLoader",
    "SRTLoader",
    "SeleniumURLLoader",
@@ -31,7 +31,7 @@ def test_pass_headers_if_provided(monkeypatch: MonkeyPatch) -> None:
        timeout=300,
    )

    def mock_post(url, headers, json, stream, timeout):  # type: ignore[no-untyped-def]
    def mock_post(url, headers, json, stream, timeout, auth):  # type: ignore[no-untyped-def]
        assert url == "https://ollama-hostname:8000/api/generate"
        assert headers == {
            "Content-Type": "application/json",
@@ -49,10 +49,35 @@ def test_pass_headers_if_provided(monkeypatch: MonkeyPatch) -> None:
    llm.invoke("Test prompt")


def test_pass_auth_if_provided(monkeypatch: MonkeyPatch) -> None:
    llm = Ollama(
        base_url="https://ollama-hostname:8000",
        model="foo",
        auth=("Test-User", "Test-Password"),
        timeout=300,
    )

    def mock_post(url, headers, json, stream, timeout, auth):  # type: ignore[no-untyped-def]
        assert url == "https://ollama-hostname:8000/api/generate"
        assert headers == {
            "Content-Type": "application/json",
        }
        assert json is not None
        assert stream is True
        assert timeout == 300
        assert auth == ("Test-User", "Test-Password")

        return mock_response_stream()

    monkeypatch.setattr(requests, "post", mock_post)

    llm.invoke("Test prompt")


def test_handle_if_headers_not_provided(monkeypatch: MonkeyPatch) -> None:
    llm = Ollama(base_url="https://ollama-hostname:8000", model="foo", timeout=300)

    def mock_post(url, headers, json, stream, timeout):  # type: ignore[no-untyped-def]
    def mock_post(url, headers, json, stream, timeout, auth):  # type: ignore[no-untyped-def]
        assert url == "https://ollama-hostname:8000/api/generate"
        assert headers == {
            "Content-Type": "application/json",
@@ -72,7 +97,7 @@ def test_handle_kwargs_top_level_parameters(monkeypatch: MonkeyPatch) -> None:
    """Test that top level params are sent to the endpoint as top level params"""
    llm = Ollama(base_url="https://ollama-hostname:8000", model="foo", timeout=300)

    def mock_post(url, headers, json, stream, timeout):  # type: ignore[no-untyped-def]
    def mock_post(url, headers, json, stream, timeout, auth):  # type: ignore[no-untyped-def]
        assert url == "https://ollama-hostname:8000/api/generate"
        assert headers == {
            "Content-Type": "application/json",
@@ -120,7 +145,7 @@ def test_handle_kwargs_with_unknown_param(monkeypatch: MonkeyPatch) -> None:
    """
    llm = Ollama(base_url="https://ollama-hostname:8000", model="foo", timeout=300)

    def mock_post(url, headers, json, stream, timeout):  # type: ignore[no-untyped-def]
    def mock_post(url, headers, json, stream, timeout, auth):  # type: ignore[no-untyped-def]
        assert url == "https://ollama-hostname:8000/api/generate"
        assert headers == {
            "Content-Type": "application/json",
@@ -169,7 +194,7 @@ def test_handle_kwargs_with_options(monkeypatch: MonkeyPatch) -> None:
    """
    llm = Ollama(base_url="https://ollama-hostname:8000", model="foo", timeout=300)

    def mock_post(url, headers, json, stream, timeout):  # type: ignore[no-untyped-def]
    def mock_post(url, headers, json, stream, timeout, auth):  # type: ignore[no-untyped-def]
        assert url == "https://ollama-hostname:8000/api/generate"
        assert headers == {
            "Content-Type": "application/json",
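Note: the new `auth` argument is forwarded straight to requests.post, so it accepts whatever requests accepts: a (user, password) tuple for basic auth or a requests.auth.AuthBase instance. A minimal sketch (host, model name, and credentials are placeholders):

from langchain_community.llms import Ollama

llm = Ollama(
    base_url="https://ollama-hostname:8000",
    model="llama3",
    auth=("Test-User", "Test-Password"),
)
llm.invoke("Test prompt")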
@@ -25,6 +25,7 @@ EXPECTED_ALL = [
    "LlamaIndexRetriever",
    "MetalRetriever",
    "MilvusRetriever",
    "NanoPQRetriever",
    "OutlineRetriever",
    "PineconeHybridSearchRetriever",
    "PubMedRetriever",
41 changes: libs/community/tests/unit_tests/retrievers/test_nanopq.py (new file)
@@ -0,0 +1,41 @@
import pytest
from langchain_core.documents import Document

from langchain_community.embeddings import FakeEmbeddings
from langchain_community.retrievers import NanoPQRetriever


class TestNanoPQRetriever:
    @pytest.mark.requires("nanopq")
    def test_from_texts(self) -> None:
        input_texts = ["I have a pen.", "Do you have a pen?", "I have a bag."]
        pq_retriever = NanoPQRetriever.from_texts(
            texts=input_texts, embeddings=FakeEmbeddings(size=100)
        )
        assert len(pq_retriever.texts) == 3

    @pytest.mark.requires("nanopq")
    def test_from_documents(self) -> None:
        input_docs = [
            Document(page_content="I have a pen.", metadata={"page": 1}),
            Document(page_content="Do you have a pen?", metadata={"page": 2}),
            Document(page_content="I have a bag.", metadata={"page": 3}),
        ]
        pq_retriever = NanoPQRetriever.from_documents(
            documents=input_docs, embeddings=FakeEmbeddings(size=100)
        )
        assert pq_retriever.texts == [
            "I have a pen.",
            "Do you have a pen?",
            "I have a bag.",
        ]
        assert pq_retriever.metadatas == [{"page": 1}, {"page": 2}, {"page": 3}]

    @pytest.mark.requires("nanopq")
    def test_invalid_subspace_error(self) -> None:  # "test_" prefix so pytest collects it
        input_texts = ["I have a pen.", "Do you have a pen?", "I have a bag."]
        pq_retriever = NanoPQRetriever.from_texts(
            texts=input_texts, embeddings=FakeEmbeddings(size=43)
        )
        with pytest.raises(RuntimeError):
            pq_retriever.invoke("I have")
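Note: outside the test harness, usage looks the same. NanoPQ quantizes vectors into subspaces, so the embedding width must be compatible with the quantizer's subspace count (the size=43 case above is the failure mode). A sketch using the fake embeddings from the tests:

from langchain_community.embeddings import FakeEmbeddings
from langchain_community.retrievers import NanoPQRetriever

retriever = NanoPQRetriever.from_texts(
    texts=["I have a pen.", "Do you have a pen?", "I have a bag."],
    embeddings=FakeEmbeddings(size=100),  # width chosen to split evenly into subspaces
)
docs = retriever.invoke("pen")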
@@ -43,6 +43,7 @@ from langchain_core.runnables.passthrough import (
    RunnablePassthrough,
    RunnablePick,
)
from langchain_core.runnables.rate_limiter import InMemoryRateLimiter
from langchain_core.runnables.router import RouterInput, RouterRunnable
from langchain_core.runnables.utils import (
    AddableDict,
@@ -64,6 +65,7 @@ __all__ = [
    "ensure_config",
    "run_in_executor",
    "patch_config",
    "InMemoryRateLimiter",
    "RouterInput",
    "RouterRunnable",
    "Runnable",
@@ -3999,11 +3999,14 @@ class RunnableLambda(Runnable[Input, Output]):
    RunnableLambda can be composed as any other Runnable and provides
    seamless integration with LangChain tracing.

    `RunnableLambda` is best suited for code that does not need to support
    ``RunnableLambda`` is best suited for code that does not need to support
    streaming. If you need to support streaming (i.e., be able to operate
    on chunks of inputs and yield chunks of outputs), use `RunnableGenerator`
    on chunks of inputs and yield chunks of outputs), use ``RunnableGenerator``
    instead.

    Note that if a ``RunnableLambda`` returns an instance of ``Runnable``, that
    instance is invoked (or streamed) during execution.

    Examples:

        .. code-block:: python
319 changes: libs/core/langchain_core/runnables/rate_limiter.py (new file)
@@ -0,0 +1,319 @@
"""Interface and implementation for time based rate limiters.

This module defines an interface for rate limiting requests based on time.

The interface cannot account for the size of the request or any other factors.

The module also provides an in-memory implementation of the rate limiter.
"""

from __future__ import annotations

import abc
import asyncio
import threading
import time
from typing import (
    Any,
    Optional,
    cast,
)

from langchain_core._api import beta
from langchain_core.runnables import RunnableConfig
from langchain_core.runnables.base import (
    Input,
    Output,
    Runnable,
)


@beta(message="Introduced in 0.2.24. API subject to change.")
class BaseRateLimiter(Runnable[Input, Output], abc.ABC):
    """Base class for rate limiters.

    Usage of the base limiter is through the acquire and aacquire methods depending
    on whether running in a sync or async context.

    Implementations are free to add a timeout parameter to their initialize method
    to allow users to specify a timeout for acquiring the necessary tokens when
    using a blocking call.

    Current limitations:

    - The rate limiter is not designed to work across different processes. It is
      an in-memory rate limiter, but it is thread safe.
    - The rate limiter only supports time-based rate limiting. It does not take
      into account the size of the request or any other factors.
    - The current implementation does not handle streaming inputs well and will
      consume all inputs even if the rate limit has not been reached. Better support
      for streaming inputs will be added in the future.
    - When the rate limiter is combined with another runnable via a RunnableSequence,
      usage of .batch() or .abatch() will only respect the average rate limit.
      There will be bursty behavior as .batch() and .abatch() wait for each step
      to complete before starting the next step. One way to mitigate this is to
      use batch_as_completed() or abatch_as_completed().

    .. versionadded:: 0.2.24
    """

    @abc.abstractmethod
    def acquire(self, *, blocking: bool = True) -> bool:
        """Attempt to acquire the necessary tokens for the rate limiter.

        This method blocks until the required tokens are available if `blocking`
        is set to True.

        If `blocking` is set to False, the method will immediately return the result
        of the attempt to acquire the tokens.

        Args:
            blocking: If True, the method will block until the tokens are available.
                If False, the method will return immediately with the result of
                the attempt. Defaults to True.

        Returns:
            True if the tokens were successfully acquired, False otherwise.
        """

    @abc.abstractmethod
    async def aacquire(self, *, blocking: bool = True) -> bool:
        """Attempt to acquire the necessary tokens for the rate limiter.

        This method blocks until the required tokens are available if `blocking`
        is set to True.

        If `blocking` is set to False, the method will immediately return the result
        of the attempt to acquire the tokens.

        Args:
            blocking: If True, the method will block until the tokens are available.
                If False, the method will return immediately with the result of
                the attempt. Defaults to True.

        Returns:
            True if the tokens were successfully acquired, False otherwise.
        """

    def invoke(
        self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any
    ) -> Output:
        """Invoke the rate limiter.

        This is a blocking call that waits until the given number of tokens are
        available.

        Args:
            input: The input to the rate limiter.
            config: The configuration for the rate limiter.
            **kwargs: Additional keyword arguments.

        Returns:
            The output of the rate limiter.
        """

        def _invoke(input: Input) -> Output:
            """Invoke the rate limiter. Internal function."""
            self.acquire(blocking=True)
            return cast(Output, input)

        return self._call_with_config(_invoke, input, config, **kwargs)

    async def ainvoke(
        self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any
    ) -> Output:
        """Invoke the rate limiter. Async version.

        This is a blocking call that waits until the given number of tokens are
        available.

        Args:
            input: The input to the rate limiter.
            config: The configuration for the rate limiter.
            **kwargs: Additional keyword arguments.
        """

        async def _ainvoke(input: Input) -> Output:
            """Invoke the rate limiter. Internal function."""
            await self.aacquire(blocking=True)
            return cast(Output, input)

        return await self._acall_with_config(_ainvoke, input, config, **kwargs)


@beta(message="Introduced in 0.2.24. API subject to change.")
class InMemoryRateLimiter(BaseRateLimiter):
    """An in memory rate limiter.

    This is an in memory rate limiter, so it cannot rate limit across
    different processes.

    The rate limiter only allows time-based rate limiting and does not
    take into account any information about the input or the output, so it
    cannot be used to rate limit based on the size of the request.

    It is thread safe and can be used in either a sync or async context.

    The in memory rate limiter is based on a token bucket. The bucket is filled
    with tokens at a given rate. Each request consumes a token. If there are
    not enough tokens in the bucket, the request is blocked until there are
    enough tokens.

    These *tokens* have NOTHING to do with LLM tokens. They are just
    a way to keep track of how many requests can be made at a given time.

    Current limitations:

    - The rate limiter is not designed to work across different processes. It is
      an in-memory rate limiter, but it is thread safe.
    - The rate limiter only supports time-based rate limiting. It does not take
      into account the size of the request or any other factors.
    - The current implementation does not handle streaming inputs well and will
      consume all inputs even if the rate limit has not been reached. Better support
      for streaming inputs will be added in the future.
    - When the rate limiter is combined with another runnable via a RunnableSequence,
      usage of .batch() or .abatch() will only respect the average rate limit.
      There will be bursty behavior as .batch() and .abatch() wait for each step
      to complete before starting the next step. One way to mitigate this is to
      use batch_as_completed() or abatch_as_completed().

    Example:

        .. code-block:: python

            from langchain_core.runnables import RunnableLambda, InMemoryRateLimiter

            rate_limiter = InMemoryRateLimiter(
                requests_per_second=100, check_every_n_seconds=0.1, max_bucket_size=10
            )

            def foo(x: int) -> int:
                return x

            foo_ = RunnableLambda(foo)
            chain = rate_limiter | foo_
            assert chain.invoke(1) == 1

    .. versionadded:: 0.2.24
    """

    def __init__(
        self,
        *,
        requests_per_second: float = 1,
        check_every_n_seconds: float = 0.1,
        max_bucket_size: float = 1,
    ) -> None:
        """A rate limiter based on a token bucket.

        These *tokens* have NOTHING to do with LLM tokens. They are just
        a way to keep track of how many requests can be made at a given time.

        This rate limiter is designed to work in a threaded environment.

        It works by filling up a bucket with tokens at a given rate. Each
        request consumes a given number of tokens. If there are not enough
        tokens in the bucket, the request is blocked until there are enough
        tokens.

        Args:
            requests_per_second: The number of tokens to add per second to the bucket.
                Must be at least 1. The tokens represent "credit" that can be used
                to make requests.
            check_every_n_seconds: check whether the tokens are available
                every this many seconds. Can be a float to represent
                fractions of a second.
            max_bucket_size: The maximum number of tokens that can be in the bucket.
                This is used to prevent bursts of requests.
        """
        # Number of requests that we can make per second.
        self.requests_per_second = requests_per_second
        # Number of tokens in the bucket.
        self.available_tokens = 0.0
        self.max_bucket_size = max_bucket_size
        # A lock to ensure that tokens can only be consumed by one thread
        # at a given time.
        self._consume_lock = threading.Lock()
        # The last time we tried to consume tokens.
        self.last: Optional[float] = None
        self.check_every_n_seconds = check_every_n_seconds

    def _consume(self) -> bool:
        """Consume the given amount of tokens if possible.

        Returns:
            True means that the tokens were consumed, and the caller can proceed to
            make the request. A False means that the tokens were not consumed, and
            the caller should try again later.
        """
        with self._consume_lock:
            now = time.time()

            # initialize on first call to avoid a burst
            if self.last is None:
                self.last = now

            elapsed = now - self.last

            if elapsed * self.requests_per_second >= 1:
                self.available_tokens += elapsed * self.requests_per_second
                self.last = now

            # Make sure that we don't exceed the bucket size.
            # This is used to prevent bursts of requests.
            self.available_tokens = min(self.available_tokens, self.max_bucket_size)

            # As long as we have at least one token, we can proceed.
            if self.available_tokens >= 1:
                self.available_tokens -= 1
                return True

            return False

    def acquire(self, *, blocking: bool = True) -> bool:
        """Attempt to acquire a token from the rate limiter.

        This method blocks until the required tokens are available if `blocking`
        is set to True.

        If `blocking` is set to False, the method will immediately return the result
        of the attempt to acquire the tokens.

        Args:
            blocking: If True, the method will block until the tokens are available.
                If False, the method will return immediately with the result of
                the attempt. Defaults to True.

        Returns:
            True if the tokens were successfully acquired, False otherwise.
        """
        if not blocking:
            return self._consume()

        while not self._consume():
            time.sleep(self.check_every_n_seconds)
        return True

    async def aacquire(self, *, blocking: bool = True) -> bool:
        """Attempt to acquire a token from the rate limiter. Async version.

        This method blocks until the required tokens are available if `blocking`
        is set to True.

        If `blocking` is set to False, the method will immediately return the result
        of the attempt to acquire the tokens.

        Args:
            blocking: If True, the method will block until the tokens are available.
                If False, the method will return immediately with the result of
                the attempt. Defaults to True.

        Returns:
            True if the tokens were successfully acquired, False otherwise.
        """
        if not blocking:
            return self._consume()

        while not self._consume():
            await asyncio.sleep(self.check_every_n_seconds)
        return True
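Note: besides composing the limiter into a chain as the docstring example shows, it can gate arbitrary client code directly through acquire(). A minimal sketch; the API call here is a placeholder:

from langchain_core.runnables.rate_limiter import InMemoryRateLimiter

# Cap outgoing calls at ~2 per second, allowing a burst of at most 2.
limiter = InMemoryRateLimiter(
    requests_per_second=2, check_every_n_seconds=0.1, max_bucket_size=2
)

def call_api(prompt: str) -> str:
    return f"echo: {prompt}"  # stand-in for a real network call

for prompt in ["a", "b", "c", "d"]:
    limiter.acquire(blocking=True)  # sleeps until a token is available
    print(call_api(prompt))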
@@ -20,6 +20,7 @@ tool for the job.
from __future__ import annotations

import asyncio
import copy
import functools
import inspect
import json
@@ -1481,8 +1482,9 @@ def _prep_run_args(
) -> Tuple[Union[str, Dict], Dict]:
    config = ensure_config(config)
    if _is_tool_call(input):
        tool_call_id: Optional[str] = cast(ToolCall, input)["id"]
        tool_input: Union[str, dict] = cast(ToolCall, input)["args"]
        input_copy = copy.deepcopy(input)
        tool_call_id: Optional[str] = cast(ToolCall, input_copy)["id"]
        tool_input: Union[str, dict] = cast(ToolCall, input_copy)["args"]
    else:
        tool_call_id = None
        tool_input = cast(Union[str, dict], input)
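Note: the deepcopy above guarantees that running a tool no longer mutates the caller's tool_call dict in place. A sketch of the behavior the change locks in; the tool itself is a stand-in:

from langchain_core.tools import tool

@tool
def greet(name: str) -> str:
    """Greet someone by name."""
    return f"hello {name}"

tool_call = {
    "name": "greet",
    "args": {"name": "bob"},
    "id": "abc123",
    "type": "tool_call",
}
greet.invoke(tool_call)  # returns a ToolMessage
assert tool_call["args"] == {"name": "bob"}  # caller's dict is untouched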
@@ -8,7 +8,6 @@ from typing import (
    Any,
    Callable,
    Dict,
    Iterable,
    List,
    Optional,
    Sequence,
@@ -74,6 +73,27 @@ class InMemoryVectorStore(VectorStore):
            "failed": [],
        }

    async def aupsert(
        self, items: Sequence[Document], /, **kwargs: Any
    ) -> UpsertResponse:
        vectors = await self.embedding.aembed_documents(
            [item.page_content for item in items]
        )
        ids = []
        for item, vector in zip(items, vectors):
            doc_id = item.id if item.id else str(uuid.uuid4())
            ids.append(doc_id)
            self.store[doc_id] = {
                "id": doc_id,
                "vector": vector,
                "text": item.page_content,
                "metadata": item.metadata,
            }
        return {
            "succeeded": ids,
            "failed": [],
        }

    def get_by_ids(self, ids: Sequence[str], /) -> List[Document]:
        """Get documents by their ids.

@@ -108,14 +128,6 @@ class InMemoryVectorStore(VectorStore):
        """
        return self.get_by_ids(ids)

    async def aadd_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> List[str]:
        return self.add_texts(texts, metadatas, **kwargs)

    def _similarity_search_with_score_by_vector(
        self,
        embedding: List[float],
@@ -172,7 +184,13 @@ class InMemoryVectorStore(VectorStore):
    async def asimilarity_search_with_score(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> List[Tuple[Document, float]]:
        return self.similarity_search_with_score(query, k, **kwargs)
        embedding = await self.embedding.aembed_query(query)
        docs = self.similarity_search_with_score_by_vector(
            embedding,
            k,
            **kwargs,
        )
        return docs

    def similarity_search_by_vector(
        self,
@@ -200,7 +218,10 @@ class InMemoryVectorStore(VectorStore):
    async def asimilarity_search(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> List[Document]:
        return self.similarity_search(query, k, **kwargs)
        return [
            doc
            for doc, _ in await self.asimilarity_search_with_score(query, k, **kwargs)
        ]

    def max_marginal_relevance_search_by_vector(
        self,
@@ -249,6 +270,23 @@ class InMemoryVectorStore(VectorStore):
            **kwargs,
        )

    async def amax_marginal_relevance_search(
        self,
        query: str,
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        **kwargs: Any,
    ) -> List[Document]:
        embedding_vector = await self.embedding.aembed_query(query)
        return self.max_marginal_relevance_search_by_vector(
            embedding_vector,
            k,
            fetch_k,
            lambda_mult=lambda_mult,
            **kwargs,
        )

    @classmethod
    def from_texts(
        cls,
@@ -271,7 +309,11 @@ class InMemoryVectorStore(VectorStore):
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> "InMemoryVectorStore":
        return cls.from_texts(texts, embedding, metadatas, **kwargs)
        store = cls(
            embedding=embedding,
        )
        await store.aadd_texts(texts=texts, metadatas=metadatas, **kwargs)
        return store

    @classmethod
    def load(
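Note: with these overrides the async surface now embeds via aembed_query / aembed_documents instead of silently falling back to the sync path. A minimal async round trip, using the fake embedding class the accompanying tests rely on (import paths assume langchain-core 0.2.x):

import asyncio

from langchain_core.embeddings import DeterministicFakeEmbedding
from langchain_core.vectorstores import InMemoryVectorStore

async def main() -> None:
    store = await InMemoryVectorStore.afrom_texts(
        ["foo", "bar", "baz"], DeterministicFakeEmbedding(size=8)
    )
    docs = await store.asimilarity_search("foo", k=1)
    print(docs[0].page_content)

asyncio.run(main())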
@@ -11,6 +11,7 @@ EXPECTED_ALL = [
    "run_in_executor",
    "patch_config",
    "RouterInput",
    "InMemoryRateLimiter",
    "RouterRunnable",
    "Runnable",
    "RunnableSerializable",
145 changes: libs/core/tests/unit_tests/runnables/test_rate_limiter.py (new file)
@@ -0,0 +1,145 @@
"""Test rate limiter."""

import time

import pytest
from freezegun import freeze_time

from langchain_core.runnables import RunnableLambda
from langchain_core.runnables.rate_limiter import InMemoryRateLimiter


@pytest.fixture
def rate_limiter() -> InMemoryRateLimiter:
    """Return an instance of InMemoryRateLimiter."""
    return InMemoryRateLimiter(
        requests_per_second=2, check_every_n_seconds=0.1, max_bucket_size=2
    )


def test_initial_state(rate_limiter: InMemoryRateLimiter) -> None:
    """Test the initial state of the rate limiter."""
    assert rate_limiter.available_tokens == 0.0


def test_sync_wait(rate_limiter: InMemoryRateLimiter) -> None:
    with freeze_time("2023-01-01 00:00:00") as frozen_time:
        rate_limiter.last = time.time()
        assert not rate_limiter.acquire(blocking=False)
        frozen_time.tick(0.1)  # Increment by 0.1 seconds
        assert rate_limiter.available_tokens == 0
        assert not rate_limiter.acquire(blocking=False)
        frozen_time.tick(0.1)  # Increment by 0.1 seconds
        assert rate_limiter.available_tokens == 0
        assert not rate_limiter.acquire(blocking=False)
        frozen_time.tick(1.8)
        assert rate_limiter.acquire(blocking=False)
        assert rate_limiter.available_tokens == 1.0
        assert rate_limiter.acquire(blocking=False)
        assert rate_limiter.available_tokens == 0
        frozen_time.tick(2.1)
        assert rate_limiter.acquire(blocking=False)
        assert rate_limiter.available_tokens == 1
        frozen_time.tick(0.9)
        assert rate_limiter.acquire(blocking=False)
        assert rate_limiter.available_tokens == 1

        # Check max bucket size
        frozen_time.tick(100)
        assert rate_limiter.acquire(blocking=False)
        assert rate_limiter.available_tokens == 1


async def test_async_wait(rate_limiter: InMemoryRateLimiter) -> None:
    with freeze_time("2023-01-01 00:00:00") as frozen_time:
        rate_limiter.last = time.time()
        assert not await rate_limiter.aacquire(blocking=False)
        frozen_time.tick(0.1)  # Increment by 0.1 seconds
        assert rate_limiter.available_tokens == 0
        assert not await rate_limiter.aacquire(blocking=False)
        frozen_time.tick(0.1)  # Increment by 0.1 seconds
        assert rate_limiter.available_tokens == 0
        assert not await rate_limiter.aacquire(blocking=False)
        frozen_time.tick(1.8)
        assert await rate_limiter.aacquire(blocking=False)
        assert rate_limiter.available_tokens == 1.0
        assert await rate_limiter.aacquire(blocking=False)
        assert rate_limiter.available_tokens == 0
        frozen_time.tick(2.1)
        assert await rate_limiter.aacquire(blocking=False)
        assert rate_limiter.available_tokens == 1
        frozen_time.tick(0.9)
        assert await rate_limiter.aacquire(blocking=False)
        assert rate_limiter.available_tokens == 1


def test_sync_wait_max_bucket_size() -> None:
    with freeze_time("2023-01-01 00:00:00") as frozen_time:
        rate_limiter = InMemoryRateLimiter(
            requests_per_second=2, check_every_n_seconds=0.1, max_bucket_size=500
        )
        rate_limiter.last = time.time()
        frozen_time.tick(100)  # Increment by 100 seconds
        assert rate_limiter.acquire(blocking=False)
        # After 100 seconds we manage to refill the bucket with 200 tokens
        # After consuming 1 token, we should have 199 tokens left
        assert rate_limiter.available_tokens == 199.0
        frozen_time.tick(10000)
        assert rate_limiter.acquire(blocking=False)
        assert rate_limiter.available_tokens == 499.0
        # Assert that sync wait can proceed without blocking
        # since we have enough tokens
        rate_limiter.acquire(blocking=True)


async def test_async_wait_max_bucket_size() -> None:
    with freeze_time("2023-01-01 00:00:00") as frozen_time:
        rate_limiter = InMemoryRateLimiter(
            requests_per_second=2, check_every_n_seconds=0.1, max_bucket_size=500
        )
        rate_limiter.last = time.time()
        frozen_time.tick(100)  # Increment by 100 seconds
        assert await rate_limiter.aacquire(blocking=False)
        # After 100 seconds we manage to refill the bucket with 200 tokens
        # After consuming 1 token, we should have 199 tokens left
        assert rate_limiter.available_tokens == 199.0
        frozen_time.tick(10000)
        assert await rate_limiter.aacquire(blocking=False)
        assert rate_limiter.available_tokens == 499.0
        # Assert that sync wait can proceed without blocking
        # since we have enough tokens
        await rate_limiter.aacquire(blocking=True)


def test_add_rate_limiter() -> None:
    """Add rate limiter."""

    def foo(x: int) -> int:
        """Return x."""
        return x

    rate_limiter = InMemoryRateLimiter(
        requests_per_second=100, check_every_n_seconds=0.1, max_bucket_size=10
    )

    foo_ = RunnableLambda(foo)
    chain = rate_limiter | foo_
    assert chain.invoke(1) == 1


async def test_async_add_rate_limiter() -> None:
    """Add rate limiter."""

    async def foo(x: int) -> int:
        """Return x."""
        return x

    rate_limiter = InMemoryRateLimiter(
        requests_per_second=100, check_every_n_seconds=0.1, max_bucket_size=10
    )

    # mypy is unable to follow the type information when
    # RunnableLambda is used with an async function
    foo_ = RunnableLambda(foo)  # type: ignore
    chain = rate_limiter | foo_
    assert (await chain.ainvoke(1)) == 1
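Note: the numbers asserted in test_sync_wait fall straight out of the token-bucket rule in _consume: tokens are credited only once elapsed * requests_per_second reaches 1, and the balance is clamped to max_bucket_size. Tracing the first successful acquire:

# requests_per_second=2, max_bucket_size=2, bucket starts empty
rps, cap, tokens = 2.0, 2.0, 0.0

# t=0.1s: 0.1 * 2 = 0.2 < 1, no refill yet, acquire fails
# t=0.2s: 0.2 * 2 = 0.4 < 1, still no refill, acquire fails
# t=2.0s: 2.0 * 2 = 4 tokens accrue, clamped to the cap of 2,
#         then acquire() consumes one
tokens = min(tokens + 2.0 * rps, cap) - 1
assert tokens == 1.0  # matches rate_limiter.available_tokens == 1.0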
@@ -977,6 +977,16 @@ class AFooBase(FooBase):
def test_tool_pass_config(tool: BaseTool) -> None:
    assert tool.invoke({"bar": "baz"}, {"configurable": {"foo": "not-bar"}}) == "baz"

    # Test tool calls
    tool_call = {
        "name": tool.name,
        "args": {"bar": "baz"},
        "id": "abc123",
        "type": "tool_call",
    }
    _ = tool.invoke(tool_call, {"configurable": {"foo": "not-bar"}})
    assert tool_call["args"] == {"bar": "baz"}


@pytest.mark.parametrize(
    "tool", [foo, afoo, simple_foo, asimple_foo, FooBase(), AFooBase()]
@@ -1,4 +1,5 @@
from pathlib import Path
from unittest.mock import AsyncMock, Mock

import pytest
from langchain_standard_tests.integration_tests.vectorstores import (
@@ -24,25 +25,39 @@ class TestAsyncInMemoryReadWriteTestSuite(AsyncReadWriteTestSuite):
        return InMemoryVectorStore(embedding=self.get_embeddings())


async def test_inmemory() -> None:
    """Test end to end construction and search."""
async def test_inmemory_similarity_search() -> None:
    """Test end to end similarity search."""
    store = await InMemoryVectorStore.afrom_texts(
        ["foo", "bar", "baz"], DeterministicFakeEmbedding(size=6)
        ["foo", "bar", "baz"], DeterministicFakeEmbedding(size=3)
    )
    output = await store.asimilarity_search("foo", k=1)

    # Check sync version
    output = store.similarity_search("foo", k=1)
    assert output == [Document(page_content="foo", id=AnyStr())]

    # Check async version
    output = await store.asimilarity_search("bar", k=2)
    assert output == [
        Document(page_content="bar", id=AnyStr()),
        Document(page_content="baz", id=AnyStr()),
    ]

    output2 = await store.asimilarity_search_with_score("bar", k=2)
    assert output2[0][1] > output2[1][1]


async def test_inmemory_similarity_search_with_score() -> None:
    """Test end to end similarity search with score"""
    store = await InMemoryVectorStore.afrom_texts(
        ["foo", "bar", "baz"], DeterministicFakeEmbedding(size=3)
    )

    output = store.similarity_search_with_score("foo", k=1)
    assert output[0][0].page_content == "foo"

    output = await store.asimilarity_search_with_score("bar", k=2)
    assert output[0][1] > output[1][1]


async def test_add_by_ids() -> None:
    """Test add texts with ids."""
    vectorstore = InMemoryVectorStore(embedding=DeterministicFakeEmbedding(size=6))

    # Check sync version
@@ -50,17 +65,25 @@ async def test_add_by_ids() -> None:
    assert ids1 == ["1", "2", "3"]
    assert sorted(vectorstore.store.keys()) == ["1", "2", "3"]

    # Check async version
    ids2 = await vectorstore.aadd_texts(["foo", "bar", "baz"], ids=["4", "5", "6"])
    assert ids2 == ["4", "5", "6"]
    assert sorted(vectorstore.store.keys()) == ["1", "2", "3", "4", "5", "6"]


async def test_inmemory_mmr() -> None:
    """Test MMR search"""
    texts = ["foo", "foo", "fou", "foy"]
    docsearch = await InMemoryVectorStore.afrom_texts(
        texts, DeterministicFakeEmbedding(size=6)
    )
    # make sure we can k > docstore size
    output = docsearch.max_marginal_relevance_search("foo", k=10, lambda_mult=0.1)
    assert len(output) == len(texts)
    assert output[0] == Document(page_content="foo", id=AnyStr())
    assert output[1] == Document(page_content="foy", id=AnyStr())

    # Check async version
    output = await docsearch.amax_marginal_relevance_search(
        "foo", k=10, lambda_mult=0.1
    )
@@ -85,13 +108,91 @@ async def test_inmemory_dump_load(tmp_path: Path) -> None:


async def test_inmemory_filter() -> None:
    """Test end to end construction and search."""
    """Test end to end construction and search with filter."""
    store = await InMemoryVectorStore.afrom_texts(
        ["foo", "bar"],
        DeterministicFakeEmbedding(size=6),
        [{"id": 1}, {"id": 2}],
    )
    output = await store.asimilarity_search(
        "baz", filter=lambda doc: doc.metadata["id"] == 1
    )

    # Check sync version
    output = store.similarity_search("fee", filter=lambda doc: doc.metadata["id"] == 1)
    assert output == [Document(page_content="foo", metadata={"id": 1}, id=AnyStr())]

    # filter with not stored document id
    output = await store.asimilarity_search(
        "baz", filter=lambda doc: doc.metadata["id"] == 3
    )
    assert output == []


async def test_inmemory_upsert() -> None:
    """Test upsert documents."""
    embedding = DeterministicFakeEmbedding(size=2)
    store = InMemoryVectorStore(embedding=embedding)

    # Check sync version
    store.upsert([Document(page_content="foo", id="1")])
    assert sorted(store.store.keys()) == ["1"]

    # Check async version
    await store.aupsert([Document(page_content="bar", id="2")])
    assert sorted(store.store.keys()) == ["1", "2"]

    # update existing document
    await store.aupsert(
        [Document(page_content="baz", id="2", metadata={"metadata": "value"})]
    )
    item = store.store["2"]

    baz_vector = embedding.embed_query("baz")
    assert item == {
        "id": "2",
        "text": "baz",
        "vector": baz_vector,
        "metadata": {"metadata": "value"},
    }


async def test_inmemory_get_by_ids() -> None:
    """Test get by ids."""

    store = InMemoryVectorStore(embedding=DeterministicFakeEmbedding(size=3))

    store.upsert(
        [
            Document(page_content="foo", id="1", metadata={"metadata": "value"}),
            Document(page_content="bar", id="2"),
            Document(page_content="baz", id="3"),
        ],
    )

    # Check sync version
    output = store.get_by_ids(["1", "2"])
    assert output == [
        Document(page_content="foo", id="1", metadata={"metadata": "value"}),
        Document(page_content="bar", id="2"),
    ]

    # Check async version
    output = await store.aget_by_ids(["1", "3", "5"])
    assert output == [
        Document(page_content="foo", id="1", metadata={"metadata": "value"}),
        Document(page_content="baz", id="3"),
    ]


async def test_inmemory_call_embeddings_async() -> None:
    embeddings_mock = Mock(
        wraps=DeterministicFakeEmbedding(size=3),
        aembed_documents=AsyncMock(),
        aembed_query=AsyncMock(),
    )
    store = InMemoryVectorStore(embedding=embeddings_mock)

    await store.aadd_texts("foo")
    await store.asimilarity_search("foo", k=1)

    # Ensure the async embedding function is called
    assert embeddings_mock.aembed_documents.await_count == 1
    assert embeddings_mock.aembed_query.await_count == 1
@@ -12,7 +12,6 @@ from typing import (
    TypedDict,
    TypeVar,
    Union,
    cast,
)

from langchain_community.chat_models.ollama import ChatOllama
@@ -24,10 +23,7 @@ from langchain_core.language_models import LanguageModelInput
from langchain_core.messages import (
    AIMessage,
    BaseMessage,
    HumanMessage,
    SystemMessage,
    ToolCall,
    ToolMessage,
)
from langchain_core.output_parsers.base import OutputParserLike
from langchain_core.output_parsers.json import JsonOutputParser
@@ -282,59 +278,6 @@ class OllamaFunctions(ChatOllama):
        else:
            return llm | parser_chain

    def _convert_messages_to_ollama_messages(
        self, messages: List[BaseMessage]
    ) -> List[Dict[str, Union[str, List[str]]]]:
        ollama_messages: List = []
        for message in messages:
            role = ""
            if isinstance(message, HumanMessage):
                role = "user"
            elif isinstance(message, AIMessage) or isinstance(message, ToolMessage):
                role = "assistant"
            elif isinstance(message, SystemMessage):
                role = "system"
            else:
                raise ValueError("Received unsupported message type for Ollama.")

            content = ""
            images = []
            if isinstance(message.content, str):
                content = message.content
            else:
                for content_part in cast(List[Dict], message.content):
                    if content_part.get("type") == "text":
                        content += f"\n{content_part['text']}"
                    elif content_part.get("type") == "image_url":
                        if isinstance(content_part.get("image_url"), str):
                            image_url_components = content_part["image_url"].split(",")
                            # Support data:image/jpeg;base64,<image> format
                            # and base64 strings
                            if len(image_url_components) > 1:
                                images.append(image_url_components[1])
                            else:
                                images.append(image_url_components[0])
                        else:
                            raise ValueError(
                                "Only string image_url content parts are supported."
                            )
                    else:
                        raise ValueError(
                            "Unsupported message content type. "
                            "Must either have type 'text' or type 'image_url' "
                            "with a string 'image_url' field."
                        )

            ollama_messages.append(
                {
                    "role": role,
                    "content": content,
                    "images": images,
                }
            )

        return ollama_messages

    def _generate(
        self,
        messages: List[BaseMessage],
21 changes: libs/experimental/poetry.lock (generated)
@@ -1,4 +1,4 @@
# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand.
# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand.

[[package]]
name = "aiohttp"
@@ -1461,7 +1461,7 @@ files = [

[[package]]
name = "langchain"
version = "0.2.6"
version = "0.2.11"
description = "Building applications with LLMs through composability"
optional = false
python-versions = ">=3.8.1,<4.0"
@@ -1471,7 +1471,7 @@ develop = true
[package.dependencies]
aiohttp = "^3.8.3"
async-timeout = {version = "^4.0.0", markers = "python_version < \"3.11\""}
langchain-core = "^0.2.10"
langchain-core = "^0.2.23"
langchain-text-splitters = "^0.2.0"
langsmith = "^0.1.17"
numpy = [
@@ -1490,7 +1490,7 @@ url = "../langchain"

[[package]]
name = "langchain-community"
version = "0.2.6"
version = "0.2.10"
description = "Community contributed LangChain integrations."
optional = false
python-versions = ">=3.8.1,<4.0"
@@ -1500,8 +1500,8 @@ develop = true
[package.dependencies]
aiohttp = "^3.8.3"
dataclasses-json = ">= 0.5.7, < 0.7"
langchain = "^0.2.6"
langchain-core = "^0.2.10"
langchain = "^0.2.9"
langchain-core = "^0.2.23"
langsmith = "^0.1.0"
numpy = [
    {version = ">=1,<2", markers = "python_version < \"3.12\""},
@@ -1518,7 +1518,7 @@ url = "../community"

[[package]]
name = "langchain-core"
version = "0.2.11"
version = "0.2.23"
description = "Building applications with LLMs through composability"
optional = false
python-versions = ">=3.8.1,<4.0"
@@ -1542,7 +1542,7 @@ url = "../core"

[[package]]
name = "langchain-openai"
version = "0.1.13"
version = "0.1.18"
description = "An integration package connecting OpenAI and LangChain"
optional = false
python-versions = ">=3.8.1,<4.0"
@@ -1550,7 +1550,7 @@ files = []
develop = true

[package.dependencies]
langchain-core = ">=0.2.2,<0.3"
langchain-core = "^0.2.20"
openai = "^1.32.0"
tiktoken = ">=0.7,<1"

@@ -2622,7 +2622,6 @@ files = [
    {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
    {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"},
    {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"},
    {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"},
    {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"},
    {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"},
    {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"},
@@ -3704,4 +3703,4 @@ test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools",
[metadata]
lock-version = "2.0"
python-versions = ">=3.8.1,<4.0"
content-hash = "1c7a8eae7e62464f7bd2eb9bda374c221ec0eb5c286aa61526c0d76db9326aed"
content-hash = "dec43275b986de6578d6cd1236d077a7c74441d172319762d452d6b4a5db094c"
@@ -4,25 +4,25 @@ build-backend = "poetry.core.masonry.api"

[tool.poetry]
name = "langchain-experimental"
version = "0.0.62"
version = "0.0.63"
description = "Building applications with LLMs through composability"
authors = []
license = "MIT"
readme = "README.md"
repository = "https://github.com/langchain-ai/langchain"

[tool.poetry.urls]
"Source Code" = "https://github.com/langchain-ai/langchain/tree/master/libs/experimental"
"Release Notes" = "https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain-experimental%3D%3D0%22&expanded=true"

[tool.mypy]
ignore_missing_imports = "True"
disallow_untyped_defs = "True"
exclude = [ "notebooks", "examples", "example_data",]

[tool.poetry.urls]
"Source Code" = "https://github.com/langchain-ai/langchain/tree/master/libs/experimental"
"Release Notes" = "https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain-experimental%3D%3D0%22&expanded=true"

[tool.poetry.dependencies]
python = ">=3.8.1,<4.0"
langchain-core = "^0.2.10"
langchain-core = "^0.2.23"
langchain-community = "^0.2.6"

[tool.ruff.lint]
30 changes: libs/experimental/tests/unit_tests/test_ollama_functions.py (new file)
@@ -0,0 +1,30 @@
import json
from typing import Any
from unittest.mock import patch

from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel

from langchain_experimental.llms.ollama_functions import OllamaFunctions


class Schema(BaseModel):
    pass


@patch.object(OllamaFunctions, "_create_stream")
def test_convert_image_prompt(
    _create_stream_mock: Any,
) -> None:
    response = {"message": {"content": '{"tool": "Schema", "tool_input": {}}'}}
    _create_stream_mock.return_value = [json.dumps(response)]

    prompt = ChatPromptTemplate.from_messages(
        [("human", [{"image_url": "data:image/jpeg;base64,{image_url}"}])]
    )

    lmm = prompt | OllamaFunctions().with_structured_output(schema=Schema)

    schema_instance = lmm.invoke(dict(image_url=""))

    assert schema_instance is not None
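Note: a sketch of the same with_structured_output path against a live server, with a non-empty schema (requires a running Ollama instance; the model name is a placeholder):

from langchain_core.pydantic_v1 import BaseModel

from langchain_experimental.llms.ollama_functions import OllamaFunctions

class Person(BaseModel):
    name: str
    age: int

llm = OllamaFunctions(model="llama3", format="json")
structured = llm.with_structured_output(schema=Person)
person = structured.invoke("John is 30 years old.")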
@@ -366,6 +366,11 @@ def _init_chat_model_helper(

        # TODO: update to use model= once ChatBedrock supports
        return ChatBedrock(model_id=model, **kwargs)
    elif model_provider == "bedrock_converse":
        _check_pkg("langchain_aws")
        from langchain_aws import ChatBedrockConverse

        return ChatBedrockConverse(model=model, **kwargs)
    else:
        supported = ", ".join(_SUPPORTED_PROVIDERS)
        raise ValueError(
@@ -388,6 +393,7 @@ _SUPPORTED_PROVIDERS = {
    "huggingface",
    "groq",
    "bedrock",
    "bedrock_converse",
}
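Note: with the branch above, init_chat_model can route to the Bedrock Converse API. A minimal sketch (the model id and region are placeholders, and langchain-aws must be installed):

from langchain.chat_models import init_chat_model

llm = init_chat_model(
    "anthropic.claude-3-sonnet-20240229-v1:0",
    model_provider="bedrock_converse",
    region_name="us-east-1",  # extra kwargs pass through to ChatBedrockConverse
)
llm.invoke("hello")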
@@ -283,8 +283,7 @@ The following is the expected answer. Use this to measure correctness:

    def prep_inputs(self, inputs: Union[Dict[str, Any], Any]) -> Dict[str, str]:
        """Validate and prep inputs."""
        if "reference" not in inputs:
            inputs["reference"] = self._format_reference(inputs.get("reference"))
        inputs["reference"] = self._format_reference(inputs.get("reference"))
        return super().prep_inputs(inputs)

    def _call(
4 changes: libs/langchain/poetry.lock (generated)
@@ -1760,7 +1760,7 @@ files = [
 
 [[package]]
 name = "langchain-core"
-version = "0.2.22"
+version = "0.2.23"
 description = "Building applications with LLMs through composability"
 optional = false
 python-versions = ">=3.8.1,<4.0"
@@ -4561,4 +4561,4 @@ test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools",
 
 [metadata]
 lock-version = "2.0"
 python-versions = ">=3.8.1,<4.0"
-content-hash = "3bfb687a4835f55d71f5fc1d23c16a42afcf8e9ad496b4da61bd3cb9b026b6ca"
+content-hash = "73968d6c48a9e2523485914ecd420b6bd1e3cfdcef432f400ebe2b18ebadd51d"
@@ -4,17 +4,13 @@ build-backend = "poetry.core.masonry.api"
 
 [tool.poetry]
 name = "langchain"
-version = "0.2.10"
+version = "0.2.11"
 description = "Building applications with LLMs through composability"
 authors = []
 license = "MIT"
 readme = "README.md"
 repository = "https://github.com/langchain-ai/langchain"
 
-[tool.poetry.urls]
-"Source Code" = "https://github.com/langchain-ai/langchain/tree/master/libs/langchain"
-"Release Notes" = "https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain%3D%3D0%22&expanded=true"
-
 [tool.ruff]
 exclude = [ "tests/integration_tests/examples/non-utf8-encoding.py",]
 
@@ -28,12 +24,16 @@ skip = ".git,*.pdf,*.svg,*.pdf,*.yaml,*.ipynb,poetry.lock,*.min.js,*.css,package
 ignore-regex = ".*(Stati Uniti|Tense=Pres).*"
 ignore-words-list = "momento,collison,ned,foor,reworkd,parth,whats,aapply,mysogyny,unsecure,damon,crate,aadd,symbl,precesses,accademia,nin"
 
+[tool.poetry.urls]
+"Source Code" = "https://github.com/langchain-ai/langchain/tree/master/libs/langchain"
+"Release Notes" = "https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain%3D%3D0%22&expanded=true"
+
 [tool.poetry.scripts]
 langchain-server = "langchain.server:main"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
-langchain-core = "^0.2.22"
+langchain-core = "^0.2.23"
 langchain-text-splitters = "^0.2.0"
 langsmith = "^0.1.17"
 pydantic = ">=1,<3"
@@ -130,24 +130,46 @@ class Milvus(VectorStore):
         drop_old (Optional[bool]): Whether to drop the current collection. Defaults
             to False.
         auto_id (bool): Whether to enable auto id for primary key. Defaults to False.
-            If False, you needs to provide text ids (string less than 65535 bytes).
+            If False, you need to provide text ids (string less than 65535 bytes).
             If True, Milvus will generate unique integers as primary keys.
         primary_field (str): Name of the primary key field. Defaults to "pk".
         text_field (str): Name of the text field. Defaults to "text".
         vector_field (str): Name of the vector field. Defaults to "vector".
-        metadata_field (str): Name of the metadta field. Defaults to None.
+        enable_dynamic_field (Optional[bool]): Whether to enable
+            dynamic schema or not in Milvus. Defaults to False.
+            For more information about dynamic schema, please refer to
+            https://milvus.io/docs/enable-dynamic-field.md
+        metadata_field (str): Name of the metadata field. Defaults to None.
+            When metadata_field is specified,
+            the document's metadata will store as json.
+            This argument is about to be deprecated,
+            because it can be replaced by setting `enable_dynamic_field`=True.
         partition_key_field (Optional[str]): Name of the partition key field.
             Defaults to None. For more information about partition key, please refer to
             https://milvus.io/docs/use-partition-key.md#Use-Partition-Key
         partition_names (Optional[list]): List of specific partition names.
             Defaults to None. For more information about partition, please refer to
             https://milvus.io/docs/manage-partitions.md#Manage-Partitions
         replica_number (int): The number of replicas for the collection. Defaults to 1.
             For more information about replica, please refer to
             https://milvus.io/docs/replica.md#In-Memory-Replica
         timeout (Optional[float]): The timeout for Milvus operations. Defaults to None.
             An optional duration of time in seconds to allow for the RPCs.
             If timeout is not set, the client keeps waiting until the server responds
             or an error occurs.
         num_shards (Optional[int]): The number of shards for the collection.
             Defaults to None. For more information about shards, please refer to
             https://milvus.io/docs/glossary.md#Shard
 
     The connection args used for this class comes in the form of a dict,
     here are a few of the options:
         address (str): The actual address of Milvus
             instance. Example address: "localhost:19530"
         uri (str): The uri of Milvus instance. Example uri:
-            "path/to/local/directory/milvus_demo.db" for Milvus Lite.
             "http://randomwebsite:19530",
             "tcp:foobarsite:19530",
             "https://ok.s3.south.com:19530".
+            or "path/to/local/directory/milvus_demo.db" for Milvus Lite.
         host (str): The host of Milvus instance. Default at "localhost",
             PyMilvus will fill in the default host if only port is provided.
         port (str/int): The port of Milvus instance. Default at 19530, PyMilvus
@@ -178,6 +200,7 @@ class Milvus(VectorStore):
             milvus_store = Milvus(
                 embedding_function = Embeddings,
                 collection_name = "LangChainCollection",
                 connection_args = {"uri": "./milvus_demo.db"},
                 drop_old = True,
+                auto_id = True
             )
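Taken together, the documented options can be exercised as follows. A hedged sketch, assuming `langchain-milvus` 0.1.3 and `langchain-community` are installed; `FakeEmbeddings` stands in for a real embedding model, and the local-file uri runs Milvus Lite:

```python
from langchain_community.embeddings import FakeEmbeddings
from langchain_milvus import Milvus

vector_store = Milvus(
    embedding_function=FakeEmbeddings(size=128),  # stand-in embedding model
    collection_name="LangChainCollection",
    connection_args={"uri": "./milvus_demo.db"},  # Milvus Lite, local file
    enable_dynamic_field=True,  # arbitrary metadata keys become dynamic fields
    auto_id=True,  # let Milvus generate integer primary keys
    drop_old=True,
)
vector_store.add_texts(["foo"], metadatas=[{"any_key": "any_value"}])
```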
@@ -202,6 +225,7 @@ class Milvus(VectorStore):
         primary_field: str = "pk",
         text_field: str = "text",
         vector_field: str = "vector",
+        enable_dynamic_field: bool = False,
         metadata_field: Optional[str] = None,
         partition_key_field: Optional[str] = None,
         partition_names: Optional[list] = None,
@@ -260,6 +284,17 @@ class Milvus(VectorStore):
         self._text_field = text_field
         # In order for compatibility, the vector field needs to be called "vector"
         self._vector_field = vector_field
+        if metadata_field:
+            logger.warning(
+                "DeprecationWarning: `metadata_field` is about to be deprecated, "
+                "please set `enable_dynamic_field`=True instead."
+            )
+        if enable_dynamic_field and metadata_field:
+            metadata_field = None
+            logger.warning(
+                "When `enable_dynamic_field` is True, `metadata_field` is ignored."
+            )
+        self.enable_dynamic_field = enable_dynamic_field
         self._metadata_field = metadata_field
         self._partition_key_field = partition_key_field
         self.fields: list[str] = []
@@ -389,13 +424,36 @@ class Milvus(VectorStore):
         # Determine embedding dim
         dim = len(embeddings[0])
         fields = []
-        if self._metadata_field is not None:
+        # If enable_dynamic_field, we don't need to create fields, and just pass it.
+        # In the future, when metadata_field is deprecated,
+        # This logical structure will be simplified like this:
+        # ```
+        # if not self.enable_dynamic_field and metadatas:
+        #     for key, value in metadatas[0].items():
+        #         ...
+        # ```
+        if self.enable_dynamic_field:
+            pass
+        elif self._metadata_field is not None:
             fields.append(FieldSchema(self._metadata_field, DataType.JSON))
         else:
             # Determine metadata schema
             if metadatas:
                 # Create FieldSchema for each entry in metadata.
                 for key, value in metadatas[0].items():
                     if key in [
                         self._vector_field,
                         self._primary_field,
                         self._text_field,
                     ]:
                         logger.error(
                             (
                                 "Failure to create collection, "
                                 "metadata key: %s is reserved."
                             ),
                             key,
                         )
                         raise ValueError(f"Metadata key {key} is reserved.")
                     # Infer the corresponding datatype of the metadata
                     dtype = infer_dtype_bydata(value)
                     # Datatype isn't compatible
@@ -408,7 +466,7 @@ class Milvus(VectorStore):
                             key,
                         )
                         raise ValueError(f"Unrecognized datatype for {key}.")
-                    # Dataype is a string/varchar equivalent
+                    # Datatype is a string/varchar equivalent
                     elif dtype == DataType.VARCHAR:
                         fields.append(
                             FieldSchema(key, DataType.VARCHAR, max_length=65_535)
@@ -447,6 +505,7 @@ class Milvus(VectorStore):
             fields,
             description=self.collection_description,
             partition_key_field=self._partition_key_field,
+            enable_dynamic_field=self.enable_dynamic_field,
         )
 
         # Create the collection
@@ -617,16 +676,26 @@ class Milvus(VectorStore):
 
         texts = list(texts)
         if not self.auto_id:
-            assert isinstance(
-                ids, list
-            ), "A list of valid ids are required when auto_id is False."
+            assert isinstance(ids, list), (
+                "A list of valid ids are required when auto_id is False. "
+                "You can set `auto_id` to True in this Milvus instance to generate "
+                "ids automatically, or specify string-type ids for each text."
+            )
             assert len(set(ids)) == len(
                 texts
             ), "Different lengths of texts and unique ids are provided."
             assert all(isinstance(x, str) for x in ids), "All ids should be strings."
             assert all(
                 len(x.encode()) <= 65_535 for x in ids
             ), "Each id should be a string less than 65535 bytes."
+        else:
+            if ids is not None:
+                logger.warning(
+                    "The ids parameter is ignored when auto_id is True. "
+                    "The ids will be generated automatically."
+                )
 
         try:
             embeddings = self.embedding_func.embed_documents(texts)
         except NotImplementedError:
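In other words: with `auto_id=False`, callers must supply one unique string id (under 65535 bytes) per text, and with `auto_id=True` any supplied ids are ignored with a warning. A small sketch, assuming a store constructed like the earlier example but with `auto_id=False`:

```python
# Assumes `vector_store` was built with auto_id=False.
pks = vector_store.add_texts(
    ["alpha", "beta"],
    ids=["doc-alpha", "doc-beta"],  # unique strings, one per text
)
print(pks)  # the primary keys under which the texts were stored
```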
@@ -647,34 +716,39 @@ class Milvus(VectorStore):
             kwargs["timeout"] = self.timeout
         self._init(**kwargs)
 
-        # Dict to hold all insert columns
-        insert_dict: dict[str, list] = {
-            self._text_field: texts,
-            self._vector_field: embeddings,
-        }
+        insert_list: list[dict] = []
 
-        if not self.auto_id:
-            insert_dict[self._primary_field] = ids  # type: ignore[assignment]
         assert len(texts) == len(
             embeddings
         ), "Mismatched lengths of texts and embeddings."
         if metadatas is not None:
             assert len(texts) == len(
                 metadatas
             ), "Mismatched lengths of texts and metadatas."
 
-        if self._metadata_field is not None:
-            for d in metadatas:  # type: ignore[union-attr]
-                insert_dict.setdefault(self._metadata_field, []).append(d)
-        else:
-            # Collect the metadata into the insert dict.
-            if metadatas is not None:
-                for d in metadatas:
-                    for key, value in d.items():
-                        keys = (
-                            [x for x in self.fields if x != self._primary_field]
-                            if self.auto_id
-                            else [x for x in self.fields]
-                        )
-                        if key in keys:
-                            insert_dict.setdefault(key, []).append(value)
+        for i, text, embedding in zip(range(len(texts)), texts, embeddings):
+            entity_dict = {}
+            metadata = metadatas[i] if metadatas else {}
+            if not self.auto_id:
+                entity_dict[self._primary_field] = ids[i]  # type: ignore[index]
+
+            entity_dict[self._text_field] = text
+            entity_dict[self._vector_field] = embedding
+
+            if self._metadata_field and not self.enable_dynamic_field:
+                entity_dict[self._metadata_field] = metadata
+            else:
+                for key, value in metadata.items():
+                    # if not enable_dynamic_field, skip fields not in the collection.
+                    if not self.enable_dynamic_field and key not in self.fields:
+                        continue
+                    # If enable_dynamic_field, all fields are allowed.
+                    entity_dict[key] = value
+
+            insert_list.append(entity_dict)
 
         # Total insert count
-        vectors: list = insert_dict[self._vector_field]
-        total_count = len(vectors)
+        total_count = len(insert_list)
 
         pks: list[str] = []
@@ -682,15 +756,12 @@ class Milvus(VectorStore):
         for i in range(0, total_count, batch_size):
             # Grab end index
             end = min(i + batch_size, total_count)
-            # Convert dict to list of lists batch for insertion
-            insert_list = [
-                insert_dict[x][i:end] for x in self.fields if x in insert_dict
-            ]
+            batch_insert_list = insert_list[i:end]
             # Insert into the collection.
             try:
                 res: Collection
                 timeout = self.timeout or timeout
-                res = self.col.insert(insert_list, timeout=timeout, **kwargs)
+                res = self.col.insert(batch_insert_list, timeout=timeout, **kwargs)
                 pks.extend(res.primary_keys)
             except MilvusException as e:
                 logger.error(
@@ -699,6 +770,61 @@ class Milvus(VectorStore):
             raise e
         return pks
 
+    def _collection_search(
+        self,
+        embedding: List[float],
+        k: int = 4,
+        param: Optional[dict] = None,
+        expr: Optional[str] = None,
+        timeout: Optional[float] = None,
+        **kwargs: Any,
+    ) -> "pymilvus.client.abstract.SearchResult | None":  # type: ignore[name-defined] # noqa: F821
+        """Perform a search on an embedding and return milvus search results.
+
+        For more information about the search parameters, take a look at the pymilvus
+        documentation found here:
+        https://milvus.io/api-reference/pymilvus/v2.4.x/ORM/Collection/search.md
+
+        Args:
+            embedding (List[float]): The embedding vector being searched.
+            k (int, optional): The amount of results to return. Defaults to 4.
+            param (dict): The search params for the specified index.
+                Defaults to None.
+            expr (str, optional): Filtering expression. Defaults to None.
+            timeout (float, optional): How long to wait before timeout error.
+                Defaults to None.
+            kwargs: Collection.search() keyword arguments.
+
+        Returns:
+            pymilvus.client.abstract.SearchResult: Milvus search result.
+        """
+        if self.col is None:
+            logger.debug("No existing collection to search.")
+            return None
+
+        if param is None:
+            param = self.search_params
+
+        # Determine result metadata fields with PK.
+        if self.enable_dynamic_field:
+            output_fields = ["*"]
+        else:
+            output_fields = self.fields[:]
+            output_fields.remove(self._vector_field)
+        timeout = self.timeout or timeout
+        # Perform the search.
+        res = self.col.search(
+            data=[embedding],
+            anns_field=self._vector_field,
+            param=param,
+            limit=k,
+            expr=expr,
+            output_fields=output_fields,
+            timeout=timeout,
+            **kwargs,
+        )
+        return res
+
     def similarity_search(
         self,
         query: str,
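The new private helper consolidates the raw pymilvus search that was previously duplicated across the public search methods; the `["*"]` output-fields form is what lets dynamic-field metadata come back with each hit. A hedged sketch of calling it directly (it is internal API and may change), reusing `vector_store` and `FakeEmbeddings` from the earlier example:

```python
# Internal helper; shown only to illustrate what the public methods share.
query_vector = FakeEmbeddings(size=128).embed_query("foo")
raw = vector_store._collection_search(embedding=query_vector, k=2)
if raw is not None:
    for hit in raw[0]:  # one result list per query vector
        print(hit.id, hit.score)
```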
@@ -778,7 +904,7 @@ class Milvus(VectorStore):
 
         For more information about the search parameters, take a look at the pymilvus
         documentation found here:
-        https://milvus.io/api-reference/pymilvus/v2.2.6/Collection/search().md
+        https://milvus.io/api-reference/pymilvus/v2.4.x/ORM/Collection/search.md
 
         Args:
             query (str): The text being searched.
@@ -814,11 +940,11 @@ class Milvus(VectorStore):
         timeout: Optional[float] = None,
         **kwargs: Any,
     ) -> List[Tuple[Document, float]]:
-        """Perform a search on a query string and return results with score.
+        """Perform a search on an embedding and return results with score.
 
         For more information about the search parameters, take a look at the pymilvus
         documentation found here:
-        https://milvus.io/api-reference/pymilvus/v2.2.6/Collection/search().md
+        https://milvus.io/api-reference/pymilvus/v2.4.x/ORM/Collection/search.md
 
         Args:
             embedding (List[float]): The embedding vector being searched.
@@ -833,32 +959,14 @@ class Milvus(VectorStore):
         Returns:
             List[Tuple[Document, float]]: Result doc and score.
         """
-        if self.col is None:
-            logger.debug("No existing collection to search.")
-            return []
-
-        if param is None:
-            param = self.search_params
-
-        # Determine result metadata fields with PK.
-        output_fields = self.fields[:]
-        output_fields.remove(self._vector_field)
-        timeout = self.timeout or timeout
-        # Perform the search.
-        res = self.col.search(
-            data=[embedding],
-            anns_field=self._vector_field,
-            param=param,
-            limit=k,
-            expr=expr,
-            output_fields=output_fields,
-            timeout=timeout,
-            **kwargs,
-        )
+        col_search_res = self._collection_search(
+            embedding=embedding, k=k, param=param, expr=expr, timeout=timeout, **kwargs
+        )
         # Organize results.
+        if col_search_res is None:
+            return []
         ret = []
-        for result in res[0]:
-            data = {x: result.entity.get(x) for x in output_fields}
+        for result in col_search_res[0]:
+            data = {x: result.entity.get(x) for x in result.entity.fields}
             doc = self._parse_document(data)
             pair = (doc, result.score)
             ret.append(pair)
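Because the scoring method now routes through `_collection_search`, callers see no public API change. A quick usage sketch against the `vector_store` from the earlier example:

```python
results = vector_store.similarity_search_with_score("foo", k=2)
for doc, score in results:
    print(repr(doc.page_content), score)
```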
@@ -947,40 +1055,27 @@ class Milvus(VectorStore):
         Returns:
             List[Document]: Document results for search.
         """
-        if self.col is None:
-            logger.debug("No existing collection to search.")
-            return []
-
-        if param is None:
-            param = self.search_params
-
-        # Determine result metadata fields.
-        output_fields = self.fields[:]
-        output_fields.remove(self._vector_field)
-        timeout = self.timeout or timeout
-        # Perform the search.
-        res = self.col.search(
-            data=[embedding],
-            anns_field=self._vector_field,
+        col_search_res = self._collection_search(
+            embedding=embedding,
+            k=fetch_k,
             param=param,
-            limit=fetch_k,
             expr=expr,
-            output_fields=output_fields,
             timeout=timeout,
             **kwargs,
         )
         # Organize results.
+        if col_search_res is None:
+            return []
         ids = []
         documents = []
         scores = []
-        for result in res[0]:
-            data = {x: result.entity.get(x) for x in output_fields}
+        for result in col_search_res[0]:
+            data = {x: result.entity.get(x) for x in result.entity.fields}
             doc = self._parse_document(data)
             documents.append(doc)
             scores.append(result.score)
             ids.append(result.id)
 
-        vectors = self.col.query(
+        vectors = self.col.query(  # type: ignore[union-attr]
             expr=f"{self._primary_field} in {ids}",
             output_fields=[self._primary_field, self._vector_field],
             timeout=timeout,
@@ -1089,6 +1184,8 @@ class Milvus(VectorStore):
         return vector_db
 
     def _parse_document(self, data: dict) -> Document:
+        if self._vector_field in data:
+            data.pop(self._vector_field)
         return Document(
             page_content=data.pop(self._text_field),
             metadata=data.pop(self._metadata_field) if self._metadata_field else data,
libs/partners/milvus/poetry.lock (generated, 17 lines changed)
@@ -308,7 +308,7 @@ files = [
 
 [[package]]
 name = "langchain-core"
-version = "0.2.20"
+version = "0.2.23"
 description = "Building applications with LLMs through composability"
 optional = false
 python-versions = ">=3.8.1,<4.0"
@@ -332,13 +332,13 @@ url = "../../core"
 
 [[package]]
 name = "langsmith"
-version = "0.1.88"
+version = "0.1.93"
 description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform."
 optional = false
 python-versions = "<4.0,>=3.8.1"
 files = [
-    {file = "langsmith-0.1.88-py3-none-any.whl", hash = "sha256:460ebb7de440afd150fcea8f54ca8779821f2228cd59e149e5845c9dbe06db16"},
-    {file = "langsmith-0.1.88.tar.gz", hash = "sha256:28a07dec19197f4808aa2628d5a3ccafcbe14cc137aef0e607bbd128e7907821"},
+    {file = "langsmith-0.1.93-py3-none-any.whl", hash = "sha256:811210b9d5f108f36431bd7b997eb9476a9ecf5a2abd7ddbb606c1cdcf0f43ce"},
+    {file = "langsmith-0.1.93.tar.gz", hash = "sha256:285b6ad3a54f50fa8eb97b5f600acc57d0e37e139dd8cf2111a117d0435ba9b4"},
 ]
 
 [package.dependencies]
@@ -1104,18 +1104,19 @@ test = ["Cython", "array-api-strict", "asv", "gmpy2", "hypothesis (>=6.30)", "me
 
 [[package]]
 name = "setuptools"
-version = "70.3.0"
+version = "71.1.0"
 description = "Easily download, build, install, upgrade, and uninstall Python packages"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "setuptools-70.3.0-py3-none-any.whl", hash = "sha256:fe384da74336c398e0d956d1cae0669bc02eed936cdb1d49b57de1990dc11ffc"},
-    {file = "setuptools-70.3.0.tar.gz", hash = "sha256:f171bab1dfbc86b132997f26a119f6056a57950d058587841a0082e8830f9dc5"},
+    {file = "setuptools-71.1.0-py3-none-any.whl", hash = "sha256:33874fdc59b3188304b2e7c80d9029097ea31627180896fb549c578ceb8a0855"},
+    {file = "setuptools-71.1.0.tar.gz", hash = "sha256:032d42ee9fb536e33087fb66cac5f840eb9391ed05637b3f2a76a7c8fb477936"},
 ]
 
 [package.extras]
 core = ["importlib-metadata (>=6)", "importlib-resources (>=5.10.2)", "jaraco.text (>=3.7)", "more-itertools (>=8.8)", "ordered-set (>=3.1.1)", "packaging (>=24)", "platformdirs (>=2.6.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"]
 doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"]
-test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "importlib-metadata", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test", "mypy (==1.10.0)", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy", "pytest-perf", "pytest-ruff (>=0.3.2)", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"]
+test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "importlib-metadata", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test", "mypy (==1.11.*)", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy", "pytest-perf", "pytest-ruff (<0.4)", "pytest-ruff (>=0.2.1)", "pytest-ruff (>=0.3.2)", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"]
 
 [[package]]
 name = "six"
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "langchain-milvus"
-version = "0.1.2"
+version = "0.1.3"
 description = "An integration package connecting Milvus and LangChain"
 authors = []
 readme = "README.md"
@@ -1,6 +1,7 @@
 """Test Milvus functionality."""
 from typing import Any, List, Optional
 
+import pytest
 from langchain_core.documents import Document
 
 from langchain_milvus.vectorstores import Milvus
@@ -27,6 +28,7 @@ def _milvus_from_texts(
     metadatas: Optional[List[dict]] = None,
     ids: Optional[List[str]] = None,
     drop: bool = True,
+    **kwargs: Any,
 ) -> Milvus:
     return Milvus.from_texts(
         fake_texts,
@@ -36,6 +38,7 @@ def _milvus_from_texts(
         # connection_args={"uri": "http://127.0.0.1:19530"},
         connection_args={"uri": "./milvus_demo.db"},
         drop_old=drop,
+        **kwargs,
     )
@@ -50,6 +53,15 @@ def test_milvus() -> None:
     assert_docs_equal_without_pk(output, [Document(page_content="foo")])
 
 
+def test_milvus_vector_search() -> None:
+    """Test end to end construction and search by vector."""
+    docsearch = _milvus_from_texts()
+    output = docsearch.similarity_search_by_vector(
+        FakeEmbeddings().embed_query("foo"), k=1
+    )
+    assert_docs_equal_without_pk(output, [Document(page_content="foo")])
+
+
 def test_milvus_with_metadata() -> None:
     """Test with metadata"""
     docsearch = _milvus_from_texts(metadatas=[{"label": "test"}] * len(fake_texts))
@@ -110,6 +122,21 @@ def test_milvus_max_marginal_relevance_search() -> None:
     )
 
 
+def test_milvus_max_marginal_relevance_search_with_dynamic_field() -> None:
+    """Test end to end construction and MRR search with enabling dynamic field."""
+    texts = ["foo", "bar", "baz"]
+    metadatas = [{"page": i} for i in range(len(texts))]
+    docsearch = _milvus_from_texts(metadatas=metadatas, enable_dynamic_field=True)
+    output = docsearch.max_marginal_relevance_search("foo", k=2, fetch_k=3)
+    assert_docs_equal_without_pk(
+        output,
+        [
+            Document(page_content="foo", metadata={"page": 0}),
+            Document(page_content="baz", metadata={"page": 2}),
+        ],
+    )
+
+
 def test_milvus_add_extra() -> None:
     """Test end to end construction and MRR search."""
     texts = ["foo", "bar", "baz"]
@@ -123,7 +150,7 @@ def test_milvus_add_extra() -> None:
 
 
 def test_milvus_no_drop() -> None:
-    """Test end to end construction and MRR search."""
+    """Test construction without dropping old data."""
     texts = ["foo", "bar", "baz"]
     metadatas = [{"page": i} for i in range(len(texts))]
     docsearch = _milvus_from_texts(metadatas=metadatas)
@@ -171,14 +198,95 @@ def test_milvus_upsert_entities() -> None:
     assert len(ids) == 2  # type: ignore[arg-type]
 
 
+def test_milvus_enable_dynamic_field() -> None:
+    """Test end to end construction and enable dynamic field"""
+    texts = ["foo", "bar", "baz"]
+    metadatas = [{"id": i} for i in range(len(texts))]
+    docsearch = _milvus_from_texts(metadatas=metadatas, enable_dynamic_field=True)
+    output = docsearch.similarity_search("foo", k=10)
+    assert len(output) == 3
+
+    # When enable dynamic field, any new field data will be added to the collection.
+    new_metadatas = [{"id_new": i} for i in range(len(texts))]
+    docsearch.add_texts(texts, new_metadatas)
+
+    output = docsearch.similarity_search("foo", k=10)
+    assert len(output) == 6
+
+    assert set(docsearch.fields) == {
+        docsearch._primary_field,
+        docsearch._text_field,
+        docsearch._vector_field,
+    }
+
+
+def test_milvus_disable_dynamic_field() -> None:
+    """Test end to end construction and disable dynamic field"""
+    texts = ["foo", "bar", "baz"]
+    metadatas = [{"id": i} for i in range(len(texts))]
+    docsearch = _milvus_from_texts(metadatas=metadatas, enable_dynamic_field=False)
+    output = docsearch.similarity_search("foo", k=10)
+    assert len(output) == 3
+    # ["pk", "text", "vector", "id"]
+    assert set(docsearch.fields) == {
+        docsearch._primary_field,
+        docsearch._text_field,
+        docsearch._vector_field,
+        "id",
+    }
+
+    # Try to add new fields "id_new", but since dynamic field is disabled,
+    # all fields in the collection is specified as ["pk", "text", "vector", "id"],
+    # new field information "id_new" will not be added.
+    new_metadatas = [{"id": i, "id_new": i} for i in range(len(texts))]
+    docsearch.add_texts(texts, new_metadatas)
+    output = docsearch.similarity_search("foo", k=10)
+    assert len(output) == 6
+    for doc in output:
+        assert set(doc.metadata.keys()) == {"id", "pk"}  # `id_new` is not added.
+
+    # When disable dynamic field,
+    # missing data of the created fields "id", will raise an exception.
+    with pytest.raises(Exception):
+        new_metadatas = [{"id_new": i} for i in range(len(texts))]
+        docsearch.add_texts(texts, new_metadatas)
+
+
+def test_milvus_metadata_field() -> None:
+    """Test end to end construction and use metadata field"""
+    texts = ["foo", "bar", "baz"]
+    metadatas = [{"id": i} for i in range(len(texts))]
+    docsearch = _milvus_from_texts(metadatas=metadatas, metadata_field="metadata")
+    output = docsearch.similarity_search("foo", k=10)
+    assert len(output) == 3
+
+    new_metadatas = [{"id_new": i} for i in range(len(texts))]
+    docsearch.add_texts(texts, new_metadatas)
+
+    output = docsearch.similarity_search("foo", k=10)
+    assert len(output) == 6
+
+    assert set(docsearch.fields) == {
+        docsearch._primary_field,
+        docsearch._text_field,
+        docsearch._vector_field,
+        docsearch._metadata_field,
+    }
+
+
 # if __name__ == "__main__":
 #     test_milvus()
+#     test_milvus_vector_search()
 #     test_milvus_with_metadata()
 #     test_milvus_with_id()
 #     test_milvus_with_score()
 #     test_milvus_max_marginal_relevance_search()
+#     test_milvus_max_marginal_relevance_search_with_dynamic_field()
 #     test_milvus_add_extra()
 #     test_milvus_no_drop()
 #     test_milvus_get_pks()
 #     test_milvus_delete_entities()
 #     test_milvus_upsert_entities()
+#     test_milvus_enable_dynamic_field()
+#     test_milvus_disable_dynamic_field()
+#     test_milvus_metadata_field()
Some files were not shown because too many files have changed in this diff.