Merge branch 'master' into copilot/fix-31398

commit da536abde6
@@ -237,6 +237,157 @@
 "    print(chunk.text(), end=\"|\")"
 ]
 },
+{
+"cell_type": "markdown",
+"id": "a009400a",
+"metadata": {},
+"source": [
+"## Extended Thinking \n",
+"\n",
+"This guide focuses on implementing Extended Thinking using AWS Bedrock with LangChain's `ChatBedrockConverse` integration.\n",
+"\n",
+"### Supported Models\n",
+"\n",
+"Extended Thinking is available for the following Claude models on AWS Bedrock:\n",
+"\n",
+"| Model | Model ID |\n",
+"|-------|----------|\n",
+"| **Claude Opus 4** | `anthropic.claude-opus-4-20250514-v1:0` |\n",
+"| **Claude Sonnet 4** | `anthropic.claude-sonnet-4-20250514-v1:0` |\n",
+"| **Claude 3.7 Sonnet** | `us.anthropic.claude-3-7-sonnet-20250219-v1:0` |\n"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"id": "abc790ca",
+"metadata": {},
+"outputs": [
+{
+"data": {
+"text/plain": [
+"AIMessage(content=[{'type': 'reasoning_content', 'reasoning_content': {'text': 'The user wants me to translate \"I love programming\" from English to French.\\n\\n\"I love\" translates to \"J\\'aime\" or \"J\\'adore\" in French\\n\"Programming\" translates to \"la programmation\" in French\\n\\nSo the translation would be \"J\\'aime la programmation\" or \"J\\'adore la programmation\"\\n\\nBoth are correct, but \"J\\'aime\" is more commonly used for expressing love/liking something.', 'signature': 'EpgECkgIBRABGAIqQDub6nRpiusjbxZONXVlGXg5ZjUY1Eka1Yp4oBBHmRqGjId+StTBPuwD3CXLyb2rUDRhSc3hTpTM4krVqlFZrIsSDI/WLa1mu38DDqt1HRoMUjm+jF+03MZFD+WQIjBZtHaYiqgY0JQgU0NdXDwwBSZX44gXwuX9EDekh12VM1ysq+WxVtkp0WMU0dKCJo4q/QKpguFFlZtEZjF9PftzOgTIyy+1H5pY+Dsb2pnrGtfAgwTR7PuZ/d8ibY0A8ywjVEZtGm+PtcnCJiK53BWxhGYOtxnfN/RRKtuZhvPQj+QQOWeRWqH+GcbeISCgyTYn5WG75fmVL707byjQZ3IuhMfyZWmiTFE2fc4Jn/bxX7OsU+DbTWv2K1a+g7eW+dvQwYzCBO1hfEn4699/CHII8UAcHh1L3bnxOWGKkeVQ0KMfgfwVb0vuGG4QBYKIDs87QL414i69D68DxqCTZAHK4lMA6Xs7zW+m0MMCct4iHRnJI8kat1mlBEpMz6NRo9KacZJXpLJxofIU4ho7R5/QHccdni0IidNkUtrLBSB3toNJoQEcStts2UR67NHTxn47zk1/hi4v4Ahtw9OEQFONaH6XaG1wjpqEdjQ8/Tmg9eB6ZLoQ4sQfhcMF8Uo3hHbBY8jA3jZ+9pa9VbuVbO6Eup8NX3XXZm2nk50OMWX7hBwgBmlZbEew6pWFu7+13EkYAQ=='}}, {'type': 'text', 'text': \"J'aime la programmation.\"}], additional_kwargs={}, response_metadata={'ResponseMetadata': {'RequestId': '169ca92f-19c9-480c-9fc3-4e5284507e67', 'HTTPStatusCode': 200, 'HTTPHeaders': {'date': 'Tue, 22 Jul 2025 04:40:22 GMT', 'content-type': 'application/json', 'content-length': '1498', 'connection': 'keep-alive', 'x-amzn-requestid': '169ca92f-19c9-480c-9fc3-4e5284507e67'}, 'RetryAttempts': 0}, 'stopReason': 'end_turn', 'metrics': {'latencyMs': [2839]}, 'model_name': 'us.anthropic.claude-sonnet-4-20250514-v1:0'}, id='run--42e05e5d-ba86-4dce-9e29-2a4ba32c5804-0', usage_metadata={'input_tokens': 58, 'output_tokens': 122, 'total_tokens': 180, 'input_token_details': {'cache_creation': 0, 'cache_read': 0}})"
+]
+},
+"execution_count": 3,
+"metadata": {},
+"output_type": "execute_result"
+}
+],
+"source": [
+"from langchain_aws import ChatBedrockConverse\n",
+"\n",
+"llm = ChatBedrockConverse(\n",
+"    model_id=\"us.anthropic.claude-sonnet-4-20250514-v1:0\",\n",
+"    region_name=\"us-west-2\",\n",
+"    max_tokens=4096,\n",
+"    additional_model_request_fields={\n",
+"        \"thinking\": {\"type\": \"enabled\", \"budget_tokens\": 1024},\n",
+"    },\n",
+")\n",
+"\n",
+"ai_msg = llm.invoke(messages)\n",
+"ai_msg"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 4,
+"id": "7fb27b941602401d91542211134fc71a",
+"metadata": {},
+"outputs": [
+{
+"name": "stdout",
+"output_type": "stream",
+"text": [
+"[{'type': 'reasoning_content', 'reasoning_content': {'text': 'The user wants me to translate \"I love programming\" from English to French.\\n\\n\"I love\" translates to \"J\\'aime\" or \"J\\'adore\" in French\\n\"Programming\" translates to \"la programmation\" in French\\n\\nSo the translation would be \"J\\'aime la programmation\" or \"J\\'adore la programmation\"\\n\\nBoth are correct, but \"J\\'aime\" is more commonly used for expressing love/liking something.', 'signature': 'EpgECkgIBRABGAIqQDub6nRpiusjbxZONXVlGXg5ZjUY1Eka1Yp4oBBHmRqGjId+StTBPuwD3CXLyb2rUDRhSc3hTpTM4krVqlFZrIsSDI/WLa1mu38DDqt1HRoMUjm+jF+03MZFD+WQIjBZtHaYiqgY0JQgU0NdXDwwBSZX44gXwuX9EDekh12VM1ysq+WxVtkp0WMU0dKCJo4q/QKpguFFlZtEZjF9PftzOgTIyy+1H5pY+Dsb2pnrGtfAgwTR7PuZ/d8ibY0A8ywjVEZtGm+PtcnCJiK53BWxhGYOtxnfN/RRKtuZhvPQj+QQOWeRWqH+GcbeISCgyTYn5WG75fmVL707byjQZ3IuhMfyZWmiTFE2fc4Jn/bxX7OsU+DbTWv2K1a+g7eW+dvQwYzCBO1hfEn4699/CHII8UAcHh1L3bnxOWGKkeVQ0KMfgfwVb0vuGG4QBYKIDs87QL414i69D68DxqCTZAHK4lMA6Xs7zW+m0MMCct4iHRnJI8kat1mlBEpMz6NRo9KacZJXpLJxofIU4ho7R5/QHccdni0IidNkUtrLBSB3toNJoQEcStts2UR67NHTxn47zk1/hi4v4Ahtw9OEQFONaH6XaG1wjpqEdjQ8/Tmg9eB6ZLoQ4sQfhcMF8Uo3hHbBY8jA3jZ+9pa9VbuVbO6Eup8NX3XXZm2nk50OMWX7hBwgBmlZbEew6pWFu7+13EkYAQ=='}}, {'type': 'text', 'text': \"J'aime la programmation.\"}]\n"
+]
+}
+],
+"source": [
+"print(ai_msg.content)"
+]
+},
+{
+"cell_type": "markdown",
+"id": "f1eb1ce1",
+"metadata": {},
+"source": [
+"### How extended thinking works\n",
+"\n",
+"When extended thinking is turned on, Claude creates thinking content blocks where it outputs its internal reasoning. Claude incorporates insights from this reasoning before crafting a final response. The API response will include thinking content blocks, followed by text content blocks."
+]
+},
+{
+"cell_type": "code",
+"execution_count": 5,
+"id": "951d8206",
+"metadata": {},
+"outputs": [
+{
+"data": {
+"text/plain": [
+"[('system',\n",
+"  'You are a helpful assistant that translates English to French. Translate the user sentence.'),\n",
+" ('human', 'I love programming.'),\n",
+" ('ai',\n",
+"  [{'type': 'reasoning_content',\n",
+"    'reasoning_content': {'text': 'The user wants me to translate \"I love programming\" from English to French.\\n\\n\"I love\" translates to \"J\\'aime\" or \"J\\'adore\" in French\\n\"Programming\" translates to \"la programmation\" in French\\n\\nSo the translation would be \"J\\'aime la programmation\" or \"J\\'adore la programmation\"\\n\\nBoth are correct, but \"J\\'aime\" is more commonly used for expressing love/liking something.',\n",
+"     'signature': 'EpgECkgIBRABGAIqQDub6nRpiusjbxZONXVlGXg5ZjUY1Eka1Yp4oBBHmRqGjId+StTBPuwD3CXLyb2rUDRhSc3hTpTM4krVqlFZrIsSDI/WLa1mu38DDqt1HRoMUjm+jF+03MZFD+WQIjBZtHaYiqgY0JQgU0NdXDwwBSZX44gXwuX9EDekh12VM1ysq+WxVtkp0WMU0dKCJo4q/QKpguFFlZtEZjF9PftzOgTIyy+1H5pY+Dsb2pnrGtfAgwTR7PuZ/d8ibY0A8ywjVEZtGm+PtcnCJiK53BWxhGYOtxnfN/RRKtuZhvPQj+QQOWeRWqH+GcbeISCgyTYn5WG75fmVL707byjQZ3IuhMfyZWmiTFE2fc4Jn/bxX7OsU+DbTWv2K1a+g7eW+dvQwYzCBO1hfEn4699/CHII8UAcHh1L3bnxOWGKkeVQ0KMfgfwVb0vuGG4QBYKIDs87QL414i69D68DxqCTZAHK4lMA6Xs7zW+m0MMCct4iHRnJI8kat1mlBEpMz6NRo9KacZJXpLJxofIU4ho7R5/QHccdni0IidNkUtrLBSB3toNJoQEcStts2UR67NHTxn47zk1/hi4v4Ahtw9OEQFONaH6XaG1wjpqEdjQ8/Tmg9eB6ZLoQ4sQfhcMF8Uo3hHbBY8jA3jZ+9pa9VbuVbO6Eup8NX3XXZm2nk50OMWX7hBwgBmlZbEew6pWFu7+13EkYAQ=='}},\n",
+"   {'type': 'text', 'text': \"J'aime la programmation.\"}]),\n",
+" ('human', 'I love AI')]"
+]
+},
+"execution_count": 5,
+"metadata": {},
+"output_type": "execute_result"
+}
+],
+"source": [
+"next_messages = messages + [(\"ai\", ai_msg.content), (\"human\", \"I love AI\")]\n",
+"next_messages"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 6,
+"id": "9d8c506c",
+"metadata": {},
+"outputs": [
+{
+"data": {
+"text/plain": [
+"AIMessage(content=[{'type': 'reasoning_content', 'reasoning_content': {'text': 'The user wants me to translate \"I love AI\" from English to French. \\n\\n\"I love\" translates to \"J\\'aime\" in French.\\n\"AI\" stands for \"Artificial Intelligence\" which in French is \"Intelligence Artificielle\" or abbreviated as \"IA\".\\n\\nSo the translation would be \"J\\'aime l\\'IA\" (using the abbreviation) or \"J\\'aime l\\'intelligence artificielle\" (using the full term).\\n\\nI think using the abbreviation \"IA\" would be more natural and commonly used, similar to how we use \"AI\" in English.', 'signature': 'EoMFCkgIBRABGAIqQOwp9d0YWm8NctfL9lf1MeWR1OxeAKB3Es19Lei2bdHQ4W0ezTK4wVcm/VLM+7kICX2aB9RAmUD5sJxoKHfdX38SDIR/aSJhHZifGOHqwBoMhzNsyPmB7FFNvNESIjBMVRpRUDTFGn5+nL0x5CjWhKA8H/XFnKYRrUyMYb1n7lCQA7BeEjsaWwxZ3YV9rZsq6APuaXaA40Bt+KnpPOo06r72L/DceliRAw1a6cuT5E0Dv0eIAOYblbXaKYn0jy8UzTUuctOP3As/zT5pK5yC+Rx0d2l9kuP3+COERM98u0R04bWn6qh0HcyE+zNc7c4YWkncjdmOxF/j6OxhcMhZEoX2035v9eUJ9+O/u1xaff08YAEfg7TGWrSIwalpjs1mzWA9ijKg8YyjmXjWnMeFn0z6LDqLaaKc+nC8IN9SLwA/eHpf/ayoEgmogn7gWzijW8MDbnlwpQDS75wK7An3RMEcpWD/OXrKb1EhWKEmOBro5BOTGsfK3ZDveRL0aCBINdOu+AHMQDFXJ04cRDEjs9GE3YC218UcFtS42TFO7/Ct5CYCTknETPx93zcGTOM2VPOZ02Uem1A7Nda/Fa4l2b03EUEtwlgske5K1RbeohN9sclxYsxX5nGJ5sSZurVCk9plkyTG3aiPvbohfVVarVgukKoKwoMDYz5rHVscWlUe+qeqJE/H+KKlhtzO+lWWDN4knqeYsZ55flO5Hq4vT20QCYnF8hcUx07ngGKXuGID9n5kFnLsP8sBUHYKm7bmopFFZvfPcmsqiV9yvG/8Ly9DHbmY5ZwxyrbdJCFT6HD6kq/mEBDftZ6dhmyKMimJBfbTj7d3VAILbRgB'}}, {'type': 'text', 'text': \"J'aime l'IA.\"}], additional_kwargs={}, response_metadata={'ResponseMetadata': {'RequestId': '023799d6-7ed5-4e49-8ad7-7460a49a9a45', 'HTTPStatusCode': 200, 'HTTPHeaders': {'date': 'Tue, 22 Jul 2025 04:40:34 GMT', 'content-type': 'application/json', 'content-length': '1737', 'connection': 'keep-alive', 'x-amzn-requestid': '023799d6-7ed5-4e49-8ad7-7460a49a9a45'}, 'RetryAttempts': 0}, 'stopReason': 'end_turn', 'metrics': {'latencyMs': [3473]}, 'model_name': 'us.anthropic.claude-sonnet-4-20250514-v1:0'}, id='run--ca8abc92-60a9-4bd1-93b4-7788496eda7a-0', usage_metadata={'input_tokens': 75, 'output_tokens': 153, 'total_tokens': 228, 'input_token_details': {'cache_creation': 0, 'cache_read': 0}})"
+]
+},
+"execution_count": 6,
+"metadata": {},
+"output_type": "execute_result"
+}
+],
+"source": [
+"ai_msg = llm.invoke(next_messages)\n",
+"ai_msg"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 7,
+"id": "e53e3ebb",
+"metadata": {},
+"outputs": [
+{
+"name": "stdout",
+"output_type": "stream",
+"text": [
+"[{'type': 'reasoning_content', 'reasoning_content': {'text': 'The user wants me to translate \"I love AI\" from English to French. \\n\\n\"I love\" translates to \"J\\'aime\" in French.\\n\"AI\" stands for \"Artificial Intelligence\" which in French is \"Intelligence Artificielle\" or abbreviated as \"IA\".\\n\\nSo the translation would be \"J\\'aime l\\'IA\" (using the abbreviation) or \"J\\'aime l\\'intelligence artificielle\" (using the full term).\\n\\nI think using the abbreviation \"IA\" would be more natural and commonly used, similar to how we use \"AI\" in English.', 'signature': 'EoMFCkgIBRABGAIqQOwp9d0YWm8NctfL9lf1MeWR1OxeAKB3Es19Lei2bdHQ4W0ezTK4wVcm/VLM+7kICX2aB9RAmUD5sJxoKHfdX38SDIR/aSJhHZifGOHqwBoMhzNsyPmB7FFNvNESIjBMVRpRUDTFGn5+nL0x5CjWhKA8H/XFnKYRrUyMYb1n7lCQA7BeEjsaWwxZ3YV9rZsq6APuaXaA40Bt+KnpPOo06r72L/DceliRAw1a6cuT5E0Dv0eIAOYblbXaKYn0jy8UzTUuctOP3As/zT5pK5yC+Rx0d2l9kuP3+COERM98u0R04bWn6qh0HcyE+zNc7c4YWkncjdmOxF/j6OxhcMhZEoX2035v9eUJ9+O/u1xaff08YAEfg7TGWrSIwalpjs1mzWA9ijKg8YyjmXjWnMeFn0z6LDqLaaKc+nC8IN9SLwA/eHpf/ayoEgmogn7gWzijW8MDbnlwpQDS75wK7An3RMEcpWD/OXrKb1EhWKEmOBro5BOTGsfK3ZDveRL0aCBINdOu+AHMQDFXJ04cRDEjs9GE3YC218UcFtS42TFO7/Ct5CYCTknETPx93zcGTOM2VPOZ02Uem1A7Nda/Fa4l2b03EUEtwlgske5K1RbeohN9sclxYsxX5nGJ5sSZurVCk9plkyTG3aiPvbohfVVarVgukKoKwoMDYz5rHVscWlUe+qeqJE/H+KKlhtzO+lWWDN4knqeYsZ55flO5Hq4vT20QCYnF8hcUx07ngGKXuGID9n5kFnLsP8sBUHYKm7bmopFFZvfPcmsqiV9yvG/8Ly9DHbmY5ZwxyrbdJCFT6HD6kq/mEBDftZ6dhmyKMimJBfbTj7d3VAILbRgB'}}, {'type': 'text', 'text': \"J'aime l'IA.\"}]\n"
+]
+}
+],
+"source": [
+"print(ai_msg.content)"
+]
+},
+{
+"cell_type": "markdown",
+"id": "a77519e5-897d-41a0-a9bb-55300fa79efc",
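As a quick orientation to the cells added above, here is a minimal runnable sketch of the pattern they demonstrate. The `messages` list is reconstructed from the (role, content) tuples visible in the cell outputs; everything else mirrors the new notebook source.

```python
from langchain_aws import ChatBedrockConverse

# `messages` mirrors the (role, content) tuples shown in the cell outputs above.
messages = [
    (
        "system",
        "You are a helpful assistant that translates English to French. "
        "Translate the user sentence.",
    ),
    ("human", "I love programming."),
]

llm = ChatBedrockConverse(
    model_id="us.anthropic.claude-sonnet-4-20250514-v1:0",
    region_name="us-west-2",
    max_tokens=4096,
    # Extended Thinking is opted into via additional_model_request_fields;
    # budget_tokens caps how many tokens the model may spend on reasoning.
    additional_model_request_fields={
        "thinking": {"type": "enabled", "budget_tokens": 1024},
    },
)

ai_msg = llm.invoke(messages)
# content is a list of blocks: a 'reasoning_content' block followed by 'text'.
print(ai_msg.content)
```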
@@ -379,7 +530,7 @@
 ],
 "metadata": {
 "kernelspec": {
-"display_name": "Python 3 (ipykernel)",
+"display_name": ".venv",
 "language": "python",
 "name": "python3"
 },
@@ -393,7 +544,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.10.4"
+"version": "3.12.9"
 }
 },
 "nbformat": 4,
@@ -54,7 +54,9 @@
 "cell_type": "markdown",
 "id": "c84fb993",
 "metadata": {},
-"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
+"source": [
+"To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
+]
 },
 {
 "cell_type": "code",
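The hunk above (and its several repeats below) only normalizes the LangSmith markdown cell's `source` from a bare string to the canonical list-of-lines form. For reference, a minimal sketch of the tracing setup that cell describes, assuming the environment-variable convention used in the commented-out cell later in this diff:

```python
import getpass
import os

# Assumed convention, mirroring the commented-out cell shown later in this diff:
# LangSmith tracing is enabled by setting these variables before model calls.
if not os.environ.get("LANGCHAIN_API_KEY"):
    os.environ["LANGCHAIN_API_KEY"] = getpass.getpass("Enter your LangSmith API key: ")
os.environ["LANGCHAIN_TRACING_V2"] = "true"
```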
@@ -118,7 +120,7 @@
 "source": [
 "## Indexing and Retrieval\n",
 "\n",
-"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/).\n",
+"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/rag).\n",
 "\n",
 "Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document in the `InMemoryVectorStore`."
 ]
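The corrected link aside, this "Indexing and Retrieval" cell describes the flow the netmind hunks later in this diff show in full. A minimal offline sketch of that flow; the fake embedding class is a stand-in (the notebooks use real provider objects such as `NetmindEmbeddings`):

```python
from langchain_core.embeddings import DeterministicFakeEmbedding
from langchain_core.vectorstores import InMemoryVectorStore

# Stand-in embeddings so the sketch runs offline; the notebooks substitute a
# provider-backed embeddings object initialized earlier in each guide.
embeddings = DeterministicFakeEmbedding(size=256)

text = "LangChain is the framework for building context-aware reasoning applications"

# Index the sample document, then retrieve it with a similarity search.
vectorstore = InMemoryVectorStore.from_texts([text], embedding=embeddings)
retriever = vectorstore.as_retriever()
retrieved_documents = retriever.invoke("What is LangChain?")
print(retrieved_documents[0].page_content)
```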
@@ -257,7 +259,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.11.4"
+"version": "3.9.6"
 }
 },
 "nbformat": 4,
@@ -125,7 +125,7 @@
 "source": [
 "## Indexing and Retrieval\n",
 "\n",
-"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/).\n",
+"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/rag).\n",
 "\n",
 "Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document in the `InMemoryVectorStore`."
 ]
@@ -264,7 +264,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.10.5"
+"version": "3.9.6"
 }
 },
 "nbformat": 4,
@@ -54,7 +54,9 @@
 "cell_type": "markdown",
 "id": "c84fb993",
 "metadata": {},
-"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
+"source": [
+"To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
+]
 },
 {
 "cell_type": "code",
@@ -118,7 +120,7 @@
 "source": [
 "## Indexing and Retrieval\n",
 "\n",
-"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/).\n",
+"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/rag).\n",
 "\n",
 "Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document in the `InMemoryVectorStore`."
 ]
@@ -257,7 +259,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.11.4"
+"version": "3.9.6"
 }
 },
 "nbformat": 4,
@@ -203,7 +203,7 @@
 "source": [
 "## Indexing and Retrieval\n",
 "\n",
-"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/).\n",
+"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/rag).\n",
 "\n",
 "Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document in the `InMemoryVectorStore`."
 ]
@@ -327,7 +327,7 @@
 ],
 "metadata": {
 "kernelspec": {
-"display_name": "langchain_ibm",
+"display_name": "Python 3 (ipykernel)",
 "language": "python",
 "name": "python3"
 },
@@ -341,9 +341,9 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.11.12"
+"version": "3.9.6"
 }
 },
 "nbformat": 4,
-"nbformat_minor": 2
+"nbformat_minor": 4
 }
@@ -132,7 +132,7 @@
 "source": [
 "## Indexing and Retrieval\n",
 "\n",
-"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/).\n",
+"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/rag).\n",
 "\n",
 "Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document in the `InMemoryVectorStore`."
 ]
@@ -286,7 +286,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.10.5"
+"version": "3.9.6"
 }
 },
 "nbformat": 4,
@@ -53,7 +53,9 @@
 "cell_type": "markdown",
 "id": "c84fb993",
 "metadata": {},
-"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
+"source": [
+"To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
+]
 },
 {
 "cell_type": "code",
@@ -117,7 +119,7 @@
 "source": [
 "## Indexing and Retrieval\n",
 "\n",
-"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/).\n",
+"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/rag).\n",
 "\n",
 "Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document in the `InMemoryVectorStore`."
 ]
@@ -256,7 +258,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.11.4"
+"version": "3.9.6"
 }
 },
 "nbformat": 4,
@@ -128,7 +128,7 @@
 "source": [
 "## Indexing and Retrieval\n",
 "\n",
-"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/).\n",
+"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/rag).\n",
 "\n",
 "Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document in the `InMemoryVectorStore`."
 ]
@@ -277,7 +277,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.10.16"
+"version": "3.9.6"
 }
 },
 "nbformat": 4,
@@ -112,7 +112,7 @@
 "source": [
 "## Indexing and Retrieval\n",
 "\n",
-"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/).\n",
+"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/rag).\n",
 "\n",
 "Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document in the `InMemoryVectorStore`."
 ]
@@ -249,7 +249,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.12.3"
+"version": "3.9.6"
 }
 },
 "nbformat": 4,
@@ -37,6 +37,7 @@
 },
 {
 "cell_type": "code",
+"execution_count": 1,
 "id": "36521c2a",
 "metadata": {
 "ExecuteTime": {
@@ -44,15 +45,14 @@
 "start_time": "2025-03-20T01:53:27.764291Z"
 }
 },
+"outputs": [],
 "source": [
 "import getpass\n",
 "import os\n",
 "\n",
 "if not os.getenv(\"NETMIND_API_KEY\"):\n",
 "    os.environ[\"NETMIND_API_KEY\"] = getpass.getpass(\"Enter your Netmind API key: \")"
-],
-"outputs": [],
-"execution_count": 1
+]
 },
 {
 "cell_type": "markdown",
@@ -64,6 +64,7 @@
 },
 {
 "cell_type": "code",
+"execution_count": 2,
 "id": "39a4953b",
 "metadata": {
 "ExecuteTime": {
@@ -71,12 +72,11 @@
 "start_time": "2025-03-20T01:53:32.141858Z"
 }
 },
+"outputs": [],
 "source": [
 "# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
 "# os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
-],
-"outputs": [],
-"execution_count": 2
+]
 },
 {
 "cell_type": "markdown",
@@ -90,6 +90,7 @@
 },
 {
 "cell_type": "code",
+"execution_count": 3,
 "id": "64853226",
 "metadata": {
 "ExecuteTime": {
@@ -97,22 +98,21 @@
 "start_time": "2025-03-20T01:53:36.171640Z"
 }
 },
-"source": [
-"%pip install -qU langchain-netmind"
-],
 "outputs": [
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
 "\r\n",
-"\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m A new release of pip is available: \u001B[0m\u001B[31;49m24.0\u001B[0m\u001B[39;49m -> \u001B[0m\u001B[32;49m25.0.1\u001B[0m\r\n",
-"\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m To update, run: \u001B[0m\u001B[32;49mpip install --upgrade pip\u001B[0m\r\n",
+"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m24.0\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m25.0.1\u001b[0m\r\n",
+"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\r\n",
 "Note: you may need to restart the kernel to use updated packages.\n"
 ]
 }
 ],
-"execution_count": 3
+"source": [
+"%pip install -qU langchain-netmind"
+]
 },
 {
 "cell_type": "markdown",
@@ -126,6 +126,7 @@
 },
 {
 "cell_type": "code",
+"execution_count": 4,
 "id": "9ea7a09b",
 "metadata": {
 "ExecuteTime": {
@@ -133,15 +134,14 @@
 "start_time": "2025-03-20T01:54:30.146876Z"
 }
 },
+"outputs": [],
 "source": [
 "from langchain_netmind import NetmindEmbeddings\n",
 "\n",
 "embeddings = NetmindEmbeddings(\n",
 "    model=\"nvidia/NV-Embed-v2\",\n",
 ")"
-],
-"outputs": [],
-"execution_count": 4
+]
 },
 {
 "cell_type": "markdown",
@@ -150,13 +150,14 @@
 "source": [
 "## Indexing and Retrieval\n",
 "\n",
-"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/).\n",
+"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/rag).\n",
 "\n",
 "Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document in the `InMemoryVectorStore`."
 ]
 },
 {
 "cell_type": "code",
+"execution_count": 5,
 "id": "d817716b",
 "metadata": {
 "ExecuteTime": {
@@ -164,6 +165,18 @@
 "start_time": "2025-03-20T01:54:34.500805Z"
 }
 },
+"outputs": [
+{
+"data": {
+"text/plain": [
+"'LangChain is the framework for building context-aware reasoning applications'"
+]
+},
+"execution_count": 5,
+"metadata": {},
+"output_type": "execute_result"
+}
+],
 "source": [
 "# Create a vector store with a sample text\n",
 "from langchain_core.vectorstores import InMemoryVectorStore\n",
@@ -183,21 +196,8 @@
 "\n",
 "# show the retrieved document's content\n",
 "retrieved_documents[0].page_content"
-],
-"outputs": [
-{
-"data": {
-"text/plain": [
-"'LangChain is the framework for building context-aware reasoning applications'"
-]
-},
-"execution_count": 5,
-"metadata": {},
-"output_type": "execute_result"
-}
-],
-"execution_count": 5
+]
 },
 {
 "cell_type": "markdown",
 "id": "e02b9855",
@@ -216,6 +216,7 @@
 },
 {
 "cell_type": "code",
+"execution_count": 6,
 "id": "0d2befcd",
 "metadata": {
 "ExecuteTime": {
@@ -223,10 +224,6 @@
 "start_time": "2025-03-20T01:54:45.196528Z"
 }
 },
-"source": [
-"single_vector = embeddings.embed_query(text)\n",
-"print(str(single_vector)[:100]) # Show the first 100 characters of the vector"
-],
 "outputs": [
 {
 "name": "stdout",
@@ -236,7 +233,10 @@
 ]
 }
 ],
-"execution_count": 6
+"source": [
+"single_vector = embeddings.embed_query(text)\n",
+"print(str(single_vector)[:100]) # Show the first 100 characters of the vector"
+]
 },
 {
 "cell_type": "markdown",
@@ -250,6 +250,7 @@
 },
 {
 "cell_type": "code",
+"execution_count": 7,
 "id": "2f4d6e97",
 "metadata": {
 "ExecuteTime": {
@@ -257,14 +258,6 @@
 "start_time": "2025-03-20T01:54:52.468719Z"
 }
 },
-"source": [
-"text2 = (\n",
-"    \"LangGraph is a library for building stateful, multi-actor applications with LLMs\"\n",
-")\n",
-"two_vectors = embeddings.embed_documents([text, text2])\n",
-"for vector in two_vectors:\n",
-"    print(str(vector)[:100]) # Show the first 100 characters of the vector"
-],
 "outputs": [
 {
 "name": "stdout",
@@ -275,7 +268,14 @@
 ]
 }
 ],
-"execution_count": 7
+"source": [
+"text2 = (\n",
+"    \"LangGraph is a library for building stateful, multi-actor applications with LLMs\"\n",
+")\n",
+"two_vectors = embeddings.embed_documents([text, text2])\n",
+"for vector in two_vectors:\n",
+"    print(str(vector)[:100]) # Show the first 100 characters of the vector"
+]
 },
 {
 "cell_type": "markdown",
@@ -291,12 +291,12 @@
 ]
 },
 {
-"metadata": {},
 "cell_type": "code",
-"outputs": [],
 "execution_count": null,
-"source": "",
-"id": "adb9e45c34733299"
+"id": "adb9e45c34733299",
+"metadata": {},
+"outputs": [],
+"source": []
 }
 ],
 "metadata": {
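The cells reordered above exercise the two direct embedding entry points. A condensed sketch of what they do, with the same offline stand-in caveat as before (the notebook actually uses `NetmindEmbeddings(model="nvidia/NV-Embed-v2")`):

```python
from langchain_core.embeddings import DeterministicFakeEmbedding

# Stand-in for NetmindEmbeddings(model="nvidia/NV-Embed-v2") so this runs offline.
embeddings = DeterministicFakeEmbedding(size=256)

text = "LangChain is the framework for building context-aware reasoning applications"
text2 = "LangGraph is a library for building stateful, multi-actor applications with LLMs"

# embed_query embeds a single search query...
single_vector = embeddings.embed_query(text)
print(str(single_vector)[:100])  # Show the first 100 characters of the vector

# ...while embed_documents embeds a batch of documents for indexing.
two_vectors = embeddings.embed_documents([text, text2])
for vector in two_vectors:
    print(str(vector)[:100])  # Show the first 100 characters of the vector
```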
@@ -315,7 +315,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.10.5"
+"version": "3.9.6"
 }
 },
 "nbformat": 4,
@@ -53,7 +53,9 @@
 "cell_type": "markdown",
 "id": "c84fb993",
 "metadata": {},
-"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
+"source": [
+"To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
+]
 },
 {
 "cell_type": "code",
@@ -138,7 +140,7 @@
 "source": [
 "## Indexing and Retrieval\n",
 "\n",
-"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/).\n",
+"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/rag).\n",
 "\n",
 "Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document in the `InMemoryVectorStore`."
 ]
@@ -55,7 +55,9 @@
 "cell_type": "markdown",
 "id": "c84fb993",
 "metadata": {},
-"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
+"source": [
+"To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
+]
 },
 {
 "cell_type": "code",
@@ -123,7 +125,7 @@
 "source": [
 "## Indexing and Retrieval\n",
 "\n",
-"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/).\n",
+"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/rag).\n",
 "\n",
 "Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document in the `InMemoryVectorStore`."
 ]
@@ -262,7 +264,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.11.4"
+"version": "3.9.6"
 }
 },
 "nbformat": 4,
@@ -133,7 +133,7 @@
 "source": [
 "## Indexing and Retrieval\n",
 "\n",
-"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/).\n",
+"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/rag).\n",
 "\n",
 "Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document in the `InMemoryVectorStore`."
 ]
@@ -244,7 +244,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.10.5"
+"version": "3.9.6"
 }
 },
 "nbformat": 4,
@@ -141,7 +141,7 @@
 "source": [
 "## Indexing and Retrieval\n",
 "\n",
-"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/).\n",
+"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/rag).\n",
 "\n",
 "Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document in the `InMemoryVectorStore`."
 ]
@@ -252,7 +252,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.10.5"
+"version": "3.9.6"
 }
 },
 "nbformat": 4,
@@ -53,7 +53,9 @@
 "cell_type": "markdown",
 "id": "c84fb993",
 "metadata": {},
-"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
+"source": [
+"To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
+]
 },
 {
 "cell_type": "code",
@@ -128,7 +130,7 @@
 "source": [
 "## Indexing and Retrieval\n",
 "\n",
-"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/).\n",
+"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/rag).\n",
 "\n",
 "Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document in the `InMemoryVectorStore`."
 ]
@@ -267,7 +269,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.11.4"
+"version": "3.9.6"
 }
 },
 "nbformat": 4,
@@ -54,7 +54,9 @@
 "cell_type": "markdown",
 "id": "c84fb993",
 "metadata": {},
-"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
+"source": [
+"To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
+]
 },
 {
 "cell_type": "code",
@@ -130,7 +132,7 @@
 "source": [
 "## Indexing and Retrieval\n",
 "\n",
-"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/).\n",
+"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/rag).\n",
 "\n",
 "Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document in the `InMemoryVectorStore`."
 ]
@@ -269,7 +271,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.12.3"
+"version": "3.9.6"
 }
 },
 "nbformat": 4,
@@ -123,6 +123,9 @@ def create_citation_fuzzy_match_chain(llm: BaseLanguageModel) -> LLMChain:
         Chain (LLMChain) that can be used to answer questions with citations.
     """
     output_parser = PydanticOutputFunctionsParser(pydantic_schema=QuestionAnswer)
+    if hasattr(QuestionAnswer, "model_json_schema"):
+        schema = QuestionAnswer.model_json_schema()
+    else:
         schema = QuestionAnswer.schema()
     function = {
         "name": schema["title"],
@@ -70,8 +70,11 @@ class JsonSchemaEvaluator(StringEvaluator):
     def _parse_json(self, node: Any) -> Union[dict, list, None, float, bool, int, str]:
         if isinstance(node, str):
             return parse_json_markdown(node)
+        if hasattr(node, "model_json_schema") and callable(node.model_json_schema):
+            # Pydantic v2 model
+            return node.model_json_schema()
         if hasattr(node, "schema") and callable(node.schema):
-            # Pydantic model
+            # Pydantic v1 model
             return node.schema()
         return node
 
@@ -43,7 +43,15 @@ class YamlOutputParser(BaseOutputParser[T]):
 
     def get_format_instructions(self) -> str:
         # Copy schema to avoid altering original Pydantic schema.
+        if hasattr(self.pydantic_object, "model_json_schema"):
+            # Pydantic v2
+            schema = dict(self.pydantic_object.model_json_schema().items())
+        elif hasattr(self.pydantic_object, "schema"):
+            # Pydantic v1
             schema = dict(self.pydantic_object.schema().items())
+        else:
+            msg = "Pydantic object must have either model_json_schema or schema method"
+            raise ValueError(msg)
 
         # Remove extraneous fields.
         reduced_schema = schema
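The three library hunks above apply the same compatibility idiom: prefer Pydantic v2's `model_json_schema()` and fall back to v1's `schema()`. A minimal sketch of the idiom in isolation; the helper name is hypothetical, not part of the patch:

```python
from typing import Any


def get_json_schema(model: Any) -> dict:
    """Return a JSON schema dict from a Pydantic v1 or v2 model class.

    Hypothetical helper illustrating the fallback used in the hunks above.
    """
    if hasattr(model, "model_json_schema"):
        # Pydantic v2
        return model.model_json_schema()
    if hasattr(model, "schema"):
        # Pydantic v1
        return model.schema()
    msg = "Pydantic object must have either model_json_schema or schema method"
    raise ValueError(msg)
```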
File diff suppressed because it is too large