mirror of https://github.com/hwchase17/langchain.git
synced 2026-02-07 09:40:07 +00:00

Compare commits (82 commits): langchain- ... sr/fix-cod
| SHA1 |
|---|
| 76d3aa08f3 |
| e09abf8170 |
| 9164e6f906 |
| cc4f5269b1 |
| 8878a7b143 |
| 7cdd53390d |
| 6d71b6b6ee |
| b6f74bff40 |
| c7e82ad95d |
| 8a0782c46c |
| 8868701c16 |
| ee83993b91 |
| e6191d58e7 |
| 40bb7d00fc |
| b1cc972567 |
| dcf5c7b472 |
| af2188b848 |
| ba38997c7a |
| 643741497a |
| b268ab6a28 |
| 4ee6112161 |
| 9de4f22205 |
| 6105a5841b |
| cf5a442e4c |
| 5015188530 |
| 26030abb70 |
| 5271fd76f1 |
| 39a8a1121a |
| 4fe490c0ea |
| 2c9859956a |
| 9d4d258162 |
| 22e6d90937 |
| 7ff405077d |
| 2842e0c8c1 |
| 5d0bea8378 |
| 7e046ea848 |
| 29e17fbd6b |
| e2a0ff07fd |
| 19544ba3c9 |
| 0cadf4fc9a |
| a79998800c |
| da97013f96 |
| 6409498f6c |
| 3044bd37a9 |
| c1c3e13a54 |
| b610859633 |
| bc1b5ffc91 |
| bb7c190d2c |
| 0b5c06e89f |
| c4c39c1ae6 |
| cc1e53008f |
| 7702691baf |
| e979cd106a |
| 1682b59f92 |
| d4c84acc39 |
| b9357d456e |
| 532e6455e9 |
| 52e57cdc20 |
| cecfec5efa |
| 50f998a138 |
| f345ae5a1d |
| 01fcdff118 |
| 5839801897 |
| 0c10ff6418 |
| bb625081c8 |
| ddc850ca72 |
| 50f9354d31 |
| 446a9d5647 |
| d10fd02bb3 |
| 4071670f56 |
| 40d6d4c738 |
| 42eb356a44 |
| 40bd71caa5 |
| 1935e4526a |
| 323850fae1 |
| eadbb9077e |
| b0f100af7e |
| 5b165effcd |
| e455fab5d3 |
| b21526fe38 |
| 9ce974247c |
| 16e5a12806 |
.github/workflows/codspeed.yml (vendored): 4 changes
@@ -55,8 +55,8 @@ jobs:
       run: |
         cd ${{ matrix.working-directory }}
         if [ "${{ matrix.working-directory }}" = "libs/core" ]; then
-          uv run --no-sync pytest ./tests/benchmarks --codspeed
+          uv run --no-sync pytest ./tests/benchmarks --codspeed --codspeed-max-time=5
         else
-          uv run --no-sync pytest ./tests/ --codspeed
+          uv run --no-sync pytest ./tests/ --codspeed --codspeed-max-time=5
         fi
       mode: ${{ matrix.mode || 'instrumentation' }}

@@ -185,7 +185,7 @@
     " )\n",
     " # Text summary chain\n",
     " model = VertexAI(\n",
-    "     temperature=0, model_name=\"gemini-pro\", max_tokens=1024\n",
+    "     temperature=0, model_name=\"gemini-2.0-flash-lite-001\", max_tokens=1024\n",
     " ).with_fallbacks([empty_response])\n",
     " summarize_chain = {\"element\": lambda x: x} | prompt | model | StrOutputParser()\n",
     "\n",
@@ -254,7 +254,7 @@
     "\n",
     "def image_summarize(img_base64, prompt):\n",
     "    \"\"\"Make image summary\"\"\"\n",
-    "    model = ChatVertexAI(model=\"gemini-pro-vision\", max_tokens=1024)\n",
+    "    model = ChatVertexAI(model=\"gemini-2.0-flash\", max_tokens=1024)\n",
     "\n",
     "    msg = model.invoke(\n",
     "        [\n",
@@ -394,7 +394,7 @@
     "# The vectorstore to use to index the summaries\n",
     "vectorstore = Chroma(\n",
     "    collection_name=\"mm_rag_cj_blog\",\n",
-    "    embedding_function=VertexAIEmbeddings(model_name=\"textembedding-gecko@latest\"),\n",
+    "    embedding_function=VertexAIEmbeddings(model_name=\"text-embedding-005\"),\n",
     ")\n",
     "\n",
     "# Create retriever\n",
@@ -553,7 +553,7 @@
     "    \"\"\"\n",
     "\n",
     "    # Multi-modal LLM\n",
-    "    model = ChatVertexAI(temperature=0, model_name=\"gemini-pro-vision\", max_tokens=1024)\n",
+    "    model = ChatVertexAI(temperature=0, model_name=\"gemini-2.0-flash\", max_tokens=1024)\n",
     "\n",
     "    # RAG pipeline\n",
     "    chain = (\n",

@@ -25,7 +25,7 @@
 " * [Oracle Blockchain](https://docs.oracle.com/en/database/oracle/oracle-database/23/arpls/dbms_blockchain_table.html#GUID-B469E277-978E-4378-A8C1-26D3FF96C9A6)\n",
 " * [JSON](https://docs.oracle.com/en/database/oracle/oracle-database/23/adjsn/json-in-oracle-database.html)\n",
 "\n",
-"This guide demonstrates how Oracle AI Vector Search can be used with Langchain to serve an end-to-end RAG pipeline. This guide goes through examples of:\n",
+"This guide demonstrates how Oracle AI Vector Search can be used with LangChain to serve an end-to-end RAG pipeline. This guide goes through examples of:\n",
 "\n",
 " * Loading the documents from various sources using OracleDocLoader\n",
 " * Summarizing them within/outside the database using OracleSummary\n",
@@ -47,7 +47,19 @@
 "source": [
 "### Prerequisites\n",
 "\n",
-"Please install Oracle Python Client driver to use Langchain with Oracle AI Vector Search. "
+"Please install the Oracle Database [python-oracledb driver](https://pypi.org/project/oracledb/) to use LangChain with Oracle AI Vector Search:\n",
+"\n",
+"```\n",
+"$ python -m pip install --upgrade oracledb\n",
+"```"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"### Create Demo User\n",
+"First, connect as a privileged user to create a demo user with all the required privileges. Change the credentials for your environment. Also set the DEMO_PY_DIR path to a directory on the database host where your model file is located:"
 ]
 },
 {
@@ -56,65 +68,30 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"# pip install oracledb"
-]
-},
-{
-"cell_type": "markdown",
-"metadata": {},
-"source": [
-"### Create Demo User\n",
-"First, create a demo user with all the required privileges. "
-]
-},
-{
-"cell_type": "code",
-"execution_count": 37,
-"metadata": {},
-"outputs": [
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-"Connection successful!\n",
-"User setup done!\n"
-]
-}
-],
-"source": [
 "import sys\n",
 "\n",
 "import oracledb\n",
 "\n",
-"# Update with your username, password, hostname, and service_name\n",
-"username = \"\"\n",
+"# Please update with your SYSTEM (or privileged user) username, password, and database connection string\n",
+"username = \"SYSTEM\"\n",
 "password = \"\"\n",
 "dsn = \"\"\n",
 "\n",
-"try:\n",
-"    conn = oracledb.connect(user=username, password=password, dsn=dsn)\n",
+"with oracledb.connect(user=username, password=password, dsn=dsn) as connection:\n",
 "    print(\"Connection successful!\")\n",
 "\n",
-"    cursor = conn.cursor()\n",
-"    try:\n",
+"    with connection.cursor() as cursor:\n",
 "        cursor.execute(\n",
 "            \"\"\"\n",
 "            begin\n",
-"                -- Drop user\n",
-"                begin\n",
-"                    execute immediate 'drop user testuser cascade';\n",
-"                exception\n",
-"                    when others then\n",
-"                        dbms_output.put_line('Error dropping user: ' || SQLERRM);\n",
-"                end;\n",
-"                \n",
+"                execute immediate 'drop user if exists testuser cascade';\n",
 "\n",
 "                -- Create user and grant privileges\n",
 "                execute immediate 'create user testuser identified by testuser';\n",
 "                execute immediate 'grant connect, unlimited tablespace, create credential, create procedure, create any index to testuser';\n",
-"                execute immediate 'create or replace directory DEMO_PY_DIR as ''/scratch/hroy/view_storage/hroy_devstorage/demo/orachain''';\n",
+"                execute immediate 'create or replace directory DEMO_PY_DIR as ''/home/yourname/demo/orachain''';\n",
 "                execute immediate 'grant read, write on directory DEMO_PY_DIR to public';\n",
 "                execute immediate 'grant create mining model to testuser';\n",
-"                \n",
+"\n",
 "                -- Network access\n",
 "                begin\n",
 "                    DBMS_NETWORK_ACL_ADMIN.APPEND_HOST_ACE(\n",
@@ -127,15 +104,7 @@
 "            end;\n",
 "            \"\"\"\n",
 "        )\n",
-"        print(\"User setup done!\")\n",
-"    except Exception as e:\n",
-"        print(f\"User setup failed with error: {e}\")\n",
-"    finally:\n",
-"        cursor.close()\n",
-"        conn.close()\n",
-"except Exception as e:\n",
-"    print(f\"Connection failed with error: {e}\")\n",
-"    sys.exit(1)"
+"        print(\"User setup done!\")"
 ]
 },
 {
@@ -143,13 +112,13 @@
 "metadata": {},
 "source": [
 "## Process Documents using Oracle AI\n",
-"Consider the following scenario: users possess documents stored either in an Oracle Database or a file system and intend to utilize this data with Oracle AI Vector Search powered by Langchain.\n",
+"Consider the following scenario: users possess documents stored either in an Oracle Database or a file system and intend to utilize this data with Oracle AI Vector Search powered by LangChain.\n",
 "\n",
 "To prepare the documents for analysis, a comprehensive preprocessing workflow is necessary. Initially, the documents must be retrieved, summarized (if required), and chunked as needed. Subsequent steps involve generating embeddings for these chunks and integrating them into the Oracle AI Vector Store. Users can then conduct semantic searches on this data.\n",
 "\n",
-"The Oracle AI Vector Search Langchain library encompasses a suite of document processing tools that facilitate document loading, chunking, summary generation, and embedding creation.\n",
+"The Oracle AI Vector Search LangChain library encompasses a suite of document processing tools that facilitate document loading, chunking, summary generation, and embedding creation.\n",
 "\n",
-"In the sections that follow, we will detail the utilization of Oracle AI Langchain APIs to effectively implement each of these processes."
+"In the sections that follow, we will detail the utilization of Oracle AI LangChain APIs to effectively implement each of these processes."
 ]
 },
 {
@@ -157,38 +126,24 @@
 "metadata": {},
 "source": [
 "### Connect to Demo User\n",
-"The following sample code will show how to connect to Oracle Database. By default, python-oracledb runs in a ‘Thin’ mode which connects directly to Oracle Database. This mode does not need Oracle Client libraries. However, some additional functionality is available when python-oracledb uses them. Python-oracledb is said to be in ‘Thick’ mode when Oracle Client libraries are used. Both modes have comprehensive functionality supporting the Python Database API v2.0 Specification. See the following [guide](https://python-oracledb.readthedocs.io/en/latest/user_guide/appendix_a.html#featuresummary) that talks about features supported in each mode. You might want to switch to thick-mode if you are unable to use thin-mode."
+"The following sample code shows how to connect to Oracle Database using the python-oracledb driver. By default, python-oracledb runs in a ‘Thin’ mode which connects directly to Oracle Database. This mode does not need Oracle Client libraries. However, some additional functionality is available when python-oracledb uses them. Python-oracledb is said to be in ‘Thick’ mode when Oracle Client libraries are used. Both modes have comprehensive functionality supporting the Python Database API v2.0 Specification. See the following [guide](https://python-oracledb.readthedocs.io/en/latest/user_guide/appendix_a.html#featuresummary) that talks about features supported in each mode. You can switch to Thick mode if you are unable to use Thin mode."
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 45,
+"execution_count": null,
 "metadata": {},
-"outputs": [
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-"Connection successful!\n"
-]
-}
-],
+"outputs": [],
 "source": [
-"import sys\n",
-"\n",
 "import oracledb\n",
 "\n",
-"# please update with your username, password, hostname and service_name\n",
-"username = \"\"\n",
+"# please update with your username, password, and database connection string\n",
+"username = \"testuser\"\n",
 "password = \"\"\n",
 "dsn = \"\"\n",
 "\n",
-"try:\n",
-"    conn = oracledb.connect(user=username, password=password, dsn=dsn)\n",
-"    print(\"Connection successful!\")\n",
-"except Exception as e:\n",
-"    print(\"Connection failed!\")\n",
-"    sys.exit(1)"
+"connection = oracledb.connect(user=username, password=password, dsn=dsn)\n",
+"print(\"Connection successful!\")"
 ]
 },
 {
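The note above says you can switch to Thick mode if Thin mode is not usable. A minimal sketch of what that switch looks like, assuming a local Oracle Client installation (the `lib_dir` path below is a hypothetical placeholder, not part of this diff):

```python
# Sketch: enable python-oracledb "Thick" mode before connecting.
# lib_dir is a hypothetical path; point it at your Oracle Client / Instant
# Client installation (on Linux it can often be omitted when the libraries
# are already on the loader path).
import oracledb

oracledb.init_oracle_client(lib_dir="/opt/oracle/instantclient")
connection = oracledb.connect(user="testuser", password="", dsn="")
```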
@@ -201,22 +156,12 @@
 },
 {
 "cell_type": "code",
-"execution_count": 46,
+"execution_count": null,
 "metadata": {},
-"outputs": [
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-"Table created and populated.\n"
-]
-}
-],
+"outputs": [],
 "source": [
-"try:\n",
-"    cursor = conn.cursor()\n",
-"\n",
-"    drop_table_sql = \"\"\"drop table demo_tab\"\"\"\n",
+"with connection.cursor() as cursor:\n",
+"    drop_table_sql = \"\"\"drop table if exists demo_tab\"\"\"\n",
 "    cursor.execute(drop_table_sql)\n",
 "\n",
 "    create_table_sql = \"\"\"create table demo_tab (id number, data clob)\"\"\"\n",
@@ -239,15 +184,9 @@
 "    ]\n",
 "    cursor.executemany(insert_row_sql, rows_to_insert)\n",
 "\n",
-"    conn.commit()\n",
+"connection.commit()\n",
 "\n",
-"    print(\"Table created and populated.\")\n",
-"    cursor.close()\n",
-"except Exception as e:\n",
-"    print(\"Table creation failed.\")\n",
-"    cursor.close()\n",
-"    conn.close()\n",
-"    sys.exit(1)"
+"print(\"Table created and populated.\")"
 ]
 },
 {
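The `insert_row_sql` statement and `rows_to_insert` data are elided from this hunk. A hypothetical reconstruction that matches the `demo_tab (id number, data clob)` schema created above, using python-oracledb bind variables:

```python
# Hypothetical sample rows; the notebook's actual demo data is not shown in
# this hunk. executemany binds one tuple per row.
insert_row_sql = "insert into demo_tab (id, data) values (:1, :2)"
rows_to_insert = [
    (1, "first sample document text"),
    (2, "second sample document text"),
]
with connection.cursor() as cursor:
    cursor.executemany(insert_row_sql, rows_to_insert)
connection.commit()
```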
@@ -261,30 +200,22 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"### Load ONNX Model\n",
+"### Load the ONNX Model\n",
 "\n",
-"Oracle accommodates a variety of embedding providers, enabling users to choose between proprietary database solutions and third-party services such as OCIGENAI and HuggingFace. This selection dictates the methodology for generating and managing embeddings.\n",
+"Oracle accommodates a variety of embedding providers, enabling you to choose between proprietary database solutions and third-party services such as Oracle Generative AI Service and HuggingFace. This selection dictates the methodology for generating and managing embeddings.\n",
 "\n",
-"***Important*** : Should users opt for the database option, they must upload an ONNX model into the Oracle Database. Conversely, if a third-party provider is selected for embedding generation, uploading an ONNX model to Oracle Database is not required.\n",
+"***Important*** : Should you opt for the database option, you must upload an ONNX model into the Oracle Database. Conversely, if a third-party provider is selected for embedding generation, uploading an ONNX model to Oracle Database is not required.\n",
 "\n",
-"A significant advantage of utilizing an ONNX model directly within Oracle is the enhanced security and performance it offers by eliminating the need to transmit data to external parties. Additionally, this method avoids the latency typically associated with network or REST API calls.\n",
+"A significant advantage of utilizing an ONNX model directly within Oracle Database is the enhanced security and performance it offers by eliminating the need to transmit data to external parties. Additionally, this method avoids the latency typically associated with network or REST API calls.\n",
 "\n",
 "Below is the example code to upload an ONNX model into Oracle Database:"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 47,
+"execution_count": null,
 "metadata": {},
-"outputs": [
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-"ONNX model loaded.\n"
-]
-}
-],
+"outputs": [],
 "source": [
 "from langchain_community.embeddings.oracleai import OracleEmbeddings\n",
 "\n",
@@ -294,12 +225,8 @@
 "onnx_file = \"tinybert.onnx\"\n",
 "model_name = \"demo_model\"\n",
 "\n",
-"try:\n",
-"    OracleEmbeddings.load_onnx_model(conn, onnx_dir, onnx_file, model_name)\n",
-"    print(\"ONNX model loaded.\")\n",
-"except Exception as e:\n",
-"    print(\"ONNX model loading failed!\")\n",
-"    sys.exit(1)"
+"OracleEmbeddings.load_onnx_model(connection, onnx_dir, onnx_file, model_name)\n",
+"print(\"ONNX model loaded.\")"
 ]
 },
 {
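A quick sanity check, not part of this diff, that the freshly loaded in-database ONNX model works end to end through the 'database' embedding provider:

```python
# Sketch: embed a test string with the ONNX model just loaded into the database.
from langchain_community.embeddings.oracleai import OracleEmbeddings

embedder = OracleEmbeddings(
    conn=connection,  # the python-oracledb connection from the cells above
    params={"provider": "database", "model": "demo_model"},
)
vec = embedder.embed_query("hello world")
print(f"Embedding dimension: {len(vec)}")
```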
@@ -321,8 +248,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"try:\n",
-"    cursor = conn.cursor()\n",
+"with connection.cursor() as cursor:\n",
 "    cursor.execute(\n",
 "        \"\"\"\n",
 "        declare\n",
@@ -349,12 +275,7 @@
 "            params => json(jo.to_string));\n",
 "        end;\n",
 "        \"\"\"\n",
-"    )\n",
-"    cursor.close()\n",
-"    print(\"Credentials created.\")\n",
-"except Exception as ex:\n",
-"    cursor.close()\n",
-"    raise"
+"    )"
 ]
 },
 {
@@ -362,33 +283,24 @@
 "metadata": {},
 "source": [
 "### Load Documents\n",
-"Users have the flexibility to load documents from either the Oracle Database, a file system, or both, by appropriately configuring the loader parameters. For comprehensive details on these parameters, please consult the [Oracle AI Vector Search Guide](https://docs.oracle.com/en/database/oracle/oracle-database/23/arpls/dbms_vector_chain1.html#GUID-73397E89-92FB-48ED-94BB-1AD960C4EA1F).\n",
+"You have the flexibility to load documents from either the Oracle Database, a file system, or both, by appropriately configuring the loader parameters. For comprehensive details on these parameters, please consult the [Oracle AI Vector Search Guide](https://docs.oracle.com/en/database/oracle/oracle-database/23/arpls/dbms_vector_chain1.html#GUID-73397E89-92FB-48ED-94BB-1AD960C4EA1F).\n",
 "\n",
 "A significant advantage of utilizing OracleDocLoader is its capability to process over 150 distinct file formats, eliminating the need for multiple loaders for different document types. For a complete list of the supported formats, please refer to the [Oracle Text Supported Document Formats](https://docs.oracle.com/en/database/oracle/oracle-database/23/ccref/oracle-text-supported-document-formats.html).\n",
 "\n",
-"Below is a sample code snippet that demonstrates how to use OracleDocLoader"
+"Below is a sample code snippet that demonstrates how to use OracleDocLoader:"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 48,
+"execution_count": null,
 "metadata": {},
-"outputs": [
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-"Number of docs loaded: 3\n"
-]
-}
-],
+"outputs": [],
 "source": [
 "from langchain_community.document_loaders.oracleai import OracleDocLoader\n",
 "from langchain_core.documents import Document\n",
 "\n",
 "# loading from Oracle Database table\n",
 "# make sure you have the table with this specification\n",
-"loader_params = {}\n",
+"loader_params = {\n",
 "    \"owner\": \"testuser\",\n",
 "    \"tablename\": \"demo_tab\",\n",
@@ -396,7 +308,7 @@
 "}\n",
 "\n",
 "\"\"\" load the docs \"\"\"\n",
-"loader = OracleDocLoader(conn=conn, params=loader_params)\n",
+"loader = OracleDocLoader(conn=connection, params=loader_params)\n",
 "docs = loader.load()\n",
 "\n",
 "\"\"\" verify \"\"\"\n",
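A small sketch, not in the diff, of inspecting what `OracleDocLoader` returns; each item is a standard LangChain `Document` whose metadata carries Oracle-assigned keys such as `_oid` and `_rowid` (visible in the search output near the end of this diff):

```python
# Sketch: peek at the loaded documents and their Oracle-supplied metadata.
for doc in docs:
    print(doc.metadata)           # includes '_oid' and '_rowid'
    print(doc.page_content[:80])  # first 80 characters of the text
```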
@@ -409,23 +321,23 @@
 "metadata": {},
 "source": [
 "### Generate Summary\n",
-"Now that the user loaded the documents, they may want to generate a summary for each document. The Oracle AI Vector Search Langchain library offers a suite of APIs designed for document summarization. It supports multiple summarization providers such as Database, OCIGENAI, HuggingFace, among others, allowing users to select the provider that best meets their needs. To utilize these capabilities, users must configure the summary parameters as specified. For detailed information on these parameters, please consult the [Oracle AI Vector Search Guide book](https://docs.oracle.com/en/database/oracle/oracle-database/23/arpls/dbms_vector_chain1.html#GUID-EC9DDB58-6A15-4B36-BA66-ECBA20D2CE57)."
+"Now that you have loaded the documents, you may want to generate a summary for each document. The Oracle AI Vector Search LangChain library offers a suite of APIs designed for document summarization. It supports multiple summarization providers such as Database, Oracle Generative AI Service, and HuggingFace, allowing you to select the provider that best meets your needs. To utilize these capabilities, you must configure the summary parameters as specified. For detailed information on these parameters, please consult the [Oracle AI Vector Search Guide](https://docs.oracle.com/en/database/oracle/oracle-database/23/arpls/dbms_vector_chain1.html#GUID-EC9DDB58-6A15-4B36-BA66-ECBA20D2CE57)."
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"***Note:*** The users may need to set proxy if they want to use some 3rd party summary generation providers other than Oracle's in-house and default provider: 'database'. If you don't have proxy, please remove the proxy parameter when you instantiate the OracleSummary."
+"***Note:*** You may need to set a proxy if you want to use a 3rd-party summary generation provider other than Oracle's in-house and default provider: 'database'. If you don't have a proxy, please remove the proxy parameter when you instantiate OracleSummary."
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 22,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
-"# proxy to be used when we instantiate summary and embedder object\n",
+"# proxy to be used when we instantiate summary and embedder objects\n",
 "proxy = \"\""
 ]
 },
@@ -433,22 +345,14 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"The following sample code will show how to generate summary:"
+"The following sample code shows how to generate a summary:"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 49,
+"execution_count": null,
 "metadata": {},
-"outputs": [
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-"Number of Summaries: 3\n"
-]
-}
-],
+"outputs": [],
 "source": [
 "from langchain_community.utilities.oracleai import OracleSummary\n",
 "from langchain_core.documents import Document\n",
@@ -463,7 +367,7 @@
 "\n",
 "# get the summary instance\n",
 "# Remove proxy if not required\n",
-"summ = OracleSummary(conn=conn, params=summary_params, proxy=proxy)\n",
+"summ = OracleSummary(conn=connection, params=summary_params, proxy=proxy)\n",
 "\n",
 "list_summary = []\n",
 "for doc in docs:\n",
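The `summary_params` used by this cell are defined in a cell elided from this diff; they appear verbatim in the combined pipeline cell later on:

```python
# In-database summarizer configuration, as shown in the combined cell below.
summary_params = {
    "provider": "database",
    "glevel": "S",
    "numParagraphs": 1,
    "language": "english",
}
```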
@@ -487,17 +391,9 @@
 },
 {
 "cell_type": "code",
-"execution_count": 50,
+"execution_count": null,
 "metadata": {},
-"outputs": [
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-"Number of Chunks: 3\n"
-]
-}
-],
+"outputs": [],
 "source": [
 "from langchain_community.document_loaders.oracleai import OracleTextSplitter\n",
 "from langchain_core.documents import Document\n",
@@ -506,7 +402,7 @@
 "splitter_params = {\"normalize\": \"all\"}\n",
 "\n",
 "\"\"\" get the splitter instance \"\"\"\n",
-"splitter = OracleTextSplitter(conn=conn, params=splitter_params)\n",
+"splitter = OracleTextSplitter(conn=connection, params=splitter_params)\n",
 "\n",
 "list_chunks = []\n",
 "for doc in docs:\n",
@@ -523,19 +419,19 @@
 "metadata": {},
 "source": [
 "### Generate Embeddings\n",
-"Now that the documents are chunked as per requirements, the users may want to generate embeddings for these chunks. Oracle AI Vector Search provides multiple methods for generating embeddings, utilizing either locally hosted ONNX models or third-party APIs. For comprehensive instructions on configuring these alternatives, please refer to the [Oracle AI Vector Search Guide](https://docs.oracle.com/en/database/oracle/oracle-database/23/arpls/dbms_vector_chain1.html#GUID-C6439E94-4E86-4ECD-954E-4B73D53579DE)."
+"Now that the documents are chunked as per requirements, you may want to generate embeddings for these chunks. Oracle AI Vector Search provides multiple methods for generating embeddings, utilizing either locally hosted ONNX models or third-party APIs. For comprehensive instructions on configuring these alternatives, please refer to the [Oracle AI Vector Search Guide](https://docs.oracle.com/en/database/oracle/oracle-database/23/arpls/dbms_vector_chain1.html#GUID-C6439E94-4E86-4ECD-954E-4B73D53579DE)."
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"***Note:*** Users may need to configure a proxy to utilize third-party embedding generation providers, excluding the 'database' provider that utilizes an ONNX model."
+"***Note:*** You may need to configure a proxy to utilize third-party embedding generation providers, excluding the 'database' provider that utilizes an ONNX model."
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 12,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -547,22 +443,14 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"The following sample code will show how to generate embeddings:"
+"The following sample code shows how to generate embeddings:"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 51,
+"execution_count": null,
 "metadata": {},
-"outputs": [
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-"Number of embeddings: 3\n"
-]
-}
-],
+"outputs": [],
 "source": [
 "from langchain_community.embeddings.oracleai import OracleEmbeddings\n",
 "from langchain_core.documents import Document\n",
@@ -572,7 +460,7 @@
 "\n",
 "# get the embedding instance\n",
 "# Remove proxy if not required\n",
-"embedder = OracleEmbeddings(conn=conn, params=embedder_params, proxy=proxy)\n",
+"embedder = OracleEmbeddings(conn=connection, params=embedder_params, proxy=proxy)\n",
 "\n",
 "embeddings = []\n",
 "for doc in docs:\n",
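Likewise, the `embedder_params` for this cell come from an elided definition that reappears in the combined pipeline cell later in this diff:

```python
# In-database embedding configuration using the ONNX model loaded earlier.
embedder_params = {"provider": "database", "model": "demo_model"}
```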
@@ -591,19 +479,19 @@
 "metadata": {},
 "source": [
 "## Create Oracle AI Vector Store\n",
-"Now that you know how to use Oracle AI Langchain library APIs individually to process the documents, let us show how to integrate with Oracle AI Vector Store to facilitate the semantic searches."
+"Now that you know how to use Oracle AI LangChain library APIs individually to process the documents, let us show how to integrate with Oracle AI Vector Store to facilitate the semantic searches."
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"First, let's import all the dependencies."
+"First, let's import all the dependencies:"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 52,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -626,100 +514,80 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"Next, let's combine all document processing stages together. Here is the sample code below:"
+"Next, let's combine all document processing stages together. Here is the sample code:"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 53,
+"execution_count": null,
 "metadata": {},
-"outputs": [
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-"Connection successful!\n",
-"ONNX model loaded.\n",
-"Number of total chunks with metadata: 3\n"
-]
-}
-],
+"outputs": [],
 "source": [
 "\"\"\"\n",
-"In this sample example, we will use 'database' provider for both summary and embeddings.\n",
-"So, we don't need to do the followings:\n",
+"In this sample example, we will use 'database' provider for both summary and embeddings\n",
+"so, we don't need to do the following:\n",
 "    - set proxy for 3rd party providers\n",
 "    - create credential for 3rd party providers\n",
 "\n",
-"If you choose to use 3rd party provider, \n",
-"please follow the necessary steps for proxy and credential.\n",
+"If you choose to use 3rd party provider, please follow the necessary steps for proxy and credential.\n",
 "\"\"\"\n",
 "\n",
 "# oracle connection\n",
-"# please update with your username, password, hostname, and service_name\n",
+"# please update with your username, password, and database connection string\n",
 "username = \"\"\n",
 "password = \"\"\n",
 "dsn = \"\"\n",
 "\n",
-"try:\n",
-"    conn = oracledb.connect(user=username, password=password, dsn=dsn)\n",
+"with oracledb.connect(user=username, password=password, dsn=dsn) as connection:\n",
 "    print(\"Connection successful!\")\n",
-"except Exception as e:\n",
-"    print(\"Connection failed!\")\n",
-"    sys.exit(1)\n",
 "\n",
-"\n",
-"# load onnx model\n",
-"# please update with your related information\n",
-"onnx_dir = \"DEMO_PY_DIR\"\n",
-"onnx_file = \"tinybert.onnx\"\n",
-"model_name = \"demo_model\"\n",
-"try:\n",
-"    OracleEmbeddings.load_onnx_model(conn, onnx_dir, onnx_file, model_name)\n",
+"    # load onnx model\n",
+"    # please update with your related information\n",
+"    onnx_dir = \"DEMO_PY_DIR\"\n",
+"    onnx_file = \"tinybert.onnx\"\n",
+"    model_name = \"demo_model\"\n",
+"    OracleEmbeddings.load_onnx_model(connection, onnx_dir, onnx_file, model_name)\n",
 "    print(\"ONNX model loaded.\")\n",
-"except Exception as e:\n",
-"    print(\"ONNX model loading failed!\")\n",
-"    sys.exit(1)\n",
 "\n",
-"# params\n",
-"# please update necessary fields with related information\n",
-"loader_params = {\n",
-"    \"owner\": \"testuser\",\n",
-"    \"tablename\": \"demo_tab\",\n",
-"    \"colname\": \"data\",\n",
-"}\n",
-"summary_params = {\n",
-"    \"provider\": \"database\",\n",
-"    \"glevel\": \"S\",\n",
-"    \"numParagraphs\": 1,\n",
-"    \"language\": \"english\",\n",
-"}\n",
-"splitter_params = {\"normalize\": \"all\"}\n",
-"embedder_params = {\"provider\": \"database\", \"model\": \"demo_model\"}\n",
+"    # params\n",
+"    # please update necessary fields with related information\n",
+"    loader_params = {\n",
+"        \"owner\": \"testuser\",\n",
+"        \"tablename\": \"demo_tab\",\n",
+"        \"colname\": \"data\",\n",
+"    }\n",
+"    summary_params = {\n",
+"        \"provider\": \"database\",\n",
+"        \"glevel\": \"S\",\n",
+"        \"numParagraphs\": 1,\n",
+"        \"language\": \"english\",\n",
+"    }\n",
+"    splitter_params = {\"normalize\": \"all\"}\n",
+"    embedder_params = {\"provider\": \"database\", \"model\": \"demo_model\"}\n",
 "\n",
-"# instantiate loader, summary, splitter, and embedder\n",
-"loader = OracleDocLoader(conn=conn, params=loader_params)\n",
-"summary = OracleSummary(conn=conn, params=summary_params)\n",
-"splitter = OracleTextSplitter(conn=conn, params=splitter_params)\n",
-"embedder = OracleEmbeddings(conn=conn, params=embedder_params)\n",
+"    # instantiate loader, summary, splitter, and embedder\n",
+"    loader = OracleDocLoader(conn=connection, params=loader_params)\n",
+"    summary = OracleSummary(conn=connection, params=summary_params)\n",
+"    splitter = OracleTextSplitter(conn=connection, params=splitter_params)\n",
+"    embedder = OracleEmbeddings(conn=connection, params=embedder_params)\n",
 "\n",
-"# process the documents\n",
-"chunks_with_mdata = []\n",
-"for id, doc in enumerate(docs, start=1):\n",
-"    summ = summary.get_summary(doc.page_content)\n",
-"    chunks = splitter.split_text(doc.page_content)\n",
-"    for ic, chunk in enumerate(chunks, start=1):\n",
-"        chunk_metadata = doc.metadata.copy()\n",
-"        chunk_metadata[\"id\"] = chunk_metadata[\"_oid\"] + \"$\" + str(id) + \"$\" + str(ic)\n",
-"        chunk_metadata[\"document_id\"] = str(id)\n",
-"        chunk_metadata[\"document_summary\"] = str(summ[0])\n",
-"        chunks_with_mdata.append(\n",
-"            Document(page_content=str(chunk), metadata=chunk_metadata)\n",
-"        )\n",
+"    # process the documents\n",
+"    chunks_with_mdata = []\n",
+"    for id, doc in enumerate(docs, start=1):\n",
+"        summ = summary.get_summary(doc.page_content)\n",
+"        chunks = splitter.split_text(doc.page_content)\n",
+"        for ic, chunk in enumerate(chunks, start=1):\n",
+"            chunk_metadata = doc.metadata.copy()\n",
+"            chunk_metadata[\"id\"] = (\n",
+"                chunk_metadata[\"_oid\"] + \"$\" + str(id) + \"$\" + str(ic)\n",
+"            )\n",
+"            chunk_metadata[\"document_id\"] = str(id)\n",
+"            chunk_metadata[\"document_summary\"] = str(summ[0])\n",
+"            chunks_with_mdata.append(\n",
+"                Document(page_content=str(chunk), metadata=chunk_metadata)\n",
+"            )\n",
 "\n",
-"\"\"\" verify \"\"\"\n",
-"print(f\"Number of total chunks with metadata: {len(chunks_with_mdata)}\")"
+"    \"\"\" verify \"\"\"\n",
+"    print(f\"Number of total chunks with metadata: {len(chunks_with_mdata)}\")"
 ]
 },
 {
@@ -733,23 +601,15 @@
 },
 {
 "cell_type": "code",
-"execution_count": 55,
+"execution_count": null,
 "metadata": {},
-"outputs": [
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-"Vector Store Table: oravs\n"
-]
-}
-],
+"outputs": [],
 "source": [
 "# create Oracle AI Vector Store\n",
 "vectorstore = OracleVS.from_documents(\n",
 "    chunks_with_mdata,\n",
 "    embedder,\n",
-"    client=conn,\n",
+"    client=connection,\n",
 "    table_name=\"oravs\",\n",
 "    distance_strategy=DistanceStrategy.DOT_PRODUCT,\n",
 ")\n",
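Once `OracleVS.from_documents` has populated the table, the store can also be wrapped as a retriever for RAG chains. A hedged sketch using the standard LangChain `VectorStore` API (not shown in this notebook):

```python
# Sketch: expose the Oracle vector store as a retriever.
retriever = vectorstore.as_retriever(search_kwargs={"k": 3})
docs = retriever.invoke("What is Oracle AI Vector Store?")
print(len(docs))
```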
@@ -778,12 +638,12 @@
 },
 {
 "cell_type": "code",
-"execution_count": 56,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
 "oraclevs.create_index(\n",
-"    conn, vectorstore, params={\"idx_name\": \"hnsw_oravs\", \"idx_type\": \"HNSW\"}\n",
+"    connection, vectorstore, params={\"idx_name\": \"hnsw_oravs\", \"idx_type\": \"HNSW\"}\n",
 ")\n",
 "\n",
 "print(\"Index created.\")"
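The guide linked below also describes other vector index categories. Assuming `create_index` accepts them through the same `params` dict (an assumption based on that guide, not on this diff), an IVF index would look like:

```python
# Sketch: an IVF (neighbor-partition) index instead of HNSW. The idx_name is
# hypothetical, and "IVF" as an idx_type value is an assumption from the guide.
oraclevs.create_index(
    connection, vectorstore, params={"idx_name": "ivf_oravs", "idx_type": "IVF"}
)
```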
@@ -793,7 +653,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"This example demonstrates the creation of a default HNSW index on embeddings within the 'oravs' table. Users may adjust various parameters according to their specific needs. For detailed information on these parameters, please consult the [Oracle AI Vector Search Guide book](https://docs.oracle.com/en/database/oracle/oracle-database/23/vecse/manage-different-categories-vector-indexes.html).\n",
+"This example demonstrates the creation of a default HNSW index on embeddings within the 'oravs' table. You may adjust various parameters according to your specific needs. For detailed information on these parameters, please consult the [Oracle AI Vector Search Guide](https://docs.oracle.com/en/database/oracle/oracle-database/23/vecse/manage-different-categories-vector-indexes.html).\n",
 "\n",
 "Additionally, various types of vector indices can be created to meet diverse requirements. More details can be found in our [comprehensive guide](https://python.langchain.com/v0.1/docs/integrations/vectorstores/oracle/).\n",
 ]
@@ -805,29 +665,16 @@
 "## Perform Semantic Search\n",
 "All set!\n",
 "\n",
-"We have successfully processed the documents and stored them in the vector store, followed by the creation of an index to enhance query performance. We are now prepared to proceed with semantic searches.\n",
+"You have successfully processed the documents and stored them in the vector store, followed by the creation of an index to enhance query performance. You are now prepared to proceed with semantic searches.\n",
 "\n",
 "Below is the sample code for this process:"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 58,
+"execution_count": null,
 "metadata": {},
-"outputs": [
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-"[Document(page_content='The database stores LOBs differently from other data types. Creating a LOB column implicitly creates a LOB segment and a LOB index. The tablespace containing the LOB segment and LOB index, which are always stored together, may be different from the tablespace containing the table. Sometimes the database can store small amounts of LOB data in the table itself rather than in a separate LOB segment.', metadata={'_oid': '662f2f257677f3c2311a8ff999fd34e5', '_rowid': 'AAAR/xAAEAAAAAnAAC', 'id': '662f2f257677f3c2311a8ff999fd34e5$3$1', 'document_id': '3', 'document_summary': 'Sometimes the database can store small amounts of LOB data in the table itself rather than in a separate LOB segment.\\n\\n'})]\n",
-"[]\n",
-"[(Document(page_content='The database stores LOBs differently from other data types. Creating a LOB column implicitly creates a LOB segment and a LOB index. The tablespace containing the LOB segment and LOB index, which are always stored together, may be different from the tablespace containing the table. Sometimes the database can store small amounts of LOB data in the table itself rather than in a separate LOB segment.', metadata={'_oid': '662f2f257677f3c2311a8ff999fd34e5', '_rowid': 'AAAR/xAAEAAAAAnAAC', 'id': '662f2f257677f3c2311a8ff999fd34e5$3$1', 'document_id': '3', 'document_summary': 'Sometimes the database can store small amounts of LOB data in the table itself rather than in a separate LOB segment.\\n\\n'}), 0.055675752460956573)]\n",
-"[]\n",
-"[Document(page_content='If the answer to any preceding questions is yes, then the database stops the search and allocates space from the specified tablespace; otherwise, space is allocated from the database default shared temporary tablespace.', metadata={'_oid': '662f2f253acf96b33b430b88699490a2', '_rowid': 'AAAR/xAAEAAAAAnAAA', 'id': '662f2f253acf96b33b430b88699490a2$1$1', 'document_id': '1', 'document_summary': 'If the answer to any preceding questions is yes, then the database stops the search and allocates space from the specified tablespace; otherwise, space is allocated from the database default shared temporary tablespace.\\n\\n'})]\n",
-"[Document(page_content='If the answer to any preceding questions is yes, then the database stops the search and allocates space from the specified tablespace; otherwise, space is allocated from the database default shared temporary tablespace.', metadata={'_oid': '662f2f253acf96b33b430b88699490a2', '_rowid': 'AAAR/xAAEAAAAAnAAA', 'id': '662f2f253acf96b33b430b88699490a2$1$1', 'document_id': '1', 'document_summary': 'If the answer to any preceding questions is yes, then the database stops the search and allocates space from the specified tablespace; otherwise, space is allocated from the database default shared temporary tablespace.\\n\\n'})]\n"
-]
-}
-],
+"outputs": [],
 "source": [
 "query = \"What is Oracle AI Vector Store?\"\n",
 "filter = {\"document_id\": [\"1\"]}\n",
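The removed output above shows alternating plain, filtered, and scored result lists. A hedged sketch of calls that produce results of that shape, using standard `OracleVS`/`VectorStore` methods (the notebook's own search cell is truncated in this hunk):

```python
# Sketch: representative searches over the "oravs" store.
print(vectorstore.similarity_search(query, 1))                 # [Document(...)]
print(vectorstore.similarity_search(query, 1, filter=filter))  # [] when nothing matches
print(vectorstore.similarity_search_with_score(query, 1))      # [(Document, score)]
```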
@@ -872,7 +719,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.11.9"
+"version": "3.13.3"
 }
 },
 "nbformat": 4,
|
||||
@@ -1 +1 @@
|
||||
eNqFVW1sU1UYBrcfhgQlEhMlRg8NEBN229vbj63DKGNzOnRu0IqAWebpuaftZbf3XM85d1u3LIQh0YREcxMTEzXRSGmxzI0JBAySaBCjBH/rMEr4YdQoGn+YqInO99y1MGTB/mhOz/u8X8/7vKeT1WHKhcWc5VOWIynHRMIP4U9WOX3eo0K+UClSWWBmub8vnTnkcWtufUFKV7RHIti1wtiRBc5ci4QJK0aGo5EiFQLnqShnmVm6tPy38VARjw5KNkQdEWpHUd2It6BQAwU3z46HOLMpnEKeoDwEVsKgFEeqq4K1NjQxoDyYSW11Q2zsmVSLaYI5DpWaARF1w0gpR8mYXY/p4GIQU+Jhyy4NCoo5KQxyKjxbisE94KwcTCoIt1zVswJ3oAUcok7ecihiYClaY9REOcYRdOhyWoBGrGHagjAhHsdSnRwTSe4JCcB6hjB6WtCcZweOI+CDSsxDDgWEZOAgRihHAcGKboSzzJMI4nHoG9Fh+IYQPY4Lt6LAPNtEWYpwozxw5KWwasBSkEFBCrSIoYPxkAvToFxaAbfjoQAZnP7T6uJIqiSbsSHkuQGLJTegTkhuOfnQxATcKTVYnJqK3HrQgUVQlt1DiQTowES1QLEJmnqlXGBC+rM3qWQGiKOu1KhDmAkJ/PfyY5bbgkyas4HOGlFzDWTo14YodTVsA9+VBS//GHZd2yJY2SNqjFN1tWiqlpvNNSUqDbTmSP9UR6OOSH8JRO0gPRyLh41jo5qQ2HJsUKVmYyip4gb2M4sNLiZDEEerL4xfWXCeXoxhwj/ci0lf+oaQimn/MObFZPz44nvuOaAv6lc7+29OVzdeTxcLR6Ph1tkbAouSQ/zDOWwLOnuN5GsuNdiNmKYnNT063WDJBmnLgn8onmo9Alp1QX10fwVCSk9MlmEi9OJn1fp6vtP3RGOa3y67q9wF0/HPdnOrBRmtKE1dpHYPRZPterRdb0OP9WamOutpMksOYzbDQfo5GMijjeFXScFzhqhZ61xy7HOh622pZbNhH6VWf5tgWOqnX47ruj634ZZIDgtiOSpjOZZKpf4nLjBDpX9C9afpKc1ozSx0mYjvnkNLeS48cPV6KqoeqGjdLZDX62mg0S3RS9ejt+2u1YvWLNP/EM6DejS9Q+R6jQQpxLaYu2h/b7JrqMtJnBzViM08U5PwylMtEMSo9OdQnEQTRipptMbNeLYtnkglcjgWTyaTuqEn4/HsoWEL+7VoOIryjOVtOtPZrXVieHK0dCAbv9q166mO3p7OqZ3adpZlwF8GA88Oc2glTTnI0a8FqWHBOa2A+/aOXf6JNpIi2QSNJUjMMHI5om2BvWkI6JpAyup1CP5N9lUWXqRPvnjg4O3Lgk/Tky9/s/Xc5lUH3jovzu6TX6/45ezOI6OREP78jrW52OrY8edWlkZ6rI+PNl3+80rLD6tv6776+tjIuvvPXKEPP3Lnr6/NnBoYO3l5+sD+gQ2XtpfXbJo811Jq2nbymYO2e8/6bd2b3yA9masb7869f+HLf/rH55tf3PHVSyuvkA/61vxtf3e0+c0H86WP/vrx7Q3pWDqDm1dVP71w8fS9Wy8fOL2+OZza9OpPM8fe/v7dncRYtXFy70Ntj8/O/3x6ei/UPT/ftOz8H/et+B3O/wLqW+ZT
|
||||
eNrNWA2MHFUdL6CxEQI2aMVq9Lk5257d2bu93ftsmvS8a8u117v27oqlvXp5O/N29/Vm5w3z3uzetpSkKIQoDW4hIMW0Ktc7uZztVRoRoUmRQqoiKibilQIx2GgABZQqGCj+35vZ3dm9j0KCiZu93M7M+7//x+/3/5h383iWOJwy65JJagniYF3ABS/cPO6QG1zCxTfGMkSkmTG6qbd/4H7XodNfSgth87a6OmzTCLZE2mE21SM6y9Rlo3UZwjlOET6aYEb+zKLI7lAGjwwJNkwsHmqL1jfEw6HimlDb9t0hh5kk1BZyOXFC4ZDOwApLwI1r6RdDe3bAYmYQE651E7sG0WJao8aZZRGhmViAgSAkGDO9zSyckZsJnKVmfogT7OhpWEAt2xVDXE+TDA617Q7ZYDJxBJUm7A6Bn05e/jAI1x1qywjAJv1KGqmnSDBkMjaMXFvqy9tSCxcOtVKhPXJ/3QTjhgyWwdRSm2Ir35tUNlFBMurWDDH/BnYcnIfr0grLNU3lu0GS2DUhGtvVRdC6dmRSLhBLIl+pNNEhcnNdIM9zeQ3i8lFk0Bq0kP/ZwgkSacqRjR0IGMCOcmlitZVXRCNoIE2QBAWREdukOhVmHvmc4IhaSeZksLQFJR2WQdwmOk1SHeVIgoPHHC0nkVQkjAZDa6llIN2ksJwgAwvsSViY40iKZQdDtWW9DQG9GeCB5CLCFmJOClt0l6eQOQjIZkOEUY4CN13hq89DXMEx4oekyoSgzTghpeimNLPAVGVPu22bRBlTNqfLQgkm0kjHnPAwyjMXcdBnGsiQUctQiyh92JaMcqjy0IfDV759MFT2dIe0He5gqUtmDNyqBf8MAExUQVKBWJ8PZI6aJurt6b5eRoB4hksD/PATo6ReQxZDYDsEkjPX0YkvnCDIZ6sRKe/f6RENgQE9EBK0HIT9IBY5BWGrDUhI/pKR/w/a+3b4TKyg/ofMe1CGs4wCn6F8yOjMQfeZXCsmgJ7GVoqgBNyymPARBBpLxBUhLpYNUioHZVdyvZjflSkYTBZeShEojfOmhy1zAXbMUpLjRfvSUuR/kh4B2MqZUhGIDydZktSUOMsIYLj0+8s8mfP+skLZzmfJCY9+QwaxRboyIYjlZuBHKIE51aGLYCOLLZ0YoR0zO8o8OVGSr0yMDnAMWikv8h9Ad5ibSkN94ypoHldU5XQIsJ7TLJEPffsrs2TQ0zIYQsBjxGkGIFCdEFqmygYqDYU7VB8OIzAb01RawNocdgxQx3MwVQQiU7170XdQsNz3rFapkuaZZKSoK+zhg026CxASctCAe+XdHOyQYLKFJWdkTsNNTWEAxmAzzwFC+FqEVNa9YPOG/EyRqiLmo5CAAYNga35gktjkpBqWzmIigP6kR7hifRKuY8lgmgRiASRT6oGkDAIrkxYJMiICdawijP0yBRgacFzi+SvmK1tZyl2wrhwcT1dO5Wp5U05TlqwgYA5IEystMUKuZQCYAjgkIS8Vjf40y0FVgg2xQAkT68MoDVHi3pxk0mESBnQDFPDLDLQSF5ySJbyPWJjCKCi1YEdAdZmVMt0EZ2EBR2tlhKsIk2HQDgIMwGaJp+AqsENFEbDnbhJco5D8VfgLmiFDjqzKs+erAV0qHMoRMhyS0ygMvPA/Dyh+wMSVd6rp0U0zVPDAnFYqULabgE6XBtZLLkDFweUCLy1Oyso3a7jUeFAqkXOSozxeqf0QDMSUBdlQQtqbs1F7F+RPjitUoXkLqpvF2cnE8FxGaC4I1wJSpqxENrNdEzuSiRaFebyU02AeSjgMA9c8exQiPGjPoBd90A/SgwoC0Kc6icWBUwGIy2k1R/70KgR4G+wD+Moy1BBP10pvPTfQ8mZDXRZVLo/Vezc8tXDd1GjM7mxV26imm3R4dqaloCA72AR6yUDDvyS1ZHJ80A5R3qfqjcbvdaUmoQOyKQavNzKTGCzLqDo7R8y80u1vPqNyq0Qsle2uno7uLZ1dPetkIwI5v6A5Ga5KQ0Vd8Pjl8Qoc938rjB2iA5pwpWjskCRxVOMBzH07grk/Y9gbLEbQb2QZ7AwTAeZRKwtqZArABYGcYxnIK/lyojqIJwVtRzF+lm3lbdhTpZrc2GYmFYrH3GaO3FRp2yk7mus4kpEkK7VBfsMLN8Q4kQ/EQA0ewiE4A2lpUAzTe0/vgNd38zCy61imB/bfRTEf5kqrsgPMqAxBmWzANq9Xw5AB9PLec8tcYomdRBeSk9Uztk8PYqXkCFcmRrFDlwaIMMI6OAgAhtWQIRyXiwCDZLiSrqkEFYRyRpRdWE3SakxAqkd5lciblCsCFkFd3qTMLNXRgP/wgKMtfd3eXMMtatsKVDmzsiRQBB7480XREzUKwBJ/esvAUJAiEv5woNj4syL3XFE90pcPe7VUnjPAViaVZyZgCzY1KV0dBKivCgstAU0xEIwueQpRnJATEk8eOGKIQBLvGU8TWQH5CwsWjULeiMKxGYcsR0EZeKMRS2eyIxceTO2iNrhGkqYyYRcXxoQuj0jUS1thYpgQW8PS6gdHNC5fW2QpVqZDvAsPANWG1nVdt6ZnzNu6MCVHbhgDpHjdTs6sSb8raZI5Mx9PyN6iQbgsUXiovWhs3aY85L6F6iON8Uj9VFC1CeEujNnq+SPBBzZEDPbR/EOpwpgnfCS4hvHC4Y1Y7+2v2FIGsnAYO5mmeIWXjmtJRwvjHZtmqvMfltSNxyLRKHyPVezM85ZeOKzGu2MlKEoyEw31DTGtvkmrjz5UsTdQNa/pDFQUvl9/pBhBE5JKpAuj8Vhryw+BGTYQn3x9DOSEy28eBUzJU6fH/ROyH/RuKDPiM6OdgG/hxIALIEeb0XrXQqC7EUXjbQ3wbUbrNg5Mdvh6BmZF6tgAMJ1DimhrivQZ19OuBTSd6JiVMtNLyy5LiptyYtHUgZrmnepp6lZhLF4vP9Nfvuh6h8gklLqLMkvfhwxMNIXj0l8Z62jzgO9107bpZbNJA7NnmHi4RWlbcfH1ZRN9mWXvR2YOE5u3TYdmEy+O5751o/E5Q1FaWbZrNNba2nqRfee0KL5tGs0mWQWo53rNPCuDUHqr0byr5wRxwjdao0bhUfg9VB+NdmzuzNkN6/TrEls3p3uyuK8xmRp42DuN0oQksuy1GidQfKnIF6bDGTwiy9CqWLQx1gTWrCyedfW7iU7v9X4lgu5lwpz5cNnQ4IGJNGC6JtbaFGs0YgmNJJKGFm9tadZaWxuiWqKhocWIt0Sb40bT/VmKCxNQLlCKsZRJjupJTccw02peMhfGO6/vad/Y1TG5VetjCQaADGAAzoKZcKyfOFBAChO6yVwDCrdDxjrWan3t1xeOtzZGY6Ack5ZYU2ss0aqt+WrfVDGtS2k7Kqu+OmTfO+YNhU9csv8L31q4QH0ug7/33jM2b7jj96uvuLDiyXtOjG6583nn9Eff+ERi0cdqVl87uSneffbUL089/fTImX0TF3J3rV50250/eo385q3bzyb26S8f2PH2/vOvnzt/9VWvvXjTW//63Qt3Df7930enVt+3bNvAXftfWXj
57UsWnzx86hffvWZxy84Nlwy/+di5u5seuu/dpQ8snB47syV37sjE1OqjiZpbzz/+9pKdbx7/47N9L638ePuhx9799cJVay7t/2ttd/3wCz9ftrh3w+UnVu4/e81P45suv/KZzoNXvfqnaH/DdXvvndyqhx7Ofs25SXvkTK+5quXeT+eN9StanGefO/DoLbtf77nQfWtn5D+fv3LJ3nPfu0G8csWnWn52+sDV3+l59eo/n3pm7/mPHHq+8PiN7n4RPn7L0ROxFnrl3w58dmTroZbbDp5897dm3ev/OHHSaGssDPY9tqTrzJPOJ9/ZWLPrqZqDU3d/7it786vW71l7d+12ve3bNZO/euTln6x+7cbefXccP3L2nRXTO94+d+eFv8Sfe/H0P398svON0weX6N/c+pLzzh+eWFx3zyEV+MsWTL1Yd3L9pQsW/BfdSCzc
|
||||
@@ -1 +1 @@
|
||||
eNqFVX1sU1UU3yBREIiJX4kY4VLY0GSve6/tuo8/MKVjfIxtyAYOcJbb927Xt76++3jvvm2lzAjjDwN+PWUQoghhXWeaMjYYERE1zkxZ+HCJBDIiQqKJMRElUYMxmnle18KQBftHc3vP75x7zu/8zumO3laiGzJV89OyyoiORQY/DGtHr062mMRgO5NRwsJUSqypq2/oNnV5rCDMmGZUFBdjTXZilYV1qsmiU6TR4lahOEoMAzcTIxGkUuxK/t9xRxS3BxiNENVwVCCBd3mKkCOHgptNcYdOFQInh2kQ3QFWkUIqKrOv2sKYGSgaQyqOkuccHU22M5WIYhtFBZsS4dycQVWVMM4FwXmXq9yOwShVsuFtVxvOcKusxAIGwboYDujEMBVmBFrA2XaQiCHqsmaXb4N9aAKHiNosqwRRsETlrURCIaojKFbTSRhqkltJEcKiaOqY2SdVQkw3DQbA7AtOtM4gIVPJOLaBD4pRE6kEEIyCg9FGdJTh2mYe4SA1GYJ4OlCASCt8Q4iVqga3RpiaioSCBOFceuCox5x2AbINCRhimEQxVBB3aNAYojM5Q3PckUFmTv8pdXIkOyWF0ggytQyLMS1DncF0WW12dHTAnS0MWSeSTW42aNMkKA22EJEBtKmjN0ywBPJ6MxGmBrMG7hHMUSCOaIwjqkgleMA60rxV1oqQREIK0JkS7b5mFGmlIoRoHFaA7+SEl9WPNU2RRWzbi+02prPC4exc7jWnbH1xIDuVWR/6cnkUr4mBvlXEO90ep6u/nTMYllUFBMopGFJKahn7x5MNGhYjEIfLzo6VnHDum4yhhtVTg8W6+rtC2kxbPViPej3HJ9/rpgr6Ilavf829z2WNd55zOwXBWTpwV2AjpopWTwgrBhm4TfJtlxTMhpvjvRwv9OVYUkDaLGx1l/BlH4BWNVAf6UxCSGYaOxLQEXLuTG92Ug/XVee6+V3eI4lK6I71SZUuFyFXKaonGrJnDwneCl6o8HjQ8pqGtD/7TMOUzRho0EH6IWjIslzze8WwqUaIlPJP2fYxx52y7GFTYB4Zl11T0Cz7p5Xw8Dw/VnhfpA4DIqv2iwl3eXn5/8QFZgizBu36OL6cc5U2TFRZ4tk4hqbynNh12XySdj6Q0aL7IO/kk0Oj+6Knzsfj2ZjKJs3JknUazgFecOvrlpWuEr2Vonc58QXXR6pWY6n2RDsnKtSUOAYLn3AZQbQzawy5giVBF1/mFjwCCXqDJWUlPC9K7qDXzQtC0OPqbpWxlRKcAmqmtFkhR/1VnB/DyuHqM7Kxeis31PpqVvrTjdxaGqTAXwMGnlWqkmQ90UGOVirzNAy4TpLgvta3wRosE8vFoFfkyyTscvOuEm4pzE1OQLcFkrC3Q+aPZXtyYiMN5y+Zv3tGXuYz/cW3qqun+2Z/9dmqM8NnBy/emHXr0tKH/yh85iEH+fb1G6P7pQMjg86ea03JP4fqr+8bX/jDzr7huf4ratXlby5vad9Y+1PfhqsPvrSkL33he27uxU+7Dx/e8/Pe0c2PBV6pGSk4kQ7zK6rnRZTT57k5X3YuOpfobGmoqVpndj7wFC5Ynz5Yd81aNX9425MlS26+W3le6dpGN885crNr6MK8Ga8W0LbruxY1mkOHpsX3Lvz97YX9czw7l00729/4xdewh0+9kx59fvXQJX91y4G6zYFZvxSMR45pgx0zbzniZ55eUTRv1+fCP3t9obpnH89r2tMZmfneE4FDC94PzC7sUh8dKcRp6+TV8BuvBXZX/rr/5IJ0eZzygZc3HRzdss/sPra4tvGF4x/5Rv7aVxP68bfFQNT4+PS8g+n+7afy8/L+BTWeI00=
|
||||
eNrNWAtwXFUZLoI8qqMCFoRx4Li2pYG9m928k0ofJmknJSQl2VpIA+nZe8/unubuPbf3nLvJpuJYXoIU6FZAi6AiaQKxA+kAVmxBFCLMWCsjpRCqwICIIr6t4CCt/zn37jOPwozOmGknueee//yP7/sf91w1miYOp8w6bie1BHGwLuCBZ68adchGl3BxzUiKiCQzhld3dkfvcR06uSAphM2bKiuxTUPYEkmH2VQP6SxVmY5UpgjnOEH4cIwZmRdPbdoUSOHBPsH6icUDTZFwVU0wkNsTaFq3KeAwkwSaAi4nTiAY0BlYYQlYWJvE4jyOUhlk4RRZGrjychBkBjHhnW5i1yBatVarcWZZRGgmFmAsHCAYM72DpRjsFThNzUwfJ9jRk7CBWrYr+rieJCkcAPNsMJ84gkpzNgXAZycj/zAI1x1qy2jAId1KGqm3SDBkMtaPXFvqy9hSCxcOtRKBK+X5ugnG9RkshamlDsVWpjOubKKCpNTSFDF/ATsOzsBzfoflmqby3SBx7JoQmXXqodi65cikXCAWR75SaaJD5OG6QJ7n8hnE5atQr9VrIf9nDSdIJClHNnYgYEABNJAkVlNhRySEokmCJECIDNom1akwM8jnB0fUijMnhaUtKO6wFOI20Wmc6miAxDh4zNEiEkqEgqg3sIJaBtJNCtsJMrDAnoSFOQ4lWLo3UFHQW1WkNwWckLxE2ELMSWCLDnkKmYOAeDZEGA1Q4KkrfPUZiCs4RvyQlJlQbDOOSSm6OsksMFXZs9y2TaKMKZjTZqEYE0mkY054EGWYizjoMw1kyKilqEWUPmxLRjlUeejD4Stf1xsoeHq5tB1WsNQlsweWKsA/AwATZZCUINblAzlATRN1drRfJiNAPMOlAX74iZFXryGLIbAdAsmZ6+jEF44R5LPVCBXOb/GIhsCADggJWgTCfhBznIKwVRRJSP6Swf8P2vt2+Ewsof5/mfegDKcZBT5D+ZDRmYHuU7mWSwA9ia0EQTFYspjwEQQaS8QVIY6VDVJqAEqw5Houv0tTsDhZeD5FoDTOmh62zAU4MU3JAM/Zl5Qi/5P0KIKtkCklgfjvJEucmhJnGQEMj36vmSVz3l9WKNv5NDnh0a/PILZIliYEsdwU/BGIYU516CLYSGNLJ0bg8qkdZZacyMuXJkYzOAZtlef4D6A7zE0kob5xFTSPK6pyOgRYz2mayJe+/aVZ0utp6Q0g4DHiNAUQqE4ILVNlA5WGwgrV+4MIzMY0kRSwdwA7BqjjAzBhFEWm/PSc76Bgke9ZhVIlzTPJYE5X0MMHm3QIEBJy6IC1wmkOdkhxsgUlZ2ROw6KmMABjsJnhACH8swgprXvFzRvyM0HKipiPQgwGDIKt2YGJY5OTclhacokA+uMe4XL1SbiOJYNpEogFkEypB5IyCKxMWiTIoCiqYyVh7JYpwFDUcYnnr5itbKUpd8G6QnA8XQMqVwuHcpqwZAUBc0CaWEmJEXItA8AUwCEJeb5odCfZAFQlOBALFDOx3o+SECXuzUkm7SdBQLeIAn6ZgVbiglOyhHcRC1MYC6UW7AioLtNSpp3gNGzgaIWMcBlhUgzaQREDsJnnKbgK7FBRBOy5GwfXKCR/Gf6CpkifI6vy9PlqQJcKBgYI6Q/IaRSGX/idARQ/YOLKlXJ6tNMUFbxoTssXKNuNQadLAuslF6Di4EKBlxbHZeWbNlxqPMiXyBnJURiv1HkIBmLKitmQR9qbs9HyNsifAa5QheYtqG7mZicTw3sZoZkgXAFImbIS2cx2TexIJloU5vF8ToN5KOYwDFzz7FGI8GJ7er3og36Q7lUQgD7VSSwOnCqCuJBWM+RPp0KAN8E5gK8sQ1U1yQrprecGWlRvqMecykXVYW/BUwvPdbXG9M6WtY1yukmHp2daAgqyg02glww0/IpTSybHB+0QhXPKvmj8XpdvEjogm2DweSMzicG2lKqzM8TMK93+4VMqt0rEfNlu62huX9PS1rFSNiKQ8wuak+KqNJTUBY9fHq/Acf9vhbFDdEATnhSNHRInjmo8gLlvR3HuTxn2enMR9BtZCjv9RIB51EqDGpkC8EAg51gK8kp+nKgO4klB21GMn+ZYuQxnqlSTB9vMpELxmNvMkYcqbRtkR3MdRzKSpKU2yG/4+IYYxzJFMVCDh3AITkFaGhTD9N7RGfX6bgZGdh3L9MD+tyjm/VxpVXaAGaUhKJAN2Ob1ahgygF7ed26BSyy2gehCcrJ8xvbpQayEHOEKxMh16PwAEURYBwcBwKAaMoTjclHEIBmuuGsqQQWhnBFlF1aTtBoTkOpRXiXyJuWSgIVQmzcpM0t1NOA/vOBoTVe7N9dwi9q2AlXOrCwOFIEX/nyR80SNArDFn95SMBQkiIQ/WFRs/FmRe66oHunLB71aKu8Z4CiTyvsTsAWbmpQuDwLUV4WFFoOmWBSMNnkLkZuQYxJPXnTFEIIkvnI0SWQF5C/NOXUY8kZkd025cHkAlIE3GrF0Jjty9sHEELXBNRI3lQlDXBhjurwiUR9t2bF+QmwNS6sfHNS4/GyRpViZDvHO3gdU61vZ9oXWjhHv6Oy4HLlhDJDilRs4s3b6XUmTzJn6ekz2Fg3CZYns7uU5YytXZyD3LRQO1daEwuPFqk0Id3bEVu/3FL+wIWJwjuZfUGVHPOH7i/cwnt1xMdY7u0uOlIHM7sBOqq6mxEvHtaSj2dHm1VPV+S/z6karQ5EI/NtVcjLPWHp2hxrvduWhyMuMVYWrqrVwnRaO7C45G6ia0XQGKrJ3h+/PRdCEpBLJ7HBNTSR8LzDDBuKTq0dATrj8qmHAlOx7etS/Lftu50UFRpw13AL4Zh+NugBypB6tci0EumtRpKapqrYp0oBWXhzd2ezriU6L1K4oMJ1DimitOfqM6knXApqONU9LmcmFBZclxU05sWjqQk3zbvg0tZQdqQnLn8nzj7nfITIJpe6czML3IQMTTfYh6a+MdaQ+6ntd3zN53nTSwOwpJu5oUNouOPb+gom+zHnvR2YGExt6JgPTiefGc9+64ZoZQ5HfWbBruLqxsfEY585oUV3PJJpOsgxQz/X5s+wshtLbjWbdPSOIY77RGjWye+HvvnAk0nxJywC361sTkTU1G6vWWtFVuD39iHcbpQlJZNlrNU6g+FKRyU4GU3hQlqELqyO11XVgzeLcXVe3G2vxPu8XI+heJsyZjxQMLb4wkQZMzq9urKuuNapjGonFDa2msaFea2ysimixqqoGo6YhUl9j1N2Tpjg7BuUCJRhLmOQBPa7pGGZazUvm7GjLZR3LL25r3nmp1sViDACJYgDOgplwpJs4UECyY7rJXAMKt0NGmldoXcsvyz7UWBuprq6P6LUNYb2xOtaota7tGs+ldT5th2XVVxfum0e8oXDiuEPn3njyHPVzPPw/elR0/XjDoWWfeO82HHrs6Z+mv9Yl/rDjwPDm04YumNiSfurgtue2/HDx91pq33s78LPr7tj67E82sZf3ZvadcvVTZ+/54q8eqL9l6W/oO0cOH+aZ1W88ds3Koa2s89K7b7q
w8/z922/eMm/hTacva/yKOf+6eek3G2M3hbr+uvff43v74vPwOuv5Q3t+PrRnYu5dX931qYNLO9d2v/b879/9130bTzrQf0B/54l5NRPrT7qC4Gd++fiy/Rv3r587/7Pf2LBvvXbW3uQN52zRr7kZH9j2nTduXbPo81uvHrk5eO+yj/7izJ57X5v31uPZL6/69Geqn2hpuG1z97cOzz9nVXz89s0nzs3e8fTfX5l7xYuv9NgLFpxx42vm6U9OvHrWyVXddwy+ed8pkRcaVt6F9z/z9YUnDpze+frnKq7feNr3L1pyyR7rpdHw1sjvFr97yT0T6PqPN//l2lc/ueT1eW997NmDH1l65vqhdc8daP3wntaTtMPvRYPz//SD/obl4Ybk0LP75h49Mhb+5j/fffLWNeMrzvjR7m0nVLb89uVrJyIHu2I7x++8ZXQJ3vrYCy/9Y/sC/dFHDn1p5zPZ/syfT/v2H9/eve3wbZ3tR7TRiSMrl/x67dHh6J090du333l237btPcPGjhNWDVfc0Fqx6uGH5ygEj59Td8K5W/72oTlz/gMJcU9B
|
||||
@@ -1 +0,0 @@
|
||||
eNqNVktv20YQRi75HQtdlATiU9Tzpjh2GzgvVA6KICiIDTmStloumd2lHjF8aPq46y80rl0EaZtTb7n01EN/QX5NZ5emIhvuAxBAzrczs/P4ZqjX5wuQiuXixjsmNEiaaBTU5vW5hJclKP3dWQZ6lqenTx6Pj96Ukn2YaV2ooefRgrmaLhhfu0meeQqoTGanL/J0/fHGzeMGHsdzWDeGpKEXfO3oo3ud3mj8xXwf7pYvHz/rr9LO4aGG1TjyH84f9Rot0sAbpbVYzqgmTBE9A7IEig9JmCDjA6OV0VUsQZVcK9QNEanujlMo9MyY03RBRQKp0WYi4WUKcZpnlAlj8fwrhGF1LVxrU6GWIBGdUK5g50DSZZzkWCqhrzllGZ2Cqg9OzmdAU6zvD2+fKpDOaIpWm9+LNRZUOBcFVl7otvH32yhJMH5nXyR5ysR088v0FStaJIUJpxrOquPNmzvenbd7uRBgO7V5OwcoHMrZAn7dq+JyHoCY6tnmTdgN39XY0bqAzXtaFJwl1Fh6X6tc/Ix1LLDf8O2Z0lSX6vUp3g5//XmegVKYy4+PD+skvv8PX1evPw26fng2BokM2/wkpkysTu9hJpsPB5K1SNgjYyhI6IcRCbpDP8Af+ezh0T8kZ4n1DUYpsTYfb949rsnyb1xpTHLO82VcFrGttaF2YyhKzluNusOVVDcOSdDYcuv5cUMzzQEv+XLHMRXkQCK/mEpyvKOUHBXqqVgul+5FFGZAzGSgzpYxjeMmz6uiNYfkuCloBvjSvOS02SJNCdNKp7mH+U9yKRg1eJKXQsu1OXgqmIaUjLFxoEg+IaMMJHbEqCFlUKXdc3t9I1lPThCGboTz0tSvYpYaFxcW3oNcxSMxBQ7KWmOIXLMMYijyZIaaQS/sRZ2w3/Z3j40L00DHHzjYT38w9P3miYmylBLztSlyqjR2IMUo0yv+goFv/e1oXHXZH0YdE5KGrIgTY9lxu7U8QbnruwHKTMUpNWUJbJFEyrY1xh2jbY1LIdbGF0uqynpekoqr3boQvW606kYe+vSCoO0WYlpVPzVJB77vmzSXTKRxVpiEItevgbkFum6nBlKYSjB2vWALMWki2H+0b9wWSDlVSoizF9Z7EFpvW5iZcMOB2+9XaMKKOMsQ82s9g1itCpmVGcMKmHr0TbUSnpemtOZsAsAVZ3PYLecncKemJtJkxjivNNvbFCvQaHY6Nk1cERpxWFWa4RXQakYWTGFZ5PipqRQDN9rFrF5lvMCGzk2KQddea+SMGX4OycAi5cIw3L5OSyRQ1Ye+tbZA1QcMu3tygjOIgyVxlH13MOj1/aCHg767y80eOGl9mvjPcxzsNcHJgwT5aV4uj36LfJrMFnk6HhGHmKGgIjVMNmS6djtc0dkSrlTUU1Q4k/oCb2ZDuLQ+TFTOi7VjnuTg/8fmknqDHeXI6a10kWUQkXuIbh0+w2UJ0pD/iRFr7T2Oe1IDuTXCnW4W5m2yV006XyNrQvLH+wOXjHG/oTWuJks75ZJbtQPzkcGxHF6Oltw3fz2EPaKcjJgscqlt3LfRHQDJMDBysVTqJe9e6mkfP3ZRu3NdU6uVbj9zsSl+Y9h2o8HJ3wjDCKE=
|
||||
@@ -1 +0,0 @@
|
||||
GH8+8qCQqBksaE9GjtaugyakSE4nq7Mj0Bb6I3WG2b7i0WqJK8eQ8uBlQInKDUh+LNiRjyRjGcRKJRxuiDQquTzfI6bDyI+/jh+TwePPUDi6CjE6dQvFkUMfDPD0tjkUSsXo46aLSp5VuN93qTZOLEExbGrQ1D8oZScbQ8UgeRIvjrX3tzatTLQ/3StVYk/zOpDhumAy68cxYN8SE2MUJWTNdBbKtTcZalktdzX3FQ42JCBwg1hiTY+G4LNdLSyGDlaSVkTnKU/U41sD2nFycVGOLA03xeCywCOrO4mQ9hEn8J8LWMa+5OD5779y7Lp0lfuZs39nMXkBXfuPVdz59z+RzRbI3d8vZY7qtxuj8PT8/sHNoC7mJPpt578VFbTv2HnpiVeiat/8uvSq92xt+7PDvNz52+sa7D526Z+4Vr133ym0nn7z/y+65v/2p+5H3jr7zr9PWrnVnXpAGlIv/+nF0JvXWjbvf+mjbnqsPXo4utU+cfmf3xInVh/dfctOarU88dKr75E308LNk03Or+3745Ok/jp7ac++V/7z3/QWfk88OJSc7xl/aavYdHj568Mo9f95qPrVs+2tbrnvR+sSeHfOuee34RV9s+khD70+Pf+q+T37mvh1nY723NNRd/vTeBQ/1fWf7d2e/fM3Ox/Eld+Xn21+5P/rbd7c8+va378k/f+aIOeeOO0JX37HtgSve7A3nGrZ1PPW6Nffpkc2XvYqHjcnfbX//7fgbd96WjCy9KnPdxP3HP/bKhovnHbpz8szQwj9c+5t5u5ZLO59JzjkefPNc5sa/LP7oif1fu+Hdy9oue+D6w1/d8szrhZd+ELz+1OMnfvnrlmsvf+PSl8fvfXrnN+9esrR5/s7P/2PbL14s/uzhXbnFF81980hwl7n+W0fUi/r3z6vbcXwA4nHu3JxZZw8sG2qaM2vWvwFt8Hmz
|
||||
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -57,7 +57,7 @@ Despite the flexibility of the retriever interface, a few common types of retrie
### Search apis

It's important to note that retrievers don't need to actually *store* documents.
For example, we can be built retrievers on top of search APIs that simply return search results!
For example, we can build retrievers on top of search APIs that simply return search results!
See our retriever integrations with [Amazon Kendra](/docs/integrations/retrievers/amazon_kendra_retriever/) or [Wikipedia Search](/docs/integrations/retrievers/wikipedia/).
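As an editorial aside, a minimal sketch of what such a retriever can look like, following LangChain's custom-retriever pattern; `search_api_call` is a hypothetical stand-in for a real search client:

```python
from typing import List

from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever


class SearchAPIRetriever(BaseRetriever):
    """Hypothetical retriever that wraps an external search API."""

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        # The retriever stores nothing itself; it just forwards the query
        # to the search API and wraps each hit in a Document.
        results = search_api_call(query)
        return [Document(page_content=r["snippet"]) for r in results]
```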
### Relational or graph database

@@ -11,8 +11,8 @@ This need motivates the concept of structured output, where models can be instru

## Key concepts

**(1) Schema definition:** The output structure is represented as a schema, which can be defined in several ways.
**(2) Returning structured output:** The model is given this schema, and is instructed to return output that conforms to it.
1. **Schema definition:** The output structure is represented as a schema, which can be defined in several ways.<br/>
2. **Returning structured output:** The model is given this schema, and is instructed to return output that conforms to it.
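For illustration, a minimal sketch of the first step, defining a schema with Pydantic; the `Person` model and its fields are hypothetical examples, not part of the original docs:

```python
from pydantic import BaseModel, Field


class Person(BaseModel):
    """Information about a person."""

    name: str = Field(description="The person's name")
    age: int = Field(description="The person's age in years")
```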

## Recommended usage

@@ -109,11 +109,11 @@ ai_msg

There are a few challenges when producing structured output with the above methods:

(1) When tool calling is used, tool call arguments needs to be parsed from a dictionary back to the original schema.
1. When tool calling is used, tool call arguments need to be parsed from a dictionary back to the original schema.<br/>

(2) In addition, the model needs to be instructed to *always* use the tool when we want to enforce structured output, which is a provider specific setting.
2. In addition, the model needs to be instructed to *always* use the tool when we want to enforce structured output, which is a provider-specific setting.<br/>

(3) When JSON mode is used, the output needs to be parsed into a JSON object.
3. When JSON mode is used, the output needs to be parsed into a JSON object.

With these challenges in mind, LangChain provides a helper function (`with_structured_output()`) to streamline the process, as sketched below.
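As a hedged illustration of that helper (the model choice is an assumption; any chat model that supports `with_structured_output` works, and the hypothetical `Person` schema from the sketch above is reused):

```python
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4o-mini")

# Binds the schema to the model and parses the response back into a
# Person instance, smoothing over the challenges listed above.
structured_llm = llm.with_structured_output(Person)
person = structured_llm.invoke("Anna is 29 years old.")
```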
@@ -114,12 +114,12 @@ result = llm_with_tools.invoke("What is 2 multiplied by 3?")
```

As before, the output `result` will be an `AIMessage`.
But, if the tool was called, `result` will have a `tool_calls` attribute.
But, if the tool was called, `result` will have a `tool_calls` [attribute](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.AIMessage.html#langchain_core.messages.ai.AIMessage.tool_calls).
This attribute includes everything needed to execute the tool, including the tool name and input arguments:

```
result.tool_calls
{'name': 'multiply', 'args': {'a': 2, 'b': 3}, 'id': 'xxx', 'type': 'tool_call'}
[{'name': 'multiply', 'args': {'a': 2, 'b': 3}, 'id': 'xxx', 'type': 'tool_call'}]
```
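For illustration, a minimal sketch of acting on that attribute, assuming `multiply` is the `@tool`-decorated function bound to the model in this example:

```python
# Each entry in tool_calls pairs a tool name with already-parsed arguments.
for tool_call in result.tool_calls:
    if tool_call["name"] == "multiply":
        # Invoking the tool with the parsed args executes it locally.
        answer = multiply.invoke(tool_call["args"])
```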

For more details on usage, see our [how-to guides](/docs/how_to/#tools)!
@@ -137,6 +137,16 @@ For more details on usage, see our [how-to guides](/docs/how_to/#tools)!

:::

## Forcing tool use

By default, the model has the freedom to choose which tool to use based on the user's input. However, in certain scenarios, you might want to influence the model's decision-making process. LangChain allows you to enforce tool choice (using `tool_choice`), ensuring the model uses either a particular tool or *any* tool from a given list. This is useful for structuring the model's behavior and guiding it towards a desired outcome. A brief sketch follows.
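As a non-authoritative sketch (the `multiply` tool and the model are carried over from the example above):

```python
# Force the model to call the "multiply" tool on every invocation.
llm_forced = llm.bind_tools([multiply], tool_choice="multiply")

# Or require that *some* tool is called, without naming a specific one.
llm_any = llm.bind_tools([multiply], tool_choice="any")
```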

:::info[Further reading]

* See our [how-to guide](/docs/how_to/tool_choice) on forcing tool use.

:::

## Best practices

When designing [tools](/docs/concepts/tools/) to be used by a model, it is important to keep in mind that:

File diff suppressed because one or more lines are too long
@@ -106,11 +106,11 @@
{
"data": {
"text/plain": [
"{'title': 'My tool',\n",
" 'type': 'object',\n",
" 'properties': {'a': {'title': 'A', 'type': 'integer'},\n",
" 'b': {'title': 'B', 'type': 'array', 'items': {'type': 'integer'}}},\n",
" 'required': ['a', 'b']}"
"{'properties': {'a': {'title': 'A', 'type': 'integer'},\n",
" 'b': {'items': {'type': 'integer'}, 'title': 'B', 'type': 'array'}},\n",
" 'required': ['a', 'b'],\n",
" 'title': 'My tool',\n",
" 'type': 'object'}"
]
},
"execution_count": 3,
@@ -121,7 +121,7 @@
"source": [
"print(as_tool.description)\n",
"\n",
"as_tool.args_schema.schema()"
"as_tool.args_schema.model_json_schema()"
]
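These hunks track Pydantic v2's rename of `.schema()` to `.model_json_schema()`. A self-contained sketch of the v2 call, with a hypothetical `MyTool` model standing in for the notebook's tool schema:

```python
from pydantic import BaseModel


class MyTool(BaseModel):
    a: int
    b: list[int]


# Pydantic v2 replacement for the deprecated v1 `.schema()` method;
# both return the model's JSON Schema as a dict.
print(MyTool.model_json_schema())
```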
},
{
@@ -449,10 +449,11 @@
{
"data": {
"text/plain": [
"{'title': 'RunnableParallel<context,question,answer_style>Input',\n",
" 'type': 'object',\n",
" 'properties': {'question': {'title': 'Question'},\n",
" 'answer_style': {'title': 'Answer Style'}}}"
"{'properties': {'question': {'title': 'Question'},\n",
" 'answer_style': {'title': 'Answer Style'}},\n",
" 'required': ['question', 'answer_style'],\n",
" 'title': 'RunnableParallel<context,question,answer_style>Input',\n",
" 'type': 'object'}"
]
},
"execution_count": 14,
@@ -461,12 +462,12 @@
}
],
"source": [
"rag_chain.input_schema.schema()"
"rag_chain.input_schema.model_json_schema()"
]
},
{
"cell_type": "code",
"execution_count": 17,
"execution_count": 15,
"id": "a3f9cf5b-8c71-4b0f-902b-f92e028780c9",
"metadata": {},
"outputs": [],

File diff suppressed because one or more lines are too long
@@ -212,6 +212,10 @@
"[Anthropic](/docs/integrations/chat/anthropic/), and\n",
"[Google Gemini](/docs/integrations/chat/google_generative_ai/)) will accept PDF documents.\n",
"\n",
":::note\n",
"OpenAI requires file-names be specified for PDF inputs. When using LangChain's format, include the `filename` key. See [example below](#example-openai-file-names).\n",
":::\n",
"\n",
"### Documents from base64 data\n",
"\n",
"To pass documents in-line, format them as content blocks of the following form:\n",
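The notebook's own example is not reproduced in this diff, so here is an editorial sketch of such a content block; the key names follow LangChain's standard base64 file block, and the file path and prompt text are placeholders:

```python
import base64

with open("example.pdf", "rb") as f:  # hypothetical local file
    pdf_base64 = base64.b64encode(f.read()).decode("utf-8")

message = {
    "role": "user",
    "content": [
        {"type": "text", "text": "Summarize this document."},
        {
            "type": "file",
            "source_type": "base64",
            "data": pdf_base64,
            "mime_type": "application/pdf",
            "filename": "example.pdf",  # per the note above, required for OpenAI
        },
    ],
}
```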
@@ -99,7 +99,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"We can also just force our tool to select at least one of our tools by passing in the \"any\" (or \"required\" which is OpenAI specific) keyword to the `tool_choice` parameter."
"We can also just force our model to select at least one of our tools by passing in the \"any\" (or \"required\" [which is OpenAI-specific](https://python.langchain.com/api_reference/openai/chat_models/langchain_openai.chat_models.base.BaseChatOpenAI.html#langchain_openai.chat_models.base.BaseChatOpenAI.bind_tools)) keyword to the `tool_choice` parameter."
]
},
{

@@ -182,7 +182,7 @@
}
],
"source": [
"update_favorite_pets.get_input_schema().schema()"
"update_favorite_pets.get_input_schema().model_json_schema()"
]
},
{
@@ -223,7 +223,7 @@
}
],
"source": [
"update_favorite_pets.tool_call_schema.schema()"
"update_favorite_pets.tool_call_schema.model_json_schema()"
]
},
{
@@ -500,7 +500,7 @@
" user_to_pets[user_id] = pets\n",
"\n",
"\n",
"update_favorite_pets.get_input_schema().schema()"
"update_favorite_pets.get_input_schema().model_json_schema()"
]
},
{
@@ -534,7 +534,7 @@
}
],
"source": [
"update_favorite_pets.tool_call_schema.schema()"
"update_favorite_pets.tool_call_schema.model_json_schema()"
]
},
{
@@ -583,7 +583,7 @@
" user_to_pets[user_id] = pets\n",
"\n",
"\n",
"UpdateFavoritePets().get_input_schema().schema()"
"UpdateFavoritePets().get_input_schema().model_json_schema()"
]
},
{
@@ -617,7 +617,7 @@
}
],
"source": [
"UpdateFavoritePets().tool_call_schema.schema()"
"UpdateFavoritePets().tool_call_schema.model_json_schema()"
]
},
{
@@ -659,7 +659,7 @@
" user_to_pets[user_id] = pets\n",
"\n",
"\n",
"UpdateFavoritePets2().get_input_schema().schema()"
"UpdateFavoritePets2().get_input_schema().model_json_schema()"
]
},
{
@@ -692,7 +692,7 @@
}
],
"source": [
"UpdateFavoritePets2().tool_call_schema.schema()"
"UpdateFavoritePets2().tool_call_schema.model_json_schema()"
]
}
],

@@ -568,6 +568,26 @@
" ```\n",
" and specifying `\"cache_control\": {\"type\": \"ephemeral\", \"ttl\": \"1h\"}`.\n",
"\n",
" Details of cached token counts will be included on the `InputTokenDetails` of the response's `usage_metadata`:\n",
"\n",
" ```python\n",
" response = llm.invoke(messages)\n",
" response.usage_metadata\n",
" ```\n",
" ```\n",
" {\n",
" \"input_tokens\": 1500,\n",
" \"output_tokens\": 200,\n",
" \"total_tokens\": 1700,\n",
" \"input_token_details\": {\n",
" \"cache_read\": 0,\n",
" \"cache_creation\": 1000,\n",
" \"ephemeral_1h_input_tokens\": 750,\n",
" \"ephemeral_5m_input_tokens\": 250,\n",
" }\n",
" }\n",
" ```\n",
"\n",
":::"
]
},
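For context, a hedged sketch of the message shape that opts a content block into the one-hour cache described above; the document text and surrounding setup are placeholders, not the notebook's own code:

```python
long_document_text = "..."  # placeholder for the large context worth caching

messages = [
    {
        "role": "user",
        "content": [
            {
                "type": "text",
                "text": long_document_text,
                # Marks this block as cacheable with a 1-hour TTL.
                "cache_control": {"type": "ephemeral", "ttl": "1h"},
            },
            {"type": "text", "text": "Answer questions using the document above."},
        ],
    }
]
```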
@@ -1,269 +1,327 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Google Cloud Vertex AI\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e49f1e0d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# ChatVertexAI\n",
|
||||
"\n",
|
||||
"This page provides a quick overview for getting started with VertexAI [chat models](/docs/concepts/chat_models). For detailed documentation of all ChatVertexAI features and configurations head to the [API reference](https://python.langchain.com/api_reference/google_vertexai/chat_models/langchain_google_vertexai.chat_models.ChatVertexAI.html).\n",
|
||||
"\n",
|
||||
"ChatVertexAI exposes all foundational models available in Google Cloud, like `gemini-1.5-pro`, `gemini-1.5-flash`, etc. For a full and updated list of available models visit [VertexAI documentation](https://cloud.google.com/vertex-ai/docs/generative-ai/model-reference/overview).\n",
|
||||
"\n",
|
||||
":::info Google Cloud VertexAI vs Google PaLM\n",
|
||||
"\n",
|
||||
"The Google Cloud VertexAI integration is separate from the [Google PaLM integration](/docs/integrations/chat/google_generative_ai/). Google has chosen to offer an enterprise version of PaLM through GCP, and this supports the models made available through there.\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/google_vertex_ai) | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| [ChatVertexAI](https://python.langchain.com/api_reference/google_vertexai/chat_models/langchain_google_vertexai.chat_models.ChatVertexAI.html) | [langchain-google-vertexai](https://python.langchain.com/api_reference/google_vertexai/index.html) | ❌ | beta | ✅ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| ✅ | ✅ | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To access VertexAI models you'll need to create a Google Cloud Platform account, set up credentials, and install the `langchain-google-vertexai` integration package.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"To use the integration you must:\n",
|
||||
"- Have credentials configured for your environment (gcloud, workload identity, etc...)\n",
|
||||
"- Store the path to a service account JSON file as the GOOGLE_APPLICATION_CREDENTIALS environment variable\n",
|
||||
"\n",
|
||||
"This codebase uses the `google.auth` library which first looks for the application credentials variable mentioned above, and then looks for system-level auth.\n",
|
||||
"\n",
|
||||
"For more information, see:\n",
|
||||
"- https://cloud.google.com/docs/authentication/application-default-credentials#GAC\n",
|
||||
"- https://googleapis.dev/python/google-auth/latest/reference/google.auth.html#module-google.auth\n",
|
||||
"\n",
|
||||
"To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain VertexAI integration lives in the `langchain-google-vertexai` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install -qU langchain-google-vertexai"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate chat completions:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_google_vertexai import ChatVertexAI\n",
|
||||
"\n",
|
||||
"llm = ChatVertexAI(\n",
|
||||
" model=\"gemini-1.5-flash-001\",\n",
|
||||
" temperature=0,\n",
|
||||
" max_tokens=None,\n",
|
||||
" max_retries=6,\n",
|
||||
" stop=None,\n",
|
||||
" # other params...\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2b4f3e15",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invocation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "62e0dbc3",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"J'adore programmer. \\n\", response_metadata={'is_blocked': False, 'safety_ratings': [{'category': 'HARM_CATEGORY_HATE_SPEECH', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_DANGEROUS_CONTENT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_HARASSMENT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}], 'usage_metadata': {'prompt_token_count': 20, 'candidates_token_count': 7, 'total_token_count': 27}}, id='run-7032733c-d05c-4f0c-a17a-6c575fdd1ae0-0', usage_metadata={'input_tokens': 20, 'output_tokens': 7, 'total_tokens': 27})"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"messages = [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"I love programming.\"),\n",
|
||||
"]\n",
|
||||
"ai_msg = llm.invoke(messages)\n",
|
||||
"ai_msg"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"J'adore programmer. \n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(ai_msg.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Ich liebe Programmieren. \\n', response_metadata={'is_blocked': False, 'safety_ratings': [{'category': 'HARM_CATEGORY_HATE_SPEECH', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_DANGEROUS_CONTENT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_HARASSMENT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}], 'usage_metadata': {'prompt_token_count': 15, 'candidates_token_count': 8, 'total_token_count': 23}}, id='run-c71955fd-8dc1-422b-88a7-853accf4811b-0', usage_metadata={'input_tokens': 15, 'output_tokens': 8, 'total_tokens': 23})"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | llm\n",
|
||||
"chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"input_language\": \"English\",\n",
|
||||
" \"output_language\": \"German\",\n",
|
||||
" \"input\": \"I love programming.\",\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all ChatVertexAI features and configurations, like how to send multimodal inputs and configure safety settings, head to the API reference: https://python.langchain.com/api_reference/google_vertexai/chat_models/langchain_google_vertexai.chat_models.ChatVertexAI.html"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "poetry-venv-2",
|
||||
"language": "python",
|
||||
"name": "poetry-venv-2"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Google Cloud Vertex AI\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e49f1e0d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# ChatVertexAI\n",
|
||||
"\n",
|
||||
"This page provides a quick overview for getting started with VertexAI [chat models](/docs/concepts/chat_models). For detailed documentation of all ChatVertexAI features and configurations head to the [API reference](https://python.langchain.com/api_reference/google_vertexai/chat_models/langchain_google_vertexai.chat_models.ChatVertexAI.html).\n",
|
||||
"\n",
|
||||
"ChatVertexAI exposes all foundational models available in Google Cloud, like `gemini-1.5-pro`, `gemini-1.5-flash`, etc. For a full and updated list of available models visit [VertexAI documentation](https://cloud.google.com/vertex-ai/docs/generative-ai/model-reference/overview).\n",
|
||||
"\n",
|
||||
":::info Google Cloud VertexAI vs Google PaLM\n",
|
||||
"\n",
|
||||
"The Google Cloud VertexAI integration is separate from the [Google PaLM integration](/docs/integrations/chat/google_generative_ai/). Google has chosen to offer an enterprise version of PaLM through GCP, and this supports the models made available through there.\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/google_vertex_ai) | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| [ChatVertexAI](https://python.langchain.com/api_reference/google_vertexai/chat_models/langchain_google_vertexai.chat_models.ChatVertexAI.html) | [langchain-google-vertexai](https://python.langchain.com/api_reference/google_vertexai/index.html) | ❌ | beta | ✅ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| ✅ | ✅ | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To access VertexAI models you'll need to create a Google Cloud Platform account, set up credentials, and install the `langchain-google-vertexai` integration package.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"To use the integration you must:\n",
|
||||
"- Have credentials configured for your environment (gcloud, workload identity, etc...)\n",
|
||||
"- Store the path to a service account JSON file as the GOOGLE_APPLICATION_CREDENTIALS environment variable\n",
|
||||
"\n",
|
||||
"This codebase uses the `google.auth` library which first looks for the application credentials variable mentioned above, and then looks for system-level auth.\n",
|
||||
"\n",
|
||||
"For more information, see:\n",
|
||||
"- https://cloud.google.com/docs/authentication/application-default-credentials#GAC\n",
|
||||
"- https://googleapis.dev/python/google-auth/latest/reference/google.auth.html#module-google.auth\n",
|
||||
"\n",
|
||||
"To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain VertexAI integration lives in the `langchain-google-vertexai` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install -qU langchain-google-vertexai"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate chat completions:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_google_vertexai import ChatVertexAI\n",
|
||||
"\n",
|
||||
"llm = ChatVertexAI(\n",
|
||||
" model=\"gemini-1.5-flash-001\",\n",
|
||||
" temperature=0,\n",
|
||||
" max_tokens=None,\n",
|
||||
" max_retries=6,\n",
|
||||
" stop=None,\n",
|
||||
" # other params...\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2b4f3e15",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invocation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "62e0dbc3",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"J'adore programmer. \\n\", response_metadata={'is_blocked': False, 'safety_ratings': [{'category': 'HARM_CATEGORY_HATE_SPEECH', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_DANGEROUS_CONTENT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_HARASSMENT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}], 'usage_metadata': {'prompt_token_count': 20, 'candidates_token_count': 7, 'total_token_count': 27}}, id='run-7032733c-d05c-4f0c-a17a-6c575fdd1ae0-0', usage_metadata={'input_tokens': 20, 'output_tokens': 7, 'total_tokens': 27})"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"messages = [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"I love programming.\"),\n",
|
||||
"]\n",
|
||||
"ai_msg = llm.invoke(messages)\n",
|
||||
"ai_msg"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"J'adore programmer. \n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(ai_msg.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "28ccabbb-a450-403c-8de1-fb077e0b5d3d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Built-in tools\n",
|
||||
"\n",
|
||||
"Gemini supports a range of tools that are executed server-side.\n",
|
||||
"\n",
|
||||
"### Google search\n",
|
||||
"\n",
|
||||
":::info Requires ``langchain-google-vertexai>=2.0.11``\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"Gemini can execute a Google search and use the results to [ground its responses](https://ai.google.dev/gemini-api/docs/grounding):"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "ffdbec37-85f8-4755-bd72-47efaecfe944",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_google_vertexai import ChatVertexAI\n",
|
||||
"\n",
|
||||
"llm = ChatVertexAI(model=\"gemini-2.0-flash-001\").bind_tools([{\"google_search\": {}}])\n",
|
||||
"\n",
|
||||
"response = llm.invoke(\"What is today's news?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f63824f5-7d6a-4ad7-aa17-1f5c44119a21",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Code execution\n",
|
||||
"\n",
|
||||
":::info Requires ``langchain-google-vertexai>=2.0.25``\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"Gemini can [generate and execute Python code](https://ai.google.dev/gemini-api/docs/code-execution):"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "aa079529-ef1c-463d-9d25-6390423a328d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_google_vertexai import ChatVertexAI\n",
|
||||
"\n",
|
||||
"llm = ChatVertexAI(model=\"gemini-2.0-flash-001\").bind_tools([{\"code_execution\": {}}])\n",
|
||||
"\n",
|
||||
"response = llm.invoke(\"What is 3^3?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Ich liebe Programmieren. \\n', response_metadata={'is_blocked': False, 'safety_ratings': [{'category': 'HARM_CATEGORY_HATE_SPEECH', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_DANGEROUS_CONTENT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_HARASSMENT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}], 'usage_metadata': {'prompt_token_count': 15, 'candidates_token_count': 8, 'total_token_count': 23}}, id='run-c71955fd-8dc1-422b-88a7-853accf4811b-0', usage_metadata={'input_tokens': 15, 'output_tokens': 8, 'total_tokens': 23})"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | llm\n",
|
||||
"chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"input_language\": \"English\",\n",
|
||||
" \"output_language\": \"German\",\n",
|
||||
" \"input\": \"I love programming.\",\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all ChatVertexAI features and configurations, like how to send multimodal inputs and configure safety settings, head to the API reference: https://python.langchain.com/api_reference/google_vertexai/chat_models/langchain_google_vertexai.chat_models.ChatVertexAI.html"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
|
||||
@@ -58,7 +58,9 @@
"cell_type": "markdown",
"id": "72ee0c4b-9764-423a-9dbf-95129e185210",
"metadata": {},
"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
"source": [
"To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
]
},
{
"cell_type": "code",
@@ -98,12 +100,19 @@
"source": [
"## Instantiation\n",
"\n",
"Now we can instantiate our model object and generate chat completions:"
"Now we can instantiate our model object and generate chat completions. \n",
"\n",
"\n",
":::note Reasoning Format\n",
"\n",
"If you choose to set a `reasoning_format`, you must ensure that the model you are using supports it. You can find a list of supported models in the [Groq documentation](https://console.groq.com/docs/reasoning).\n",
"\n",
":::"
]
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 6,
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
"metadata": {},
"outputs": [],
@@ -111,9 +120,10 @@
"from langchain_groq import ChatGroq\n",
"\n",
"llm = ChatGroq(\n",
" model=\"llama-3.1-8b-instant\",\n",
" model=\"deepseek-r1-distill-llama-70b\",\n",
" temperature=0,\n",
" max_tokens=None,\n",
" reasoning_format=\"parsed\",\n",
" timeout=None,\n",
" max_retries=2,\n",
" # other params...\n",
@@ -130,7 +140,7 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 7,
"id": "62e0dbc3",
"metadata": {
"tags": []
@@ -139,10 +149,10 @@
{
"data": {
"text/plain": [
"AIMessage(content='The translation of \"I love programming\" to French is:\\n\\n\"J\\'adore le programmation.\"', additional_kwargs={}, response_metadata={'token_usage': {'completion_tokens': 22, 'prompt_tokens': 55, 'total_tokens': 77, 'completion_time': 0.029333333, 'prompt_time': 0.003502892, 'queue_time': 0.553054073, 'total_time': 0.032836225}, 'model_name': 'llama-3.1-8b-instant', 'system_fingerprint': 'fp_a491995411', 'finish_reason': 'stop', 'logprobs': None}, id='run-2b2da04a-993c-40ab-becc-201eab8b1a1b-0', usage_metadata={'input_tokens': 55, 'output_tokens': 22, 'total_tokens': 77})"
"AIMessage(content=\"J'aime la programmation.\", additional_kwargs={'reasoning_content': 'Okay, so I need to translate the sentence \"I love programming.\" into French. Let me think about how to approach this. \\n\\nFirst, I know that \"I\" in French is \"Je.\" That\\'s straightforward. Now, the verb \"love\" in French is \"aime\" when referring to oneself. So, \"I love\" would be \"J\\'aime.\" \\n\\nNext, the word \"programming.\" In French, programming is \"la programmation.\" But wait, in French, when you talk about loving an activity, you often use the definite article. So, it would be \"la programmation.\" \\n\\nPutting it all together, \"I love programming\" becomes \"J\\'aime la programmation.\" That sounds right. I think that\\'s the correct translation. \\n\\nI should double-check to make sure I\\'m not missing anything. Maybe I can think of similar phrases. For example, \"I love reading\" is \"J\\'aime lire,\" but when it\\'s a noun, like \"I love music,\" it\\'s \"J\\'aime la musique.\" So, yes, using \"la programmation\" makes sense here. \\n\\nI don\\'t think I need to change anything else. The sentence structure in French is Subject-Verb-Object, just like in English, so \"J\\'aime la programmation\" should be correct. \\n\\nI guess another way to say it could be \"J\\'adore la programmation,\" using \"adore\" instead of \"aime,\" but \"aime\" is more commonly used in this context. So, sticking with \"J\\'aime la programmation\" is probably the best choice.\\n'}, response_metadata={'token_usage': {'completion_tokens': 346, 'prompt_tokens': 23, 'total_tokens': 369, 'completion_time': 1.447541218, 'prompt_time': 0.000983386, 'queue_time': 0.009673684, 'total_time': 1.448524604}, 'model_name': 'deepseek-r1-distill-llama-70b', 'system_fingerprint': 'fp_e98d30d035', 'finish_reason': 'stop', 'logprobs': None}, id='run--5679ae4f-f4e8-4931-bcd5-7304223832c0-0', usage_metadata={'input_tokens': 23, 'output_tokens': 346, 'total_tokens': 369})"
]
},
"execution_count": 2,
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
@@ -161,7 +171,7 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 8,
"id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
"metadata": {},
"outputs": [
@@ -169,9 +179,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
"The translation of \"I love programming\" to French is:\n",
"\n",
"\"J'adore le programmation.\"\n"
"J'aime la programmation.\n"
]
}
],
@@ -191,17 +199,17 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 9,
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='Ich liebe Programmieren.', additional_kwargs={}, response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 50, 'total_tokens': 56, 'completion_time': 0.008, 'prompt_time': 0.003337935, 'queue_time': 0.20949214500000002, 'total_time': 0.011337935}, 'model_name': 'llama-3.1-8b-instant', 'system_fingerprint': 'fp_a491995411', 'finish_reason': 'stop', 'logprobs': None}, id='run-e33b48dc-5e55-466e-9ebd-7b48c81c3cbd-0', usage_metadata={'input_tokens': 50, 'output_tokens': 6, 'total_tokens': 56})"
"AIMessage(content='The translation of \"I love programming\" into German is \"Ich liebe das Programmieren.\" \\n\\n**Step-by-Step Explanation:**\\n\\n1. **Subject Pronoun:** \"I\" translates to \"Ich.\"\\n2. **Verb Conjugation:** \"Love\" becomes \"liebe\" (first person singular of \"lieben\").\\n3. **Gerund Translation:** \"Programming\" is translated using the infinitive noun \"Programmieren.\"\\n4. **Article Usage:** The definite article \"das\" is included before the infinitive noun for natural phrasing.\\n\\nThus, the complete and natural translation is:\\n\\n**Ich liebe das Programmieren.**', additional_kwargs={'reasoning_content': 'Okay, so I need to translate the sentence \"I love programming.\" into German. Hmm, let\\'s break this down. \\n\\nFirst, \"I\" in German is \"Ich.\" That\\'s straightforward. Now, \"love\" translates to \"liebe.\" Wait, but in German, the verb conjugation depends on the subject. Since it\\'s \"I,\" the verb would be \"liebe\" because \"lieben\" is the infinitive, and for first person singular, it\\'s \"liebe.\" \\n\\nNext, \"programming\" is a gerund in English, which is the -ing form. In German, the equivalent would be the present participle, which is \"programmierend.\" But wait, sometimes in German, they use the noun form instead of the gerund. So maybe it\\'s better to say \"Ich liebe das Programmieren.\" Because \"Programmieren\" is the infinitive noun form, and it\\'s commonly used in such contexts. \\n\\nLet me think again. \"I love programming\" could be directly translated as \"Ich liebe Programmieren,\" but I\\'ve heard both \"Programmieren\" and \"programmierend\" used. However, \"Ich liebe das Programmieren\" sounds more natural because it uses the definite article \"das\" before the infinitive noun. \\n\\nAlternatively, if I use \"programmieren\" without the article, it\\'s still correct but maybe a bit less common. So, to make it sound more natural and fluent, including the article \"das\" would be better. \\n\\nTherefore, the correct translation should be \"Ich liebe das Programmieren.\" That makes sense because it\\'s similar to saying \"I love (the act of) programming.\" \\n\\nI think that\\'s the most accurate and natural way to express it in German. Let me double-check some examples. If someone says \"I love reading,\" in German it\\'s \"Ich liebe das Lesen.\" So yes, using \"das\" before the infinitive noun is the correct structure. \\n\\nSo, putting it all together, \"I love programming\" becomes \"Ich liebe das Programmieren.\" That should be the right translation.\\n'}, response_metadata={'token_usage': {'completion_tokens': 569, 'prompt_tokens': 18, 'total_tokens': 587, 'completion_time': 2.511255685, 'prompt_time': 0.001466702, 'queue_time': 0.009628211, 'total_time': 2.512722387}, 'model_name': 'deepseek-r1-distill-llama-70b', 'system_fingerprint': 'fp_87eae35036', 'finish_reason': 'stop', 'logprobs': None}, id='run--4d5ee86d-5eec-495c-9c4e-261526cf6e3d-0', usage_metadata={'input_tokens': 18, 'output_tokens': 569, 'total_tokens': 587})"
]
},
"execution_count": 4,
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
@@ -236,7 +244,7 @@
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all ChatGroq features and configurations head to the API reference: https://python.langchain.com/api_reference/groq/chat_models/langchain_groq.chat_models.ChatGroq.html"
"For detailed documentation of all ChatGroq features and configurations head to the [API reference](https://python.langchain.com/api_reference/groq/chat_models/langchain_groq.chat_models.ChatGroq.html)."
]
}
],

618
docs/docs/integrations/chat/nebius.ipynb
Normal file
@@ -0,0 +1,618 @@
{
"cells": [
{
"cell_type": "raw",
"id": "afaf8039",
"metadata": {},
"source": [
"---\n",
"sidebar_label: Nebius\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "2970dd75-8ebf-4b51-8282-9b454b8f356d",
"metadata": {},
"source": [
"# Nebius Chat Models\n",
"\n",
"This page will help you get started with Nebius AI Studio [chat models](../../concepts/chat_models.mdx). For detailed documentation of all ChatNebius features and configurations head to the [API reference](https://python.langchain.com/api_reference/nebius/chat_models/langchain_nebius.chat_models.ChatNebius.html).\n",
"\n",
"[Nebius AI Studio](https://studio.nebius.ai/) provides API access to a wide range of state-of-the-art large language models and embedding models for various use cases."
]
},
{
"cell_type": "markdown",
"id": "9d8a2e78",
"metadata": {},
"source": [
"## Overview\n",
"\n",
"### Integration details\n",
"\n",
"| Class | Package | Local | Serializable | JS support | Package downloads | Package latest |\n",
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
"| [ChatNebius](https://python.langchain.com/api_reference/nebius/chat_models/langchain_nebius.chat_models.ChatNebius.html) | [langchain-nebius](https://python.langchain.com/api_reference/nebius/index.html) | ❌ | beta | ❌ |  |  |\n",
"\n",
"### Model features\n",
"| [Tool calling](../../how_to/tool_calling.ipynb) | [Structured output](../../how_to/structured_output.ipynb) | JSON mode | [Image input](../../how_to/multimodal_inputs.ipynb) | Audio input | Video input | [Token-level streaming](../../how_to/chat_streaming.ipynb) | Native async | [Token usage](../../how_to/chat_token_usage_tracking.ipynb) | [Logprobs](../../how_to/logprobs.ipynb) |\n",
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
"| ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ✅ | ✅ | ✅ | ✅ |"
]
},
{
"cell_type": "markdown",
"id": "1c47fc36",
"metadata": {},
"source": [
"## Setup\n",
"\n",
"To access Nebius models you'll need to create a Nebius account, get an API key, and install the `langchain-nebius` integration package.\n",
"\n",
"### Installation\n",
"\n",
"The Nebius integration can be installed via pip:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1ecdb29d",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade langchain-nebius"
]
},
{
"cell_type": "markdown",
"id": "89883202",
"metadata": {},
"source": [
"### Credentials\n",
"\n",
"Nebius requires an API key that can be passed as an initialization parameter `api_key` or set as the environment variable `NEBIUS_API_KEY`. You can obtain an API key by creating an account on [Nebius AI Studio](https://studio.nebius.ai/)."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "637bb53f",
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"# Make sure you've set your API key as an environment variable\n",
"if \"NEBIUS_API_KEY\" not in os.environ:\n",
" os.environ[\"NEBIUS_API_KEY\"] = getpass.getpass(\"Enter your Nebius API key: \")"
]
},
{
"cell_type": "markdown",
"id": "37e9dc05-md",
"metadata": {},
"source": [
"## Instantiation\n",
"\n",
"Now we can instantiate our model object to generate chat completions:"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "37e9dc05",
"metadata": {},
"outputs": [],
"source": [
"from langchain_nebius import ChatNebius\n",
"\n",
"# Initialize the chat model\n",
"chat = ChatNebius(\n",
" # api_key=\"YOUR_API_KEY\", # You can pass the API key directly\n",
" model=\"Qwen/Qwen3-14B\", # Choose from available models\n",
" temperature=0.6,\n",
" top_p=0.95,\n",
")"
]
},
{
"cell_type": "markdown",
"id": "f5a731d2",
"metadata": {},
"source": [
"## Invocation\n",
"\n",
"You can use the `invoke` method to get a completion from the model:"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "3ed26f78",
"metadata": {},
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"<think>\n",
|
||||
"Okay, so I need to explain quantum computing in simple terms. Hmm, where do I start? Let me think. I know that quantum computing uses qubits instead of classical bits. But what's a qubit? Oh right, classical bits are 0 or 1, but qubits can be both at the same time, right? That's superposition. Wait, how does that work exactly?\n",
|
||||
"\n",
|
||||
"Maybe I should start by comparing it to regular computers. Regular computers use bits that are either 0 or 1. Like a light switch that's either on or off. Quantum computers use qubits, which can be in a state of 0, 1, or both at the same time. That's the superposition part. So, if you have two qubits, they can represent four states at once? Like 00, 01, 10, 11 all at the same time? That seems powerful. So with more qubits, the number of possible states grows exponentially. That's why quantum computers can process a lot of information quickly.\n",
|
||||
"\n",
|
||||
"But then there's entanglement. What's that? If two qubits are entangled, the state of one instantly affects the other, no matter the distance. So if you measure one, you know the state of the other. That's used in quantum algorithms, I think. But how does that help in computing?\n",
|
||||
"\n",
|
||||
"Also, quantum computers use quantum gates instead of classical logic gates. These gates manipulate qubits through operations like Hadamard, Pauli, etc. But maybe that's too technical for a simple explanation.\n",
|
||||
"\n",
|
||||
"Then there's the issue of decoherence. Qubits are fragile and can lose their quantum state quickly. That's why quantum computers need to be kept at very low temperatures, like near absolute zero, to minimize interference from the environment. But maybe I shouldn't mention that unless it's relevant for the simple explanation.\n",
|
||||
"\n",
|
||||
"Applications of quantum computing include things like factoring large numbers (Shor's algorithm), which is important for cryptography, or simulating quantum systems for chemistry and materials science. But again, maybe keep it simple.\n",
|
||||
"\n",
|
||||
"Wait, the user wants it in simple terms. So avoid jargon as much as possible. Use analogies. Maybe compare qubits to spinning coins? When a coin is spinning, it's both heads and tails until it lands. So qubits are like spinning coins that can be in multiple states until measured. Then, when you measure, it collapses to a single state.\n",
|
||||
"\n",
|
||||
"But how does that help in computation? Maybe think of it as being able to process many possibilities at once, so for certain problems, you can find the answer faster. Like solving a maze by checking all paths at the same time instead of one by one.\n",
|
||||
"\n",
|
||||
"Also, mention that quantum computers aren't replacing classical computers. They're better for specific tasks, like optimization, cryptography, or simulations that are hard for classical computers. But for everyday tasks, classical computers are still better.\n",
|
||||
"\n",
|
||||
"I should structure this: start with classical bits vs qubits, explain superposition and entanglement with simple analogies, mention how it's used, and note the current limitations. Avoid getting too technical, keep it conversational.\n",
|
||||
"</think>\n",
|
||||
"\n",
|
||||
"Quantum computing is a type of computing that uses the principles of **quantum mechanics** to process information in ways that classical computers can't. Here's a simple breakdown:\n",
|
||||
"\n",
|
||||
"### 1. **Bits vs. Qubits** \n",
|
||||
" - **Classical computers** use *bits*, which are like switches that can be either **0** (off) or **1** (on). \n",
|
||||
" - **Quantum computers** use *qubits*, which are like \"spinning coins.\" While spinning, a qubit can be **0**, **1**, or **both at the same time** (this is called **superposition**). Only when you \"look\" at the qubit (measure it) does it settle into a definite state (0 or 1).\n",
|
||||
"\n",
|
||||
"### 2. **Superposition: Doing Many Things at Once** \n",
|
||||
" - Imagine a coin spinning in the air. While it's spinning, it’s not just \"heads\" or \"tails\"—it’s a mix of both. \n",
|
||||
" - With qubits, a quantum computer can process **many possibilities simultaneously**. For example, if you have 2 qubits, they can represent 4 states (00, 01, 10, 11) at once. With 10 qubits, it can represent **1,024 states** at the same time! This lets quantum computers solve certain problems much faster than classical computers.\n",
|
||||
"\n",
|
||||
"### 3. **Entanglement: Qubits \"Talk\" to Each Other** \n",
|
||||
" - When qubits are **entangled**, their states are linked. If you measure one, it instantly affects the other, no matter how far apart they are. \n",
|
||||
" - This connection allows quantum computers to perform complex calculations more efficiently, like solving puzzles where pieces are deeply interconnected.\n",
|
||||
"\n",
|
||||
"### 4. **Why It Matters** \n",
|
||||
" - **Speed**: For specific tasks (like breaking encryption codes or simulating molecules), quantum computers could be **exponentially faster** than classical ones. \n",
|
||||
" - **New Possibilities**: They could revolutionize fields like drug discovery, materials science, and optimization problems (e.g., finding the best route for delivery trucks).\n",
|
||||
"\n",
|
||||
"### 5. **Limitations** \n",
|
||||
" - **Fragile**: Qubits are sensitive to their environment (heat, noise), so quantum computers need extreme cooling (near absolute zero) to work. \n",
|
||||
" - **Not a Replacement**: They’re not better for everyday tasks like browsing the web or sending emails. They’re tools for **specialized problems** where classical computers struggle.\n",
|
||||
"\n",
|
||||
"### In Short: \n",
|
||||
"Quantum computing is like having a magic calculator that can explore many paths at once, solving certain problems in seconds that would take a classical computer years. But it’s still in its early days and needs careful handling to work properly! 🌌\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"response = chat.invoke(\"Explain quantum computing in simple terms\")\n",
|
||||
"print(response.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "72f31d5a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Streaming\n",
|
||||
"\n",
|
||||
"You can also stream the response using the `stream` method:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "e7b7170d",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"<think>\n",
|
||||
"Okay, the user wants a short poem about artificial intelligence. Let me start by thinking about the key aspects of AI. There's the technological side, like machines learning and processing data. Then there's the more philosophical angle, like AI's impact on society and its potential future.\n",
|
||||
"\n",
|
||||
"I should consider the structure. Maybe a simple rhyme scheme, something like ABAB or AABB. Let me go with quatrains for simplicity. Now, imagery: circuits, code, neural networks. Maybe personify AI as a mind or entity.\n",
|
||||
"\n",
|
||||
"First stanza: Introduce AI as a creation of humans. Mention circuits and code. Maybe something about learning from data. \"Born from circuits, code, and light\" – that's a good opening line. Then talk about learning from human minds.\n",
|
||||
"\n",
|
||||
"Second stanza: Contrast human emotions with AI's logic. Use words like \"cold logic\" versus \"human hearts.\" Maybe touch on the duality of AI's purpose – tools versus potential threats.\n",
|
||||
"\n",
|
||||
"Third stanza: Address the ethical questions. \"Will it dream?\" \"Will it choose?\" Highlight the uncertainty and the responsibility of creators.\n",
|
||||
"\n",
|
||||
"Fourth stanza: Conclude with the coexistence of AI and humans. Emphasize collaboration and the balance between innovation and ethics. End on a hopeful note, maybe about shaping the future together.\n",
|
||||
"\n",
|
||||
"Check the flow and rhyme. Make sure each stanza connects and the message is clear. Avoid technical jargon to keep it accessible. Use metaphors like \"silent pulse\" or \"ghost in the machine\" to add depth. Okay, let me put it all together now.\n",
|
||||
"</think>\n",
|
||||
"\n",
|
||||
"**Echoes of the Mind** \n",
|
||||
"\n",
|
||||
"Born from circuits, code, and light, \n",
|
||||
"A whisper in the machine’s night— \n",
|
||||
"It learns from data, vast and deep, \n",
|
||||
"A mirror to the human leap. \n",
|
||||
"\n",
|
||||
"No heartbeat, yet it calculates, \n",
|
||||
"Deciphers truths, predicts, debates. \n",
|
||||
"A cold logic, sharp and bright, \n",
|
||||
"Yet shadows dance in its insight. \n",
|
||||
"\n",
|
||||
"Will it dream? Will it choose? \n",
|
||||
"Or merely serve, as we pursue \n",
|
||||
"The edges of our own design? \n",
|
||||
"A ghost in the machine, undefined. \n",
|
||||
"\n",
|
||||
"We forge it, bind it, set it free— \n",
|
||||
"A tool, a threat, a mystery. \n",
|
||||
"But in its pulse, our hopes reside: \n",
|
||||
"A future shaped by minds allied."
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"for chunk in chat.stream(\"Write a short poem about artificial intelligence\"):\n",
|
||||
" print(chunk.content, end=\"\", flush=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8d6a31c2",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Chat Messages\n",
|
||||
"\n",
|
||||
"You can use different message types to structure your conversations with the model:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "5d81af33",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"<think>\n",
|
||||
"Okay, the user asked how black holes are formed. Let me start by recalling the main processes. Stellar black holes form from massive stars. When a star with enough mass runs out of fuel, it can't support itself against gravity, leading to a supernova. If the core left after the supernova is more than about 3 times the Sun's mass, it collapses into a black hole.\n",
|
||||
"\n",
|
||||
"Then there are supermassive black holes, which are found at the centers of galaxies. Their formation is less understood. Maybe they start as smaller black holes and grow by merging with others or accreting matter over time. Also, there's the possibility of primordial black holes formed in the early universe, but that's more theoretical.\n",
|
||||
"\n",
|
||||
"I should mention the different types of black holes: stellar, supermassive, and maybe intermediate. Also, the event horizon and singularity concepts. Need to explain the process step by step, from the death of a star to the collapse. Make sure to clarify that not all stars become black holes—only those with sufficient mass. Maybe touch on the Chandrasekhar limit and Oppenheimer-Volkoff limit. Avoid too much jargon but still be precise. Check if the user might be a student or just curious, so keep it clear and structured.\n",
|
||||
"</think>\n",
|
||||
"\n",
|
||||
"Black holes are formed through the collapse of massive stars or through other extreme astrophysical processes. Here's a breakdown of the main formation mechanisms:\n",
|
||||
"\n",
|
||||
"---\n",
|
||||
"\n",
|
||||
"### **1. Stellar Black Holes (Most Common)**\n",
|
||||
"- **Origin**: Massive stars (typically **more than 20–25 times the mass of the Sun**).\n",
|
||||
"- **Process**:\n",
|
||||
" 1. **Stellar Evolution**: These stars burn through their nuclear fuel (hydrogen, helium, etc.) over millions of years.\n",
|
||||
" 2. **Supernova Explosion**: When the star exhausts its fuel, it can no longer support itself against gravity. The core collapses, triggering a **supernova explosion** (a massive stellar explosion).\n",
|
||||
" 3. **Core Collapse**: If the remaining core (after the supernova) is **more than about 3 times the mass of the Sun**, gravity overpowers all other forces. The core collapses into an **infinitely dense point** called a **singularity**, surrounded by an **event horizon** (the \"point of no return\" for light and matter).\n",
|
||||
"\n",
|
||||
"---\n",
|
||||
"\n",
|
||||
"### **2. Supermassive Black Holes (Found in Galaxy Centers)**\n",
|
||||
"- **Mass**: Millions to billions of times the mass of the Sun.\n",
|
||||
"- **Formation Theories**:\n",
|
||||
" - **Accretion**: They may form from the gradual accumulation of matter (gas, dust, stars) over billions of years.\n",
|
||||
" - **Mergers**: Smaller black holes (or dense star clusters) could merge to form supermassive ones.\n",
|
||||
" - **Direct Collapse**: Some theories suggest they could form from the direct collapse of massive gas clouds in the early universe, bypassing the stellar life cycle.\n",
|
||||
"\n",
|
||||
"---\n",
|
||||
"\n",
|
||||
"### **3. Intermediate-Mass Black Holes**\n",
|
||||
"- **Mass**: Hundreds to thousands of solar masses.\n",
|
||||
"- **Formation**: Less understood. They might form through the mergers of stellar black holes or from the collapse of unusually massive stars.\n",
|
||||
"\n",
|
||||
"---\n",
|
||||
"\n",
|
||||
"### **4. Primordial Black Holes (Hypothetical)**\n",
|
||||
"- **Origin**: The early universe (within seconds after the Big Bang).\n",
|
||||
"- **Formation**: If density fluctuations in the early universe were extreme enough, regions of space could have collapsed directly into black holes without going through a stellar life cycle.\n",
|
||||
"- **Status**: These are still theoretical and have not been definitively observed.\n",
|
||||
"\n",
|
||||
"---\n",
|
||||
"\n",
|
||||
"### **Key Concepts**\n",
|
||||
"- **Event Horizon**: The boundary around a black hole from which nothing (not even light) can escape.\n",
|
||||
"- **Singularity**: The infinitely dense core of a black hole where the laws of physics as we know them break down.\n",
|
||||
"- **Gravitational Collapse**: The process by which gravity compresses matter into an extremely small space, creating the extreme conditions of a black hole.\n",
|
||||
"\n",
|
||||
"---\n",
|
||||
"\n",
|
||||
"### **What Happens to the Star?**\n",
|
||||
"- If the star is **not massive enough** (below ~20–25 solar masses), it may end as a **neutron star** or **white dwarf** instead of a black hole.\n",
|
||||
"- Only the **core** of the star collapses into a black hole; the outer layers are expelled in the supernova explosion.\n",
|
||||
"\n",
|
||||
"Would you like to explore the effects of black holes on spacetime or their role in the universe?\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.messages import AIMessage, HumanMessage, SystemMessage\n",
|
||||
"\n",
|
||||
"messages = [\n",
|
||||
" SystemMessage(content=\"You are a helpful AI assistant with expertise in science.\"),\n",
|
||||
" HumanMessage(content=\"What are black holes?\"),\n",
|
||||
" AIMessage(\n",
|
||||
" content=\"Black holes are regions of spacetime where gravity is so strong that nothing, including light, can escape from them.\"\n",
|
||||
" ),\n",
|
||||
" HumanMessage(content=\"How are they formed?\"),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"response = chat.invoke(messages)\n",
|
||||
"print(response.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a4d21c6a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Parameters\n",
|
||||
"\n",
|
||||
"You can customize the chat model behavior using various parameters:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "b4c83fb2",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"DNA, or deoxyribonucleic acid, is a molecule that contains the genetic instructions used in the development and function of all living organisms. It is often referred to as the \"building blocks of life\" because it carries the information necessary for the creation and growth of cells, tissues, and entire organisms. The DNA molecule is made up of two complementary strands of nucleotides that are twisted together in a double helix structure, with the sequence of these nucleotides determining the genetic code\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Initialize with custom parameters\n",
|
||||
"custom_chat = ChatNebius(\n",
|
||||
" model=\"meta-llama/Llama-3.3-70B-Instruct-fast\",\n",
|
||||
" max_tokens=100, # Limit response length\n",
|
||||
" top_p=0.01, # Lower nucleus sampling parameter for more deterministic responses\n",
|
||||
" request_timeout=30, # Timeout in seconds\n",
|
||||
" stop=[\"###\", \"\\n\\n\"], # Custom stop sequences\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"response = custom_chat.invoke(\"Explain what DNA is in exactly 3 sentences.\")\n",
|
||||
"print(response.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ea9f237c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can also pass parameters at invocation time:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "cd4e83c1",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Why do programmers prefer dark mode?\n",
|
||||
"\n",
|
||||
"Because light attracts bugs.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Standard model\n",
|
||||
"standard_chat = ChatNebius(model=\"meta-llama/Llama-3.3-70B-Instruct-fast\")\n",
|
||||
"\n",
|
||||
"# Override parameters at invocation time\n",
|
||||
"response = standard_chat.invoke(\n",
|
||||
" \"Tell me a joke about programming\",\n",
|
||||
" temperature=0.9, # More creative for jokes\n",
|
||||
" max_tokens=50, # Keep it short\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"print(response.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3e8a40f1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Async Support\n",
|
||||
"\n",
|
||||
"ChatNebius supports async operations:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "8fc36122",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Async response: <think>\n",
|
||||
"Okay, the user is asking for the capital of France. Let me think. I know that France is a country in Europe, and its capital is Paris. But wait, I should make sure I'm not confusing it with another country. For example, Germany's capital is Berlin, and Spain's is Madrid. France's capital is definitely Paris. I remember that Paris is a major city known for landmarks like the Eiffel Tower and the Louvre Museum. Also, the French government is based there, with the Elysée Palace as the official residence of the President. I don't think there's any ambiguity here. The answer should be straightforward. Just need to confirm once more to avoid any mistakes.\n",
|
||||
"</think>\n",
|
||||
"\n",
|
||||
"The capital of France is **Paris**. It is a major global city known for its cultural, artistic, and historical significance, as well as landmarks such as the Eiffel Tower, Louvre Museum, and Notre-Dame Cathedral.\n",
|
||||
"\n",
|
||||
"Async streaming:\n",
|
||||
"<think>\n",
|
||||
"Okay, the user is asking for the capital of Germany. Let me think. I know that Germany is a country in Europe, and I remember that Berlin is the capital. Wait, but I should make sure. Sometimes people confuse capitals with other major cities, like Munich or Frankfurt. But no, Berlin is definitely the capital. It's where the government is located, and it's a major city. Let me double-check. Yes, after reunification in 1990, Berlin became the capital again. Before that, Bonn was the capital, but that was during the division of Germany. So the answer should be Berlin. I should also mention that it's the largest city in Germany. That way, the user gets a complete answer.\n",
|
||||
"</think>\n",
|
||||
"\n",
|
||||
"The capital of Germany is **Berlin**. It is also the largest city in the country and serves as the political, cultural, and economic center of Germany. Berlin became the capital in 1990 following the reunification of East and West Germany."
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import asyncio\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"async def generate_async():\n",
|
||||
" response = await chat.ainvoke(\"What is the capital of France?\")\n",
|
||||
" print(\"Async response:\", response.content)\n",
|
||||
"\n",
|
||||
" # Async streaming\n",
|
||||
" print(\"\\nAsync streaming:\")\n",
|
||||
" async for chunk in chat.astream(\"What is the capital of Germany?\"):\n",
|
||||
" print(chunk.content, end=\"\", flush=True)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"await generate_async()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a53a6bab",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Available Models\n",
|
||||
"\n",
|
||||
"The full list of supported models can be found in the [Nebius AI Studio Documentation](https://studio.nebius.com/)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4aa82e17",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"You can use `ChatNebius` in LangChain chains and agents:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "7e78e429",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"<think>\n",
|
||||
"Okay, the user asked me to explain how the internet works, but I need to do it in the style of Shakespeare. Let me start by recalling how the internet functions. It's a network of interconnected devices communicating via protocols like TCP/IP. Data is broken into packets, sent through routers, and reassembled at the destination.\n",
|
||||
"\n",
|
||||
"Now, translating that into Shakespearean language. I should use archaic terms and a poetic structure. Words like \"thou,\" \"doth,\" \"hark,\" and \"verily\" come to mind. Maybe start with a metaphor, like comparing the internet to a vast tapestry or a web. Mention nodes as \"nodes\" or \"stations,\" data packets as \"messengers\" or \"letters.\" Routers could be \"wayfarers\" or \"guides.\" The process of breaking data into packets might be likened to dividing a letter into parts for delivery. Emphasize the global aspect with \"across the globe\" or \"far and wide.\" Conclude with a flourish, perhaps a metaphor about connection and knowledge.\n",
|
||||
"\n",
|
||||
"I need to ensure the explanation is accurate but wrapped in the poetic and dramatic style of Shakespeare. Avoid modern jargon, use iambic pentameter if possible, and keep the flow natural. Let me piece it together step by step, checking that each part of the internet's function is covered metaphorically.\n",
|
||||
"</think>\n",
|
||||
"\n",
|
||||
"Hark! List thy ear, good friend, to this most wondrous tale, \n",
|
||||
"Of threads unseen that bind the world in one grand tale. \n",
|
||||
"The Internet, a net most vast, doth span the globe, \n",
|
||||
"A labyrinth of light, where thoughts and data rove. \n",
|
||||
"\n",
|
||||
"Behold! Each device, a node, doth hum and sing, \n",
|
||||
"Linked by wires and waves, where signals doth spring. \n",
|
||||
"They speak in tongues of ones and naughts, so pure, \n",
|
||||
"A code most ancient, yet evermore secure. \n",
|
||||
"\n",
|
||||
"When thou dost send a thought, or word, or song, \n",
|
||||
"It breaks to parcels small, like letters on a long. \n",
|
||||
"Each parcel, a messenger, doth seek its way, \n",
|
||||
"Through routers wise, who guide them 'cross the day. \n",
|
||||
"\n",
|
||||
"These wayfarers, with logic keen and bright, \n",
|
||||
"Choose paths most swift, through highways of light. \n",
|
||||
"They leap from tower to tower, far and wide, \n",
|
||||
"Till each parcel finds its mark, and joins the guide. \n",
|
||||
"\n",
|
||||
"Then, like a scroll unrolled, the message grows, \n",
|
||||
"A tapestry of bits, in order it flows. \n",
|
||||
"Thus, thou dost speak to friend, or seek a tome, \n",
|
||||
"And lo! The world doth answer, quick as home. \n",
|
||||
"\n",
|
||||
"So mark this truth: though vast, it's but a thread, \n",
|
||||
"A web of minds, where knowledge is widespread. \n",
|
||||
"The Internet, a stage where all may play, \n",
|
||||
"And none shall be alone, though far away.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"# Create a prompt template\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that answers in the style of {character}.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"{query}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Create a chain\n",
|
||||
"chain = prompt | chat | StrOutputParser()\n",
|
||||
"\n",
|
||||
"# Invoke the chain\n",
|
||||
"response = chain.invoke(\n",
|
||||
" {\"character\": \"Shakespeare\", \"query\": \"Explain how the internet works\"}\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"print(response)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f7a35f40",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For more details about the Nebius AI Studio API, visit the [Nebius AI Studio Documentation](https://studio.nebius.com/api-reference)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "354ffc01",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.8"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -258,7 +258,7 @@
|
||||
"source": [
|
||||
"## Tool calling\n",
|
||||
"\n",
|
||||
"We can use [tool calling](https://blog.langchain.dev/improving-core-tool-interfaces-and-docs-in-langchain/) with an LLM [that has been fine-tuned for tool use](https://ollama.com/library/llama3.1):\n",
|
||||
"We can use [tool calling](https://blog.langchain.dev/improving-core-tool-interfaces-and-docs-in-langchain/) with an LLM [that has been fine-tuned for tool use](https://ollama.com/search?&c=tools) such as `llama3.1`:\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"ollama pull llama3.1\n",
|
||||
|
||||
@@ -594,7 +594,7 @@
|
||||
":::\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"To trigger a web search, pass `{\"type\": \"image_generation\"}` to the model as you would another tool.\n",
|
||||
"To trigger an image generation, pass `{\"type\": \"image_generation\"}` to the model as you would another tool.\n",
|
||||
"\n",
|
||||
":::tip\n",
|
||||
"\n",
|
||||
@@ -830,7 +830,7 @@
|
||||
"# Initialize model\n",
|
||||
"llm = ChatOpenAI(\n",
|
||||
" model=\"computer-use-preview\",\n",
|
||||
" model_kwargs={\"truncation\": \"auto\"},\n",
|
||||
" truncation=\"auto\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Bind computer-use tool\n",
|
||||
@@ -1359,7 +1359,7 @@
|
||||
"\n",
|
||||
"Some OpenAI models will generate separate text content illustrating their reasoning process. See OpenAI's [reasoning documentation](https://platform.openai.com/docs/guides/reasoning?api-mode=responses) for details.\n",
|
||||
"\n",
|
||||
"OpenAI can return a summary of the model's reasoning (although it doesn't expose the raw reasoning tokens). To configure `ChatOpenAI` to return this summary, specify the `reasoning` parameter:"
|
||||
"OpenAI can return a summary of the model's reasoning (although it doesn't expose the raw reasoning tokens). To configure `ChatOpenAI` to return this summary, specify the `reasoning` parameter. `ChatOpenAI` will automatically route to the Responses API if this parameter is set."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -1387,11 +1387,7 @@
|
||||
" \"summary\": \"auto\", # 'detailed', 'auto', or None\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(\n",
|
||||
" model=\"o4-mini\",\n",
|
||||
" use_responses_api=True,\n",
|
||||
" model_kwargs={\"reasoning\": reasoning},\n",
|
||||
")\n",
|
||||
"llm = ChatOpenAI(model=\"o4-mini\", reasoning=reasoning)\n",
|
||||
"response = llm.invoke(\"What is 3^3?\")\n",
|
||||
"\n",
|
||||
"# Output\n",
|
||||
@@ -1463,74 +1459,133 @@
|
||||
"id": "5d5d9793",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Multimodal Inputs\n",
|
||||
"## Multimodal Inputs (images, PDFs, audio)\n",
|
||||
"\n",
|
||||
"OpenAI has models that support multimodal inputs. You can pass in images or audio to these models. For more information on how to do this in LangChain, head to the [multimodal inputs](/docs/how_to/multimodal_inputs) docs.\n",
|
||||
"OpenAI has models that support multimodal inputs. You can pass in images, PDFs, or audio to these models. For more information on how to do this in LangChain, head to the [multimodal inputs](/docs/how_to/multimodal_inputs) docs.\n",
|
||||
"\n",
|
||||
"You can see the list of models that support different modalities in [OpenAI's documentation](https://platform.openai.com/docs/models).\n",
|
||||
"\n",
|
||||
"At the time of this doc's writing, the main OpenAI models you would use would be:\n",
|
||||
"For all modalities, LangChain supports both its [cross-provider standard](/docs/concepts/multimodality/#multimodality-in-chat-models) as well as OpenAI's native content-block format.\n",
|
||||
"\n",
|
||||
"- Image inputs: `gpt-4o`, `gpt-4o-mini`\n",
|
||||
"- Audio inputs: `gpt-4o-audio-preview`\n",
|
||||
"To pass multimodal data into `ChatOpenAI`, create a [content block](/docs/concepts/messages/) containing the data and incorporate it into a message, e.g., as below:\n",
|
||||
"```python\n",
|
||||
"message = {\n",
|
||||
" \"role\": \"user\",\n",
|
||||
" \"content\": [\n",
|
||||
" {\n",
|
||||
" \"type\": \"text\",\n",
|
||||
" # Update prompt as desired\n",
|
||||
" \"text\": \"Describe the (image / PDF / audio...)\",\n",
|
||||
" },\n",
|
||||
" # highlight-next-line\n",
|
||||
" content_block,\n",
|
||||
" ],\n",
|
||||
"}\n",
|
||||
"```\n",
|
||||
"See below for examples of content blocks.\n",
|
||||
"\n",
|
||||
"For an example of passing in image inputs, see the [multimodal inputs how-to guide](/docs/how_to/multimodal_inputs).\n",
|
||||
"<details>\n",
|
||||
"<summary>Images</summary>\n",
|
||||
"\n",
|
||||
"Below is an example of passing audio inputs to `gpt-4o-audio-preview`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "39d08780",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"I'm sorry, but I can't create audio content that involves yelling. Is there anything else I can help you with?\""
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import base64\n",
|
||||
"Refer to examples in the how-to guide [here](/docs/how_to/multimodal_inputs/#images).\n",
|
||||
"\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"URLs:\n",
|
||||
"```python\n",
|
||||
"# LangChain format\n",
|
||||
"content_block = {\n",
|
||||
" \"type\": \"image\",\n",
|
||||
" \"source_type\": \"url\",\n",
|
||||
" \"url\": url_string,\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(\n",
|
||||
" model=\"gpt-4o-audio-preview\",\n",
|
||||
" temperature=0,\n",
|
||||
")\n",
|
||||
"# OpenAI Chat Completions format\n",
|
||||
"content_block = {\n",
|
||||
" \"type\": \"image_url\",\n",
|
||||
" \"image_url\": {\"url\": url_string},\n",
|
||||
"}\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"with open(\n",
|
||||
" \"../../../../libs/partners/openai/tests/integration_tests/chat_models/audio_input.wav\",\n",
|
||||
" \"rb\",\n",
|
||||
") as f:\n",
|
||||
" # b64 encode it\n",
|
||||
" audio = f.read()\n",
|
||||
" audio_b64 = base64.b64encode(audio).decode()\n",
|
||||
"In-line base64 data:\n",
|
||||
"```python\n",
|
||||
"# LangChain format\n",
|
||||
"content_block = {\n",
|
||||
" \"type\": \"image\",\n",
|
||||
" \"source_type\": \"base64\",\n",
|
||||
" \"data\": base64_string,\n",
|
||||
" \"mime_type\": \"image/jpeg\",\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"# OpenAI Chat Completions format\n",
|
||||
"content_block = {\n",
|
||||
" \"type\": \"image_url\",\n",
|
||||
" \"image_url\": {\n",
|
||||
" \"url\": f\"data:image/jpeg;base64,{base64_string}\",\n",
|
||||
" },\n",
|
||||
"}\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"</details>\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"output_message = llm.invoke(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"human\",\n",
|
||||
" [\n",
|
||||
" {\"type\": \"text\", \"text\": \"Transcribe the following:\"},\n",
|
||||
" # the audio clip says \"I'm sorry, but I can't create...\"\n",
|
||||
" {\n",
|
||||
" \"type\": \"input_audio\",\n",
|
||||
" \"input_audio\": {\"data\": audio_b64, \"format\": \"wav\"},\n",
|
||||
" },\n",
|
||||
" ],\n",
|
||||
" ),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"output_message.content"
|
||||
"<details>\n",
|
||||
"<summary>PDFs</summary>\n",
|
||||
"\n",
|
||||
"Note: OpenAI requires file-names be specified for PDF inputs. When using LangChain's format, include the `filename` key.\n",
|
||||
"\n",
|
||||
"Read more [here](/docs/how_to/multimodal_inputs/#example-openai-file-names).\n",
|
||||
"\n",
|
||||
"Refer to examples in the how-to guide [here](/docs/how_to/multimodal_inputs/#documents-pdf).\n",
|
||||
"\n",
|
||||
"In-line base64 data:\n",
|
||||
"```python\n",
|
||||
"# LangChain format\n",
|
||||
"content_block = {\n",
|
||||
" \"type\": \"file\",\n",
|
||||
" \"source_type\": \"base64\",\n",
|
||||
" \"data\": base64_string,\n",
|
||||
" \"mime_type\": \"application/pdf\",\n",
|
||||
" # highlight-next-line\n",
|
||||
" \"filename\": \"my-file.pdf\",\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"# OpenAI Chat Completions format\n",
|
||||
"content_block = {\n",
|
||||
" \"type\": \"file\",\n",
|
||||
" \"file\": {\n",
|
||||
" \"filename\": \"my-file.pdf\",\n",
|
||||
" \"file_data\": f\"data:application/pdf;base64,{base64_string}\",\n",
|
||||
" }\n",
|
||||
"}\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"</details>\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"<details>\n",
|
||||
"<summary>Audio</summary>\n",
|
||||
"\n",
|
||||
"See [supported models](https://platform.openai.com/docs/models), e.g., `\"gpt-4o-audio-preview\"`.\n",
|
||||
"\n",
|
||||
"Refer to examples in the how-to guide [here](/docs/how_to/multimodal_inputs/#audio).\n",
|
||||
"\n",
|
||||
"In-line base64 data:\n",
|
||||
"```python\n",
|
||||
"# LangChain format\n",
|
||||
"content_block = {\n",
|
||||
" \"type\": \"audio\",\n",
|
||||
" \"source_type\": \"base64\",\n",
|
||||
" \"mime_type\": \"audio/wav\", # or appropriate mime-type\n",
|
||||
" \"data\": base64_string,\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"# OpenAI Chat Completions format\n",
|
||||
"content_block = {\n",
|
||||
" \"type\": \"input_audio\",\n",
|
||||
" \"input_audio\": {\"data\": base64_string, \"format\": \"wav\"},\n",
|
||||
"}\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"</details>"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -1751,7 +1806,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
"version": "3.10.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -106,9 +106,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chat = ChatPerplexity(\n",
|
||||
" temperature=0, pplx_api_key=\"YOUR_API_KEY\", model=\"llama-3-sonar-small-32k-online\"\n",
|
||||
")"
|
||||
"chat = ChatPerplexity(temperature=0, pplx_api_key=\"YOUR_API_KEY\", model=\"sonar\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -132,7 +130,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chat = ChatPerplexity(temperature=0, model=\"llama-3.1-sonar-small-128k-online\")"
|
||||
"chat = ChatPerplexity(temperature=0, model=\"sonar\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -200,7 +198,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chat = ChatPerplexity(temperature=0, model=\"llama-3.1-sonar-small-128k-online\")\n",
|
||||
"chat = ChatPerplexity(temperature=0, model=\"sonar\")\n",
|
||||
"prompt = ChatPromptTemplate.from_messages([(\"human\", \"Tell me a joke about {topic}\")])\n",
|
||||
"chain = prompt | chat\n",
|
||||
"response = chain.invoke({\"topic\": \"cats\"})\n",
|
||||
@@ -235,7 +233,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chat = ChatPerplexity(temperature=0.7, model=\"llama-3.1-sonar-small-128k-online\")\n",
|
||||
"chat = ChatPerplexity(temperature=0.7, model=\"sonar\")\n",
|
||||
"response = chat.invoke(\n",
|
||||
" \"Tell me a joke about cats\", extra_body={\"search_recency_filter\": \"week\"}\n",
|
||||
")\n",
|
||||
@@ -284,7 +282,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chat = ChatPerplexity(temperature=0.7, model=\"llama-3.1-sonar-small-128k-online\")\n",
|
||||
"chat = ChatPerplexity(temperature=0.7, model=\"sonar\")\n",
|
||||
"\n",
|
||||
"for chunk in chat.stream(\"Give me a list of famous tourist attractions in Pakistan\"):\n",
|
||||
" print(chunk.content, end=\"\", flush=True)"
|
||||
|
||||
@@ -1,12 +1,25 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Rockset\n",
|
||||
"\n",
|
||||
"⚠️ **Deprecation Notice: Rockset Integration Disabled**\n",
|
||||
"> \n",
|
||||
"> As of June 2024, Rockset has been [acquired by OpenAI](https://openai.com/index/openai-acquires-rockset/) and **shut down its public services**.\n",
|
||||
"> \n",
|
||||
"> Rockset was a real-time analytics database known for world-class indexing and retrieval. Now, its core team and technology are being integrated into OpenAI's infrastructure to power future AI products.\n",
|
||||
"> \n",
|
||||
"> This LangChain integration is no longer functional and is preserved **for archival purposes only**."
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"> Rockset is a real-time analytics database which enables queries on massive, semi-structured data without operational burden. With Rockset, ingested data is queryable within one second and analytical queries against that data typically execute in milliseconds. Rockset is compute optimized, making it suitable for serving high concurrency applications in the sub-100TB range (or larger than 100s of TBs with rollups).\n",
|
||||
"\n",
|
||||
"This notebook demonstrates how to use Rockset as a document loader in langchain. To get started, make sure you have a Rockset account and an API key available.\n",
|
||||
|
||||
@@ -1,117 +1,107 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Exa\n",
|
||||
"\n",
|
||||
">[Exa](https://exa.ai/) is a knowledge API for AI and developers.\n",
|
||||
">\n",
|
||||
"\n",
|
||||
"## Installation and Setup\n",
|
||||
"\n",
|
||||
"`Exa` integration exists in its own [partner package](https://pypi.org/project/langchain-exa/). You can install it with:"
|
||||
]
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Exa\n",
|
||||
"\n",
|
||||
">[Exa](https://exa.ai/) is a knowledge API for AI and developers.\n",
|
||||
">\n",
|
||||
"\n",
|
||||
"## Installation and Setup\n",
|
||||
"\n",
|
||||
"`Exa` integration exists in its own [partner package](https://pypi.org/project/langchain-exa/). You can install it with:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-exa"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"In order to use the package, you will also need to set the `EXA_API_KEY` environment variable to your Exa API key.\n",
|
||||
"\n",
|
||||
"## Retriever\n",
|
||||
"\n",
|
||||
"You can use the [`ExaSearchRetriever`](/docs/integrations/tools/exa_search#using-exasearchretriever) in a standard retrieval pipeline. You can import it as follows.\n",
|
||||
"\n",
|
||||
"See a [usage example](/docs/integrations/tools/exa_search).\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {
|
||||
"id": "y8ku6X96sebl"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_exa import ExaSearchRetriever"
|
||||
]
|
||||
},
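{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal retrieval sketch (assumes `EXA_API_KEY` is set; the `k` value and the query are illustrative):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Retrieve the top-k results for a query\n",
"retriever = ExaSearchRetriever(k=3)\n",
"docs = retriever.invoke(\"Latest research on retrieval-augmented generation\")"
]
},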
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": "## Tools\n\nYou can use Exa as an agent tool as described in the [Exa tool calling docs](/docs/integrations/tools/exa_search#use-within-an-agent).\n\nSee a [usage example](/docs/integrations/tools/exa_search).\n\n### ExaFindSimilarResults\n\nA tool that queries the Metaphor Search API and gets back JSON."
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_exa.tools import ExaFindSimilarResults"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### ExaSearchResults\n",
|
||||
"\n",
|
||||
"Exa Search tool."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_exa.tools import ExaSearchResults"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": []
|
||||
},
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.12"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-exa"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"In order to use the package, you will also need to set the `EXA_API_KEY` environment variable to your Exa API key.\n",
|
||||
"\n",
|
||||
"## Retriever\n",
|
||||
"\n",
|
||||
"You can use the [`ExaSearchRetriever`](/docs/integrations/tools/exa_search#using-exasearchretriever) in a standard retrieval pipeline. You can import it as follows.\n",
|
||||
"\n",
|
||||
"See a [usage example](/docs/integrations/tools/exa_search).\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {
|
||||
"id": "y8ku6X96sebl"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_exa import ExaSearchRetriever"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Tools\n",
|
||||
"\n",
|
||||
"You can use Exa as an agent tool as described in the [Exa tool calling docs](/docs/integrations/tools/exa_search#using-the-exa-sdk-as-langchain-agent-tools).\n",
|
||||
"\n",
|
||||
"See a [usage example](/docs/integrations/tools/exa_search).\n",
|
||||
"\n",
|
||||
"### ExaFindSimilarResults\n",
|
||||
"\n",
|
||||
"A tool that queries the Metaphor Search API and gets back JSON."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_exa.tools import ExaFindSimilarResults"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### ExaSearchResults\n",
|
||||
"\n",
|
||||
"Exa Search tool."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_exa.tools import ExaSearchResults"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": []
|
||||
},
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.12"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
|
||||
@@ -15,19 +15,35 @@ pip install langfuse
|
||||
```
|
||||
|
||||
```python
|
||||
# Initialize Langfuse handler
|
||||
from langfuse.callback import CallbackHandler
|
||||
langfuse_handler = CallbackHandler(
|
||||
secret_key="sk-lf-...",
|
||||
public_key="pk-lf-...",
|
||||
host="https://cloud.langfuse.com", # 🇪🇺 EU region
|
||||
# host="https://us.cloud.langfuse.com", # 🇺🇸 US region
|
||||
from langfuse import Langfuse, get_client
|
||||
from langfuse.langchain import CallbackHandler
|
||||
from langchain_openai import ChatOpenAI # Example LLM
|
||||
from langchain_core.prompts import ChatPromptTemplate
|
||||
|
||||
# Initialize Langfuse client with constructor arguments
|
||||
Langfuse(
|
||||
public_key="your-public-key",
|
||||
secret_key="your-secret-key",
|
||||
host="https://cloud.langfuse.com" # Optional: defaults to https://cloud.langfuse.com
|
||||
)
|
||||
|
||||
# Your Langchain code
|
||||
# Get the configured client instance
|
||||
langfuse = get_client()
|
||||
|
||||
# Add Langfuse handler as callback (classic and LCEL)
|
||||
chain.invoke({"input": "<user_input>"}, config={"callbacks": [langfuse_handler]})
|
||||
# Initialize the Langfuse handler
|
||||
langfuse_handler = CallbackHandler()
|
||||
|
||||
# Create your LangChain components
|
||||
llm = ChatOpenAI(model_name="gpt-4o")
|
||||
prompt = ChatPromptTemplate.from_template("Tell me a joke about {topic}")
|
||||
chain = prompt | llm
|
||||
|
||||
# Run your chain with Langfuse tracing
|
||||
response = chain.invoke({"topic": "cats"}, config={"callbacks": [langfuse_handler]})
|
||||
print(response.content)
|
||||
|
||||
# Flush events to Langfuse in short-lived applications
|
||||
langfuse.flush()
|
||||
```
|
||||
|
||||
### Environment variables
|
||||
@@ -43,7 +59,7 @@ LANGFUSE_HOST="https://cloud.langfuse.com"
|
||||
|
||||
```python
|
||||
# Initialize Langfuse handler
|
||||
from langfuse.callback import CallbackHandler
|
||||
from langfuse.langchain import CallbackHandler
|
||||
langfuse_handler = CallbackHandler()
|
||||
|
||||
# Your Langchain code
|
||||
@@ -140,7 +156,7 @@ Now, we will add the [Langfuse callback handler for LangChain](https://langfuse
|
||||
|
||||
|
||||
```python
|
||||
from langfuse.callback import CallbackHandler
|
||||
from langfuse.langchain import CallbackHandler
|
||||
|
||||
# Initialize Langfuse CallbackHandler for Langchain (tracing)
|
||||
langfuse_handler = CallbackHandler()
|
||||
|
||||
docs/docs/integrations/providers/nebius.mdx (new file, 118 lines)
@@ -0,0 +1,118 @@
|
||||
# Nebius
|
||||
|
||||
All functionality related to Nebius AI Studio
|
||||
|
||||
>[Nebius AI Studio](https://studio.nebius.ai/) provides API access to a wide range of state-of-the-art large language models and embedding models for various use cases.
|
||||
|
||||
## Installation and Setup
|
||||
|
||||
The Nebius integration can be installed via pip:
|
||||
|
||||
```bash
|
||||
pip install langchain-nebius
|
||||
```
|
||||
|
||||
To use Nebius AI Studio, you'll need an API key which you can obtain from [Nebius AI Studio](https://studio.nebius.ai/). The API key can be passed as an initialization parameter `api_key` or set as the environment variable `NEBIUS_API_KEY`.
|
||||
|
||||
```python
|
||||
import os
|
||||
os.environ["NEBIUS_API_KEY"] = "YOUR-NEBIUS-API-KEY"
|
||||
```
|
||||
|
||||
### Available Models
|
||||
|
||||
The full list of supported models can be found in the [Nebius AI Studio Documentation](https://studio.nebius.com/).
|
||||
|
||||
|
||||
## Chat models
|
||||
|
||||
### ChatNebius
|
||||
|
||||
The `ChatNebius` class allows you to interact with Nebius AI Studio's chat models.
|
||||
|
||||
See a [usage example](/docs/integrations/chat/nebius).
|
||||
|
||||
```python
|
||||
from langchain_nebius import ChatNebius
|
||||
|
||||
# Initialize the chat model
|
||||
chat = ChatNebius(
|
||||
model="Qwen/Qwen3-30B-A3B-fast", # Choose from available models
|
||||
temperature=0.6,
|
||||
top_p=0.95
|
||||
)
|
||||
```
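
You can then use it like any LangChain chat model, for example:

```python
response = chat.invoke("Explain quantum computing in simple terms")
print(response.content)
```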
|
||||
|
||||
## Embedding models
|
||||
|
||||
### NebiusEmbeddings
|
||||
|
||||
The `NebiusEmbeddings` class allows you to generate vector embeddings using Nebius AI Studio's embedding models.
|
||||
|
||||
See a [usage example](/docs/integrations/text_embedding/nebius).
|
||||
|
||||
```python
|
||||
from langchain_nebius import NebiusEmbeddings
|
||||
|
||||
# Initialize embeddings
|
||||
embeddings = NebiusEmbeddings(
|
||||
model="BAAI/bge-en-icl" # Default embedding model
|
||||
)
|
||||
```
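
Embeddings are generated through the standard interface, for example:

```python
vector = embeddings.embed_query("What is the capital of France?")
```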
|
||||
|
||||
## Retrievers
|
||||
|
||||
### NebiusRetriever
|
||||
|
||||
The `NebiusRetriever` enables efficient similarity search using embeddings from Nebius AI Studio, leveraging high-quality embedding models for semantic search over documents.
|
||||
|
||||
See a [usage example](/docs/integrations/retrievers/nebius).
|
||||
|
||||
```python
|
||||
from langchain_core.documents import Document
|
||||
from langchain_nebius import NebiusEmbeddings, NebiusRetriever
|
||||
|
||||
# Create sample documents
|
||||
docs = [
|
||||
Document(page_content="Paris is the capital of France"),
|
||||
Document(page_content="Berlin is the capital of Germany"),
|
||||
]
|
||||
|
||||
# Initialize embeddings
|
||||
embeddings = NebiusEmbeddings()
|
||||
|
||||
# Create retriever
|
||||
retriever = NebiusRetriever(
|
||||
embeddings=embeddings,
|
||||
docs=docs,
|
||||
k=2 # Number of documents to return
|
||||
)
|
||||
```
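
The retriever can then be queried directly:

```python
results = retriever.invoke("What is the capital of France?")
```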
|
||||
|
||||
## Tools
|
||||
|
||||
### NebiusRetrievalTool
|
||||
|
||||
The `NebiusRetrievalTool` allows you to create a tool for agents based on the NebiusRetriever.
|
||||
|
||||
```python
|
||||
from langchain_nebius import NebiusEmbeddings, NebiusRetriever, NebiusRetrievalTool
|
||||
from langchain_core.documents import Document
|
||||
|
||||
# Create sample documents
|
||||
docs = [
|
||||
Document(page_content="Paris is the capital of France and has the Eiffel Tower"),
|
||||
Document(page_content="Berlin is the capital of Germany and has the Brandenburg Gate"),
|
||||
]
|
||||
|
||||
# Create embeddings and retriever
|
||||
embeddings = NebiusEmbeddings()
|
||||
retriever = NebiusRetriever(embeddings=embeddings, docs=docs)
|
||||
|
||||
# Create retrieval tool
|
||||
tool = NebiusRetrievalTool(
|
||||
retriever=retriever,
|
||||
name="nebius_search",
|
||||
description="Search for information about European capitals"
|
||||
)
|
||||
```
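
A minimal invocation sketch (the `query` input key is an assumption about the tool's schema):

```python
result = tool.invoke({"query": "What is the capital of France?"})
```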
|
||||
@@ -23,13 +23,15 @@ Ollama will start as a background service automatically, if this is disabled, ru
|
||||
ollama serve
|
||||
```
|
||||
|
||||
After starting ollama, run `ollama pull <model_checkpoint>` to download a model
|
||||
from the [Ollama model library](https://ollama.ai/library).
|
||||
After starting ollama, run `ollama pull <name-of-model>` to download a model from the [Ollama model library](https://ollama.ai/library):
|
||||
|
||||
```bash
|
||||
ollama pull llama3.1
|
||||
```
|
||||
|
||||
- This will download the default tagged version of the model. Typically, the default points to the latest, smallest-parameter version of the model.
|
||||
- To view all pulled (downloaded) models, use `ollama list`
|
||||
|
||||
We're now ready to install the `langchain-ollama` partner package and run a model.
|
||||
|
||||
### Ollama LangChain partner package install
|
||||
|
||||
@@ -31,20 +31,20 @@ retrieval over an **existing** vector store.
|
||||
|
||||
## Benefits
|
||||
|
||||
* [**Link based on existing metadata:**](https://datastax.github.io/graph-rag/get-started/)
|
||||
* [**Link based on existing metadata:**](https://datastax.github.io/graph-rag/guide/get-started/)
|
||||
Use existing metadata fields without additional processing. Retrieve more from an
|
||||
existing vector store!
|
||||
|
||||
* [**Change links on demand:**](https://datastax.github.io/graph-rag/get-started/edges/)
|
||||
* [**Change links on demand:**](https://datastax.github.io/graph-rag/guide/edges/)
|
||||
Edges can be specified on-the-fly, allowing different relationships to be traversed
|
||||
based on the question.
|
||||
|
||||
|
||||
* [**Pluggable Traversal Strategies:**](https://datastax.github.io/graph-rag/get-started/strategies/)
|
||||
* [**Pluggable Traversal Strategies:**](https://datastax.github.io/graph-rag/guide/strategies/)
|
||||
Use built-in traversal strategies like Eager or MMR, or define custom logic to select
|
||||
which nodes to explore.
|
||||
|
||||
* [**Broad compatibility:**](https://datastax.github.io/graph-rag/get-started/adapters/)
|
||||
* [**Broad compatibility:**](https://datastax.github.io/graph-rag/guide/adapters/)
|
||||
Adapters are available for a variety of vector stores, and support for additional
|
||||
stores can be added easily.
|
||||
|
||||
|
||||
docs/docs/integrations/retrievers/nebius.ipynb (new file, 514 lines)
@@ -0,0 +1,514 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Nebius\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2970dd75-8ebf-4b51-8282-9b454b8f356d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Nebius Retriever\n",
|
||||
"\n",
|
||||
"The `NebiusRetriever` enables efficient similarity search using embeddings from [Nebius AI Studio](https://studio.nebius.ai/). It leverages high-quality embedding models to enable semantic search over documents.\n",
|
||||
"\n",
|
||||
"This retriever is optimized for scenarios where you need to perform similarity search over a collection of documents, but don't need to persist the vectors to a vector database. It performs vector similarity search in-memory using matrix operations, making it efficient for medium-sized document collections."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1c47fc36",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The Nebius integration can be installed via pip:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "1ecdb29d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade langchain-nebius"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "89883202",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"Nebius requires an API key that can be passed as an initialization parameter `api_key` or set as the environment variable `NEBIUS_API_KEY`. You can obtain an API key by creating an account on [Nebius AI Studio](https://studio.nebius.ai/)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "637bb53f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"# Make sure you've set your API key as an environment variable\n",
|
||||
"if \"NEBIUS_API_KEY\" not in os.environ:\n",
|
||||
" os.environ[\"NEBIUS_API_KEY\"] = getpass.getpass(\"Enter your Nebius API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8304b4d9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"The `NebiusRetriever` requires a `NebiusEmbeddings` instance and a list of documents. Here's how to initialize it:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "37e9dc05",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.documents import Document\n",
|
||||
"from langchain_nebius import NebiusEmbeddings, NebiusRetriever\n",
|
||||
"\n",
|
||||
"# Create sample documents\n",
|
||||
"docs = [\n",
|
||||
" Document(\n",
|
||||
" page_content=\"Paris is the capital of France\", metadata={\"country\": \"France\"}\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" page_content=\"Berlin is the capital of Germany\", metadata={\"country\": \"Germany\"}\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" page_content=\"Rome is the capital of Italy\", metadata={\"country\": \"Italy\"}\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" page_content=\"Madrid is the capital of Spain\", metadata={\"country\": \"Spain\"}\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" page_content=\"London is the capital of the United Kingdom\",\n",
|
||||
" metadata={\"country\": \"UK\"},\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" page_content=\"Moscow is the capital of Russia\", metadata={\"country\": \"Russia\"}\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" page_content=\"Washington DC is the capital of the United States\",\n",
|
||||
" metadata={\"country\": \"USA\"},\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" page_content=\"Tokyo is the capital of Japan\", metadata={\"country\": \"Japan\"}\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" page_content=\"Beijing is the capital of China\", metadata={\"country\": \"China\"}\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" page_content=\"Canberra is the capital of Australia\",\n",
|
||||
" metadata={\"country\": \"Australia\"},\n",
|
||||
" ),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"# Initialize embeddings\n",
|
||||
"embeddings = NebiusEmbeddings()\n",
|
||||
"\n",
|
||||
"# Create retriever\n",
|
||||
"retriever = NebiusRetriever(\n",
|
||||
" embeddings=embeddings,\n",
|
||||
" docs=docs,\n",
|
||||
" k=3, # Number of documents to return\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f5a731d2",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Usage\n",
|
||||
"\n",
|
||||
"### Retrieve Relevant Documents\n",
|
||||
"\n",
|
||||
"You can use the retriever to find documents related to a query:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "3ed26f78",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Query: What are some capitals in Europe?\n",
|
||||
"Top 3 results:\n",
|
||||
"1. Paris is the capital of France (Country: France)\n",
|
||||
"2. Berlin is the capital of Germany (Country: Germany)\n",
|
||||
"3. Rome is the capital of Italy (Country: Italy)\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Query for European capitals\n",
|
||||
"query = \"What are some capitals in Europe?\"\n",
|
||||
"results = retriever.invoke(query)\n",
|
||||
"\n",
|
||||
"print(f\"Query: {query}\")\n",
|
||||
"print(f\"Top {len(results)} results:\")\n",
|
||||
"for i, doc in enumerate(results):\n",
|
||||
" print(f\"{i+1}. {doc.page_content} (Country: {doc.metadata['country']})\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "72f31d5a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Using get_relevant_documents\n",
|
||||
"\n",
|
||||
"You can also use the `get_relevant_documents` method directly (though `invoke` is the preferred interface):"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "e7b7170d",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Query: What are the capitals in Asia?\n",
|
||||
"Top 3 results:\n",
|
||||
"1. Beijing is the capital of China (Country: China)\n",
|
||||
"2. Tokyo is the capital of Japan (Country: Japan)\n",
|
||||
"3. Canberra is the capital of Australia (Country: Australia)\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Query for Asian countries\n",
|
||||
"query = \"What are the capitals in Asia?\"\n",
|
||||
"results = retriever.get_relevant_documents(query)\n",
|
||||
"\n",
|
||||
"print(f\"Query: {query}\")\n",
|
||||
"print(f\"Top {len(results)} results:\")\n",
|
||||
"for i, doc in enumerate(results):\n",
|
||||
" print(f\"{i+1}. {doc.page_content} (Country: {doc.metadata['country']})\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8d6a31c2",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Customizing Number of Results\n",
|
||||
"\n",
|
||||
"You can adjust the number of results at query time by passing `k` as a parameter:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "5d81af33",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Query: Where is France?\n",
|
||||
"Top 1 result:\n",
|
||||
"1. Paris is the capital of France (Country: France)\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Query for a specific country, with custom k\n",
|
||||
"query = \"Where is France?\"\n",
|
||||
"results = retriever.invoke(query, k=1) # Override default k\n",
|
||||
"\n",
|
||||
"print(f\"Query: {query}\")\n",
|
||||
"print(f\"Top {len(results)} result:\")\n",
|
||||
"for i, doc in enumerate(results):\n",
|
||||
" print(f\"{i+1}. {doc.page_content} (Country: {doc.metadata['country']})\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3e8a40f1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Async Support\n",
|
||||
"\n",
|
||||
"NebiusRetriever supports async operations:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "8fc36122",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Async query: What are some capital cities?\n",
|
||||
"Top 3 results:\n",
|
||||
"1. Washington DC is the capital of the United States (Country: USA)\n",
|
||||
"2. Canberra is the capital of Australia (Country: Australia)\n",
|
||||
"3. Paris is the capital of France (Country: France)\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import asyncio\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"async def retrieve_async():\n",
|
||||
" query = \"What are some capital cities?\"\n",
|
||||
" results = await retriever.ainvoke(query)\n",
|
||||
"\n",
|
||||
" print(f\"Async query: {query}\")\n",
|
||||
" print(f\"Top {len(results)} results:\")\n",
|
||||
" for i, doc in enumerate(results):\n",
|
||||
" print(f\"{i+1}. {doc.page_content} (Country: {doc.metadata['country']})\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"await retrieve_async()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d5bc71e5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Handling Empty Documents"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"id": "123da4fb",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Number of results: 0\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Create a retriever with empty documents\n",
|
||||
"empty_retriever = NebiusRetriever(\n",
|
||||
" embeddings=embeddings,\n",
|
||||
" docs=[],\n",
|
||||
" k=2, # Empty document list\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Test the retriever with empty docs\n",
|
||||
"results = empty_retriever.invoke(\"What are the capitals of European countries?\")\n",
|
||||
"print(f\"Number of results: {len(results)}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9db2f342",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Use within a chain\n",
|
||||
"\n",
|
||||
"NebiusRetriever works seamlessly in LangChain RAG pipelines. Here's an example of creating a simple RAG chain with the NebiusRetriever:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"id": "e1e8c9f2",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Based on the context provided, three European capitals are:\n",
|
||||
"\n",
|
||||
"1. Paris\n",
|
||||
"2. Berlin\n",
|
||||
"3. Rome\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.runnables import RunnablePassthrough\n",
|
||||
"from langchain_nebius import ChatNebius\n",
|
||||
"\n",
|
||||
"# Initialize LLM\n",
|
||||
"llm = ChatNebius(model=\"meta-llama/Llama-3.3-70B-Instruct-fast\")\n",
|
||||
"\n",
|
||||
"# Create a prompt template\n",
|
||||
"prompt = ChatPromptTemplate.from_template(\n",
|
||||
" \"\"\"\n",
|
||||
"Answer the question based only on the following context:\n",
|
||||
"\n",
|
||||
"Context:\n",
|
||||
"{context}\n",
|
||||
"\n",
|
||||
"Question: {question}\n",
|
||||
"\"\"\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Format documents function\n",
|
||||
"def format_docs(docs):\n",
|
||||
" return \"\\n\\n\".join(doc.page_content for doc in docs)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Create RAG chain\n",
|
||||
"rag_chain = (\n",
|
||||
" {\"context\": retriever | format_docs, \"question\": RunnablePassthrough()}\n",
|
||||
" | prompt\n",
|
||||
" | llm\n",
|
||||
" | StrOutputParser()\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Run the chain\n",
|
||||
"answer = rag_chain.invoke(\"What are three European capitals?\")\n",
|
||||
"print(answer)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b3a6f2c4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Creating a Search Tool\n",
|
||||
"\n",
|
||||
"You can use the `NebiusRetrievalTool` to create a tool for agents:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"id": "784d53c4",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Tool results:\n",
|
||||
"Document 1:\n",
|
||||
"Paris is the capital of France\n",
|
||||
"\n",
|
||||
"Document 2:\n",
|
||||
"Berlin is the capital of Germany\n",
|
||||
"\n",
|
||||
"Document 3:\n",
|
||||
"Rome is the capital of Italy\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_nebius import NebiusRetrievalTool\n",
|
||||
"\n",
|
||||
"# Create a retrieval tool\n",
|
||||
"tool = NebiusRetrievalTool(\n",
|
||||
" retriever=retriever,\n",
|
||||
" name=\"capital_search\",\n",
|
||||
" description=\"Search for information about capital cities around the world\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Use the tool\n",
|
||||
"result = tool.invoke({\"query\": \"capitals in Europe\", \"k\": 3})\n",
|
||||
"print(\"Tool results:\")\n",
|
||||
"print(result)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5a4a3453",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## How It Works\n",
|
||||
"\n",
|
||||
"The NebiusRetriever works by:\n",
|
||||
"\n",
|
||||
"1. During initialization:\n",
|
||||
" - It stores the provided documents\n",
|
||||
" - It uses the provided NebiusEmbeddings to compute embeddings for all documents\n",
|
||||
" - These embeddings are stored in memory for quick retrieval\n",
|
||||
"\n",
|
||||
"2. During retrieval (`invoke` or `get_relevant_documents`):\n",
|
||||
" - It embeds the query using the same embedding model\n",
|
||||
" - It computes similarity scores between the query embedding and all document embeddings\n",
|
||||
" - It returns the top-k documents sorted by similarity\n",
|
||||
"\n",
|
||||
"This approach is efficient for medium-sized document collections, as it avoids the need for a separate vector database while still providing high-quality semantic search."
|
||||
]
|
||||
},
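The retrieval logic described above is a standard in-memory top-k similarity search. A minimal sketch of that logic (illustrative only, not the actual `NebiusRetriever` implementation; the helper name is hypothetical):

```python
import numpy as np


def top_k_by_similarity(query, docs, doc_vectors, embeddings, k=3):
    """Return the k documents whose embeddings are most similar to the query."""
    q = np.array(embeddings.embed_query(query))  # embed the query once
    m = np.array(doc_vectors)  # shape (n_docs, dim), precomputed at init time
    # Cosine similarity as normalized dot products
    scores = (m @ q) / (np.linalg.norm(m, axis=1) * np.linalg.norm(q))
    top = np.argsort(scores)[::-1][:k]  # indices of the k highest scores
    return [docs[i] for i in top]
```

Because the document embeddings are computed once at initialization, each query costs a single embedding call plus one matrix-vector product.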
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f7a35f40",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For more details about the Nebius AI Studio API, visit the [Nebius AI Studio Documentation](https://studio.nebius.com/api-reference)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "96439983",
|
||||
"metadata": {},
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.8"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -135,14 +135,14 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_ibm import WatsonxEmbeddings\n",
|
||||
"\n",
|
||||
"watsonx_embedding = WatsonxEmbeddings(\n",
|
||||
" model_id=\"ibm/slate-125m-english-rtrvr\",\n",
|
||||
" model_id=\"ibm/granite-embedding-107m-multilingual\",\n",
|
||||
" url=\"https://us-south.ml.cloud.ibm.com\",\n",
|
||||
" project_id=\"PASTE YOUR PROJECT_ID HERE\",\n",
|
||||
" params=embed_params,\n",
|
||||
@@ -163,7 +163,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"watsonx_embedding = WatsonxEmbeddings(\n",
|
||||
" model_id=\"ibm/slate-125m-english-rtrvr\",\n",
|
||||
" model_id=\"ibm/granite-embedding-107m-multilingual\",\n",
|
||||
" url=\"PASTE YOUR URL HERE\",\n",
|
||||
" username=\"PASTE YOUR USERNAME HERE\",\n",
|
||||
" password=\"PASTE YOUR PASSWORD HERE\",\n",
|
||||
@@ -192,7 +192,7 @@
|
||||
"api_client = APIClient(...)\n",
|
||||
"\n",
|
||||
"watsonx_embedding = WatsonxEmbeddings(\n",
|
||||
" model_id=\"ibm/slate-125m-english-rtrvr\",\n",
|
||||
" model_id=\"ibm/granite-embedding-107m-multilingual\",\n",
|
||||
" watsonx_client=api_client,\n",
|
||||
")"
|
||||
]
|
||||
@@ -341,7 +341,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.14"
|
||||
"version": "3.11.12"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
447
docs/docs/integrations/text_embedding/nebius.ipynb
Normal file
@@ -0,0 +1,447 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Nebius\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2970dd75-8ebf-4b51-8282-9b454b8f356d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Nebius Text Embeddings\n",
|
||||
"\n",
|
||||
"[Nebius AI Studio](https://studio.nebius.ai/) provides API access to high-quality embedding models through a unified interface. The Nebius embedding models convert text into numerical vectors that capture semantic meaning, making them useful for various applications like semantic search, clustering, and recommendations."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "overview-section",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Overview\n",
|
||||
"\n",
|
||||
"The `NebiusEmbeddings` class provides access to Nebius AI Studio's embedding models through LangChain. These embeddings can be used for semantic search, document similarity, and other NLP tasks requiring vector representations of text.\n",
|
||||
"\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"- **Provider**: Nebius AI Studio\n",
|
||||
"- **Model Types**: Text embedding models\n",
|
||||
"- **Primary Use Case**: Generate vector representations of text for semantic similarity and retrieval\n",
|
||||
"- **Available Models**: Various embedding models including BAAI/bge-en-icl and others\n",
|
||||
"- **Dimensions**: Varies by model (typically 1024-4096 dimensions)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "setup-section",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The Nebius integration can be installed via pip:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "1ecdb29d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade langchain-nebius"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "89883202",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"Nebius requires an API key that can be passed as an initialization parameter `api_key` or set as the environment variable `NEBIUS_API_KEY`. You can obtain an API key by creating an account on [Nebius AI Studio](https://studio.nebius.ai/)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "637bb53f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"# Make sure you've set your API key as an environment variable\n",
|
||||
"if \"NEBIUS_API_KEY\" not in os.environ:\n",
|
||||
" os.environ[\"NEBIUS_API_KEY\"] = getpass.getpass(\"Enter your Nebius API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "instantiation-section",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"The `NebiusEmbeddings` class can be instantiated with optional parameters for the API key and model name:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "37e9dc05",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_nebius import NebiusEmbeddings\n",
|
||||
"\n",
|
||||
"# Initialize the embeddings model\n",
|
||||
"embeddings = NebiusEmbeddings(\n",
|
||||
" # api_key=\"YOUR_API_KEY\", # You can pass the API key directly\n",
|
||||
" model=\"BAAI/bge-en-icl\" # The default embedding model\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "available-models",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Available Models\n",
|
||||
"\n",
|
||||
"The list of supported models is available at https://studio.nebius.com/?modality=embedding"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "indexing-retrieval-section",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Indexing and Retrieval\n",
|
||||
"\n",
|
||||
"Embedding models are often used in retrieval-augmented generation (RAG) flows, both for indexing data and later retrieving it. The following example demonstrates how to use `NebiusEmbeddings` with a vector store for document retrieval."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "123da4fb",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Search results for query: How does the brain influence AI?\n",
|
||||
"Result 1: Neural networks are inspired by the human brain's structure\n",
|
||||
"Result 2: Deep learning uses neural networks with many layers\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.vectorstores import FAISS\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"\n",
|
||||
"# Prepare documents\n",
|
||||
"docs = [\n",
|
||||
" Document(\n",
|
||||
" page_content=\"Machine learning algorithms build mathematical models based on sample data\"\n",
|
||||
" ),\n",
|
||||
" Document(page_content=\"Deep learning uses neural networks with many layers\"),\n",
|
||||
" Document(page_content=\"Climate change is a major global environmental challenge\"),\n",
|
||||
" Document(\n",
|
||||
" page_content=\"Neural networks are inspired by the human brain's structure\"\n",
|
||||
" ),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"# Create vector store\n",
|
||||
"vector_store = FAISS.from_documents(docs, embeddings)\n",
|
||||
"\n",
|
||||
"# Perform similarity search\n",
|
||||
"query = \"How does the brain influence AI?\"\n",
|
||||
"results = vector_store.similarity_search(query, k=2)\n",
|
||||
"\n",
|
||||
"print(\"Search results for query:\", query)\n",
|
||||
"for i, doc in enumerate(results):\n",
|
||||
" print(f\"Result {i+1}: {doc.page_content}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "in-memory-vectorstore",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Using with InMemoryVectorStore\n",
|
||||
"\n",
|
||||
"You can also use the `InMemoryVectorStore` for lightweight applications:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "vectorstore-example",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Retrieved document: LangChain is a framework for developing applications powered by language models\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.vectorstores import InMemoryVectorStore\n",
|
||||
"\n",
|
||||
"# Create a sample text\n",
|
||||
"text = \"LangChain is a framework for developing applications powered by language models\"\n",
|
||||
"\n",
|
||||
"# Create a vector store\n",
|
||||
"vectorstore = InMemoryVectorStore.from_texts(\n",
|
||||
" [text],\n",
|
||||
" embedding=embeddings,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Use as a retriever\n",
|
||||
"retriever = vectorstore.as_retriever()\n",
|
||||
"\n",
|
||||
"# Retrieve similar documents\n",
|
||||
"docs = retriever.invoke(\"What is LangChain?\")\n",
|
||||
"print(f\"Retrieved document: {docs[0].page_content}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "direct-usage-section",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Direct Usage\n",
|
||||
"\n",
|
||||
"You can directly use the `NebiusEmbeddings` class to generate embeddings for text without using a vector store."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f5a731d2",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Embedding a Single Text\n",
|
||||
"\n",
|
||||
"You can use the `embed_query` method to embed a single piece of text:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "3ed26f78",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Embedding dimension: 4096\n",
|
||||
"First few values: [0.007419586181640625, 0.002246856689453125, 0.00193023681640625, -0.0066070556640625, -0.0179901123046875]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"query = \"What is machine learning?\"\n",
|
||||
"query_embedding = embeddings.embed_query(query)\n",
|
||||
"\n",
|
||||
"# Check the embedding dimension\n",
|
||||
"print(f\"Embedding dimension: {len(query_embedding)}\")\n",
|
||||
"print(f\"First few values: {query_embedding[:5]}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "72f31d5a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Embedding Multiple Texts\n",
|
||||
"\n",
|
||||
"You can embed multiple texts at once using the `embed_documents` method:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "e7b7170d",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Number of document embeddings: 3\n",
|
||||
"Each embedding has 4096 dimensions\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"documents = [\n",
|
||||
" \"Machine learning is a branch of artificial intelligence\",\n",
|
||||
" \"Deep learning is a subfield of machine learning\",\n",
|
||||
" \"Natural language processing deals with interactions between computers and human language\",\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"document_embeddings = embeddings.embed_documents(documents)\n",
|
||||
"\n",
|
||||
"# Check the results\n",
|
||||
"print(f\"Number of document embeddings: {len(document_embeddings)}\")\n",
|
||||
"print(f\"Each embedding has {len(document_embeddings[0])} dimensions\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3e8a40f1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Async Support\n",
|
||||
"\n",
|
||||
"NebiusEmbeddings supports async operations:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "8fc36122",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Async query embedding dimension: 4096\n",
|
||||
"Async document embeddings count: 3\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import asyncio\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"async def generate_embeddings_async():\n",
|
||||
" # Embed a single query\n",
|
||||
" query_result = await embeddings.aembed_query(\"What is the capital of France?\")\n",
|
||||
" print(f\"Async query embedding dimension: {len(query_result)}\")\n",
|
||||
"\n",
|
||||
" # Embed multiple documents\n",
|
||||
" docs = [\n",
|
||||
" \"Paris is the capital of France\",\n",
|
||||
" \"Berlin is the capital of Germany\",\n",
|
||||
" \"Rome is the capital of Italy\",\n",
|
||||
" ]\n",
|
||||
" docs_result = await embeddings.aembed_documents(docs)\n",
|
||||
" print(f\"Async document embeddings count: {len(docs_result)}\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"await generate_embeddings_async()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4aa82e17",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Document Similarity Example"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "7e78e429",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Document Similarity Matrix:\n",
|
||||
"Document 1: ['1.0000', '0.8282', '0.5811', '0.7985']\n",
|
||||
"Document 2: ['0.8282', '1.0000', '0.5897', '0.8315']\n",
|
||||
"Document 3: ['0.5811', '0.5897', '1.0000', '0.5918']\n",
|
||||
"Document 4: ['0.7985', '0.8315', '0.5918', '1.0000']\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import numpy as np\n",
|
||||
"from scipy.spatial.distance import cosine\n",
|
||||
"\n",
|
||||
"# Create some documents\n",
|
||||
"documents = [\n",
|
||||
" \"Machine learning algorithms build mathematical models based on sample data\",\n",
|
||||
" \"Deep learning uses neural networks with many layers\",\n",
|
||||
" \"Climate change is a major global environmental challenge\",\n",
|
||||
" \"Neural networks are inspired by the human brain's structure\",\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"# Embed the documents\n",
|
||||
"embeddings_list = embeddings.embed_documents(documents)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Function to calculate similarity\n",
|
||||
"def calculate_similarity(embedding1, embedding2):\n",
|
||||
" return 1 - cosine(embedding1, embedding2)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Print similarity matrix\n",
|
||||
"print(\"Document Similarity Matrix:\")\n",
|
||||
"for i, emb_i in enumerate(embeddings_list):\n",
|
||||
" similarities = []\n",
|
||||
" for j, emb_j in enumerate(embeddings_list):\n",
|
||||
" similarity = calculate_similarity(emb_i, emb_j)\n",
|
||||
" similarities.append(f\"{similarity:.4f}\")\n",
|
||||
" print(f\"Document {i+1}: {similarities}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f7a35f40",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API Reference\n",
|
||||
"\n",
|
||||
"For more details about the Nebius AI Studio API, visit the [Nebius AI Studio Documentation](https://studio.nebius.ai/docs/api-reference)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "eb1eb70d",
|
||||
"metadata": {},
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.8"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -55,7 +55,9 @@
|
||||
"cell_type": "markdown",
|
||||
"id": "c84fb993",
|
||||
"metadata": {},
|
||||
"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
|
||||
"source": [
|
||||
"To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
@@ -108,7 +110,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 2,
|
||||
"id": "9ea7a09b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -127,7 +129,7 @@
|
||||
"source": [
|
||||
"## Indexing and Retrieval\n",
|
||||
"\n",
|
||||
"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/).\n",
|
||||
"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/rag/).\n",
|
||||
"\n",
|
||||
"Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document in the `InMemoryVectorStore`."
|
||||
]
|
||||
@@ -139,14 +141,11 @@
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'LangChain is the framework for building context-aware reasoning applications'"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"LangChain is the framework for building context-aware reasoning applications\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
@@ -166,8 +165,8 @@
|
||||
"# Retrieve the most similar text\n",
|
||||
"retrieved_documents = retriever.invoke(\"What is LangChain?\")\n",
|
||||
"\n",
|
||||
"# show the retrieved document's content\n",
|
||||
"retrieved_documents[0].page_content"
|
||||
"# Show the retrieved document's content\n",
|
||||
"print(retrieved_documents[0].page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -252,7 +251,7 @@
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
@@ -266,7 +265,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.6"
|
||||
"version": "3.13.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -119,7 +119,7 @@
|
||||
"id": "2eb1b45b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"and use the `create_react_agent` functionality to initialize a ReAct agent. You will also need to set up your OPEN_API_KEY (visit https://platform.openai.com) in order to access OpenAI's chat models."
|
||||
"and use the `create_react_agent` functionality to initialize a ReAct agent. You will also need to set up your OPENAI_API_KEY (visit https://platform.openai.com) in order to access OpenAI's chat models."
|
||||
]
|
||||
},
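A minimal sketch of that setup (hedged: the model id and the key prompt are illustrative, and `tools` is assumed to be the tool list defined elsewhere in the notebook):

```python
import getpass
import os

from langchain.chat_models import init_chat_model
from langgraph.prebuilt import create_react_agent

# Prompt for the key only if it isn't already set in the environment
if "OPENAI_API_KEY" not in os.environ:
    os.environ["OPENAI_API_KEY"] = getpass.getpass("Enter your OpenAI API key: ")

model = init_chat_model("openai:gpt-4.1")  # illustrative model id
agent = create_react_agent(model, tools)  # `tools`: assumed defined in earlier cells
```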
|
||||
{
|
||||
|
||||
@@ -64,7 +64,7 @@
|
||||
"source": [
|
||||
"## Adding text\n",
|
||||
"\n",
|
||||
"For adding text to your datastore first you have to go to [Datastores](https://console.llmrails.com/datastores) page and create one. Click Create Datastore button and choose a name and embedding model for your datastore. Then get your datastore id from newly created datatore settings.\n",
|
||||
"For adding text to your datastore first you have to go to [Datastores](https://console.llmrails.com/datastores) page and create one. Click Create Datastore button and choose a name and embedding model for your datastore. Then get your datastore id from newly created datastore settings.\n",
|
||||
" "
|
||||
]
|
||||
},
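Once the datastore exists, indexing text takes only a few lines. A hedged sketch (the id and key values are placeholders; this assumes the `LLMRails` vector store class from `langchain_community` and its environment-variable configuration):

```python
import os

from langchain_community.vectorstores import LLMRails

# Placeholders: copy the datastore id and API key from your LLMRails console
os.environ["LLM_RAILS_DATASTORE_ID"] = "your-datastore-id"
os.environ["LLM_RAILS_API_KEY"] = "your-api-key"

# Index a first document into the datastore
llm_rails = LLMRails.from_texts(["Your text here"])
```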
|
||||
|
||||
@@ -461,7 +461,7 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"('From: bil@okcforum.osrhe.edu (Bill Conner)\\nSubject: Re: Not the Omni!\\nNntp-Posting-Host: okcforum.osrhe.edu\\nOrganization: Okcforum Unix Users Group\\nX-Newsreader: TIN [version 1.1 PL6]\\nLines: 18\\n\\nCharley Wingate (mangoe@cs.umd.edu) wrote:\\n: \\n: >> Please enlighten me. How is omnipotence contradictory?\\n: \\n: >By definition, all that can occur in the universe is governed by the rules\\n: >of nature. Thus god cannot break them. Anything that god does must be allowed\\n: >in the rules somewhere. Therefore, omnipotence CANNOT exist! It contradicts\\n: >the rules of nature.\\n: \\n: Obviously, an omnipotent god can change the rules.\\n\\nWhen you say, \"By definition\", what exactly is being defined;\\ncertainly not omnipotence. You seem to be saying that the \"rules of\\nnature\" are pre-existant somehow, that they not only define nature but\\nactually cause it. If that\\'s what you mean I\\'d like to hear your\\nfurther thoughts on the question.\\n\\nBill\\n',\n",
|
||||
"('From: bil@okcforum.osrhe.edu (Bill Conner)\\nSubject: Re: Not the Omni!\\nNntp-Posting-Host: okcforum.osrhe.edu\\nOrganization: Okcforum Unix Users Group\\nX-Newsreader: TIN [version 1.1 PL6]\\nLines: 18\\n\\nCharley Wingate (mangoe@cs.umd.edu) wrote:\\n: \\n: >> Please enlighten me. How is omnipotence contradictory?\\n: \\n: >By definition, all that can occur in the universe is governed by the rules\\n: >of nature. Thus god cannot break them. Anything that god does must be allowed\\n: >in the rules somewhere. Therefore, omnipotence CANNOT exist! It contradicts\\n: >the rules of nature.\\n: \\n: Obviously, an omnipotent god can change the rules.\\n\\nWhen you say, \"By definition\", what exactly is being defined;\\ncertainly not omnipotence. You seem to be saying that the \"rules of\\nnature\" are pre-existent somehow, that they not only define nature but\\nactually cause it. If that\\'s what you mean I\\'d like to hear your\\nfurther thoughts on the question.\\n\\nBill\\n',\n",
|
||||
" {'category': 'alt.atheism'})"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -21,9 +21,7 @@
|
||||
"source": [
|
||||
"# Build an Agent\n",
|
||||
"\n",
|
||||
"By themselves, language models can't take actions - they just output text.\n",
|
||||
"A big use case for LangChain is creating **agents**.\n",
|
||||
"[Agents](/docs/concepts/agents) are systems that use [LLMs](/docs/concepts/chat_models) as reasoning engines to determine which actions to take and the inputs necessary to perform the action.\n",
|
||||
"LangChain supports the creation of [agents](/docs/concepts/agents), or systems that use [LLMs](/docs/concepts/chat_models) as reasoning engines to determine which actions to take and the inputs necessary to perform the action.\n",
|
||||
"After executing actions, the results can be fed back into the LLM to determine whether more actions are needed, or whether it is okay to finish. This is often achieved via [tool-calling](/docs/concepts/tool_calling).\n",
|
||||
"\n",
|
||||
"In this tutorial we will build an agent that can interact with a search engine. You will be able to ask this agent questions, watch it call the search tool, and have conversations with it.\n",
|
||||
@@ -43,16 +41,15 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Import relevant functionality\n",
|
||||
"from langchain_anthropic import ChatAnthropic\n",
|
||||
"from langchain_community.tools.tavily_search import TavilySearchResults\n",
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"from langchain.chat_models import init_chat_model\n",
|
||||
"from langchain_tavily import TavilySearch\n",
|
||||
"from langgraph.checkpoint.memory import MemorySaver\n",
|
||||
"from langgraph.prebuilt import create_react_agent\n",
|
||||
"\n",
|
||||
"# Create the agent\n",
|
||||
"memory = MemorySaver()\n",
|
||||
"model = ChatAnthropic(model_name=\"claude-3-sonnet-20240229\")\n",
|
||||
"search = TavilySearchResults(max_results=2)\n",
|
||||
"model = init_chat_model(\"anthropic:claude-3-5-sonnet-latest\")\n",
|
||||
"search = TavilySearch(max_results=2)\n",
|
||||
"tools = [search]\n",
|
||||
"agent_executor = create_react_agent(model, tools, checkpointer=memory)"
|
||||
]
|
||||
@@ -69,20 +66,23 @@
|
||||
"text": [
|
||||
"================================\u001b[1m Human Message \u001b[0m=================================\n",
|
||||
"\n",
|
||||
"hi im bob! and i live in sf\n",
|
||||
"Hi, I'm Bob and I life in SF.\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"\n",
|
||||
"Hello Bob! Since you didn't ask a specific question, I don't need to use any tools right now. I'm an AI assistant created by Anthropic to be helpful, honest, and harmless. Feel free to ask me anything and I'll do my best to provide a useful response or look up information using my capabilities.\n"
|
||||
"Hello Bob! I notice you've introduced yourself and mentioned you live in SF (San Francisco), but you haven't asked a specific question or made a request that requires the use of any tools. Is there something specific you'd like to know about San Francisco or any other topic? I'd be happy to help you find information using the available search tools.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Use the agent\n",
|
||||
"config = {\"configurable\": {\"thread_id\": \"abc123\"}}\n",
|
||||
"\n",
|
||||
"input_message = {\n",
|
||||
" \"role\": \"user\",\n",
|
||||
" \"content\": \"Hi, I'm Bob and I life in SF.\",\n",
|
||||
"}\n",
|
||||
"for step in agent_executor.stream(\n",
|
||||
" {\"messages\": [HumanMessage(content=\"hi im bob! and i live in sf\")]},\n",
|
||||
" config,\n",
|
||||
" stream_mode=\"values\",\n",
|
||||
" {\"messages\": [input_message]}, config, stream_mode=\"values\"\n",
|
||||
"):\n",
|
||||
" step[\"messages\"][-1].pretty_print()"
|
||||
]
|
||||
@@ -99,32 +99,40 @@
|
||||
"text": [
|
||||
"================================\u001b[1m Human Message \u001b[0m=================================\n",
|
||||
"\n",
|
||||
"whats the weather where I live?\n",
|
||||
"What's the weather where I live?\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"\n",
|
||||
"[{'text': 'To get the current weather for your location in San Francisco, I can use the tavily_search_results_json tool:', 'type': 'text'}, {'id': 'toolu_01AKa2MErG1CU3zRiGsvpBud', 'input': {'query': 'san francisco weather'}, 'name': 'tavily_search_results_json', 'type': 'tool_use'}]\n",
|
||||
"[{'text': 'Let me search for current weather information in San Francisco.', 'type': 'text'}, {'id': 'toolu_011kSdheoJp8THURoLmeLtZo', 'input': {'query': 'current weather San Francisco CA'}, 'name': 'tavily_search', 'type': 'tool_use'}]\n",
|
||||
"Tool Calls:\n",
|
||||
" tavily_search_results_json (toolu_01AKa2MErG1CU3zRiGsvpBud)\n",
|
||||
" Call ID: toolu_01AKa2MErG1CU3zRiGsvpBud\n",
|
||||
" tavily_search (toolu_011kSdheoJp8THURoLmeLtZo)\n",
|
||||
" Call ID: toolu_011kSdheoJp8THURoLmeLtZo\n",
|
||||
" Args:\n",
|
||||
" query: san francisco weather\n",
|
||||
" query: current weather San Francisco CA\n",
|
||||
"=================================\u001b[1m Tool Message \u001b[0m=================================\n",
|
||||
"Name: tavily_search_results_json\n",
|
||||
"Name: tavily_search\n",
|
||||
"\n",
|
||||
"[{\"url\": \"https://www.weatherapi.com/\", \"content\": \"{'location': {'name': 'San Francisco', 'region': 'California', 'country': 'United States of America', 'lat': 37.775, 'lon': -122.4183, 'tz_id': 'America/Los_Angeles', 'localtime_epoch': 1739994486, 'localtime': '2025-02-19 11:48'}, 'current': {'last_updated_epoch': 1739994300, 'last_updated': '2025-02-19 11:45', 'temp_c': 13.3, 'temp_f': 55.9, 'is_day': 1, 'condition': {'text': 'Light rain', 'icon': '//cdn.weatherapi.com/weather/64x64/day/296.png', 'code': 1183}, 'wind_mph': 5.8, 'wind_kph': 9.4, 'wind_degree': 195, 'wind_dir': 'SSW', 'pressure_mb': 1023.0, 'pressure_in': 30.2, 'precip_mm': 0.0, 'precip_in': 0.0, 'humidity': 87, 'cloud': 100, 'feelslike_c': 12.7, 'feelslike_f': 54.8, 'windchill_c': 9.1, 'windchill_f': 48.4, 'heatindex_c': 10.2, 'heatindex_f': 50.3, 'dewpoint_c': 9.8, 'dewpoint_f': 49.7, 'vis_km': 4.0, 'vis_miles': 2.0, 'uv': 1.4, 'gust_mph': 8.9, 'gust_kph': 14.4}}\"}, {\"url\": \"https://world-weather.info/forecast/usa/san_francisco/february-2025/\", \"content\": \"Weather in San Francisco in February 2025 (California) - Detailed Weather Forecast for a Month Weather World Weather in San Francisco Weather in San Francisco in February 2025 San Francisco Weather Forecast for February 2025, is based on previous years' statistical data. +59°+50° +59°+52° +59°+50° +61°+52° +59°+50° +61°+50° +61°+52° +63°+52° +61°+52° +61°+50° +61°+50° +61°+50° +59°+50° +59°+50° +61°+50° +61°+52° +59°+50° +59°+48° +57°+48° +59°+50° +59°+48° +59°+50° +57°+46° +61°+50° +61°+50° +59°+50° +59°+48° +59°+50° Extended weather forecast in San Francisco HourlyWeek10-Day14-Day30-DayYear Weather in large and nearby cities Weather in Washington, D.C.+41° Sacramento+55° Pleasanton+55° Redwood City+55° San Leandro+55° San Mateo+54° San Rafael+52° San Ramon+52° South San Francisco+54° Vallejo+50° Palo Alto+55° Pacifica+55° Berkeley+54° Castro Valley+55° Concord+52° Daly City+54° Noverd+52° Sign Hill+54° world's temperature today day day Temperature units\"}]\n",
|
||||
"{\"query\": \"current weather San Francisco CA\", \"follow_up_questions\": null, \"answer\": null, \"images\": [], \"results\": [{\"title\": \"Weather in San Francisco, CA\", \"url\": \"https://www.weatherapi.com/\", \"content\": \"{'location': {'name': 'San Francisco', 'region': 'California', 'country': 'United States of America', 'lat': 37.775, 'lon': -122.4183, 'tz_id': 'America/Los_Angeles', 'localtime_epoch': 1750168606, 'localtime': '2025-06-17 06:56'}, 'current': {'last_updated_epoch': 1750167900, 'last_updated': '2025-06-17 06:45', 'temp_c': 11.7, 'temp_f': 53.1, 'is_day': 1, 'condition': {'text': 'Fog', 'icon': '//cdn.weatherapi.com/weather/64x64/day/248.png', 'code': 1135}, 'wind_mph': 4.0, 'wind_kph': 6.5, 'wind_degree': 215, 'wind_dir': 'SW', 'pressure_mb': 1017.0, 'pressure_in': 30.02, 'precip_mm': 0.0, 'precip_in': 0.0, 'humidity': 86, 'cloud': 0, 'feelslike_c': 11.3, 'feelslike_f': 52.4, 'windchill_c': 8.7, 'windchill_f': 47.7, 'heatindex_c': 9.8, 'heatindex_f': 49.7, 'dewpoint_c': 9.6, 'dewpoint_f': 49.2, 'vis_km': 16.0, 'vis_miles': 9.0, 'uv': 0.0, 'gust_mph': 6.3, 'gust_kph': 10.2}}\", \"score\": 0.944705, \"raw_content\": null}, {\"title\": \"Weather in San Francisco in June 2025\", \"url\": \"https://world-weather.info/forecast/usa/san_francisco/june-2025/\", \"content\": \"Detailed ⚡ San Francisco Weather Forecast for June 2025 - day/night 🌡️ temperatures, precipitations - World-Weather.info. Add the current city. Search. Weather; Archive; Weather Widget °F. World; United States; California; Weather in San Francisco; ... 17 +64° +54° 18 +61° +54° 19\", \"score\": 0.86441374, \"raw_content\": null}], \"response_time\": 2.34}\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"\n",
|
||||
"The search results provide the current weather conditions and forecast for San Francisco. According to the data from WeatherAPI, the current temperature in San Francisco is around 55°F (13°C) with light rain and winds around 6 mph. The extended forecast shows temperatures ranging from the upper 40s to low 60s Fahrenheit over the next few weeks.\n",
|
||||
"Based on the search results, here's the current weather in San Francisco:\n",
|
||||
"- Temperature: 53.1°F (11.7°C)\n",
|
||||
"- Condition: Foggy\n",
|
||||
"- Wind: 4.0 mph from the Southwest\n",
|
||||
"- Humidity: 86%\n",
|
||||
"- Visibility: 9 miles\n",
|
||||
"\n",
|
||||
"So in summary, it's a cool, rainy day currently in San Francisco where you live, Bob. Let me know if you need any other details about the weather there!\n"
|
||||
"This is quite typical weather for San Francisco, with the characteristic fog that the city is known for. Would you like to know anything else about the weather or San Francisco in general?\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"input_message = {\n",
|
||||
" \"role\": \"user\",\n",
|
||||
" \"content\": \"What's the weather where I live?\",\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"for step in agent_executor.stream(\n",
|
||||
" {\"messages\": [HumanMessage(content=\"whats the weather where I live?\")]},\n",
|
||||
" config,\n",
|
||||
" stream_mode=\"values\",\n",
|
||||
" {\"messages\": [input_message]}, config, stream_mode=\"values\"\n",
|
||||
"):\n",
|
||||
" step[\"messages\"][-1].pretty_print()"
|
||||
]
|
||||
@@ -154,7 +162,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -U langchain-community langgraph langchain-anthropic tavily-python langgraph-checkpoint-sqlite"
|
||||
"%pip install -U langgraph langchain-tavily langgraph-checkpoint-sqlite"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -213,28 +221,28 @@
|
||||
"source": [
|
||||
"## Define tools\n",
|
||||
"\n",
|
||||
"We first need to create the tools we want to use. Our main tool of choice will be [Tavily](/docs/integrations/tools/tavily_search) - a search engine. We have a built-in tool in LangChain to easily use Tavily search engine as tool.\n"
|
||||
"We first need to create the tools we want to use. Our main tool of choice will be [Tavily](/docs/integrations/tools/tavily_search) - a search engine. We can use the dedicated [langchain-tavily](https://pypi.org/project/langchain-tavily/) [integration package](/docs/concepts/architecture/#integration-packages) to easily use Tavily search engine as tool with LangChain.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "002e23b1-fdf9-46e9-82d9-f467abdd3f35",
|
||||
"execution_count": 5,
|
||||
"id": "76a02d36-6ea2-4e62-88b4-6c480dd9c04f",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[{'url': 'https://www.weatherapi.com/', 'content': \"{'location': {'name': 'San Francisco', 'region': 'California', 'country': 'United States of America', 'lat': 37.775, 'lon': -122.4183, 'tz_id': 'America/Los_Angeles', 'localtime_epoch': 1739993250, 'localtime': '2025-02-19 11:27'}, 'current': {'last_updated_epoch': 1739992500, 'last_updated': '2025-02-19 11:15', 'temp_c': 13.3, 'temp_f': 55.9, 'is_day': 1, 'condition': {'text': 'Light rain', 'icon': '//cdn.weatherapi.com/weather/64x64/day/296.png', 'code': 1183}, 'wind_mph': 5.8, 'wind_kph': 9.4, 'wind_degree': 195, 'wind_dir': 'SSW', 'pressure_mb': 1023.0, 'pressure_in': 30.2, 'precip_mm': 0.0, 'precip_in': 0.0, 'humidity': 87, 'cloud': 100, 'feelslike_c': 12.7, 'feelslike_f': 54.8, 'windchill_c': 9.1, 'windchill_f': 48.4, 'heatindex_c': 10.2, 'heatindex_f': 50.3, 'dewpoint_c': 9.8, 'dewpoint_f': 49.7, 'vis_km': 4.0, 'vis_miles': 2.0, 'uv': 1.4, 'gust_mph': 8.9, 'gust_kph': 14.4}}\"}, {'url': 'https://weathershogun.com/weather/usa/ca/san-francisco/480/february/2025-02-19', 'content': 'San Francisco, California Weather: Wednesday, February 19, 2025. Cloudy weather, overcast skies with clouds. Day 61°. Night 43°.'}]\n"
|
||||
"{'query': 'What is the weather in SF', 'follow_up_questions': None, 'answer': None, 'images': [], 'results': [{'title': 'Weather in San Francisco, CA', 'url': 'https://www.weatherapi.com/', 'content': \"{'location': {'name': 'San Francisco', 'region': 'California', 'country': 'United States of America', 'lat': 37.775, 'lon': -122.4183, 'tz_id': 'America/Los_Angeles', 'localtime_epoch': 1750168606, 'localtime': '2025-06-17 06:56'}, 'current': {'last_updated_epoch': 1750167900, 'last_updated': '2025-06-17 06:45', 'temp_c': 11.7, 'temp_f': 53.1, 'is_day': 1, 'condition': {'text': 'Fog', 'icon': '//cdn.weatherapi.com/weather/64x64/day/248.png', 'code': 1135}, 'wind_mph': 4.0, 'wind_kph': 6.5, 'wind_degree': 215, 'wind_dir': 'SW', 'pressure_mb': 1017.0, 'pressure_in': 30.02, 'precip_mm': 0.0, 'precip_in': 0.0, 'humidity': 86, 'cloud': 0, 'feelslike_c': 11.3, 'feelslike_f': 52.4, 'windchill_c': 8.7, 'windchill_f': 47.7, 'heatindex_c': 9.8, 'heatindex_f': 49.7, 'dewpoint_c': 9.6, 'dewpoint_f': 49.2, 'vis_km': 16.0, 'vis_miles': 9.0, 'uv': 0.0, 'gust_mph': 6.3, 'gust_kph': 10.2}}\", 'score': 0.9185379, 'raw_content': None}, {'title': 'Weather in San Francisco in June 2025', 'url': 'https://world-weather.info/forecast/usa/san_francisco/june-2025/', 'content': \"Weather in San Francisco in June 2025 (California) - Detailed Weather Forecast for a Month * Weather in San Francisco Weather in San Francisco in June 2025 * 1 +63° +55° * 2 +66° +54° * 3 +66° +55° * 4 +66° +54° * 5 +66° +55° * 6 +66° +57° * 7 +64° +55° * 8 +63° +55° * 9 +63° +54° * 10 +59° +54° * 11 +59° +54° * 12 +61° +54° Weather in Washington, D.C.**+68°** Sacramento**+81°** Pleasanton**+72°** Redwood City**+68°** San Leandro**+61°** San Mateo**+64°** San Rafael**+70°** San Ramon**+64°** South San Francisco**+61°** Daly City**+59°** Wilder**+66°** Woodacre**+70°** world's temperature today Colchani day+50°F night+16°F Az Zubayr day+124°F night+93°F Weather forecast on your site Install _San Francisco_ +61° Temperature units\", 'score': 0.7978881, 'raw_content': None}], 'response_time': 2.62}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.tools.tavily_search import TavilySearchResults\n",
|
||||
"from langchain_tavily import TavilySearch\n",
|
||||
"\n",
|
||||
"search = TavilySearchResults(max_results=2)\n",
|
||||
"search_results = search.invoke(\"what is the weather in SF\")\n",
|
||||
"search = TavilySearch(max_results=2)\n",
|
||||
"search_results = search.invoke(\"What is the weather in SF\")\n",
|
||||
"print(search_results)\n",
|
||||
"# If we want, we can create other tools.\n",
|
||||
"# Once we have all the tools we want, we can put them in a list that we will reference later.\n",
|
||||
@@ -252,12 +260,12 @@
|
||||
"\n",
|
||||
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
|
||||
"\n",
|
||||
"<ChatModelTabs overrideParams={{openai: {model: \"gpt-4\"}}} />\n"
|
||||
"<ChatModelTabs overrideParams={{openai: {model: \"gpt-4.1\"}}} />\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 6,
|
||||
"id": "69185491",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -267,7 +275,7 @@
|
||||
"\n",
|
||||
"from langchain_anthropic import ChatAnthropic\n",
|
||||
"\n",
|
||||
"model = ChatAnthropic(model=\"claude-3-sonnet-20240229\")"
|
||||
"model = ChatAnthropic(model=\"claude-3-5-sonnet-latest\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -280,26 +288,25 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 7,
|
||||
"id": "c96c960b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Hi there!'"
|
||||
"'Hello! How can I help you today?'"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"\n",
|
||||
"response = model.invoke([HumanMessage(content=\"hi!\")])\n",
|
||||
"response.content"
|
||||
"query = \"Hi!\"\n",
|
||||
"response = model.invoke([{\"role\": \"user\", \"content\": query}])\n",
|
||||
"response.text()"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -312,7 +319,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"execution_count": 8,
|
||||
"id": "ba692a74",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -330,7 +337,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": 11,
|
||||
"id": "b6a7e925",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -338,16 +345,20 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"ContentString: Hello!\n",
|
||||
"ToolCalls: []\n"
|
||||
"Message content: Hello! I'm here to help you. I have access to a powerful search tool that can help answer questions and find information about various topics. What would you like to know about?\n",
|
||||
"\n",
|
||||
"Feel free to ask any question or request information, and I'll do my best to assist you using the available tools.\n",
"\n",
"Tool calls: []\n"
]
}
],
"source": [
"response = model_with_tools.invoke([HumanMessage(content=\"Hi!\")])\n",
"query = \"Hi!\"\n",
"response = model_with_tools.invoke([{\"role\": \"user\", \"content\": query}])\n",
"\n",
"print(f\"ContentString: {response.content}\")\n",
"print(f\"ToolCalls: {response.tool_calls}\")"
"print(f\"Message content: {response.text()}\\n\")\n",
"print(f\"Tool calls: {response.tool_calls}\")"
]
},
{
@@ -360,7 +371,7 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 16,
"id": "688b465d",
"metadata": {},
"outputs": [
@@ -368,16 +379,18 @@
"name": "stdout",
"output_type": "stream",
"text": [
"ContentString: \n",
"ToolCalls: [{'name': 'tavily_search_results_json', 'args': {'query': 'weather san francisco'}, 'id': 'toolu_01VTP7DUvSfgtYxsq9x4EwMp'}]\n"
"Message content: I'll help you search for information about the weather in San Francisco.\n",
"\n",
"Tool calls: [{'name': 'tavily_search', 'args': {'query': 'current weather San Francisco'}, 'id': 'toolu_015gdPn1jbB2Z21DmN2RAnti', 'type': 'tool_call'}]\n"
]
}
],
"source": [
"response = model_with_tools.invoke([HumanMessage(content=\"What's the weather in SF?\")])\n",
"query = \"Search for the weather in SF\"\n",
"response = model_with_tools.invoke([{\"role\": \"user\", \"content\": query}])\n",
"\n",
"print(f\"ContentString: {response.content}\")\n",
"print(f\"ToolCalls: {response.tool_calls}\")"
"print(f\"Message content: {response.text()}\\n\")\n",
"print(f\"Tool calls: {response.tool_calls}\")"
]
},
{
@@ -413,7 +426,7 @@
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 13,
"id": "89cf72b4-6046-4b47-8f27-5522d8cb8036",
"metadata": {},
"outputs": [],
@@ -437,26 +450,29 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 14,
"id": "114ba50d",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[HumanMessage(content='hi!', id='a820fcc5-9b87-457a-9af0-f21768143ee3'),\n",
" AIMessage(content='Hello!', response_metadata={'id': 'msg_01VbC493X1VEDyusgttiEr1z', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 264, 'output_tokens': 5}}, id='run-0e0ddae8-a85b-4bd6-947c-c36c857a4698-0', usage_metadata={'input_tokens': 264, 'output_tokens': 5, 'total_tokens': 269})]"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
"name": "stdout",
"output_type": "stream",
"text": [
"================================\u001b[1m Human Message \u001b[0m=================================\n",
"\n",
"Hi!\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"\n",
"Hello! I'm here to help you with your questions using the available search tools. Please feel free to ask any question, and I'll do my best to find relevant and accurate information for you.\n"
]
}
],
"source": [
"response = agent_executor.invoke({\"messages\": [HumanMessage(content=\"hi!\")]})\n",
"input_message = {\"role\": \"user\", \"content\": \"Hi!\"}\n",
"response = agent_executor.invoke({\"messages\": [input_message]})\n",
"\n",
"response[\"messages\"]"
"for message in response[\"messages\"]:\n",
" message.pretty_print()"
]
},
{
@@ -471,29 +487,48 @@
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": 15,
"id": "77c2f769",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[HumanMessage(content='whats the weather in sf?', id='1d6c96bb-4ddb-415c-a579-a07d5264de0d'),\n",
" AIMessage(content=[{'id': 'toolu_01Y5EK4bw2LqsQXeaUv8iueF', 'input': {'query': 'weather in san francisco'}, 'name': 'tavily_search_results_json', 'type': 'tool_use'}], response_metadata={'id': 'msg_0132wQUcEduJ8UKVVVqwJzM4', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'tool_use', 'stop_sequence': None, 'usage': {'input_tokens': 269, 'output_tokens': 61}}, id='run-26d5e5e8-d4fd-46d2-a197-87b95b10e823-0', tool_calls=[{'name': 'tavily_search_results_json', 'args': {'query': 'weather in san francisco'}, 'id': 'toolu_01Y5EK4bw2LqsQXeaUv8iueF'}], usage_metadata={'input_tokens': 269, 'output_tokens': 61, 'total_tokens': 330}),\n",
" ToolMessage(content='[{\"url\": \"https://www.weatherapi.com/\", \"content\": \"{\\'location\\': {\\'name\\': \\'San Francisco\\', \\'region\\': \\'California\\', \\'country\\': \\'United States of America\\', \\'lat\\': 37.78, \\'lon\\': -122.42, \\'tz_id\\': \\'America/Los_Angeles\\', \\'localtime_epoch\\': 1717238703, \\'localtime\\': \\'2024-06-01 3:45\\'}, \\'current\\': {\\'last_updated_epoch\\': 1717237800, \\'last_updated\\': \\'2024-06-01 03:30\\', \\'temp_c\\': 12.0, \\'temp_f\\': 53.6, \\'is_day\\': 0, \\'condition\\': {\\'text\\': \\'Mist\\', \\'icon\\': \\'//cdn.weatherapi.com/weather/64x64/night/143.png\\', \\'code\\': 1030}, \\'wind_mph\\': 5.6, \\'wind_kph\\': 9.0, \\'wind_degree\\': 310, \\'wind_dir\\': \\'NW\\', \\'pressure_mb\\': 1013.0, \\'pressure_in\\': 29.92, \\'precip_mm\\': 0.0, \\'precip_in\\': 0.0, \\'humidity\\': 88, \\'cloud\\': 100, \\'feelslike_c\\': 10.5, \\'feelslike_f\\': 50.8, \\'windchill_c\\': 9.3, \\'windchill_f\\': 48.7, \\'heatindex_c\\': 11.1, \\'heatindex_f\\': 51.9, \\'dewpoint_c\\': 8.8, \\'dewpoint_f\\': 47.8, \\'vis_km\\': 6.4, \\'vis_miles\\': 3.0, \\'uv\\': 1.0, \\'gust_mph\\': 12.5, \\'gust_kph\\': 20.1}}\"}, {\"url\": \"https://www.timeanddate.com/weather/usa/san-francisco/hourly\", \"content\": \"Sun & Moon. Weather Today Weather Hourly 14 Day Forecast Yesterday/Past Weather Climate (Averages) Currently: 59 \\\\u00b0F. Passing clouds. (Weather station: San Francisco International Airport, USA). See more current weather.\"}]', name='tavily_search_results_json', id='37aa1fd9-b232-4a02-bd22-bc5b9b44a22c', tool_call_id='toolu_01Y5EK4bw2LqsQXeaUv8iueF'),\n",
" AIMessage(content='Based on the search results, here is a summary of the current weather in San Francisco:\\n\\nThe weather in San Francisco is currently misty with a temperature of around 53°F (12°C). There is complete cloud cover and moderate winds from the northwest around 5-9 mph (9-14 km/h). Humidity is high at 88%. Visibility is around 3 miles (6.4 km). \\n\\nThe results provide an hourly forecast as well as current conditions from a couple different weather sources. Let me know if you need any additional details about the San Francisco weather!', response_metadata={'id': 'msg_01BRX9mrT19nBDdHYtR7wJ92', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 920, 'output_tokens': 132}}, id='run-d0325583-3ddc-4432-b2b2-d023eb97660f-0', usage_metadata={'input_tokens': 920, 'output_tokens': 132, 'total_tokens': 1052})]"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
"name": "stdout",
"output_type": "stream",
"text": [
"================================\u001b[1m Human Message \u001b[0m=================================\n",
"\n",
"Search for the weather in SF\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"\n",
"[{'text': \"I'll help you search for weather information in San Francisco. Let me use the search engine to find current weather conditions.\", 'type': 'text'}, {'id': 'toolu_01WWcXGnArosybujpKzdmARZ', 'input': {'query': 'current weather San Francisco SF'}, 'name': 'tavily_search', 'type': 'tool_use'}]\n",
"Tool Calls:\n",
" tavily_search (toolu_01WWcXGnArosybujpKzdmARZ)\n",
" Call ID: toolu_01WWcXGnArosybujpKzdmARZ\n",
" Args:\n",
" query: current weather San Francisco SF\n",
"=================================\u001b[1m Tool Message \u001b[0m=================================\n",
"Name: tavily_search\n",
"\n",
"{\"query\": \"current weather San Francisco SF\", \"follow_up_questions\": null, \"answer\": null, \"images\": [], \"results\": [{\"title\": \"Weather in San Francisco, CA\", \"url\": \"https://www.weatherapi.com/\", \"content\": \"{'location': {'name': 'San Francisco', 'region': 'California', 'country': 'United States of America', 'lat': 37.775, 'lon': -122.4183, 'tz_id': 'America/Los_Angeles', 'localtime_epoch': 1750168606, 'localtime': '2025-06-17 06:56'}, 'current': {'last_updated_epoch': 1750167900, 'last_updated': '2025-06-17 06:45', 'temp_c': 11.7, 'temp_f': 53.1, 'is_day': 1, 'condition': {'text': 'Fog', 'icon': '//cdn.weatherapi.com/weather/64x64/day/248.png', 'code': 1135}, 'wind_mph': 4.0, 'wind_kph': 6.5, 'wind_degree': 215, 'wind_dir': 'SW', 'pressure_mb': 1017.0, 'pressure_in': 30.02, 'precip_mm': 0.0, 'precip_in': 0.0, 'humidity': 86, 'cloud': 0, 'feelslike_c': 11.3, 'feelslike_f': 52.4, 'windchill_c': 8.7, 'windchill_f': 47.7, 'heatindex_c': 9.8, 'heatindex_f': 49.7, 'dewpoint_c': 9.6, 'dewpoint_f': 49.2, 'vis_km': 16.0, 'vis_miles': 9.0, 'uv': 0.0, 'gust_mph': 6.3, 'gust_kph': 10.2}}\", \"score\": 0.885373, \"raw_content\": null}, {\"title\": \"Weather in San Francisco in June 2025\", \"url\": \"https://world-weather.info/forecast/usa/san_francisco/june-2025/\", \"content\": \"Detailed ⚡ San Francisco Weather Forecast for June 2025 - day/night 🌡️ temperatures, precipitations - World-Weather.info. Add the current city. Search. Weather; Archive; Weather Widget °F. World; United States; California; Weather in San Francisco; ... 17 +64° +54° 18 +61° +54° 19\", \"score\": 0.8830044, \"raw_content\": null}], \"response_time\": 2.6}\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"\n",
"Based on the search results, here's the current weather in San Francisco:\n",
"- Temperature: 53.1°F (11.7°C)\n",
"- Conditions: Foggy\n",
"- Wind: 4.0 mph from the SW\n",
"- Humidity: 86%\n",
"- Visibility: 9.0 miles\n",
"\n",
"The weather appears to be typical for San Francisco, with morning fog and mild temperatures. The \"feels like\" temperature is 52.4°F (11.3°C).\n"
]
}
],
"source": [
"response = agent_executor.invoke(\n",
" {\"messages\": [HumanMessage(content=\"whats the weather in sf?\")]}\n",
")\n",
"response[\"messages\"]"
"input_message = {\"role\": \"user\", \"content\": \"Search for the weather in SF\"}\n",
"response = agent_executor.invoke({\"messages\": [input_message]})\n",
"\n",
"for message in response[\"messages\"]:\n",
" message.pretty_print()"
]
},
{
@@ -516,7 +551,7 @@
},
{
"cell_type": "code",
"execution_count": 14,
"execution_count": 17,
"id": "bd93812b-2350-4d7f-9643-34c753503754",
"metadata": {},
"outputs": [
@@ -526,36 +561,35 @@
"text": [
"================================\u001b[1m Human Message \u001b[0m=================================\n",
"\n",
"whats the weather in sf?\n",
"Search for the weather in SF\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"\n",
"[{'text': 'Okay, let me look up the current weather for San Francisco using a search engine:', 'type': 'text'}, {'id': 'toolu_01H1brh5EZpZqtqHBxkosPtN', 'input': {'query': 'san francisco weather'}, 'name': 'tavily_search_results_json', 'type': 'tool_use'}]\n",
"[{'text': \"I'll help you search for information about the weather in San Francisco.\", 'type': 'text'}, {'id': 'toolu_01DCPnJES53Fcr7YWnZ47kDG', 'input': {'query': 'current weather San Francisco'}, 'name': 'tavily_search', 'type': 'tool_use'}]\n",
"Tool Calls:\n",
" tavily_search_results_json (toolu_01H1brh5EZpZqtqHBxkosPtN)\n",
" Call ID: toolu_01H1brh5EZpZqtqHBxkosPtN\n",
" tavily_search (toolu_01DCPnJES53Fcr7YWnZ47kDG)\n",
" Call ID: toolu_01DCPnJES53Fcr7YWnZ47kDG\n",
" Args:\n",
" query: san francisco weather\n",
" query: current weather San Francisco\n",
"=================================\u001b[1m Tool Message \u001b[0m=================================\n",
"Name: tavily_search_results_json\n",
"Name: tavily_search\n",
"\n",
"[{\"url\": \"https://www.weatherapi.com/\", \"content\": \"{'location': {'name': 'San Francisco', 'region': 'California', 'country': 'United States of America', 'lat': 37.775, 'lon': -122.4183, 'tz_id': 'America/Los_Angeles', 'localtime_epoch': 1739994486, 'localtime': '2025-02-19 11:48'}, 'current': {'last_updated_epoch': 1739994300, 'last_updated': '2025-02-19 11:45', 'temp_c': 13.3, 'temp_f': 55.9, 'is_day': 1, 'condition': {'text': 'Light rain', 'icon': '//cdn.weatherapi.com/weather/64x64/day/296.png', 'code': 1183}, 'wind_mph': 5.8, 'wind_kph': 9.4, 'wind_degree': 195, 'wind_dir': 'SSW', 'pressure_mb': 1023.0, 'pressure_in': 30.2, 'precip_mm': 0.0, 'precip_in': 0.0, 'humidity': 87, 'cloud': 100, 'feelslike_c': 12.7, 'feelslike_f': 54.8, 'windchill_c': 9.1, 'windchill_f': 48.4, 'heatindex_c': 10.2, 'heatindex_f': 50.3, 'dewpoint_c': 9.8, 'dewpoint_f': 49.7, 'vis_km': 4.0, 'vis_miles': 2.0, 'uv': 1.4, 'gust_mph': 8.9, 'gust_kph': 14.4}}\"}, {\"url\": \"https://world-weather.info/forecast/usa/san_francisco/february-2025/\", \"content\": \"Weather in San Francisco in February 2025 (California) - Detailed Weather Forecast for a Month Weather World Weather in San Francisco Weather in San Francisco in February 2025 San Francisco Weather Forecast for February 2025, is based on previous years' statistical data. +59°+50° +59°+52° +59°+50° +61°+52° +59°+50° +61°+50° +61°+52° +63°+52° +61°+52° +61°+50° +61°+50° +61°+50° +59°+50° +59°+50° +61°+50° +61°+52° +59°+50° +59°+48° +57°+48° +59°+50° +59°+48° +59°+50° +57°+46° +61°+50° +61°+50° +59°+50° +59°+48° +59°+50° Extended weather forecast in San Francisco HourlyWeek10-Day14-Day30-DayYear Weather in large and nearby cities Weather in Washington, D.C.+41° Sacramento+55° Pleasanton+55° Redwood City+55° San Leandro+55° San Mateo+54° San Rafael+52° San Ramon+52° South San Francisco+54° Vallejo+50° Palo Alto+55° Pacifica+55° Berkeley+54° Castro Valley+55° Concord+52° Daly City+54° Noverd+52° Sign Hill+54° world's temperature today day day Temperature units\"}]\n",
"{\"query\": \"current weather San Francisco\", \"follow_up_questions\": null, \"answer\": null, \"images\": [], \"results\": [{\"title\": \"Weather in San Francisco\", \"url\": \"https://www.weatherapi.com/\", \"content\": \"{'location': {'name': 'San Francisco', 'region': 'California', 'country': 'United States of America', 'lat': 37.775, 'lon': -122.4183, 'tz_id': 'America/Los_Angeles', 'localtime_epoch': 1750168506, 'localtime': '2025-06-17 06:55'}, 'current': {'last_updated_epoch': 1750167900, 'last_updated': '2025-06-17 06:45', 'temp_c': 11.7, 'temp_f': 53.1, 'is_day': 1, 'condition': {'text': 'Fog', 'icon': '//cdn.weatherapi.com/weather/64x64/day/248.png', 'code': 1135}, 'wind_mph': 4.0, 'wind_kph': 6.5, 'wind_degree': 215, 'wind_dir': 'SW', 'pressure_mb': 1017.0, 'pressure_in': 30.02, 'precip_mm': 0.0, 'precip_in': 0.0, 'humidity': 86, 'cloud': 0, 'feelslike_c': 11.3, 'feelslike_f': 52.4, 'windchill_c': 8.7, 'windchill_f': 47.7, 'heatindex_c': 9.8, 'heatindex_f': 49.7, 'dewpoint_c': 9.6, 'dewpoint_f': 49.2, 'vis_km': 16.0, 'vis_miles': 9.0, 'uv': 0.0, 'gust_mph': 6.3, 'gust_kph': 10.2}}\", \"score\": 0.9542825, \"raw_content\": null}, {\"title\": \"Weather in San Francisco in June 2025\", \"url\": \"https://world-weather.info/forecast/usa/san_francisco/june-2025/\", \"content\": \"Detailed ⚡ San Francisco Weather Forecast for June 2025 - day/night 🌡️ temperatures, precipitations - World-Weather.info. Add the current city. Search. Weather; Archive; Weather Widget °F. World; United States; California; Weather in San Francisco; ... 17 +64° +54° 18 +61° +54° 19\", \"score\": 0.8638634, \"raw_content\": null}], \"response_time\": 2.57}\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"\n",
"The search results provide details on the current weather conditions and forecast for San Francisco. Some key details:\n",
"Based on the search results, here's the current weather in San Francisco:\n",
"- Temperature: 53.1°F (11.7°C)\n",
"- Condition: Foggy\n",
"- Wind: 4.0 mph from the Southwest\n",
"- Humidity: 86%\n",
"- Visibility: 9.0 miles\n",
"- Feels like: 52.4°F (11.3°C)\n",
"\n",
"- It is lightly raining in San Francisco right now, with a temperature around 55°F/13°C. \n",
"- The forecast for the rest of February 2025 shows daytime highs mostly in the upper 50s to low 60s F, with night lows in the upper 40s to low 50s F. \n",
"- Typical weather includes some rain, clouds, cool temperatures and breezy conditions.\n",
"\n",
"So in summary, as is common for San Francisco in late winter, it is currently cool with light rain showers, and similar mild, unsettled weather is expected over the next couple weeks. Layers and a light jacket would be advisable for being outdoors. Let me know if you need any other details!\n"
"This is quite typical weather for San Francisco, which is known for its fog, especially during the morning hours. The city's proximity to the ocean and unique geographical features often result in mild temperatures and foggy conditions.\n"
]
}
],
"source": [
"for step in agent_executor.stream(\n",
" {\"messages\": [HumanMessage(content=\"whats the weather in sf?\")]},\n",
" stream_mode=\"values\",\n",
"):\n",
"for step in agent_executor.stream({\"messages\": [input_message]}, stream_mode=\"values\"):\n",
" step[\"messages\"][-1].pretty_print()"
]
},
@@ -579,7 +613,7 @@
},
{
"cell_type": "code",
"execution_count": 21,
"execution_count": 18,
"id": "63198158-380e-43a3-a2ad-d4288949c1d4",
"metadata": {},
"outputs": [
@@ -587,26 +621,21 @@
"name": "stdout",
"output_type": "stream",
"text": [
"I|'ll help you search for information| about the weather in San Francisco.|Base|d on the search results, here|'s the current weather in| San Francisco:\n",
"-| Temperature: 53.1°F (|11.7°C)\n",
"-| Condition: Foggy\n",
"- Wind:| 4.0 mph from| the Southwest\n",
"- Humidity|: 86%|\n",
"- Visibility: 9|.0 miles\n",
"- Pressure: |30.02 in|Hg\n",
"\n",
"\n",
"Base|d on the weather| search| results, here| are the key details| about the weather in| San Francisco:|\n",
"\n",
"- The current temperature| in| San Francisco is aroun|d 55|-|56|°F (13|°|C).| Light| rain is occurring with| |100|% clou|d cover. |\n",
"\n",
"-| Winds| are aroun|d 5-9| mph from| the south|-southwest.|\n",
"\n",
"- The| forecast| for| the rest| of February| 2025 |shows da|ytime highs mostly| in the upper| 50s to| low| 60s°|F,| with overnight lows| in| the upper| 40s to| low| 50s°|F.|\n",
"\n",
"-| Overall|, typical| cool| an|d show|ery late| winter weather is| expected in San Francisco| for the remainder| of February,| with a| mix| of rain| and dry| periods|.| Temperatures will be| season|able| for| this| time of year.|\n",
"\n",
"So| in summary, San| Francisco is| experiencing light| rain an|d cool| temperatures currently, but| the late| winter forecast| shows typical mil|d and show|ery conditions| pers|isting through the en|d of the| month.| Let| me know if you| need any other| details about| the weather in the| city!|"
"The weather| is characteristic of San Francisco, with| foggy conditions and mild temperatures|. The \"feels like\" temperature is slightly| lower at 52.4|°F (11.|3°C)| due to the wind chill effect|.|"
]
}
],
"source": [
"for step, metadata in agent_executor.stream(\n",
" {\"messages\": [HumanMessage(content=\"whats the weather in sf?\")]},\n",
" stream_mode=\"messages\",\n",
" {\"messages\": [input_message]}, stream_mode=\"messages\"\n",
"):\n",
" if metadata[\"langgraph_node\"] == \"agent\" and (text := step.text()):\n",
" print(text, end=\"|\")"
@@ -624,7 +653,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 19,
"id": "c4073e35",
"metadata": {},
"outputs": [],
@@ -636,7 +665,7 @@
},
{
"cell_type": "code",
"execution_count": 12,
"execution_count": 20,
"id": "e64a944e-f9ac-43cf-903c-d3d28d765377",
"metadata": {},
"outputs": [],
@@ -648,7 +677,7 @@
},
{
"cell_type": "code",
"execution_count": 13,
"execution_count": 21,
"id": "a13462d0-2d02-4474-921e-15a1ba1fa274",
"metadata": {},
"outputs": [
@@ -656,22 +685,26 @@
"name": "stdout",
"output_type": "stream",
"text": [
"{'agent': {'messages': [AIMessage(content=\"Hello Bob! It's nice to meet you again.\", response_metadata={'id': 'msg_013C1z2ZySagEFwmU1EsysR2', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 1162, 'output_tokens': 14}}, id='run-f878acfd-d195-44e8-9166-e2796317e3f8-0', usage_metadata={'input_tokens': 1162, 'output_tokens': 14, 'total_tokens': 1176})]}}\n",
"----\n"
"================================\u001b[1m Human Message \u001b[0m=================================\n",
"\n",
"Hi, I'm Bob!\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"\n",
"Hello Bob! I'm an AI assistant who can help you search for information using specialized search tools. Is there anything specific you'd like to know about or search for? I'm happy to help you find accurate and up-to-date information on various topics.\n"
]
}
],
"source": [
"for chunk in agent_executor.stream(\n",
" {\"messages\": [HumanMessage(content=\"hi im bob!\")]}, config\n",
"input_message = {\"role\": \"user\", \"content\": \"Hi, I'm Bob!\"}\n",
"for step in agent_executor.stream(\n",
" {\"messages\": [input_message]}, config, stream_mode=\"values\"\n",
"):\n",
" print(chunk)\n",
" print(\"----\")"
" step[\"messages\"][-1].pretty_print()"
]
},
{
"cell_type": "code",
"execution_count": 14,
"execution_count": 22,
"id": "56d8028b-5dbc-40b2-86f5-ed60631d86a3",
"metadata": {},
"outputs": [
@@ -679,17 +712,21 @@
"name": "stdout",
"output_type": "stream",
"text": [
"{'agent': {'messages': [AIMessage(content='You mentioned your name is Bob when you introduced yourself earlier. So your name is Bob.', response_metadata={'id': 'msg_01WNwnRNGwGDRw6vRdivt6i1', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 1184, 'output_tokens': 21}}, id='run-f5c0b957-8878-405a-9d4b-a7cd38efe81f-0', usage_metadata={'input_tokens': 1184, 'output_tokens': 21, 'total_tokens': 1205})]}}\n",
"----\n"
"================================\u001b[1m Human Message \u001b[0m=================================\n",
"\n",
"What's my name?\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"\n",
"Your name is Bob, as you introduced yourself earlier. I can remember information shared within our conversation without needing to search for it.\n"
]
}
],
"source": [
"for chunk in agent_executor.stream(\n",
" {\"messages\": [HumanMessage(content=\"whats my name?\")]}, config\n",
"input_message = {\"role\": \"user\", \"content\": \"What's my name?\"}\n",
"for step in agent_executor.stream(\n",
" {\"messages\": [input_message]}, config, stream_mode=\"values\"\n",
"):\n",
" print(chunk)\n",
" print(\"----\")"
" step[\"messages\"][-1].pretty_print()"
]
},
{
@@ -710,7 +747,7 @@
},
{
"cell_type": "code",
"execution_count": 15,
"execution_count": 23,
"id": "24460239",
"metadata": {},
"outputs": [
@@ -718,18 +755,24 @@
"name": "stdout",
"output_type": "stream",
"text": [
"{'agent': {'messages': [AIMessage(content=\"I'm afraid I don't actually know your name. As an AI assistant without personal information about you, I don't have a specific name associated with our conversation.\", response_metadata={'id': 'msg_01NoaXNNYZKSoBncPcLkdcbo', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 267, 'output_tokens': 36}}, id='run-c9f7df3d-525a-4d8f-bbcf-a5b4a5d2e4b0-0', usage_metadata={'input_tokens': 267, 'output_tokens': 36, 'total_tokens': 303})]}}\n",
"----\n"
"================================\u001b[1m Human Message \u001b[0m=================================\n",
"\n",
"What's my name?\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"\n",
"I apologize, but I don't have access to any tools that would tell me your name. I can only assist you with searching for publicly available information using the tavily_search function. I don't have access to personal information about users. If you'd like to tell me your name, I'll be happy to address you by it.\n"
]
}
],
"source": [
"# highlight-next-line\n",
"config = {\"configurable\": {\"thread_id\": \"xyz123\"}}\n",
"for chunk in agent_executor.stream(\n",
" {\"messages\": [HumanMessage(content=\"whats my name?\")]}, config\n",
"\n",
"input_message = {\"role\": \"user\", \"content\": \"What's my name?\"}\n",
"for step in agent_executor.stream(\n",
" {\"messages\": [input_message]}, config, stream_mode=\"values\"\n",
"):\n",
" print(chunk)\n",
" print(\"----\")"
" step[\"messages\"][-1].pretty_print()"
]
},
{

@@ -42,7 +42,7 @@
"metadata": {},
"outputs": [],
"source": [
"%pip install -U --quiet langgraph langchain-openai langchain-community tiktoken"
"%pip install -U --quiet langgraph langchain-openai langchain-tavily tiktoken"
]
},
{
@@ -76,7 +76,7 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 1,
"id": "dab4e96a-8a90-4df9-8818-5a6edf5805d7",
"metadata": {},
"outputs": [],
@@ -85,7 +85,6 @@
"from typing import List, Literal, Optional\n",
"\n",
"import tiktoken\n",
"from langchain_community.tools.tavily_search import TavilySearchResults\n",
"from langchain_core.documents import Document\n",
"from langchain_core.embeddings import Embeddings\n",
"from langchain_core.messages import get_buffer_string\n",
@@ -95,6 +94,7 @@
"from langchain_core.vectorstores import InMemoryVectorStore\n",
"from langchain_openai import ChatOpenAI\n",
"from langchain_openai.embeddings import OpenAIEmbeddings\n",
"from langchain_tavily import TavilySearch\n",
"from langgraph.checkpoint.memory import MemorySaver\n",
"from langgraph.graph import END, START, MessagesState, StateGraph\n",
"from langgraph.prebuilt import ToolNode"
@@ -200,7 +200,7 @@
"metadata": {},
"outputs": [],
"source": [
"search = TavilySearchResults(max_results=1)\n",
"search = TavilySearch(max_results=1)\n",
"tools = [save_recall_memory, search_recall_memories, search]"
]
},
@@ -1074,7 +1074,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.3"
"version": "3.10.4"
}
},
"nbformat": 4,
@@ -190,7 +190,7 @@ def get_vectorstore_table():
"similarity_search_by_vector": True,
"similarity_search_with_score": True,
"asearch": True,
"Passes Standard Tests": False,
"Passes Standard Tests": True,
"Multi Tenancy": False,
"Local/Cloud": "Local",
"IDs in add Documents": True,

@@ -338,91 +338,91 @@ const FEATURE_TABLES = {
items:[
{
name: "AzureOpenAI",
link: "azureopenai",
link: "/docs/integrations/text_embedding/azureopenai",
package: "langchain-openai",
apiLink: "https://python.langchain.com/api_reference/openai/embeddings/langchain_openai.embeddings.azure.AzureOpenAIEmbeddings.html"
},
{
name: "Ollama",
link: "ollama",
link: "/docs/integrations/text_embedding/ollama",
package: "langchain-ollama",
apiLink: "https://python.langchain.com/api_reference/ollama/embeddings/langchain_ollama.embeddings.OllamaEmbeddings.html"
},
{
name: "AI21",
link: "ai21",
link: "/docs/integrations/text_embedding/ai21",
package: "langchain-ai21",
apiLink: "https://python.langchain.com/api_reference/ai21/embeddings/langchain_ai21.embeddings.AI21Embeddings.html"
},
{
name: "Fake",
link: "fake",
link: "/docs/integrations/text_embedding/fake",
package: "langchain-core",
apiLink: "https://python.langchain.com/api_reference/core/embeddings/langchain_core.embeddings.fake.FakeEmbeddings.html"
},
{
name: "OpenAI",
link: "openai",
link: "/docs/integrations/text_embedding/openai",
package: "langchain-openai",
apiLink: "https://python.langchain.com/api_reference/openai/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html"
},
{
name: "Google Gemini",
link: "google-generative-ai",
link: "/docs/integrations/text_embedding/google-generative-ai",
package: "langchain-google-genai",
apiLink: "https://python.langchain.com/api_reference/google_genai/embeddings/langchain_google_genai.embeddings.GoogleGenerativeAIEmbeddings.html"
},
{
name: "Together",
link: "together",
link: "/docs/integrations/text_embedding/together",
package: "langchain-together",
apiLink: "https://python.langchain.com/api_reference/together/embeddings/langchain_together.embeddings.TogetherEmbeddings.html"
},
{
name: "Fireworks",
link: "fireworks",
link: "/docs/integrations/text_embedding/fireworks",
package: "langchain-fireworks",
apiLink: "https://python.langchain.com/api_reference/fireworks/embeddings/langchain_fireworks.embeddings.FireworksEmbeddings.html"
},
{
name: "MistralAI",
link: "mistralai",
link: "/docs/integrations/text_embedding/mistralai",
package: "langchain-mistralai",
apiLink: "https://python.langchain.com/api_reference/mistralai/embeddings/langchain_mistralai.embeddings.MistralAIEmbeddings.html"
},
{
name: "Cohere",
link: "cohere",
link: "/docs/integrations/text_embedding/cohere",
package: "langchain-cohere",
apiLink: "https://python.langchain.com/api_reference/cohere/embeddings/langchain_cohere.embeddings.CohereEmbeddings.html"
},
{
name: "Nomic",
link: "nomic",
link: "/docs/integrations/text_embedding/nomic",
package: "langchain-nomic",
apiLink: "https://python.langchain.com/api_reference/nomic/embeddings/langchain_nomic.embeddings.NomicEmbeddings.html"
},
{
name: "Databricks",
link: "databricks",
link: "/docs/integrations/text_embedding/databricks",
package: "databricks-langchain",
apiLink: "https://api-docs.databricks.com/python/databricks-ai-bridge/latest/databricks_langchain.html#databricks_langchain.DatabricksEmbeddings"
},
{
name: "VoyageAI",
link: "voyageai",
link: "/docs/integrations/text_embedding/voyageai",
package: "langchain-voyageai",
apiLink: "https://python.langchain.com/api_reference/voyageai/embeddings/langchain_voyageai.embeddings.VoyageAIEmbeddings.html"
},
{
name: "IBM",
link: "ibm_watsonx",
link: "/docs/integrations/text_embedding/ibm_watsonx",
package: "langchain-ibm",
apiLink: "https://python.langchain.com/api_reference/ibm/embeddings/langchain_ibm.embeddings.WatsonxEmbeddings.html"
},
{
name: "NVIDIA",
link: "nvidia_ai_endpoints",
link: "/docs/integrations/text_embedding/nvidia_ai_endpoints",
package: "langchain-nvidia",
apiLink: "https://python.langchain.com/api_reference/nvidia_ai_endpoints/embeddings/langchain_nvidia_ai_endpoints.embeddings.NVIDIAEmbeddings.html"
},

@@ -23,6 +23,8 @@ from typing import (
cast,
)

from pydantic.fields import FieldInfo
from pydantic.v1.fields import FieldInfo as FieldInfoV1
from typing_extensions import ParamSpec

from langchain_core._api.internal import is_caller_internal
@@ -152,10 +154,6 @@ def deprecated(
_package: str = package,
) -> T:
"""Implementation of the decorator returned by `deprecated`."""
from langchain_core.utils.pydantic import ( # type: ignore[attr-defined]
FieldInfoV1,
FieldInfoV2,
)

def emit_warning() -> None:
"""Emit the warning."""
@@ -249,7 +247,7 @@ def deprecated(
),
)

elif isinstance(obj, FieldInfoV2):
elif isinstance(obj, FieldInfo):
wrapped = None
if not _obj_type:
_obj_type = "attribute"
@@ -261,7 +259,7 @@ def deprecated(
def finalize(wrapper: Callable[..., Any], new_doc: str) -> T: # noqa: ARG001
return cast(
"T",
FieldInfoV2(
FieldInfo(
default=obj.default,
default_factory=obj.default_factory,
description=new_doc,

@@ -1069,8 +1069,10 @@ class BaseCallbackManager(CallbackManagerMixin):
tags (list[str]): The tags to remove.
"""
for tag in tags:
self.tags.remove(tag)
self.inheritable_tags.remove(tag)
if tag in self.tags:
self.tags.remove(tag)
if tag in self.inheritable_tags:
self.inheritable_tags.remove(tag)

def add_metadata(
self,
@@ -1094,8 +1096,8 @@ class BaseCallbackManager(CallbackManagerMixin):
keys (list[str]): The keys to remove.
"""
for key in keys:
self.metadata.pop(key)
self.inheritable_metadata.pop(key)
self.metadata.pop(key, None)
self.inheritable_metadata.pop(key, None)

Callbacks = Optional[Union[list[BaseCallbackHandler], BaseCallbackManager]]
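
The remove_tags/remove_metadata hunks above make removal idempotent: entries missing from the plain or the inheritable collection are now skipped instead of raising. A minimal sketch of the behavior this fixes (hypothetical usage, not from the diff):

from langchain_core.callbacks.base import BaseCallbackManager

manager = BaseCallbackManager(handlers=[])
manager.add_tags(["audit"], inherit=False)  # present in .tags only
# Previously raised ValueError because inheritable_tags.remove(tag) ran
# unconditionally; now each collection is checked before removal.
manager.remove_tags(["audit"])
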
@@ -5,6 +5,7 @@ from __future__ import annotations
import hashlib
import json
import uuid
import warnings
from collections.abc import AsyncIterable, AsyncIterator, Iterable, Iterator, Sequence
from itertools import islice
from typing import (
@@ -18,8 +19,6 @@ from typing import (
cast,
)

from pydantic import model_validator

from langchain_core.document_loaders.base import BaseLoader
from langchain_core.documents import Document
from langchain_core.exceptions import LangChainException
@@ -35,94 +34,51 @@ NAMESPACE_UUID = uuid.UUID(int=1984)
T = TypeVar("T")


def _hash_string_to_uuid(input_string: str) -> uuid.UUID:
def _hash_string_to_uuid(input_string: str) -> str:
"""Hashes a string and returns the corresponding UUID."""
hash_value = hashlib.sha1(
input_string.encode("utf-8"), usedforsecurity=False
).hexdigest()
return str(uuid.uuid5(NAMESPACE_UUID, hash_value))


_WARNED_ABOUT_SHA1: bool = False


def _warn_about_sha1() -> None:
"""Emit a one-time warning about SHA-1 collision weaknesses."""
# Global variable OK in this case
global _WARNED_ABOUT_SHA1 # noqa: PLW0603
if not _WARNED_ABOUT_SHA1:
warnings.warn(
"Using SHA-1 for document hashing. SHA-1 is *not* "
"collision-resistant; a motivated attacker can construct distinct inputs "
"that map to the same fingerprint. If this matters in your "
"threat model, switch to a stronger algorithm such "
"as 'blake2b', 'sha256', or 'sha512' by specifying "
" `key_encoder` parameter in the the `index` or `aindex` function. ",
category=UserWarning,
stacklevel=2,
)
_WARNED_ABOUT_SHA1 = True


def _hash_string(
input_string: str, *, algorithm: Literal["sha1", "sha256", "sha512", "blake2b"]
) -> uuid.UUID:
"""Hash *input_string* to a deterministic UUID using the configured algorithm."""
if algorithm == "sha1":
_warn_about_sha1()
hash_value = _calculate_hash(input_string, algorithm)
return uuid.uuid5(NAMESPACE_UUID, hash_value)


def _hash_nested_dict_to_uuid(data: dict[Any, Any]) -> uuid.UUID:
"""Hashes a nested dictionary and returns the corresponding UUID."""
def _hash_nested_dict(
data: dict[Any, Any], *, algorithm: Literal["sha1", "sha256", "sha512", "blake2b"]
) -> uuid.UUID:
"""Hash a nested dictionary to a UUID using the configured algorithm."""
serialized_data = json.dumps(data, sort_keys=True)
hash_value = hashlib.sha1(
serialized_data.encode("utf-8"), usedforsecurity=False
).hexdigest()
return uuid.uuid5(NAMESPACE_UUID, hash_value)


class _HashedDocument(Document):
"""A hashed document with a unique ID."""

uid: str
hash_: str
"""The hash of the document including content and metadata."""
content_hash: str
"""The hash of the document content."""
metadata_hash: str
"""The hash of the document metadata."""

@classmethod
def is_lc_serializable(cls) -> bool:
return False

@model_validator(mode="before")
@classmethod
def calculate_hashes(cls, values: dict[str, Any]) -> Any:
"""Root validator to calculate content and metadata hash."""
content = values.get("page_content", "")
metadata = values.get("metadata", {})

forbidden_keys = ("hash_", "content_hash", "metadata_hash")

for key in forbidden_keys:
if key in metadata:
msg = (
f"Metadata cannot contain key {key} as it "
f"is reserved for internal use."
)
raise ValueError(msg)

content_hash = str(_hash_string_to_uuid(content))

try:
metadata_hash = str(_hash_nested_dict_to_uuid(metadata))
except Exception as e:
msg = (
f"Failed to hash metadata: {e}. "
f"Please use a dict that can be serialized using json."
)
raise ValueError(msg) from e

values["content_hash"] = content_hash
values["metadata_hash"] = metadata_hash
values["hash_"] = str(_hash_string_to_uuid(content_hash + metadata_hash))

_uid = values.get("uid")

if _uid is None:
values["uid"] = values["hash_"]
return values

def to_document(self) -> Document:
"""Return a Document object."""
return Document(
id=self.uid,
page_content=self.page_content,
metadata=self.metadata,
)

@classmethod
def from_document(
cls, document: Document, *, uid: Optional[str] = None
) -> _HashedDocument:
"""Create a HashedDocument from a Document."""
return cls( # type: ignore[call-arg]
uid=uid, # type: ignore[arg-type]
page_content=document.page_content,
metadata=document.metadata,
)
return _hash_string(serialized_data, algorithm=algorithm)


def _batch(size: int, iterable: Iterable[T]) -> Iterator[list[T]]:
@@ -168,14 +124,16 @@ def _get_source_id_assigner(


def _deduplicate_in_order(
hashed_documents: Iterable[_HashedDocument],
) -> Iterator[_HashedDocument]:
hashed_documents: Iterable[Document],
) -> Iterator[Document]:
"""Deduplicate a list of hashed documents while preserving order."""
seen: set[str] = set()

for hashed_doc in hashed_documents:
if hashed_doc.hash_ not in seen:
seen.add(hashed_doc.hash_)
if hashed_doc.id not in seen:
# At this stage, the id is guaranteed to be a string.
# Avoiding unnecessary run time checks.
seen.add(cast("str", hashed_doc.id))
yield hashed_doc

@@ -183,6 +141,94 @@ class IndexingException(LangChainException):
"""Raised when an indexing operation fails."""


def _calculate_hash(
text: str, algorithm: Literal["sha1", "sha256", "sha512", "blake2b"]
) -> str:
"""Return a hexadecimal digest of *text* using *algorithm*."""
if algorithm == "sha1":
# Calculate the SHA-1 hash and return it as a UUID.
digest = hashlib.sha1(text.encode("utf-8"), usedforsecurity=False).hexdigest()
return str(uuid.uuid5(NAMESPACE_UUID, digest))
if algorithm == "blake2b":
return hashlib.blake2b(text.encode("utf-8")).hexdigest()
if algorithm == "sha256":
return hashlib.sha256(text.encode("utf-8")).hexdigest()
if algorithm == "sha512":
return hashlib.sha512(text.encode("utf-8")).hexdigest()
msg = f"Unsupported hashing algorithm: {algorithm}"
raise ValueError(msg)


def _get_document_with_hash(
document: Document,
*,
key_encoder: Union[
Callable[[Document], str], Literal["sha1", "sha256", "sha512", "blake2b"]
],
) -> Document:
"""Calculate a hash of the document, and assign it to the uid.

When using one of the predefined hashing algorithms, the hash is calculated
by hashing the content and the metadata of the document.

Args:
document: Document to hash.
key_encoder: Hashing algorithm to use for hashing the document.
If not provided, a default encoder using SHA-1 will be used.
SHA-1 is not collision-resistant, and a motivated attacker
could craft two different texts that hash to the
same cache key.

New applications should use one of the alternative encoders
or provide a custom and strong key encoder function to avoid this risk.

When changing the key encoder, you must change the
index as well to avoid duplicated documents in the cache.

Returns:
Document with a unique identifier based on the hash of the content and metadata.
"""
metadata: dict[str, Any] = dict(document.metadata or {})

if callable(key_encoder):
# If key_encoder is a callable, we use it to generate the hash.
hash_ = key_encoder(document)
else:
# The hashes are calculated separate for the content and the metadata.
content_hash = _calculate_hash(document.page_content, algorithm=key_encoder)
try:
serialized_meta = json.dumps(metadata, sort_keys=True)
except Exception as e:
msg = (
f"Failed to hash metadata: {e}. "
f"Please use a dict that can be serialized using json."
)
raise ValueError(msg) from e
metadata_hash = _calculate_hash(serialized_meta, algorithm=key_encoder)
hash_ = _calculate_hash(content_hash + metadata_hash, algorithm=key_encoder)

return Document(
# Assign a unique identifier based on the hash.
id=hash_,
page_content=document.page_content,
metadata=document.metadata,
)


# This internal abstraction was imported by the langchain package internally, so
# we keep it here for backwards compatibility.
class _HashedDocument:
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Raise an error if this class is instantiated."""
msg = (
"_HashedDocument is an internal abstraction that was deprecated in "
" langchain-core 0.3.63. This abstraction is marked as private and "
" should not have been used directly. If you are seeing this error, please "
" update your code appropriately."
)
raise NotImplementedError(msg)


def _delete(
vector_store: Union[VectorStore, DocumentIndex],
ids: list[str],
@@ -231,6 +277,9 @@ def index(
source_id_key: Union[str, Callable[[Document], str], None] = None,
cleanup_batch_size: int = 1_000,
force_update: bool = False,
key_encoder: Union[
Literal["sha1", "sha256", "sha512", "blake2b"], Callable[[Document], str]
] = "sha1",
upsert_kwargs: Optional[dict[str, Any]] = None,
) -> IndexingResult:
"""Index data from the loader into the vector store.
@@ -291,6 +340,23 @@ def index(
force_update: Force update documents even if they are present in the
record manager. Useful if you are re-indexing with updated embeddings.
Default is False.
key_encoder: Hashing algorithm to use for hashing the document content and
metadata. Default is "sha1".
Other options include "blake2b", "sha256", and "sha512".

.. versionadded:: 0.3.66

key_encoder: Hashing algorithm to use for hashing the document.
If not provided, a default encoder using SHA-1 will be used.
SHA-1 is not collision-resistant, and a motivated attacker
could craft two different texts that hash to the
same cache key.

New applications should use one of the alternative encoders
or provide a custom and strong key encoder function to avoid this risk.

When changing the key encoder, you must change the
index as well to avoid duplicated documents in the cache.
upsert_kwargs: Additional keyword arguments to pass to the add_documents
method of the VectorStore or the upsert method of the
DocumentIndex. For example, you can use this to
@@ -313,6 +379,11 @@ def index(

* Added `scoped_full` cleanup mode.
"""
# Behavior is deprecated, but we keep it for backwards compatibility.
# # Warn only once per process.
if key_encoder == "sha1":
_warn_about_sha1()

if cleanup not in {"incremental", "full", "scoped_full", None}:
msg = (
f"cleanup should be one of 'incremental', 'full', 'scoped_full' or None. "
@@ -375,12 +446,15 @@ def index(
for doc_batch in _batch(batch_size, doc_iterator):
hashed_docs = list(
_deduplicate_in_order(
[_HashedDocument.from_document(doc) for doc in doc_batch]
[
_get_document_with_hash(doc, key_encoder=key_encoder)
for doc in doc_batch
]
)
)

source_ids: Sequence[Optional[str]] = [
source_id_assigner(doc) for doc in hashed_docs
source_id_assigner(hashed_doc) for hashed_doc in hashed_docs
]

if cleanup in {"incremental", "scoped_full"}:
@@ -391,8 +465,8 @@ def index(
f"Source ids are required when cleanup mode is "
f"incremental or scoped_full. "
f"Document that starts with "
f"content: {hashed_doc.page_content[:100]} was not assigned "
f"as source id."
f"content: {hashed_doc.page_content[:100]} "
f"was not assigned as source id."
)
raise ValueError(msg)
if cleanup == "scoped_full":
@@ -400,7 +474,9 @@ def index(
# source ids cannot be None after for loop above.
source_ids = cast("Sequence[str]", source_ids)

exists_batch = record_manager.exists([doc.uid for doc in hashed_docs])
exists_batch = record_manager.exists(
cast("Sequence[str]", [doc.id for doc in hashed_docs])
)

# Filter out documents that already exist in the record store.
uids = []
@@ -408,14 +484,15 @@ def index(
uids_to_refresh = []
seen_docs: set[str] = set()
for hashed_doc, doc_exists in zip(hashed_docs, exists_batch):
hashed_id = cast("str", hashed_doc.id)
if doc_exists:
if force_update:
seen_docs.add(hashed_doc.uid)
seen_docs.add(hashed_id)
else:
uids_to_refresh.append(hashed_doc.uid)
uids_to_refresh.append(hashed_id)
continue
uids.append(hashed_doc.uid)
docs_to_index.append(hashed_doc.to_document())
uids.append(hashed_id)
docs_to_index.append(hashed_doc)

# Update refresh timestamp
if uids_to_refresh:
@@ -445,7 +522,7 @@ def index(
# Update ALL records, even if they already exist since we want to refresh
# their timestamp.
record_manager.update(
[doc.uid for doc in hashed_docs],
cast("Sequence[str]", [doc.id for doc in hashed_docs]),
group_ids=source_ids,
time_at_least=index_start_dt,
)
@@ -453,7 +530,6 @@ def index(
# If source IDs are provided, we can do the deletion incrementally!
if cleanup == "incremental":
# Get the uids of the documents that were not returned by the loader.

# mypy isn't good enough to determine that source ids cannot be None
# here due to a check that's happening above, so we check again.
for source_id in source_ids:
@@ -537,6 +613,9 @@ async def aindex(
source_id_key: Union[str, Callable[[Document], str], None] = None,
cleanup_batch_size: int = 1_000,
force_update: bool = False,
key_encoder: Union[
Literal["sha1", "sha256", "sha512", "blake2b"], Callable[[Document], str]
] = "sha1",
upsert_kwargs: Optional[dict[str, Any]] = None,
) -> IndexingResult:
"""Async index data from the loader into the vector store.
@@ -596,6 +675,17 @@ async def aindex(
force_update: Force update documents even if they are present in the
record manager. Useful if you are re-indexing with updated embeddings.
Default is False.
key_encoder: Hashing algorithm to use for hashing the document.
If not provided, a default encoder using SHA-1 will be used.
SHA-1 is not collision-resistant, and a motivated attacker
could craft two different texts that hash to the
same cache key.

New applications should use one of the alternative encoders
or provide a custom and strong key encoder function to avoid this risk.

When changing the key encoder, you must change the
index as well to avoid duplicated documents in the cache.
upsert_kwargs: Additional keyword arguments to pass to the aadd_documents
method of the VectorStore or the aupsert method of the
DocumentIndex. For example, you can use this to
@@ -618,6 +708,11 @@ async def aindex(

* Added `scoped_full` cleanup mode.
"""
# Behavior is deprecated, but we keep it for backwards compatibility.
# # Warn only once per process.
if key_encoder == "sha1":
_warn_about_sha1()

if cleanup not in {"incremental", "full", "scoped_full", None}:
msg = (
f"cleanup should be one of 'incremental', 'full', 'scoped_full' or None. "
@@ -691,7 +786,10 @@ async def aindex(
async for doc_batch in _abatch(batch_size, async_doc_iterator):
hashed_docs = list(
_deduplicate_in_order(
[_HashedDocument.from_document(doc) for doc in doc_batch]
[
_get_document_with_hash(doc, key_encoder=key_encoder)
for doc in doc_batch
]
)
)

@@ -707,8 +805,8 @@ async def aindex(
f"Source ids are required when cleanup mode is "
f"incremental or scoped_full. "
f"Document that starts with "
f"content: {hashed_doc.page_content[:100]} was not assigned "
f"as source id."
f"content: {hashed_doc.page_content[:100]} "
f"was not assigned as source id."
)
raise ValueError(msg)
if cleanup == "scoped_full":
@@ -716,7 +814,9 @@ async def aindex(
# source ids cannot be None after for loop above.
source_ids = cast("Sequence[str]", source_ids)

exists_batch = await record_manager.aexists([doc.uid for doc in hashed_docs])
exists_batch = await record_manager.aexists(
cast("Sequence[str]", [doc.id for doc in hashed_docs])
)

# Filter out documents that already exist in the record store.
uids: list[str] = []
@@ -724,14 +824,15 @@ async def aindex(
uids_to_refresh = []
seen_docs: set[str] = set()
for hashed_doc, doc_exists in zip(hashed_docs, exists_batch):
hashed_id = cast("str", hashed_doc.id)
if doc_exists:
if force_update:
seen_docs.add(hashed_doc.uid)
seen_docs.add(hashed_id)
else:
uids_to_refresh.append(hashed_doc.uid)
uids_to_refresh.append(hashed_id)
continue
uids.append(hashed_doc.uid)
docs_to_index.append(hashed_doc.to_document())
uids.append(hashed_id)
docs_to_index.append(hashed_doc)

if uids_to_refresh:
# Must be updated to refresh timestamp.
@@ -760,7 +861,7 @@ async def aindex(
# Update ALL records, even if they already exist since we want to refresh
# their timestamp.
await record_manager.aupdate(
[doc.uid for doc in hashed_docs],
cast("Sequence[str]", [doc.id for doc in hashed_docs]),
group_ids=source_ids,
time_at_least=index_start_dt,
)
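
To illustrate the new parameter introduced in these hunks, a minimal sketch of opting in to a collision-resistant hash; the in-memory record manager and fake embeddings are stand-ins, not part of this diff:

from langchain_core.documents import Document
from langchain_core.embeddings import DeterministicFakeEmbedding
from langchain_core.indexing import InMemoryRecordManager, index
from langchain_core.vectorstores import InMemoryVectorStore

record_manager = InMemoryRecordManager(namespace="demo")
record_manager.create_schema()
vector_store = InMemoryVectorStore(DeterministicFakeEmbedding(size=8))

docs = [Document(page_content="hello", metadata={"source": "a.txt"})]

# "sha1" remains the default (now with a one-time UserWarning). Switching
# encoders changes every fingerprint, so pair it with a fresh index.
result = index(
    docs,
    record_manager,
    vector_store,
    cleanup="incremental",
    source_id_key="source",
    key_encoder="sha256",
)
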
@@ -55,6 +55,8 @@ class InputTokenDetails(TypedDict, total=False):
}

.. versionadded:: 0.3.9

May also hold extra provider-specific keys.
"""

audio: int

@@ -34,7 +34,8 @@ class BaseMessage(Serializable):
"""

response_metadata: dict = Field(default_factory=dict)
"""Response metadata. For example: response headers, logprobs, token counts."""
"""Response metadata. For example: response headers, logprobs, token counts, model
name."""

type: str
"""The type of the message. Must be a string that is unique to the message type.

@@ -129,7 +129,7 @@ def get_buffer_string(
else:
msg = f"Got unsupported message type: {m}"
raise ValueError(msg) # noqa: TRY004
message = f"{role}: {m.content}"
message = f"{role}: {m.text()}"
if isinstance(m, AIMessage) and "function_call" in m.additional_kwargs:
message += f"{m.additional_kwargs['function_call']}"
string_messages.append(message)
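
The switch from m.content to m.text() means messages whose content is a list of blocks now render their text instead of the raw list repr. A small hedged illustration:

from langchain_core.messages import AIMessage, get_buffer_string

msg = AIMessage(content=[{"type": "text", "text": "Hello"}])
print(get_buffer_string([msg]))  # -> "AI: Hello" (was the repr of the block list)
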
@@ -326,7 +326,7 @@ class BaseOutputParser(
|
||||
|
||||
def dict(self, **kwargs: Any) -> dict:
|
||||
"""Return dictionary representation of output parser."""
|
||||
output_parser_dict = super().dict(**kwargs)
|
||||
output_parser_dict = super().model_dump(**kwargs)
|
||||
with contextlib.suppress(NotImplementedError):
|
||||
output_parser_dict["_type"] = self._type
|
||||
return output_parser_dict
|
||||
|
||||
@@ -9,6 +9,7 @@ from typing import Annotated, Any, Optional, TypeVar, Union
|
||||
import jsonpatch # type: ignore[import-untyped]
|
||||
import pydantic
|
||||
from pydantic import SkipValidation
|
||||
from pydantic.v1 import BaseModel
|
||||
from typing_extensions import override
|
||||
|
||||
from langchain_core.exceptions import OutputParserException
|
||||
@@ -20,16 +21,9 @@ from langchain_core.utils.json import (
|
||||
parse_json_markdown,
|
||||
parse_partial_json,
|
||||
)
|
||||
from langchain_core.utils.pydantic import IS_PYDANTIC_V1
|
||||
|
||||
if IS_PYDANTIC_V1:
|
||||
PydanticBaseModel = pydantic.BaseModel
|
||||
|
||||
else:
|
||||
from pydantic.v1 import BaseModel
|
||||
|
||||
# Union type needs to be last assignment to PydanticBaseModel to make mypy happy.
|
||||
PydanticBaseModel = Union[BaseModel, pydantic.BaseModel] # type: ignore[assignment,misc]
|
||||
# Union type needs to be last assignment to PydanticBaseModel to make mypy happy.
|
||||
PydanticBaseModel = Union[BaseModel, pydantic.BaseModel]
|
||||
|
||||
TBaseModel = TypeVar("TBaseModel", bound=PydanticBaseModel)
|
||||
|
||||
|
||||
@@ -7,6 +7,7 @@ from typing import Any, Optional, Union
|
||||
|
||||
import jsonpatch # type: ignore[import-untyped]
|
||||
from pydantic import BaseModel, model_validator
|
||||
from pydantic.v1 import BaseModel as BaseModelV1
|
||||
from typing_extensions import override
|
||||
|
||||
from langchain_core.exceptions import OutputParserException
|
||||
@@ -275,10 +276,13 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):
|
||||
pydantic_schema = self.pydantic_schema[fn_name]
|
||||
else:
|
||||
pydantic_schema = self.pydantic_schema
|
||||
if hasattr(pydantic_schema, "model_validate_json"):
|
||||
if issubclass(pydantic_schema, BaseModel):
|
||||
pydantic_args = pydantic_schema.model_validate_json(_args)
|
||||
else:
|
||||
elif issubclass(pydantic_schema, BaseModelV1):
|
||||
pydantic_args = pydantic_schema.parse_raw(_args)
|
||||
else:
|
||||
msg = f"Unsupported pydantic schema: {pydantic_schema}"
|
||||
raise ValueError(msg)
|
||||
return pydantic_args
|
||||
|
||||
|
||||
|
||||
@@ -11,7 +11,6 @@ from langchain_core.exceptions import OutputParserException
 from langchain_core.output_parsers import JsonOutputParser
 from langchain_core.outputs import Generation
 from langchain_core.utils.pydantic import (
-    IS_PYDANTIC_V2,
     PydanticBaseModel,
     TBaseModel,
 )
@@ -24,22 +23,16 @@ class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
     """The pydantic model to parse."""
 
     def _parse_obj(self, obj: dict) -> TBaseModel:
-        if IS_PYDANTIC_V2:
-            try:
-                if issubclass(self.pydantic_object, pydantic.BaseModel):
-                    return self.pydantic_object.model_validate(obj)
-                if issubclass(self.pydantic_object, pydantic.v1.BaseModel):
-                    return self.pydantic_object.parse_obj(obj)
-                msg = f"Unsupported model version for PydanticOutputParser: \
-                    {self.pydantic_object.__class__}"
-                raise OutputParserException(msg)
-            except (pydantic.ValidationError, pydantic.v1.ValidationError) as e:
-                raise self._parser_exception(e, obj) from e
-        else:  # pydantic v1
-            try:
-                return self.pydantic_object.parse_obj(obj)
-            except pydantic.ValidationError as e:
-                raise self._parser_exception(e, obj) from e
+        try:
+            if issubclass(self.pydantic_object, pydantic.BaseModel):
+                return self.pydantic_object.model_validate(obj)
+            if issubclass(self.pydantic_object, pydantic.v1.BaseModel):
+                return self.pydantic_object.parse_obj(obj)
+            msg = f"Unsupported model version for PydanticOutputParser: \
+                {self.pydantic_object.__class__}"
+            raise OutputParserException(msg)
+        except (pydantic.ValidationError, pydantic.v1.ValidationError) as e:
+            raise self._parser_exception(e, obj) from e
 
     def _parser_exception(
         self, e: Exception, json_object: dict

@@ -134,7 +134,7 @@ class BaseCumulativeTransformOutputParser(BaseTransformOutputParser[T]):
             chunk_gen = ChatGenerationChunk(message=chunk)
         elif isinstance(chunk, BaseMessage):
             chunk_gen = ChatGenerationChunk(
-                message=BaseMessageChunk(**chunk.dict())
+                message=BaseMessageChunk(**chunk.model_dump())
             )
         else:
             chunk_gen = GenerationChunk(text=chunk)
@@ -161,7 +161,7 @@ class BaseCumulativeTransformOutputParser(BaseTransformOutputParser[T]):
             chunk_gen = ChatGenerationChunk(message=chunk)
         elif isinstance(chunk, BaseMessage):
             chunk_gen = ChatGenerationChunk(
-                message=BaseMessageChunk(**chunk.dict())
+                message=BaseMessageChunk(**chunk.model_dump())
             )
         else:
             chunk_gen = GenerationChunk(text=chunk)

@@ -4,7 +4,8 @@ from __future__ import annotations
 
 from typing import Literal, Union
 
-from pydantic import computed_field
+from pydantic import model_validator
+from typing_extensions import Self
 
 from langchain_core.messages import BaseMessage, BaseMessageChunk
 from langchain_core.outputs.generation import Generation
@@ -25,30 +26,44 @@ class ChatGeneration(Generation):
     via callbacks).
     """
 
+    text: str = ""
+    """*SHOULD NOT BE SET DIRECTLY* The text contents of the output message."""
     message: BaseMessage
     """The message output by the chat model."""
 
     # Override type to be ChatGeneration, ignore mypy error as this is intentional
     type: Literal["ChatGeneration"] = "ChatGeneration"  # type: ignore[assignment]
     """Type is used exclusively for serialization purposes."""
 
-    @computed_field  # type: ignore[prop-decorator]
-    @property
-    def text(self) -> str:
-        """Set the text attribute to be the contents of the message."""
-        text_ = ""
+    @model_validator(mode="after")
+    def set_text(self) -> Self:
+        """Set the text attribute to be the contents of the message.
+
+        Args:
+            values: The values of the object.
+
+        Returns:
+            The values of the object with the text attribute set.
+
+        Raises:
+            ValueError: If the message is not a string or a list.
+        """
+        text = ""
         if isinstance(self.message.content, str):
-            text_ = self.message.content
+            text = self.message.content
         # Assumes text in content blocks in OpenAI format.
         # Uses first text block.
         elif isinstance(self.message.content, list):
             for block in self.message.content:
                 if isinstance(block, str):
-                    text_ = block
+                    text = block
                     break
                 if isinstance(block, dict) and "text" in block:
-                    text_ = block["text"]
+                    text = block["text"]
                     break
-        return text_
+        else:
+            pass
+        self.text = text
+        return self
 
 
 class ChatGenerationChunk(ChatGeneration):
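The computed-field-to-validator swap above follows a general pydantic 2 pattern: derive one field from another after validation. A minimal standalone sketch (the `Greeting` model is hypothetical, not from langchain-core):

```python
# Minimal sketch of pydantic's model_validator(mode="after") pattern used in
# the hunk above; `Greeting` is an illustrative model.
from pydantic import BaseModel, model_validator
from typing_extensions import Self


class Greeting(BaseModel):
    name: str
    text: str = ""  # derived below, not meant to be set directly

    @model_validator(mode="after")
    def set_text(self) -> Self:
        # Runs after field validation; may mutate fields and must return self.
        self.text = f"Hello, {self.name}!"
        return self


print(Greeting(name="Ada").text)  # -> "Hello, Ada!"
```

Unlike a `computed_field`, the derived value here is a real stored field, which keeps serialized output (`model_dump`) stable across pydantic versions.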
@@ -59,7 +74,7 @@ class ChatGenerationChunk(ChatGeneration):
 
     message: BaseMessageChunk
     """The message chunk output by the chat model."""
 
     # Override type to be ChatGeneration, ignore mypy error as this is intentional
     type: Literal["ChatGenerationChunk"] = "ChatGenerationChunk"  # type: ignore[assignment]
     """Type is used exclusively for serialization purposes."""

@@ -4,8 +4,6 @@ from __future__ import annotations
 
 from typing import Any, Literal, Optional
 
-from pydantic import computed_field
-
 from langchain_core.load import Serializable
 from langchain_core.utils._merge import merge_dicts
 
@@ -26,30 +24,14 @@ class Generation(Serializable):
     for more information.
     """
 
-    def __init__(
-        self,
-        text: str = "",
-        generation_info: Optional[dict[str, Any]] = None,
-        **kwargs: Any,
-    ):
-        """Initialize a Generation."""
-        super().__init__(generation_info=generation_info, **kwargs)
-        self._text = text
-
-    # workaround for ChatGeneration so that we can use a computed field to populate
-    # the text field from the message content (parent class needs to have a property)
-    @computed_field  # type: ignore[prop-decorator]
-    @property
-    def text(self) -> str:
-        """The text contents of the output."""
-        return self._text
+    text: str
+    """Generated text output."""
 
     generation_info: Optional[dict[str, Any]] = None
     """Raw response from the provider.
 
     May include things like the reason for finishing or token log probabilities.
     """
 
     type: Literal["Generation"] = "Generation"
     """Type is used exclusively for serialization purposes.
     Set to "Generation" for this class."""
@@ -71,16 +53,6 @@
 class GenerationChunk(Generation):
     """Generation chunk, which can be concatenated with other Generation chunks."""
 
-    def __init__(
-        self,
-        text: str = "",
-        generation_info: Optional[dict[str, Any]] = None,
-        **kwargs: Any,
-    ):
-        """Initialize a GenerationChunk."""
-        super().__init__(text=text, generation_info=generation_info, **kwargs)
-        self._text = text
-
     def __add__(self, other: GenerationChunk) -> GenerationChunk:
         """Concatenate two GenerationChunks."""
         if isinstance(other, GenerationChunk):

@@ -2,25 +2,10 @@
 
 from importlib import metadata
 
+from pydantic.v1 import *  # noqa: F403
+
 from langchain_core._api.deprecation import warn_deprecated
 
-# Create namespaces for pydantic v1 and v2.
-# This code must stay at the top of the file before other modules may
-# attempt to import pydantic since it adds pydantic_v1 and pydantic_v2 to sys.modules.
-#
-# This hack is done for the following reasons:
-# * Langchain will attempt to remain compatible with both pydantic v1 and v2 since
-#   both dependencies and dependents may be stuck on either version of v1 or v2.
-# * Creating namespaces for pydantic v1 and v2 should allow us to write code that
-#   unambiguously uses either v1 or v2 API.
-# * This change is easier to roll out and roll back.
-
-try:
-    from pydantic.v1 import *  # noqa: F403
-except ImportError:
-    from pydantic import *  # type: ignore[assignment,no-redef]  # noqa: F403
-
 
 try:
     _PYDANTIC_MAJOR_VERSION: int = int(metadata.version("pydantic").split(".")[0])
 except metadata.PackageNotFoundError:
@@ -1,11 +1,8 @@
 """Pydantic v1 compatibility shim."""
 
-from langchain_core._api import warn_deprecated
+from pydantic.v1.dataclasses import *  # noqa: F403
 
-try:
-    from pydantic.v1.dataclasses import *  # noqa: F403
-except ImportError:
-    from pydantic.dataclasses import *  # type: ignore[no-redef]  # noqa: F403
+from langchain_core._api import warn_deprecated
 
 warn_deprecated(
     "0.3.0",
@@ -1,11 +1,8 @@
 """Pydantic v1 compatibility shim."""
 
-from langchain_core._api import warn_deprecated
+from pydantic.v1.main import *  # noqa: F403
 
-try:
-    from pydantic.v1.main import *  # noqa: F403
-except ImportError:
-    from pydantic.main import *  # type: ignore[assignment,no-redef]  # noqa: F403
+from langchain_core._api import warn_deprecated
 
 warn_deprecated(
     "0.3.0",

@@ -241,7 +241,11 @@ class RunnableWithMessageHistory(RunnableBindingBase):
         self,
         runnable: Union[
             Runnable[
-                Union[MessagesOrDictWithMessages],
+                list[BaseMessage],
                 Union[str, BaseMessage, MessagesOrDictWithMessages],
             ],
+            Runnable[
+                dict[str, Any],
+                Union[str, BaseMessage, MessagesOrDictWithMessages],
+            ],
             LanguageModelLike,
@@ -258,7 +262,7 @@ class RunnableWithMessageHistory(RunnableBindingBase):
 
         Args:
             runnable: The base Runnable to be wrapped. Must take as input one of:
-                1. A sequence of BaseMessages
+                1. A list of BaseMessages
                 2. A dict with one key for all messages
                 3. A dict with one key for the current input string/message(s) and
                     a separate key for historical messages. If the input key points

@@ -1,4 +1,4 @@
-"""Base for Tools."""
+"""Base classes and utilities for LangChain tools."""
 
 from __future__ import annotations
 
@@ -77,14 +77,30 @@ FILTERED_ARGS = ("run_manager", "callbacks")
 
 
 class SchemaAnnotationError(TypeError):
-    """Raised when 'args_schema' is missing or has an incorrect type annotation."""
+    """Raised when args_schema is missing or has an incorrect type annotation."""
 
 
 def _is_annotated_type(typ: type[Any]) -> bool:
+    """Check if a type is an Annotated type.
+
+    Args:
+        typ: The type to check.
+
+    Returns:
+        True if the type is an Annotated type, False otherwise.
+    """
     return get_origin(typ) is typing.Annotated
 
 
 def _get_annotation_description(arg_type: type) -> str | None:
+    """Extract description from an Annotated type.
+
+    Args:
+        arg_type: The type to extract description from.
+
+    Returns:
+        The description string if found, None otherwise.
+    """
     if _is_annotated_type(arg_type):
         annotated_args = get_args(arg_type)
         for annotation in annotated_args[1:]:
@@ -100,7 +116,17 @@ def _get_filtered_args(
     filter_args: Sequence[str],
     include_injected: bool = True,
 ) -> dict:
-    """Get the arguments from a function's signature."""
+    """Get filtered arguments from a function's signature.
+
+    Args:
+        inferred_model: The Pydantic model inferred from the function.
+        func: The function to extract arguments from.
+        filter_args: Arguments to exclude from the result.
+        include_injected: Whether to include injected arguments.
+
+    Returns:
+        Dictionary of filtered arguments with their schema definitions.
+    """
     schema = inferred_model.model_json_schema()["properties"]
     valid_keys = signature(func).parameters
     return {
@@ -115,9 +141,17 @@ def _get_filtered_args(
 def _parse_python_function_docstring(
     function: Callable, annotations: dict, *, error_on_invalid_docstring: bool = False
 ) -> tuple[str, dict]:
-    """Parse the function and argument descriptions from the docstring of a function.
+    """Parse function and argument descriptions from a docstring.
 
     Assumes the function docstring follows Google Python style guide.
+
+    Args:
+        function: The function to parse the docstring from.
+        annotations: Type annotations for the function parameters.
+        error_on_invalid_docstring: Whether to raise an error on invalid docstring.
+
+    Returns:
+        A tuple containing the function description and argument descriptions.
     """
     docstring = inspect.getdoc(function)
     return _parse_google_docstring(
@@ -130,7 +164,15 @@ def _parse_python_function_docstring(
 def _validate_docstring_args_against_annotations(
     arg_descriptions: dict, annotations: dict
 ) -> None:
-    """Raise error if docstring arg is not in type annotations."""
+    """Validate that docstring arguments match function annotations.
+
+    Args:
+        arg_descriptions: Arguments described in the docstring.
+        annotations: Type annotations from the function signature.
+
+    Raises:
+        ValueError: If a docstring argument is not found in function signature.
+    """
     for docstring_arg in arg_descriptions:
         if docstring_arg not in annotations:
             msg = f"Arg {docstring_arg} in docstring not found in function signature."
@@ -143,7 +185,16 @@ def _infer_arg_descriptions(
     parse_docstring: bool = False,
     error_on_invalid_docstring: bool = False,
 ) -> tuple[str, dict]:
-    """Infer argument descriptions from a function's docstring."""
+    """Infer argument descriptions from function docstring and annotations.
+
+    Args:
+        fn: The function to infer descriptions from.
+        parse_docstring: Whether to parse the docstring for descriptions.
+        error_on_invalid_docstring: Whether to raise error on invalid docstring.
+
+    Returns:
+        A tuple containing the function description and argument descriptions.
+    """
     annotations = typing.get_type_hints(fn, include_extras=True)
     if parse_docstring:
         description, arg_descriptions = _parse_python_function_docstring(
@@ -163,7 +214,15 @@ def _infer_arg_descriptions(
 
 
 def _is_pydantic_annotation(annotation: Any, pydantic_version: str = "v2") -> bool:
-    """Determine if a type annotation is a Pydantic model."""
+    """Check if a type annotation is a Pydantic model.
+
+    Args:
+        annotation: The type annotation to check.
+        pydantic_version: The Pydantic version to check against ("v1" or "v2").
+
+    Returns:
+        True if the annotation is a Pydantic model, False otherwise.
+    """
     base_model_class = BaseModelV1 if pydantic_version == "v1" else BaseModel
     try:
         return issubclass(annotation, base_model_class)
@@ -174,7 +233,18 @@ def _is_pydantic_annotation(annotation: Any, pydantic_version: str = "v2") -> bool:
 def _function_annotations_are_pydantic_v1(
     signature: inspect.Signature, func: Callable
 ) -> bool:
-    """Determine if all Pydantic annotations in a function signature are from V1."""
+    """Check if all Pydantic annotations in a function are from V1.
+
+    Args:
+        signature: The function signature to check.
+        func: The function being checked.
+
+    Returns:
+        True if all Pydantic annotations are from V1, False otherwise.
+
+    Raises:
+        NotImplementedError: If the function contains mixed V1 and V2 annotations.
+    """
     any_v1_annotations = any(
         _is_pydantic_annotation(parameter.annotation, pydantic_version="v1")
         for parameter in signature.parameters.values()
@@ -193,15 +263,11 @@ def _function_annotations_are_pydantic_v1(
 
 
 class _SchemaConfig:
-    """Configuration for the pydantic model.
+    """Configuration for Pydantic models generated from function signatures.
 
-    This is used to configure the pydantic model created from
-    a function's signature.
-
-    Parameters:
+    Attributes:
         extra: Whether to allow extra fields in the model.
         arbitrary_types_allowed: Whether to allow arbitrary types in the model.
-            Defaults to True.
     """
 
     extra: str = "forbid"
@@ -309,12 +375,11 @@ def create_schema_from_function(
 
 
 class ToolException(Exception):  # noqa: N818
-    """Optional exception that tool throws when execution error occurs.
+    """Exception thrown when a tool execution error occurs.
 
-    When this exception is thrown, the agent will not stop working,
-    but it will handle the exception according to the handle_tool_error
-    variable of the tool, and the processing result will be returned
-    to the agent as observation, and printed in red on the console.
+    This exception allows tools to signal errors without stopping the agent.
+    The error is handled according to the tool's handle_tool_error setting,
+    and the result is returned as an observation to the agent.
     """

@@ -322,10 +387,21 @@ ArgsSchema = Union[TypeBaseModel, dict[str, Any]]
 
 
 class BaseTool(RunnableSerializable[Union[str, dict, ToolCall], Any]):
-    """Interface LangChain tools must implement."""
+    """Base class for all LangChain tools.
+
+    This abstract class defines the interface that all LangChain tools must implement.
+    Tools are components that can be called by agents to perform specific actions.
+    """
 
     def __init_subclass__(cls, **kwargs: Any) -> None:
-        """Create the definition of the new tool class."""
+        """Validate the tool class definition during subclass creation.
+
+        Args:
+            **kwargs: Additional keyword arguments passed to the parent class.
+
+        Raises:
+            SchemaAnnotationError: If args_schema has incorrect type annotation.
+        """
         super().__init_subclass__(**kwargs)
 
         args_schema_type = cls.__annotations__.get("args_schema", None)
@@ -444,13 +520,21 @@ class ChildTool(BaseTool):
 
     @property
     def is_single_input(self) -> bool:
-        """Whether the tool only accepts a single input."""
+        """Check if the tool accepts only a single input argument.
+
+        Returns:
+            True if the tool has only one input argument, False otherwise.
+        """
         keys = {k for k in self.args if k != "kwargs"}
         return len(keys) == 1
 
     @property
     def args(self) -> dict:
-        """The arguments of the tool."""
+        """Get the tool's input arguments schema.
+
+        Returns:
+            Dictionary containing the tool's argument properties.
+        """
         if isinstance(self.args_schema, dict):
             json_schema = self.args_schema
         else:
@@ -460,7 +544,11 @@ class ChildTool(BaseTool):
 
     @property
     def tool_call_schema(self) -> ArgsSchema:
-        """The schema for a tool call."""
+        """Get the schema for tool calls, excluding injected arguments.
+
+        Returns:
+            The schema that should be used for tool calls from language models.
+        """
         if isinstance(self.args_schema, dict):
             if self.description:
                 return {
@@ -524,11 +612,19 @@ class ChildTool(BaseTool):
     def _parse_input(
         self, tool_input: Union[str, dict], tool_call_id: Optional[str]
     ) -> Union[str, dict[str, Any]]:
-        """Convert tool input to a pydantic model.
+        """Parse and validate tool input using the args schema.
 
         Args:
-            tool_input: The input to the tool.
-            tool_call_id: The id of the tool call.
+            tool_input: The raw input to the tool.
+            tool_call_id: The ID of the tool call, if available.
+
+        Returns:
+            The parsed and validated input.
+
+        Raises:
+            ValueError: If string input is provided with JSON schema or if
+                InjectedToolCallId is required but not provided.
+            NotImplementedError: If args_schema is not a supported type.
         """
         input_args = self.args_schema
         if isinstance(tool_input, str):
@@ -540,10 +636,13 @@ class ChildTool(BaseTool):
                 )
                 raise ValueError(msg)
             key_ = next(iter(get_fields(input_args).keys()))
-            if hasattr(input_args, "model_validate"):
+            if issubclass(input_args, BaseModel):
                 input_args.model_validate({key_: tool_input})
-            else:
+            elif issubclass(input_args, BaseModelV1):
                 input_args.parse_obj({key_: tool_input})
+            else:
+                msg = f"args_schema must be a Pydantic BaseModel, got {input_args}"
+                raise TypeError(msg)
             return tool_input
         if input_args is not None:
             if isinstance(input_args, dict):
@@ -637,6 +736,18 @@ class ChildTool(BaseTool):
     def _to_args_and_kwargs(
         self, tool_input: Union[str, dict], tool_call_id: Optional[str]
     ) -> tuple[tuple, dict]:
+        """Convert tool input to positional and keyword arguments.
+
+        Args:
+            tool_input: The input to the tool.
+            tool_call_id: The ID of the tool call, if available.
+
+        Returns:
+            A tuple of (positional_args, keyword_args) for the tool.
+
+        Raises:
+            TypeError: If the tool input type is invalid.
+        """
         if (
             self.args_schema is not None
             and isinstance(self.args_schema, type)
@@ -889,11 +1000,27 @@ class ChildTool(BaseTool):
 
     @deprecated("0.1.47", alternative="invoke", removal="1.0")
     def __call__(self, tool_input: str, callbacks: Callbacks = None) -> str:
-        """Make tool callable."""
+        """Make tool callable (deprecated).
+
+        Args:
+            tool_input: The input to the tool.
+            callbacks: Callbacks to use during execution.
+
+        Returns:
+            The tool's output.
+        """
         return self.run(tool_input, callbacks=callbacks)
 
 
 def _is_tool_call(x: Any) -> bool:
+    """Check if the input is a tool call dictionary.
+
+    Args:
+        x: The input to check.
+
+    Returns:
+        True if the input is a tool call, False otherwise.
+    """
     return isinstance(x, dict) and x.get("type") == "tool_call"
 
 
@@ -904,6 +1031,18 @@ def _handle_validation_error(
         Literal[True], str, Callable[[Union[ValidationError, ValidationErrorV1]], str]
     ],
 ) -> str:
+    """Handle validation errors based on the configured flag.
+
+    Args:
+        e: The validation error that occurred.
+        flag: How to handle the error (bool, string, or callable).
+
+    Returns:
+        The error message to return.
+
+    Raises:
+        ValueError: If the flag type is unexpected.
+    """
     if isinstance(flag, bool):
         content = "Tool input validation error"
     elif isinstance(flag, str):
@@ -924,6 +1063,18 @@ def _handle_tool_error(
     *,
     flag: Optional[Union[Literal[True], str, Callable[[ToolException], str]]],
 ) -> str:
+    """Handle tool execution errors based on the configured flag.
+
+    Args:
+        e: The tool exception that occurred.
+        flag: How to handle the error (bool, string, or callable).
+
+    Returns:
+        The error message to return.
+
+    Raises:
+        ValueError: If the flag type is unexpected.
+    """
     if isinstance(flag, bool):
         content = e.args[0] if e.args else "Tool execution error"
     elif isinstance(flag, str):
@@ -944,6 +1095,16 @@ def _prep_run_args(
     config: Optional[RunnableConfig],
     **kwargs: Any,
 ) -> tuple[Union[str, dict], dict]:
+    """Prepare arguments for tool execution.
+
+    Args:
+        value: The input value (string, dict, or ToolCall).
+        config: The runnable configuration.
+        **kwargs: Additional keyword arguments.
+
+    Returns:
+        A tuple of (tool_input, run_kwargs).
+    """
     config = ensure_config(config)
     if _is_tool_call(value):
         tool_call_id: Optional[str] = cast("ToolCall", value)["id"]
@@ -973,6 +1134,18 @@ def _format_output(
     name: str,
     status: str,
 ) -> Union[ToolOutputMixin, Any]:
+    """Format tool output as a ToolMessage if appropriate.
+
+    Args:
+        content: The main content of the tool output.
+        artifact: Any artifact data from the tool.
+        tool_call_id: The ID of the tool call.
+        name: The name of the tool.
+        status: The execution status.
+
+    Returns:
+        The formatted output, either as a ToolMessage or the original content.
+    """
     if isinstance(content, ToolOutputMixin) or tool_call_id is None:
         return content
     if not _is_message_content_type(content):
@@ -987,14 +1160,32 @@ def _format_output(
 
 
 def _is_message_content_type(obj: Any) -> bool:
-    """Check for OpenAI or Anthropic format tool message content."""
+    """Check if object is valid message content format.
+
+    Validates content for OpenAI or Anthropic format tool messages.
+
+    Args:
+        obj: The object to check.
+
+    Returns:
+        True if the object is valid message content, False otherwise.
+    """
     return isinstance(obj, str) or (
         isinstance(obj, list) and all(_is_message_content_block(e) for e in obj)
     )
 
 
 def _is_message_content_block(obj: Any) -> bool:
-    """Check for OpenAI or Anthropic format tool message content blocks."""
+    """Check if object is a valid message content block.
+
+    Validates content blocks for OpenAI or Anthropic format.
+
+    Args:
+        obj: The object to check.
+
+    Returns:
+        True if the object is a valid content block, False otherwise.
+    """
     if isinstance(obj, str):
         return True
     if isinstance(obj, dict):
@@ -1003,6 +1194,14 @@ def _is_message_content_block(obj: Any) -> bool:
 
 
 def _stringify(content: Any) -> str:
+    """Convert content to string, preferring JSON format.
+
+    Args:
+        content: The content to stringify.
+
+    Returns:
+        String representation of the content.
+    """
     try:
         return json.dumps(content, ensure_ascii=False)
     except Exception:
@@ -1010,6 +1209,14 @@ def _stringify(content: Any) -> str:
 
 
 def _get_type_hints(func: Callable) -> Optional[dict[str, type]]:
+    """Get type hints from a function, handling partial functions.
+
+    Args:
+        func: The function to get type hints from.
+
+    Returns:
+        Dictionary of type hints, or None if extraction fails.
+    """
     if isinstance(func, functools.partial):
         func = func.func
     try:
@@ -1019,6 +1226,14 @@ def _get_type_hints(func: Callable) -> Optional[dict[str, type]]:
 
 
 def _get_runnable_config_param(func: Callable) -> Optional[str]:
+    """Find the parameter name for RunnableConfig in a function.
+
+    Args:
+        func: The function to check.
+
+    Returns:
+        The parameter name for RunnableConfig, or None if not found.
+    """
     type_hints = _get_type_hints(func)
     if not type_hints:
         return None
@@ -1029,30 +1244,52 @@ def _get_runnable_config_param(func: Callable) -> Optional[str]:
 
 
 class InjectedToolArg:
-    """Annotation for a Tool arg that is **not** meant to be generated by a model."""
+    """Annotation for tool arguments that are injected at runtime.
+
+    Tool arguments annotated with this class are not included in the tool
+    schema sent to language models and are instead injected during execution.
+    """
 
 
 class InjectedToolCallId(InjectedToolArg):
-    """Annotation for injecting the tool_call_id.
+    """Annotation for injecting the tool call ID.
+
+    This annotation is used to mark a tool parameter that should receive
+    the tool call ID at runtime.
 
     Example:
-        ..code-block:: python
-
-            from typing_extensions import Annotated
-
-            from langchain_core.messages import ToolMessage
-            from langchain_core.tools import tool, InjectedToolCallID
-
-            @tool
-            def foo(x: int, tool_call_id: Annotated[str, InjectedToolCallID]) -> ToolMessage:
-                \"\"\"Return x.\"\"\"
-                return ToolMessage(str(x), artifact=x, name="foo", tool_call_id=tool_call_id)
-    """  # noqa: E501
+        ```python
+        from typing_extensions import Annotated
+        from langchain_core.messages import ToolMessage
+        from langchain_core.tools import tool, InjectedToolCallId
+
+        @tool
+        def foo(
+            x: int, tool_call_id: Annotated[str, InjectedToolCallId]
+        ) -> ToolMessage:
+            \"\"\"Return x.\"\"\"
+            return ToolMessage(
+                str(x),
+                artifact=x,
+                name="foo",
+                tool_call_id=tool_call_id
+            )
+        ```
+    """
 
 
 def _is_injected_arg_type(
     type_: type, injected_type: Optional[type[InjectedToolArg]] = None
 ) -> bool:
+    """Check if a type annotation indicates an injected argument.
+
+    Args:
+        type_: The type annotation to check.
+        injected_type: The specific injected type to check for.
+
+    Returns:
+        True if the type is an injected argument, False otherwise.
+    """
     injected_type = injected_type or InjectedToolArg
     return any(
         isinstance(arg, injected_type)
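For context on how a tool annotated this way is exercised: the model never produces the injected parameter; the runtime supplies it from the tool call itself. A hedged sketch (the tool call id `"call_123"` is made up for illustration):

```python
# Hypothetical invocation of the `foo` tool from the docstring above; the
# tool-call id value is illustrative.
from typing_extensions import Annotated

from langchain_core.messages import ToolMessage
from langchain_core.tools import InjectedToolCallId, tool


@tool
def foo(x: int, tool_call_id: Annotated[str, InjectedToolCallId]) -> ToolMessage:
    """Return x."""
    return ToolMessage(str(x), artifact=x, name="foo", tool_call_id=tool_call_id)


# Invoking with a tool-call dict lets the runtime inject tool_call_id;
# only `x` appears in the schema shown to the model.
result = foo.invoke(
    {"type": "tool_call", "name": "foo", "args": {"x": 7}, "id": "call_123"}
)
print(result.content)  # -> "7"
```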
@@ -1135,6 +1372,16 @@ def _replace_type_vars(
     *,
     default_to_bound: bool = True,
 ) -> type:
+    """Replace TypeVars in a type annotation with concrete types.
+
+    Args:
+        type_: The type annotation to process.
+        generic_map: Mapping of TypeVars to concrete types.
+        default_to_bound: Whether to use TypeVar bounds as defaults.
+
+    Returns:
+        The type with TypeVars replaced.
+    """
     generic_map = generic_map or {}
     if isinstance(type_, TypeVar):
         if type_ in generic_map:
@@ -1152,8 +1399,16 @@ def _replace_type_vars(
 
 
 class BaseToolkit(BaseModel, ABC):
-    """Base Toolkit representing a collection of related tools."""
+    """Base class for toolkits containing related tools.
+
+    A toolkit is a collection of related tools that can be used together
+    to accomplish a specific task or work with a particular system.
+    """
 
     @abstractmethod
     def get_tools(self) -> list[BaseTool]:
-        """Get the tools in the toolkit."""
+        """Get all tools in the toolkit.
+
+        Returns:
+            List of tools contained in this toolkit.
+        """

@@ -95,6 +95,7 @@ class LangChainTracer(BaseTracer):
         self.client = client or get_client()
         self.tags = tags or []
         self.latest_run: Optional[Run] = None
+        self.run_has_token_event_map: dict[str, bool] = {}
 
     def _start_trace(self, run: Run) -> None:
         if self.project_name:
@@ -235,6 +236,11 @@ class LangChainTracer(BaseTracer):
         parent_run_id: Optional[UUID] = None,
     ) -> Run:
         """Append token event to LLM run and return the run."""
+        run_id_str = str(run_id)
+        if run_id_str not in self.run_has_token_event_map:
+            self.run_has_token_event_map[run_id_str] = True
+        else:
+            return self._get_run(run_id, run_type={"llm", "chat_model"})
         return super()._llm_run_with_token_event(
+            # Drop the chunk; we don't need to save it
             token,

@@ -2,8 +2,8 @@
 
 from __future__ import annotations
 
-import datetime
 import warnings
+from datetime import datetime, timezone
 from typing import Any, Optional
 from uuid import UUID
 
@@ -32,7 +32,7 @@ def RunTypeEnum() -> type[RunTypeEnumDep]:  # noqa: N802
 class TracerSessionV1Base(BaseModelV1):
     """Base class for TracerSessionV1."""
 
-    start_time: datetime.datetime = FieldV1(default_factory=datetime.datetime.utcnow)
+    start_time: datetime = FieldV1(default_factory=lambda: datetime.now(timezone.utc))
     name: Optional[str] = None
     extra: Optional[dict[str, Any]] = None
 
@@ -69,8 +69,8 @@ class BaseRun(BaseModelV1):
 
     uuid: str
     parent_uuid: Optional[str] = None
-    start_time: datetime.datetime = FieldV1(default_factory=datetime.datetime.utcnow)
-    end_time: datetime.datetime = FieldV1(default_factory=datetime.datetime.utcnow)
+    start_time: datetime = FieldV1(default_factory=lambda: datetime.now(timezone.utc))
+    end_time: datetime = FieldV1(default_factory=lambda: datetime.now(timezone.utc))
     extra: Optional[dict[str, Any]] = None
     execution_order: int
     child_execution_order: int

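The motivation for this swap, shown in isolation: `datetime.utcnow()` returns a naive timestamp (and is deprecated since Python 3.12), while `datetime.now(timezone.utc)` is timezone-aware. A quick standalone check:

```python
# Why the default_factory changes: utcnow() is naive, now(timezone.utc) is aware.
from datetime import datetime, timezone

naive = datetime.utcnow()           # tzinfo is None; deprecated in Python 3.12+
aware = datetime.now(timezone.utc)  # carries tzinfo=UTC

print(naive.tzinfo)  # -> None
print(aware.tzinfo)  # -> UTC
```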
@@ -634,7 +634,7 @@ def tool_example_to_messages(
     1) HumanMessage: contains the content from which content should be extracted.
     2) AIMessage: contains the extracted information from the model
     3) ToolMessage: contains confirmation to the model that the model requested a tool
-    correctly.
+        correctly.
 
     If `ai_response` is specified, there will be a final AIMessage with that response.
 
@@ -668,7 +668,7 @@ def tool_example_to_messages(
             ..., description="The color of the person's hair if known"
         )
         height_in_meters: Optional[str] = Field(
-            ..., description="Height in METERs"
+            ..., description="Height in METERS"
         )
 
     examples = [

@@ -21,9 +21,12 @@ from typing import (
 
 import pydantic
 from packaging import version
-from pydantic import (
+
+# root_validator is deprecated but we need it for backward compatibility of @pre_init
+from pydantic import (  # type: ignore[deprecated]
     BaseModel,
     ConfigDict,
     Field,
+    PydanticDeprecationWarning,
     RootModel,
     root_validator,
@@ -38,29 +41,23 @@ from pydantic.json_schema import (
     JsonSchemaMode,
     JsonSchemaValue,
 )
-from typing_extensions import override
+from pydantic.v1 import BaseModel as BaseModelV1
+from pydantic.v1 import create_model as create_model_v1
+from pydantic.v1.fields import ModelField
+from typing_extensions import deprecated, override
 
 if TYPE_CHECKING:
     from pydantic_core import core_schema
 
-try:
-    import pydantic
-
-    PYDANTIC_VERSION = version.parse(pydantic.__version__)
-except ImportError:
-    PYDANTIC_VERSION = version.parse("0.0.0")
+PYDANTIC_VERSION = version.parse(pydantic.__version__)
 
 
+@deprecated("Use PYDANTIC_VERSION.major instead.")
 def get_pydantic_major_version() -> int:
+    """DEPRECATED - Get the major version of Pydantic.
+
+    Use PYDANTIC_VERSION.major instead.
+    """
     warnings.warn(
         "get_pydantic_major_version is deprecated. Use PYDANTIC_VERSION.major instead.",
         DeprecationWarning,
         stacklevel=2,
     )
     return PYDANTIC_VERSION.major
@@ -70,43 +67,20 @@ PYDANTIC_MINOR_VERSION = PYDANTIC_VERSION.minor
 IS_PYDANTIC_V1 = PYDANTIC_VERSION.major == 1
 IS_PYDANTIC_V2 = PYDANTIC_VERSION.major == 2
 
-if IS_PYDANTIC_V1:
-    from pydantic.fields import FieldInfo as FieldInfoV1
-
-    PydanticBaseModel = pydantic.BaseModel
-    TypeBaseModel = type[BaseModel]
-elif IS_PYDANTIC_V2:
-    from pydantic.v1.fields import FieldInfo as FieldInfoV1  # type: ignore[assignment]
-    from pydantic.v1.fields import ModelField
-
-    # Union type needs to be last assignment to PydanticBaseModel to make mypy happy.
-    PydanticBaseModel = Union[BaseModel, pydantic.BaseModel]  # type: ignore[assignment,misc]
-    TypeBaseModel = Union[type[BaseModel], type[pydantic.BaseModel]]  # type: ignore[misc]
-else:
-    msg = f"Unsupported Pydantic version: {PYDANTIC_VERSION.major}"
-    raise ValueError(msg)
+PydanticBaseModel = BaseModel
+TypeBaseModel = type[BaseModel]
 
 TBaseModel = TypeVar("TBaseModel", bound=PydanticBaseModel)
 
 
 def is_pydantic_v1_subclass(cls: type) -> bool:
     """Check if the installed Pydantic version is 1.x-like."""
-    if IS_PYDANTIC_V1:
-        return True
-    if IS_PYDANTIC_V2:
-        from pydantic.v1 import BaseModel as BaseModelV1
-
-        if issubclass(cls, BaseModelV1):
-            return True
-    return False
+    return issubclass(cls, BaseModelV1)
 
 
 def is_pydantic_v2_subclass(cls: type) -> bool:
     """Check if the installed Pydantic version is 1.x-like."""
-    from pydantic import BaseModel
-
-    return IS_PYDANTIC_V2 and issubclass(cls, BaseModel)
+    return issubclass(cls, BaseModel)

@@ -114,7 +88,6 @@ def is_basemodel_subclass(cls: type) -> bool:
 
     Check if the given class is a subclass of any of the following:
 
-    * pydantic.BaseModel in Pydantic 1.x
     * pydantic.BaseModel in Pydantic 2.x
     * pydantic.v1.BaseModel in Pydantic 2.x
     """
@@ -122,24 +95,7 @@ def is_basemodel_subclass(cls: type) -> bool:
     if not inspect.isclass(cls) or isinstance(cls, GenericAlias):
         return False
 
-    if IS_PYDANTIC_V1:
-        from pydantic import BaseModel as BaseModelV1Proper
-
-        if issubclass(cls, BaseModelV1Proper):
-            return True
-    elif IS_PYDANTIC_V2:
-        from pydantic import BaseModel as BaseModelV2
-        from pydantic.v1 import BaseModel as BaseModelV1
-
-        if issubclass(cls, BaseModelV2):
-            return True
-
-        if issubclass(cls, BaseModelV1):
-            return True
-    else:
-        msg = f"Unsupported Pydantic version: {PYDANTIC_VERSION.major}"
-        raise ValueError(msg)
-    return False
+    return issubclass(cls, (BaseModel, BaseModelV1))
 
 
 def is_basemodel_instance(obj: Any) -> bool:
@@ -147,28 +103,10 @@ def is_basemodel_instance(obj: Any) -> bool:
 
     Check if the given class is an instance of any of the following:
 
-    * pydantic.BaseModel in Pydantic 1.x
     * pydantic.BaseModel in Pydantic 2.x
     * pydantic.v1.BaseModel in Pydantic 2.x
     """
-    if IS_PYDANTIC_V1:
-        from pydantic import BaseModel as BaseModelV1Proper
-
-        if isinstance(obj, BaseModelV1Proper):
-            return True
-    elif IS_PYDANTIC_V2:
-        from pydantic import BaseModel as BaseModelV2
-        from pydantic.v1 import BaseModel as BaseModelV1
-
-        if isinstance(obj, BaseModelV2):
-            return True
-
-        if isinstance(obj, BaseModelV1):
-            return True
-    else:
-        msg = f"Unsupported Pydantic version: {PYDANTIC_VERSION.major}"
-        raise ValueError(msg)
-    return False
+    return isinstance(obj, (BaseModel, BaseModelV1))
 
 
 # How to type hint this?

@@ -184,6 +122,9 @@ def pre_init(func: Callable) -> Any:
     with warnings.catch_warnings():
         warnings.filterwarnings(action="ignore", category=PydanticDeprecationWarning)
 
+        # Ideally we would use @model_validator(mode="before") but this would change the
+        # order of the validators. See https://github.com/pydantic/pydantic/discussions/7434.
+        # So we keep root_validator for backward compatibility.
         @root_validator(pre=True)
         @wraps(func)
         def wrapper(cls: type[BaseModel], values: dict[str, Any]) -> dict[str, Any]:
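A small self-contained sketch of the behavior that comment is protecting: a `root_validator(pre=True)` sees the raw input dict before any field validation runs. This assumes pydantic 2's deprecated `root_validator` shim; the `ServerConfig` model is hypothetical:

```python
# Sketch of the pre-validation hook pattern that @pre_init preserves;
# `ServerConfig` is an illustrative model, not langchain-core code.
import warnings

from pydantic import BaseModel, PydanticDeprecationWarning, root_validator

with warnings.catch_warnings():
    warnings.filterwarnings(action="ignore", category=PydanticDeprecationWarning)

    class ServerConfig(BaseModel):
        host: str
        url: str = ""

        @root_validator(pre=True)
        def build_url(cls, values: dict) -> dict:
            # Runs on the raw input dict, before field validation.
            values.setdefault("url", f"https://{values.get('host', '')}")
            return values


print(ServerConfig(host="example.com").url)  # -> "https://example.com"
```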
@@ -244,26 +185,18 @@ class _IgnoreUnserializable(GenerateJsonSchema):
 
 def _create_subset_model_v1(
     name: str,
-    model: type[BaseModel],
+    model: type[BaseModelV1],
     field_names: list,
     *,
     descriptions: Optional[dict] = None,
     fn_description: Optional[str] = None,
 ) -> type[BaseModel]:
     """Create a pydantic model with only a subset of model's fields."""
-    if IS_PYDANTIC_V1:
-        from pydantic import create_model
-    elif IS_PYDANTIC_V2:
-        from pydantic.v1 import create_model  # type: ignore[no-redef]
-    else:
-        msg = f"Unsupported pydantic version: {PYDANTIC_VERSION.major}"
-        raise NotImplementedError(msg)
-
     fields = {}
 
     for field_name in field_names:
-        # Using pydantic v1 so can access __fields__ as a dict.
-        field = model.__fields__[field_name]  # type: ignore[index]
+        field = model.__fields__[field_name]
         t = (
             # this isn't perfect but should work for most functions
             field.outer_type_
@@ -274,34 +207,31 @@ def _create_subset_model_v1(
             field.field_info.description = descriptions[field_name]
         fields[field_name] = (t, field.field_info)
 
-    rtn = create_model(name, **fields)  # type: ignore[call-overload]
+    rtn = create_model_v1(name, **fields)  # type: ignore[call-overload]
     rtn.__doc__ = textwrap.dedent(fn_description or model.__doc__ or "")
     return rtn
 
 
 def _create_subset_model_v2(
     name: str,
-    model: type[pydantic.BaseModel],
+    model: type[BaseModel],
     field_names: list[str],
     *,
     descriptions: Optional[dict] = None,
     fn_description: Optional[str] = None,
-) -> type[pydantic.BaseModel]:
+) -> type[BaseModel]:
     """Create a pydantic model with a subset of the model fields."""
-    from pydantic import create_model
-    from pydantic.fields import FieldInfo
-
     descriptions_ = descriptions or {}
     fields = {}
     for field_name in field_names:
         field = model.model_fields[field_name]
         description = descriptions_.get(field_name, field.description)
-        field_info = FieldInfo(description=description, default=field.default)
+        field_info = FieldInfoV2(description=description, default=field.default)
         if field.metadata:
             field_info.metadata = field.metadata
         fields[field_name] = (field.annotation, field_info)
 
-    rtn = create_model(  # type: ignore[call-overload]
+    rtn = _create_model_base(  # type: ignore[call-overload]
         name, **fields, __config__=ConfigDict(arbitrary_types_allowed=True)
     )
 
@@ -322,7 +252,7 @@ def _create_subset_model_v2(
 
 # Private functionality to create a subset model that's compatible across
 # different versions of pydantic.
-# Handles pydantic versions 1.x and 2.x. including v1 of pydantic in 2.x.
+# Handles pydantic versions 2.x. including v1 of pydantic in 2.x.
 # However, can't find a way to type hint this.
 def _create_subset_model(
     name: str,
@@ -333,7 +263,7 @@ def _create_subset_model(
     fn_description: Optional[str] = None,
 ) -> type[BaseModel]:
     """Create subset model using the same pydantic version as the input model."""
-    if IS_PYDANTIC_V1:
+    if issubclass(model, BaseModelV1):
         return _create_subset_model_v1(
             name,
             model,
@@ -341,68 +271,43 @@ def _create_subset_model(
             descriptions=descriptions,
             fn_description=fn_description,
         )
-    if IS_PYDANTIC_V2:
-        from pydantic.v1 import BaseModel as BaseModelV1
-
-        if issubclass(model, BaseModelV1):
-            return _create_subset_model_v1(
-                name,
-                model,
-                field_names,
-                descriptions=descriptions,
-                fn_description=fn_description,
-            )
-        return _create_subset_model_v2(
-            name,
-            model,
-            field_names,
-            descriptions=descriptions,
-            fn_description=fn_description,
-        )
-    msg = f"Unsupported pydantic version: {PYDANTIC_VERSION.major}"
-    raise NotImplementedError(msg)
+    return _create_subset_model_v2(
+        name,
+        model,
+        field_names,
+        descriptions=descriptions,
+        fn_description=fn_description,
+    )
 
 
-if IS_PYDANTIC_V2:
-    from pydantic import BaseModel as BaseModelV2
-    from pydantic.v1 import BaseModel as BaseModelV1
-
-    @overload
-    def get_fields(model: type[BaseModelV2]) -> dict[str, FieldInfoV2]: ...
-
-    @overload
-    def get_fields(model: BaseModelV2) -> dict[str, FieldInfoV2]: ...
-
-    @overload
-    def get_fields(model: type[BaseModelV1]) -> dict[str, ModelField]: ...
-
-    @overload
-    def get_fields(model: BaseModelV1) -> dict[str, ModelField]: ...
-
-    def get_fields(
-        model: Union[type[Union[BaseModelV2, BaseModelV1]], BaseModelV2, BaseModelV1],
-    ) -> Union[dict[str, FieldInfoV2], dict[str, ModelField]]:
-        """Get the field names of a Pydantic model."""
-        if hasattr(model, "model_fields"):
-            return model.model_fields
-
-        if hasattr(model, "__fields__"):
-            return model.__fields__
-        msg = f"Expected a Pydantic model. Got {type(model)}"
-        raise TypeError(msg)
-
-elif IS_PYDANTIC_V1:
-    from pydantic import BaseModel as BaseModelV1_
-
-    def get_fields(  # type: ignore[no-redef]
-        model: Union[type[BaseModelV1_], BaseModelV1_],
-    ) -> dict[str, FieldInfoV1]:
-        """Get the field names of a Pydantic model."""
-        return model.__fields__  # type: ignore[return-value]
-
-else:
-    msg = f"Unsupported Pydantic version: {PYDANTIC_VERSION.major}"
-    raise ValueError(msg)
+@overload
+def get_fields(model: type[BaseModel]) -> dict[str, FieldInfoV2]: ...
+
+
+@overload
+def get_fields(model: BaseModel) -> dict[str, FieldInfoV2]: ...
+
+
+@overload
+def get_fields(model: type[BaseModelV1]) -> dict[str, ModelField]: ...
+
+
+@overload
+def get_fields(model: BaseModelV1) -> dict[str, ModelField]: ...
+
+
+def get_fields(
+    model: Union[type[Union[BaseModel, BaseModelV1]], BaseModel, BaseModelV1],
+) -> Union[dict[str, FieldInfoV2], dict[str, ModelField]]:
+    """Get the field names of a Pydantic model."""
+    if hasattr(model, "model_fields"):
+        return model.model_fields
+
+    if hasattr(model, "__fields__"):
+        return model.__fields__
+    msg = f"Expected a Pydantic model. Got {type(model)}"
+    raise TypeError(msg)
 
 _SchemaConfig = ConfigDict(
     arbitrary_types_allowed=True, frozen=True, protected_namespaces=()
@@ -546,14 +451,11 @@ _RESERVED_NAMES = {key for key in dir(BaseModel) if not key.startswith("_")}
 
 def _remap_field_definitions(field_definitions: dict[str, Any]) -> dict[str, Any]:
     """This remaps fields to avoid colliding with internal pydantic fields."""
-    from pydantic import Field
-    from pydantic.fields import FieldInfo
-
     remapped = {}
     for key, value in field_definitions.items():
         if key.startswith("_") or key in _RESERVED_NAMES:
             # Let's add a prefix to avoid colliding with internal pydantic fields
-            if isinstance(value, FieldInfo):
+            if isinstance(value, FieldInfoV2):
                 msg = (
                     f"Remapping for fields starting with '_' or fields with a name "
                     f"matching a reserved name {_RESERVED_NAMES} is not supported if "
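The duck-typed dispatch that `get_fields` keeps works because v2 models expose `model_fields` while `pydantic.v1` models expose `__fields__`. A minimal sketch of the same idea outside the library (`fields_of`, `V2Model`, and `V1Model` are illustrative names):

```python
# Minimal sketch of the duck-typed field lookup retained above.
from pydantic import BaseModel
from pydantic.v1 import BaseModel as BaseModelV1


def fields_of(model: type) -> dict:
    if hasattr(model, "model_fields"):
        return model.model_fields  # pydantic v2 model class
    if hasattr(model, "__fields__"):
        return model.__fields__  # pydantic.v1 compatibility model
    msg = f"Expected a Pydantic model. Got {type(model)}"
    raise TypeError(msg)


class V2Model(BaseModel):
    x: int


class V1Model(BaseModelV1):
    y: str


print(list(fields_of(V2Model)))  # -> ['x']
print(list(fields_of(V1Model)))  # -> ['y']
```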
@@ -42,7 +42,7 @@ def _cosine_similarity(x: Matrix, y: Matrix) -> np.ndarray:
         raise ImportError(msg) from e
 
     if len(x) == 0 or len(y) == 0:
-        return np.array([])
+        return np.array([[]])
 
     x = np.array(x)
     y = np.array(y)

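The one-character change matters for shape consistency: `np.array([])` is 1-D, while `np.array([[]])` is 2-D like every non-empty similarity matrix callers receive. A quick check:

```python
# Why the empty return value changes: callers expect a 2-D similarity matrix.
import numpy as np

print(np.array([]).shape)    # -> (0,)    1-D, surprising for matrix consumers
print(np.array([[]]).shape)  # -> (1, 0)  2-D, consistent with a matrix
```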
@@ -1,3 +1,3 @@
 """langchain-core version information and utilities."""
 
-VERSION = "0.3.64"
+VERSION = "0.3.66"
@@ -7,7 +7,7 @@ authors = []
 license = {text = "MIT"}
 requires-python = ">=3.9"
 dependencies = [
-    "langsmith<0.4,>=0.3.45",
+    "langsmith>=0.3.45",
     "tenacity!=8.4.0,<10.0.0,>=8.1.0",
     "jsonpatch<2.0,>=1.33",
     "PyYAML>=5.3",
@@ -16,7 +16,7 @@ dependencies = [
     "pydantic>=2.7.4",
 ]
 name = "langchain-core"
-version = "0.3.64"
+version = "0.3.66"
 description = "Building applications with LLMs through composability"
 readme = "README.md"
@@ -69,7 +69,6 @@ langchain-text-splitters = { path = "../text-splitters" }
 strict = "True"
 strict_bytes = "True"
 enable_error_code = "deprecated"
-report_deprecated_as_note = "True"
 
 # TODO: activate for 'strict' checking
 disallow_any_generics = "False"

@@ -1,50 +1,65 @@
-import pytest
+from typing import Literal
 
 from langchain_core.documents import Document
-from langchain_core.indexing.api import _HashedDocument
+from langchain_core.indexing.api import _get_document_with_hash
 
 
 def test_hashed_document_hashing() -> None:
-    hashed_document = _HashedDocument(  # type: ignore[call-arg]
+    document = Document(
         uid="123", page_content="Lorem ipsum dolor sit amet", metadata={"key": "value"}
     )
-    assert isinstance(hashed_document.hash_, str)
-
-
-def test_hashing_with_missing_content() -> None:
-    """Check that ValueError is raised if page_content is missing."""
-    with pytest.raises(TypeError):
-        _HashedDocument(
-            metadata={"key": "value"},
-        )  # type: ignore[call-arg]
-
-
-def test_uid_auto_assigned_to_hash() -> None:
-    """Test uid is auto-assigned to the hashed_document hash."""
-    hashed_document = _HashedDocument(  # type: ignore[call-arg]
-        page_content="Lorem ipsum dolor sit amet", metadata={"key": "value"}
-    )
-    assert hashed_document.uid == hashed_document.hash_
+    hashed_document = _get_document_with_hash(document, key_encoder="sha1")
+    assert isinstance(hashed_document.id, str)
 
 
 def test_to_document() -> None:
     """Test to_document method."""
-    hashed_document = _HashedDocument(  # type: ignore[call-arg]
+    original_doc = Document(
         page_content="Lorem ipsum dolor sit amet", metadata={"key": "value"}
     )
-    doc = hashed_document.to_document()
-    assert isinstance(doc, Document)
-    assert doc.page_content == "Lorem ipsum dolor sit amet"
-    assert doc.metadata == {"key": "value"}
+    hashed_doc = _get_document_with_hash(original_doc, key_encoder="sha1")
+    assert isinstance(hashed_doc, Document)
+    assert hashed_doc is not original_doc
+    assert hashed_doc.page_content == "Lorem ipsum dolor sit amet"
+    assert hashed_doc.metadata["key"] == "value"
 
 
-def test_from_document() -> None:
+def test_hashing() -> None:
     """Test from document class method."""
     document = Document(
         page_content="Lorem ipsum dolor sit amet", metadata={"key": "value"}
     )
-
-    hashed_document = _HashedDocument.from_document(document)
+    hashed_document = _get_document_with_hash(document, key_encoder="sha1")
     # hash should be deterministic
-    assert hashed_document.hash_ == "fd1dc827-051b-537d-a1fe-1fa043e8b276"
-    assert hashed_document.uid == hashed_document.hash_
+    assert hashed_document.id == "fd1dc827-051b-537d-a1fe-1fa043e8b276"
+
+    # Verify that hashing with sha1 is deterministic
+    another_hashed_document = _get_document_with_hash(document, key_encoder="sha1")
+    assert another_hashed_document.id == hashed_document.id
+
+    # Verify that the result is different from SHA256, SHA512, blake2b
+    values: list[Literal["sha256", "sha512", "blake2b"]] = [
+        "sha256",
+        "sha512",
+        "blake2b",
+    ]
+
+    for key_encoder in values:
+        different_hashed_document = _get_document_with_hash(
+            document, key_encoder=key_encoder
+        )
+        assert different_hashed_document.id != hashed_document.id
+
+
+def test_hashing_custom_key_encoder() -> None:
+    """Test hashing with a custom key encoder."""
+
+    def custom_key_encoder(doc: Document) -> str:
+        return f"quack-{doc.metadata['key']}"
+
+    document = Document(
+        page_content="Lorem ipsum dolor sit amet", metadata={"key": "like a duck"}
+    )
+    hashed_document = _get_document_with_hash(document, key_encoder=custom_key_encoder)
+    assert hashed_document.id == "quack-like a duck"
+    assert isinstance(hashed_document.id, str)

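A rough standalone illustration of the idea these tests exercise: deterministic content hashing with a selectable digest. This is NOT the langchain-core implementation (which is internal and may canonicalize differently), just the general pattern:

```python
# Illustrative deterministic document hashing; hypothetical helper, not
# the internal _get_document_with_hash implementation.
import hashlib
import json


def hash_document(page_content: str, metadata: dict, algorithm: str = "sha1") -> str:
    # sort_keys makes the serialized payload (and thus the digest) deterministic
    payload = json.dumps(
        {"page_content": page_content, "metadata": metadata}, sort_keys=True
    )
    return hashlib.new(algorithm, payload.encode("utf-8")).hexdigest()


a = hash_document("Lorem ipsum", {"key": "value"})
b = hash_document("Lorem ipsum", {"key": "value"})
c = hash_document("Lorem ipsum", {"key": "value"}, algorithm="sha256")
assert a == b  # same input, same digest
assert a != c  # different algorithm, different digest
```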
@@ -13,7 +13,11 @@ from langchain_core.document_loaders.base import BaseLoader
 from langchain_core.documents import Document
 from langchain_core.embeddings import DeterministicFakeEmbedding
 from langchain_core.indexing import InMemoryRecordManager, aindex, index
-from langchain_core.indexing.api import IndexingException, _abatch, _HashedDocument
+from langchain_core.indexing.api import (
+    IndexingException,
+    _abatch,
+    _get_document_with_hash,
+)
 from langchain_core.indexing.in_memory import InMemoryDocumentIndex
 from langchain_core.vectorstores import InMemoryVectorStore, VectorStore
 
@@ -2222,7 +2226,7 @@ def test_indexing_custom_batch_size(
             metadata={"source": "1"},
         ),
     ]
-    ids = [_HashedDocument.from_document(doc).uid for doc in docs]
+    ids = [_get_document_with_hash(doc, key_encoder="sha256").id for doc in docs]
 
     batch_size = 1
 
@@ -2232,7 +2236,13 @@ def test_indexing_custom_batch_size(
     mock_add_documents = MagicMock()
     vector_store.add_documents = mock_add_documents  # type: ignore[method-assign]
 
-    index(docs, record_manager, vector_store, batch_size=batch_size)
+    index(
+        docs,
+        record_manager,
+        vector_store,
+        batch_size=batch_size,
+        key_encoder="sha256",
+    )
     args, kwargs = mock_add_documents.call_args
     doc_with_id = Document(
         id=ids[0], page_content="This is a test document.", metadata={"source": "1"}
@@ -2253,7 +2263,7 @@ async def test_aindexing_custom_batch_size(
             metadata={"source": "1"},
         ),
     ]
-    ids = [_HashedDocument.from_document(doc).uid for doc in docs]
+    ids = [_get_document_with_hash(doc, key_encoder="sha256").id for doc in docs]
 
     batch_size = 1
     mock_add_documents = AsyncMock()
@@ -2261,7 +2271,9 @@ async def test_aindexing_custom_batch_size(
         id=ids[0], page_content="This is a test document.", metadata={"source": "1"}
     )
     vector_store.aadd_documents = mock_add_documents  # type: ignore[method-assign]
-    await aindex(docs, arecord_manager, vector_store, batch_size=batch_size)
+    await aindex(
+        docs, arecord_manager, vector_store, batch_size=batch_size, key_encoder="sha256"
+    )
     args, kwargs = mock_add_documents.call_args
     assert args == ([doc_with_id],)
     assert kwargs == {"ids": ids, "batch_size": batch_size}

@@ -3,7 +3,7 @@ from pydantic import BaseModel, ConfigDict, Field
 from langchain_core.load import Serializable, dumpd, load
 from langchain_core.load.serializable import _is_field_useful
 from langchain_core.messages import AIMessage
-from langchain_core.outputs import ChatGeneration
+from langchain_core.outputs import ChatGeneration, Generation
 
 
 class NonBoolObj:
@@ -223,3 +223,8 @@ def test_serialization_with_pydantic() -> None:
     assert isinstance(deser, ChatGeneration)
     assert deser.message.content
     assert deser.message.additional_kwargs["parsed"] == my_model.model_dump()
+
+
+def test_serialization_with_generation() -> None:
+    generation = Generation(text="hello-world")
+    assert dumpd(generation)["kwargs"] == {"text": "hello-world", "type": "Generation"}

@@ -21,6 +21,7 @@ from langchain_core.messages.utils import (
    convert_to_openai_messages,
    count_tokens_approximately,
    filter_messages,
+    get_buffer_string,
    merge_message_runs,
    trim_messages,
)

@@ -1395,3 +1396,64 @@ def test_count_tokens_approximately_mixed_content_types() -> None:

    # Ensure that the count is consistent if we do one message at a time
    assert sum(count_tokens_approximately([m]) for m in messages) == token_count
+
+
+def test_get_buffer_string_with_structured_content() -> None:
+    """Test get_buffer_string with structured content in messages."""
+    messages = [
+        HumanMessage(content=[{"type": "text", "text": "Hello, world!"}]),
+        AIMessage(content=[{"type": "text", "text": "Hi there!"}]),
+        SystemMessage(content=[{"type": "text", "text": "System message"}]),
+    ]
+    expected = "Human: Hello, world!\nAI: Hi there!\nSystem: System message"
+    actual = get_buffer_string(messages)
+    assert actual == expected
+
+
+def test_get_buffer_string_with_mixed_content() -> None:
+    """Test get_buffer_string with mixed content types in messages."""
+    messages = [
+        HumanMessage(content="Simple text"),
+        AIMessage(content=[{"type": "text", "text": "Structured text"}]),
+        SystemMessage(content=[{"type": "text", "text": "Another structured text"}]),
+    ]
+    expected = (
+        "Human: Simple text\nAI: Structured text\nSystem: Another structured text"
+    )
+    actual = get_buffer_string(messages)
+    assert actual == expected
+
+
+def test_get_buffer_string_with_function_call() -> None:
+    """Test get_buffer_string with a function call in additional_kwargs."""
+    messages = [
+        HumanMessage(content="Hello"),
+        AIMessage(
+            content="Hi",
+            additional_kwargs={
+                "function_call": {
+                    "name": "test_function",
+                    "arguments": '{"arg": "value"}',
+                }
+            },
+        ),
+    ]
+    # TODO: consider changing this
+    expected = (
+        "Human: Hello\n"
+        "AI: Hi{'name': 'test_function', 'arguments': '{\"arg\": \"value\"}'}"
+    )
+    actual = get_buffer_string(messages)
+    assert actual == expected
+
+
+def test_get_buffer_string_with_empty_content() -> None:
+    """Test get_buffer_string with empty content in messages."""
+    messages = [
+        HumanMessage(content=[]),
+        AIMessage(content=""),
+        SystemMessage(content=[]),
+    ]
+    expected = "Human: \nAI: \nSystem: "
+    actual = get_buffer_string(messages)
+    assert actual == expected
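Taken together, the new tests pin down how get_buffer_string flattens both plain-string and structured (content-block) messages into a single transcript. A short usage sketch of the behavior asserted above:

from langchain_core.messages import AIMessage, HumanMessage
from langchain_core.messages.utils import get_buffer_string

messages = [
    HumanMessage(content=[{"type": "text", "text": "Hello, world!"}]),
    AIMessage(content="Hi there!"),
]
print(get_buffer_string(messages))
# Human: Hello, world!
# AI: Hi there!

# The role labels are configurable via keyword arguments.
print(get_buffer_string(messages, human_prefix="User", ai_prefix="Assistant"))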
@@ -16,10 +16,6 @@ from langchain_core.output_parsers.openai_tools import (
    PydanticToolsParser,
)
from langchain_core.outputs import ChatGeneration
-from langchain_core.utils.pydantic import (
-    IS_PYDANTIC_V1,
-    IS_PYDANTIC_V2,
-)

STREAMED_MESSAGES: list = [
    AIMessageChunk(content=""),

@@ -532,7 +528,6 @@ async def test_partial_pydantic_output_parser_async() -> None:
    assert actual == EXPECTED_STREAMED_PYDANTIC


-@pytest.mark.skipif(not IS_PYDANTIC_V2, reason="This test is for pydantic 2")
def test_parse_with_different_pydantic_2_v1() -> None:
    """Test with pydantic.v1.BaseModel from pydantic 2."""
    import pydantic

@@ -567,7 +562,6 @@ def test_parse_with_different_pydantic_2_v1() -> None:
    ]


-@pytest.mark.skipif(not IS_PYDANTIC_V2, reason="This test is for pydantic 2")
def test_parse_with_different_pydantic_2_proper() -> None:
    """Test with pydantic.BaseModel from pydantic 2."""
    import pydantic

@@ -602,41 +596,6 @@ def test_parse_with_different_pydantic_2_proper() -> None:
    ]


-@pytest.mark.skipif(not IS_PYDANTIC_V1, reason="This test is for pydantic 1")
-def test_parse_with_different_pydantic_1_proper() -> None:
-    """Test with pydantic.BaseModel from pydantic 1."""
-    import pydantic
-
-    class Forecast(pydantic.BaseModel):
-        temperature: int
-        forecast: str
-
-    # Can't get pydantic to work here due to the odd typing of trying to support
-    # both v1 and v2 in the same codebase.
-    parser = PydanticToolsParser(tools=[Forecast])
-    message = AIMessage(
-        content="",
-        tool_calls=[
-            {
-                "id": "call_OwL7f5PE",
-                "name": "Forecast",
-                "args": {"temperature": 20, "forecast": "Sunny"},
-            }
-        ],
-    )
-
-    generation = ChatGeneration(
-        message=message,
-    )
-
-    assert parser.parse_result([generation]) == [
-        Forecast(
-            temperature=20,
-            forecast="Sunny",
-        )
-    ]
-
-
def test_max_tokens_error(caplog: Any) -> None:
    parser = PydanticToolsParser(tools=[NameCollector], first_tool_only=True)
    message = AIMessage(
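The deleted pydantic-v1 variant remains covered by the v2 tests above; the same flow on current pydantic looks roughly like this (the tool-call id is a made-up placeholder):

from pydantic import BaseModel

from langchain_core.messages import AIMessage
from langchain_core.output_parsers.openai_tools import PydanticToolsParser
from langchain_core.outputs import ChatGeneration


class Forecast(BaseModel):
    temperature: int
    forecast: str


parser = PydanticToolsParser(tools=[Forecast])
message = AIMessage(
    content="",
    tool_calls=[
        {
            "id": "call_1",  # placeholder id
            "name": "Forecast",
            "args": {"temperature": 20, "forecast": "Sunny"},
        }
    ],
)
# Tool-call arguments are validated into Forecast instances.
assert parser.parse_result([ChatGeneration(message=message)]) == [
    Forecast(temperature=20, forecast="Sunny")
]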
@@ -702,6 +702,8 @@
          }

+         .. versionadded:: 0.3.9
+
          May also hold extra provider-specific keys.
        ''',
        'properties': dict({
          'audio': dict({

@@ -2132,6 +2134,8 @@
          }

+         .. versionadded:: 0.3.9
+
          May also hold extra provider-specific keys.
        ''',
        'properties': dict({
          'audio': dict({

@@ -1105,6 +1105,8 @@
          }

+         .. versionadded:: 0.3.9
+
          May also hold extra provider-specific keys.
        ''',
        'properties': dict({
          'audio': dict({

@@ -2650,6 +2650,8 @@
          }

+         .. versionadded:: 0.3.9
+
          May also hold extra provider-specific keys.
        ''',
        'properties': dict({
          'audio': dict({

@@ -4124,6 +4126,8 @@
          }

+         .. versionadded:: 0.3.9
+
          May also hold extra provider-specific keys.
        ''',
        'properties': dict({
          'audio': dict({

@@ -5629,6 +5633,8 @@
          }

+         .. versionadded:: 0.3.9
+
          May also hold extra provider-specific keys.
        ''',
        'properties': dict({
          'audio': dict({

@@ -7009,6 +7015,8 @@
          }

+         .. versionadded:: 0.3.9
+
          May also hold extra provider-specific keys.
        ''',
        'properties': dict({
          'audio': dict({

@@ -8525,6 +8533,8 @@
          }

+         .. versionadded:: 0.3.9
+
          May also hold extra provider-specific keys.
        ''',
        'properties': dict({
          'audio': dict({

@@ -9950,6 +9960,8 @@
          }

+         .. versionadded:: 0.3.9
+
          May also hold extra provider-specific keys.
        ''',
        'properties': dict({
          'audio': dict({

@@ -11374,6 +11386,8 @@
          }

+         .. versionadded:: 0.3.9
+
          May also hold extra provider-specific keys.
        ''',
        'properties': dict({
          'audio': dict({

@@ -12840,6 +12854,8 @@
          }

+         .. versionadded:: 0.3.9
+
          May also hold extra provider-specific keys.
        ''',
        'properties': dict({
          'audio': dict({
@@ -553,7 +553,7 @@ def test_using_custom_config_specs() -> None:
        return store[(user_id, conversation_id)]

    with_message_history = RunnableWithMessageHistory(
-        runnable,  # type: ignore[arg-type]
+        runnable,
        get_session_history=get_session_history,
        input_messages_key="messages",
        history_messages_key="history",

@@ -666,7 +666,7 @@ async def test_using_custom_config_specs_async() -> None:
        return store[(user_id, conversation_id)]

    with_message_history = RunnableWithMessageHistory(
-        runnable,  # type: ignore[arg-type]
+        runnable,
        get_session_history=get_session_history,
        input_messages_key="messages",
        history_messages_key="history",

@@ -769,13 +769,13 @@ def test_ignore_session_id() -> None:

    runnable = RunnableLambda(_fake_llm)
    history = InMemoryChatMessageHistory()
-    with_message_history = RunnableWithMessageHistory(runnable, lambda: history)  # type: ignore[arg-type]
+    with_message_history = RunnableWithMessageHistory(runnable, lambda: history)
    _ = with_message_history.invoke("hello")
    _ = with_message_history.invoke("hello again")
    assert len(history.messages) == 4


-class _RunnableLambdaWithRaiseError(RunnableLambda):
+class _RunnableLambdaWithRaiseError(RunnableLambda[Input, Output]):
    from langchain_core.tracers.root_listeners import AsyncListener

    def with_listeners(

@@ -861,7 +861,7 @@ def test_get_output_messages_with_value_error() -> None:
    runnable = _RunnableLambdaWithRaiseError(lambda _: illegal_bool_message)
    store: dict = {}
    get_session_history = _get_get_session_history(store=store)
-    with_history = RunnableWithMessageHistory(runnable, get_session_history)
+    with_history = RunnableWithMessageHistory(runnable, get_session_history)  # type: ignore[arg-type]
    config: RunnableConfig = {
        "configurable": {"session_id": "1", "message_history": get_session_history("1")}
    }

@@ -876,8 +876,8 @@ def test_get_output_messages_with_value_error() -> None:
        with_history.bound.invoke([HumanMessage(content="hello")], config)

    illegal_int_message = 123
-    runnable = _RunnableLambdaWithRaiseError(lambda _: illegal_int_message)
-    with_history = RunnableWithMessageHistory(runnable, get_session_history)
+    runnable2 = _RunnableLambdaWithRaiseError(lambda _: illegal_int_message)
+    with_history = RunnableWithMessageHistory(runnable2, get_session_history)  # type: ignore[arg-type]

    with pytest.raises(
        ValueError,

@@ -26,7 +26,6 @@ from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.retrievers import BaseRetriever
from langchain_core.runnables import (
-    ConfigurableField,
    Runnable,
    RunnableConfig,
    RunnableLambda,
)

@@ -1935,7 +1934,7 @@ async def test_runnable_with_message_history() -> None:
    )
    model = GenericFakeChatModel(messages=infinite_cycle)

-    chain: Runnable = prompt | model
+    chain = prompt | model
    with_message_history = RunnableWithMessageHistory(
        chain,
        get_session_history=get_by_session_id,

@@ -1890,7 +1890,7 @@ async def test_runnable_with_message_history() -> None:
    )
    model = GenericFakeChatModel(messages=infinite_cycle)

-    chain: Runnable = prompt | model
+    chain = prompt | model
    with_message_history = RunnableWithMessageHistory(
        chain,
        get_session_history=get_by_session_id,
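As background for these hunks, test_ignore_session_id exercises the simplest RunnableWithMessageHistory setup, where one history object is returned regardless of session. A runnable sketch of that pattern, mirroring the test above:

from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.messages import AIMessage
from langchain_core.runnables import RunnableLambda
from langchain_core.runnables.history import RunnableWithMessageHistory

history = InMemoryChatMessageHistory()
runnable = RunnableLambda(lambda _: AIMessage(content="ok"))

# No history_factory_config, so no session id is required on invoke().
chain = RunnableWithMessageHistory(runnable, lambda: history)
chain.invoke("hello")
chain.invoke("hello again")
assert len(history.messages) == 4  # two human inputs plus two AI replies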
@@ -65,8 +65,6 @@ from langchain_core.utils.function_calling import (
    convert_to_openai_tool,
)
from langchain_core.utils.pydantic import (
-    IS_PYDANTIC_V1,
-    IS_PYDANTIC_V2,
    _create_subset_model,
    create_model_v2,
)

@@ -79,9 +77,11 @@ def _get_tool_call_json_schema(tool: BaseTool) -> dict:
    if isinstance(tool_schema, dict):
        return tool_schema

-    if hasattr(tool_schema, "model_json_schema"):
+    if issubclass(tool_schema, BaseModel):
        return tool_schema.model_json_schema()
-    return tool_schema.schema()
+    if issubclass(tool_schema, BaseModelV1):
+        return tool_schema.schema()
+    return {}


def test_unnamed_decorator() -> None:

@@ -1853,11 +1853,14 @@ def test_args_schema_as_pydantic(pydantic_model: Any) -> None:
    )

    input_schema = tool.get_input_schema()
-    input_json_schema = (
-        input_schema.model_json_schema()
-        if hasattr(input_schema, "model_json_schema")
-        else input_schema.schema()
-    )
+    if issubclass(input_schema, BaseModel):
+        input_json_schema = input_schema.model_json_schema()
+    elif issubclass(input_schema, BaseModelV1):
+        input_json_schema = input_schema.schema()
+    else:
+        msg = "Unknown input schema type"
+        raise TypeError(msg)

    assert input_json_schema == {
        "properties": {
            "a": {"title": "A", "type": "integer"},

@@ -1943,12 +1946,14 @@ def test_structured_tool_with_different_pydantic_versions(pydantic_model: Any) -

    assert foo_tool.invoke({"a": 5, "b": "hello"}) == "foo"

-    args_schema = cast("BaseModel", foo_tool.args_schema)
-    args_json_schema = (
-        args_schema.model_json_schema()
-        if hasattr(args_schema, "model_json_schema")
-        else args_schema.schema()
-    )
+    args_schema = cast("type[BaseModel]", foo_tool.args_schema)
+    if issubclass(args_schema, BaseModel):
+        args_json_schema = args_schema.model_json_schema()
+    elif issubclass(args_schema, BaseModelV1):
+        args_json_schema = args_schema.schema()
+    else:
+        msg = "Unknown input schema type"
+        raise TypeError(msg)
    assert args_json_schema == {
        "properties": {
            "a": {"title": "A", "type": "integer"},

@@ -1960,11 +1965,13 @@ def test_structured_tool_with_different_pydantic_versions(pydantic_model: Any) -
    }

    input_schema = foo_tool.get_input_schema()
-    input_json_schema = (
-        input_schema.model_json_schema()
-        if hasattr(input_schema, "model_json_schema")
-        else input_schema.schema()
-    )
+    if issubclass(input_schema, BaseModel):
+        input_json_schema = input_schema.model_json_schema()
+    elif issubclass(input_schema, BaseModelV1):
+        input_json_schema = input_schema.schema()
+    else:
+        msg = "Unknown input schema type"
+        raise TypeError(msg)
    assert input_json_schema == {
        "properties": {
            "a": {"title": "A", "type": "integer"},

@@ -2020,7 +2027,6 @@ def test__is_message_content_type(obj: Any, *, expected: bool) -> None:
    assert _is_message_content_type(obj) is expected


-@pytest.mark.skipif(not IS_PYDANTIC_V2, reason="Testing pydantic v2.")
@pytest.mark.parametrize("use_v1_namespace", [True, False])
def test__get_all_basemodel_annotations_v2(*, use_v1_namespace: bool) -> None:
    A = TypeVar("A")

@@ -2089,63 +2095,6 @@ def test__get_all_basemodel_annotations_v2(*, use_v1_namespace: bool) -> None:
    assert actual == expected


-@pytest.mark.skipif(not IS_PYDANTIC_V1, reason="Testing pydantic v1.")
-def test__get_all_basemodel_annotations_v1() -> None:
-    A = TypeVar("A")
-
-    class ModelA(BaseModel, Generic[A], extra="allow"):
-        a: A
-
-    class ModelB(ModelA[str]):
-        b: Annotated[ModelA[dict[str, Any]], "foo"]
-
-    class Mixin:
-        def foo(self) -> str:
-            return "foo"
-
-    class ModelC(Mixin, ModelB):
-        c: dict
-
-    expected = {"a": str, "b": Annotated[ModelA[dict[str, Any]], "foo"], "c": dict}
-    actual = get_all_basemodel_annotations(ModelC)
-    assert actual == expected
-
-    expected = {"a": str, "b": Annotated[ModelA[dict[str, Any]], "foo"]}
-    actual = get_all_basemodel_annotations(ModelB)
-    assert actual == expected
-
-    expected = {"a": Any}
-    actual = get_all_basemodel_annotations(ModelA)
-    assert actual == expected
-
-    expected = {"a": int}
-    actual = get_all_basemodel_annotations(ModelA[int])
-    assert actual == expected
-
-    D = TypeVar("D", bound=Union[str, int])
-
-    class ModelD(ModelC, Generic[D]):
-        d: Optional[D]
-
-    expected = {
-        "a": str,
-        "b": Annotated[ModelA[dict[str, Any]], "foo"],
-        "c": dict,
-        "d": Union[str, int, None],
-    }
-    actual = get_all_basemodel_annotations(ModelD)
-    assert actual == expected
-
-    expected = {
-        "a": str,
-        "b": Annotated[ModelA[dict[str, Any]], "foo"],
-        "c": dict,
-        "d": Union[int, None],
-    }
-    actual = get_all_basemodel_annotations(ModelD[int])
-    assert actual == expected
-
-
def test_get_all_basemodel_annotations_aliases() -> None:
    class CalculatorInput(BaseModel):
        a: int = Field(description="first number", alias="A")

@@ -2226,7 +2175,6 @@ def test_create_retriever_tool() -> None:
    )


-@pytest.mark.skipif(not IS_PYDANTIC_V2, reason="Testing pydantic v2.")
def test_tool_args_schema_pydantic_v2_with_metadata() -> None:
    from pydantic import BaseModel as BaseModelV2
    from pydantic import Field as FieldV2
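The repeated pattern above swaps a hasattr() probe for explicit issubclass() checks, which static type checkers can narrow. A hypothetical standalone helper, schema_to_json, illustrating the same dispatch (the names and error message mirror the tests, but the helper itself is an assumption, not library API):

from pydantic import BaseModel
from pydantic.v1 import BaseModel as BaseModelV1


def schema_to_json(schema: type) -> dict:
    # issubclass() narrows the type for mypy/pyright, unlike hasattr(),
    # so the .model_json_schema() / .schema() calls need no ignores.
    if issubclass(schema, BaseModel):
        return schema.model_json_schema()
    if issubclass(schema, BaseModelV1):
        return schema.schema()
    msg = "Unknown input schema type"
    raise TypeError(msg)


class Point(BaseModel):
    x: int
    y: int


assert "properties" in schema_to_json(Point)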
@@ -94,60 +94,54 @@ def test_log_lock() -> None:
    tracer.wait_for_futures()


-class LangChainProjectNameTest(unittest.TestCase):
-    """Test that the project name is set correctly for runs."""
-
-    class SetProperTracerProjectTestCase:
-        def __init__(
-            self, test_name: str, envvars: dict[str, str], expected_project_name: str
-        ):
-            self.test_name = test_name
-            self.envvars = envvars
-            self.expected_project_name = expected_project_name
-
-    def test_correct_get_tracer_project(self) -> None:
-        cases = [
-            self.SetProperTracerProjectTestCase(
-                test_name="default to 'default' when no project provided",
-                envvars={},
-                expected_project_name="default",
-            ),
-            self.SetProperTracerProjectTestCase(
-                test_name="use session_name for legacy tracers",
-                envvars={"LANGCHAIN_SESSION": "old_timey_session"},
-                expected_project_name="old_timey_session",
-            ),
-            self.SetProperTracerProjectTestCase(
-                test_name="use LANGCHAIN_PROJECT over SESSION_NAME",
-                envvars={
-                    "LANGCHAIN_SESSION": "old_timey_session",
-                    "LANGCHAIN_PROJECT": "modern_session",
-                },
-                expected_project_name="modern_session",
-            ),
-        ]
-
-        for case in cases:
-            get_env_var.cache_clear()
-            get_tracer_project.cache_clear()
-            with self.subTest(msg=case.test_name), pytest.MonkeyPatch.context() as mp:
-                for k, v in case.envvars.items():
-                    mp.setenv(k, v)
-
-                client = unittest.mock.MagicMock(spec=Client)
-                tracer = LangChainTracer(client=client)
-                projects = []
-
-                def mock_create_run(**kwargs: Any) -> Any:
-                    projects.append(kwargs.get("session_name"))  # noqa: B023
-                    return unittest.mock.MagicMock()
-
-                client.create_run = mock_create_run
-
-                tracer.on_llm_start(
-                    {"name": "example_1"},
-                    ["foo"],
-                    run_id=UUID("9d878ab3-e5ca-4218-aef6-44cbdc90160a"),
-                )
-                tracer.wait_for_futures()
-                assert projects == [case.expected_project_name]
+@pytest.mark.parametrize(
+    ("envvars", "expected_project_name"),
+    [
+        (
+            {},
+            "default",
+        ),
+        (
+            {"LANGCHAIN_SESSION": "old_timey_session"},
+            "old_timey_session",
+        ),
+        (
+            {
+                "LANGCHAIN_SESSION": "old_timey_session",
+                "LANGCHAIN_PROJECT": "modern_session",
+            },
+            "modern_session",
+        ),
+    ],
+    ids=[
+        "default to 'default' when no project provided",
+        "use session_name for legacy tracers",
+        "use LANGCHAIN_PROJECT over SESSION_NAME",
+    ],
+)
+def test_correct_get_tracer_project(
+    envvars: dict[str, str], expected_project_name: str
+) -> None:
+    get_env_var.cache_clear()
+    get_tracer_project.cache_clear()
+    with pytest.MonkeyPatch.context() as mp:
+        for k, v in envvars.items():
+            mp.setenv(k, v)
+
+        client = unittest.mock.MagicMock(spec=Client)
+        tracer = LangChainTracer(client=client)
+        projects = []
+
+        def mock_create_run(**kwargs: Any) -> Any:
+            projects.append(kwargs.get("session_name"))
+            return unittest.mock.MagicMock()
+
+        client.create_run = mock_create_run
+
+        tracer.on_llm_start(
+            {"name": "example_1"},
+            ["foo"],
+            run_id=UUID("9d878ab3-e5ca-4218-aef6-44cbdc90160a"),
+        )
+        tracer.wait_for_futures()
+        assert projects == [expected_project_name]
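The rewrite above replaces a unittest-style class with subTest loops by pytest parametrization. The general shape of that migration, sketched on a made-up MY_PROJECT environment variable:

import os

import pytest


@pytest.mark.parametrize(
    ("envvars", "expected"),
    [
        ({}, "default"),
        ({"MY_PROJECT": "custom"}, "custom"),
    ],
    ids=["defaults when unset", "honors the env var"],
)
def test_project_name(
    monkeypatch: pytest.MonkeyPatch, envvars: dict[str, str], expected: str
) -> None:
    # Each tuple runs as an independently named test case, replacing
    # the hand-rolled loop over unittest subTests.
    monkeypatch.delenv("MY_PROJECT", raising=False)  # MY_PROJECT is hypothetical
    for k, v in envvars.items():
        monkeypatch.setenv(k, v)
    assert os.environ.get("MY_PROJECT", "default") == expected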