Mirror of https://github.com/hwchase17/langchain.git (synced 2026-02-19 13:25:35 +00:00)

Comparing branches: langchain-... → cc/fix_mil (29 commits)
Commits in this range (author and date columns were not captured by the mirror view):

2ded43fe05
50484be330
9b82707ea6
505a2e8743
677408bfc9
883e90d06e
2b08e9e265
ae4c0ed25a
a34e650f8b
1007a715a5
ca798bc6ea
4fe8403bfb
fe4f10047b
a3bae56a48
a70b7a688e
0c2ebe5f47
3d54784e6d
9ab7a6df39
6b46b5e9ce
109a70fc64
86ee4f0daa
93d0ad97fe
3dfd055411
90559fde70
e8a8286012
2ae718796e
74749c909d
cf38981bb7
b483bf5095
@@ -71,13 +71,13 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"from langchain_openai import ChatOpenAI\n",
"\n",
"chat = ChatOpenAI(model=\"gpt-3.5-turbo-1106\")"
"chat = ChatOpenAI(model=\"gpt-3.5-turbo-0125\")"
]
},
{
@@ -95,19 +95,15 @@
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='I said \"J\\'adore la programmation,\" which means \"I love programming\" in French.')"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
"name": "stdout",
"output_type": "stream",
"text": [
"I said \"J'adore la programmation,\" which means \"I love programming\" in French.\n"
]
}
],
"source": [
"from langchain_core.messages import AIMessage, HumanMessage\n",
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
@@ -115,23 +111,25 @@
" \"system\",\n",
" \"You are a helpful assistant. Answer all questions to the best of your ability.\",\n",
" ),\n",
" MessagesPlaceholder(variable_name=\"messages\"),\n",
" (\"placeholder\", \"{messages}\"),\n",
" ]\n",
")\n",
"\n",
"chain = prompt | chat\n",
"\n",
"chain.invoke(\n",
"ai_msg = chain.invoke(\n",
" {\n",
" \"messages\": [\n",
" HumanMessage(\n",
" content=\"Translate this sentence from English to French: I love programming.\"\n",
" (\n",
" \"human\",\n",
" \"Translate this sentence from English to French: I love programming.\",\n",
" ),\n",
" AIMessage(content=\"J'adore la programmation.\"),\n",
" HumanMessage(content=\"What did you just say?\"),\n",
" (\"ai\", \"J'adore la programmation.\"),\n",
" (\"human\", \"What did you just say?\"),\n",
" ],\n",
" }\n",
")"
")\n",
"print(ai_msg.content)"
]
},
{
@@ -193,7 +191,7 @@
{
"data": {
"text/plain": [
"AIMessage(content='You asked me to translate the sentence \"I love programming\" from English to French.')"
"AIMessage(content='You just asked me to translate the sentence \"I love programming\" from English to French.', response_metadata={'token_usage': {'completion_tokens': 18, 'prompt_tokens': 61, 'total_tokens': 79}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5cbb21c2-9c30-4031-8ea8-bfc497989535-0', usage_metadata={'input_tokens': 61, 'output_tokens': 18, 'total_tokens': 79})"
]
},
"execution_count": 5,
@@ -250,7 +248,7 @@
" \"system\",\n",
" \"You are a helpful assistant. Answer all questions to the best of your ability.\",\n",
" ),\n",
" MessagesPlaceholder(variable_name=\"chat_history\"),\n",
" (\"placeholder\", \"{chat_history}\"),\n",
" (\"human\", \"{input}\"),\n",
" ]\n",
")\n",
@@ -304,10 +302,17 @@
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Parent run dc4e2f79-4bcd-4a36-9506-55ace9040588 not found for run 34b5773e-3ced-46a6-8daf-4d464c15c940. Treating as a root run.\n"
]
},
{
"data": {
"text/plain": [
"AIMessage(content='The translation of \"I love programming\" in French is \"J\\'adore la programmation.\"')"
"AIMessage(content='\"J\\'adore la programmation.\"', response_metadata={'token_usage': {'completion_tokens': 9, 'prompt_tokens': 39, 'total_tokens': 48}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-648b0822-b0bb-47a2-8e7d-7d34744be8f2-0', usage_metadata={'input_tokens': 39, 'output_tokens': 9, 'total_tokens': 48})"
]
},
"execution_count": 8,
@@ -327,10 +332,17 @@
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Parent run cc14b9d8-c59e-40db-a523-d6ab3fc2fa4f not found for run 5b75e25c-131e-46ee-9982-68569db04330. Treating as a root run.\n"
]
},
{
"data": {
"text/plain": [
"AIMessage(content='You just asked me to translate the sentence \"I love programming\" from English to French.')"
"AIMessage(content='You asked me to translate the sentence \"I love programming\" from English to French.', response_metadata={'token_usage': {'completion_tokens': 17, 'prompt_tokens': 63, 'total_tokens': 80}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5950435c-1dc2-43a6-836f-f989fd62c95e-0', usage_metadata={'input_tokens': 63, 'output_tokens': 17, 'total_tokens': 80})"
]
},
"execution_count": 9,
@@ -354,12 +366,12 @@
"\n",
"### Trimming messages\n",
"\n",
"LLMs and chat models have limited context windows, and even if you're not directly hitting limits, you may want to limit the amount of distraction the model has to deal with. One solution is to only load and store the most recent `n` messages. Let's use an example history with some preloaded messages:"
|
||||
"LLMs and chat models have limited context windows, and even if you're not directly hitting limits, you may want to limit the amount of distraction the model has to deal with. One solution is trim the historic messages before passing them to the model. Let's use an example history with some preloaded messages:"
|
||||
]
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 21,
"metadata": {},
"outputs": [
{
@@ -371,7 +383,7 @@
" AIMessage(content='Fine thanks!')]"
]
},
"execution_count": 10,
"execution_count": 21,
"metadata": {},
"output_type": "execute_result"
}
@@ -396,34 +408,28 @@
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": 22,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Parent run 7ff2d8ec-65e2-4f67-8961-e498e2c4a591 not found for run 3881e990-6596-4326-84f6-2b76949e0657. Treating as a root run.\n"
]
},
{
"data": {
"text/plain": [
"AIMessage(content='Your name is Nemo.')"
"AIMessage(content='Your name is Nemo.', response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 66, 'total_tokens': 72}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-f8aabef8-631a-4238-a39b-701e881fbe47-0', usage_metadata={'input_tokens': 66, 'output_tokens': 6, 'total_tokens': 72})"
]
},
"execution_count": 11,
"execution_count": 22,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\n",
" \"system\",\n",
" \"You are a helpful assistant. Answer all questions to the best of your ability.\",\n",
" ),\n",
" MessagesPlaceholder(variable_name=\"chat_history\"),\n",
" (\"human\", \"{input}\"),\n",
" ]\n",
")\n",
"\n",
"chain = prompt | chat\n",
"\n",
"chain_with_message_history = RunnableWithMessageHistory(\n",
" chain,\n",
" lambda session_id: demo_ephemeral_chat_history,\n",
@@ -443,34 +449,33 @@
"source": [
"We can see the chain remembers the preloaded name.\n",
"\n",
"But let's say we have a very small context window, and we want to trim the number of messages passed to the chain to only the 2 most recent ones. We can use the `clear` method to remove messages and re-add them to the history. We don't have to, but let's put this method at the front of our chain to ensure it's always called:"
"But let's say we have a very small context window, and we want to trim the number of messages passed to the chain to only the 2 most recent ones. We can use the built in [trim_messages](/docs/how_to/trim_messages/) util to trim messages based on their token count before they reach our prompt. In this case we'll count each message as 1 \"token\" and keep only the last two messages:"
|
||||
]
},
{
"cell_type": "code",
"execution_count": 12,
"execution_count": 23,
"metadata": {},
"outputs": [],
"source": [
"from operator import itemgetter\n",
"\n",
"from langchain_core.messages import trim_messages\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"\n",
"def trim_messages(chain_input):\n",
" stored_messages = demo_ephemeral_chat_history.messages\n",
" if len(stored_messages) <= 2:\n",
" return False\n",
"\n",
" demo_ephemeral_chat_history.clear()\n",
"\n",
" for message in stored_messages[-2:]:\n",
" demo_ephemeral_chat_history.add_message(message)\n",
"\n",
" return True\n",
"\n",
"trimmer = trim_messages(strategy=\"last\", max_tokens=2, token_counter=len)\n",
"\n",
"chain_with_trimming = (\n",
" RunnablePassthrough.assign(messages_trimmed=trim_messages)\n",
" | chain_with_message_history\n",
" RunnablePassthrough.assign(chat_history=itemgetter(\"chat_history\") | trimmer)\n",
" | prompt\n",
" | chat\n",
")\n",
"\n",
"chain_with_trimmed_history = RunnableWithMessageHistory(\n",
" chain_with_trimming,\n",
" lambda session_id: demo_ephemeral_chat_history,\n",
" input_messages_key=\"input\",\n",
" history_messages_key=\"chat_history\",\n",
")"
]
},
@@ -483,22 +488,29 @@
},
{
"cell_type": "code",
"execution_count": 13,
"execution_count": 24,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Parent run 775cde65-8d22-4c44-80bb-f0b9811c32ca not found for run 5cf71d0e-4663-41cd-8dbe-e9752689cfac. Treating as a root run.\n"
]
},
{
"data": {
"text/plain": [
"AIMessage(content=\"P. Sherman's address is 42 Wallaby Way, Sydney.\")"
"AIMessage(content='P. Sherman is a fictional character from the animated movie \"Finding Nemo\" who lives at 42 Wallaby Way, Sydney.', response_metadata={'token_usage': {'completion_tokens': 27, 'prompt_tokens': 53, 'total_tokens': 80}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5642ef3a-fdbe-43cf-a575-d1785976a1b9-0', usage_metadata={'input_tokens': 53, 'output_tokens': 27, 'total_tokens': 80})"
]
},
"execution_count": 13,
"execution_count": 24,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain_with_trimming.invoke(\n",
"chain_with_trimmed_history.invoke(\n",
" {\"input\": \"Where does P. Sherman live?\"},\n",
" {\"configurable\": {\"session_id\": \"unused\"}},\n",
")"
@@ -506,19 +518,23 @@
},
{
"cell_type": "code",
"execution_count": 14,
"execution_count": 25,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[HumanMessage(content=\"What's my name?\"),\n",
" AIMessage(content='Your name is Nemo.'),\n",
"[HumanMessage(content=\"Hey there! I'm Nemo.\"),\n",
" AIMessage(content='Hello!'),\n",
" HumanMessage(content='How are you today?'),\n",
" AIMessage(content='Fine thanks!'),\n",
" HumanMessage(content=\"What's my name?\"),\n",
" AIMessage(content='Your name is Nemo.', response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 66, 'total_tokens': 72}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-f8aabef8-631a-4238-a39b-701e881fbe47-0', usage_metadata={'input_tokens': 66, 'output_tokens': 6, 'total_tokens': 72}),\n",
" HumanMessage(content='Where does P. Sherman live?'),\n",
" AIMessage(content=\"P. Sherman's address is 42 Wallaby Way, Sydney.\")]"
" AIMessage(content='P. Sherman is a fictional character from the animated movie \"Finding Nemo\" who lives at 42 Wallaby Way, Sydney.', response_metadata={'token_usage': {'completion_tokens': 27, 'prompt_tokens': 53, 'total_tokens': 80}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5642ef3a-fdbe-43cf-a575-d1785976a1b9-0', usage_metadata={'input_tokens': 53, 'output_tokens': 27, 'total_tokens': 80})]"
]
},
"execution_count": 14,
"execution_count": 25,
"metadata": {},
"output_type": "execute_result"
}
@@ -536,48 +552,39 @@
},
{
"cell_type": "code",
"execution_count": 15,
"execution_count": 27,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Parent run fde7123f-6fd3-421a-a3fc-2fb37dead119 not found for run 061a4563-2394-470d-a3ed-9bf1388ca431. Treating as a root run.\n"
]
},
{
"data": {
"text/plain": [
"AIMessage(content=\"I'm sorry, I don't have access to your personal information.\")"
"AIMessage(content=\"I'm sorry, but I don't have access to your personal information, so I don't know your name. How else may I assist you today?\", response_metadata={'token_usage': {'completion_tokens': 31, 'prompt_tokens': 74, 'total_tokens': 105}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-0ab03495-1f7c-4151-9070-56d2d1c565ff-0', usage_metadata={'input_tokens': 74, 'output_tokens': 31, 'total_tokens': 105})"
]
},
"execution_count": 15,
"execution_count": 27,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain_with_trimming.invoke(\n",
"chain_with_trimmed_history.invoke(\n",
" {\"input\": \"What is my name?\"},\n",
" {\"configurable\": {\"session_id\": \"unused\"}},\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 16,
"cell_type": "markdown",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[HumanMessage(content='Where does P. Sherman live?'),\n",
" AIMessage(content=\"P. Sherman's address is 42 Wallaby Way, Sydney.\"),\n",
" HumanMessage(content='What is my name?'),\n",
" AIMessage(content=\"I'm sorry, I don't have access to your personal information.\")]"
]
},
"execution_count": 16,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"demo_ephemeral_chat_history.messages"
"Check out our [how to guide on trimming messages](/docs/how_to/trim_messages/) for more."
]
},
{
@@ -638,7 +645,7 @@
" \"system\",\n",
" \"You are a helpful assistant. Answer all questions to the best of your ability. The provided chat history includes facts about the user you are speaking with.\",\n",
" ),\n",
" MessagesPlaceholder(variable_name=\"chat_history\"),\n",
" (\"placeholder\", \"{chat_history}\"),\n",
" (\"user\", \"{input}\"),\n",
" ]\n",
")\n",
@@ -672,7 +679,7 @@
" return False\n",
" summarization_prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
" MessagesPlaceholder(variable_name=\"chat_history\"),\n",
" (\"placeholder\", \"{chat_history}\"),\n",
" (\n",
" \"user\",\n",
" \"Distill the above chat messages into a single summary message. Include as many specific details as you can.\",\n",
@@ -772,9 +779,9 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 2
"nbformat_minor": 4
}
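The hunks above replace the hand-rolled `clear`-and-re-add helper with the `trim_messages` utility. As a minimal sketch of the pattern the new cells rely on (assuming `langchain-core >= 0.2.9`, where `trim_messages` was added):

```python
# Minimal sketch of the trimming pattern introduced in the diff above.
from langchain_core.messages import AIMessage, HumanMessage, trim_messages

history = [
    HumanMessage(content="Hey there! I'm Nemo."),
    AIMessage(content="Hello!"),
    HumanMessage(content="How are you today?"),
    AIMessage(content="Fine thanks!"),
]

# Count every message as one "token" and keep only the last two messages.
trimmer = trim_messages(strategy="last", max_tokens=2, token_counter=len)
print(trimmer.invoke(history))
# -> [HumanMessage(content='How are you today?'), AIMessage(content='Fine thanks!')]
```

Calling `trim_messages` without a message list returns a runnable, which is why the diff can pipe it into the chain with `itemgetter("chat_history") | trimmer`.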
docs/docs/integrations/chat/databricks.ipynb (new file, 429 lines)
@@ -0,0 +1,429 @@
{
"cells": [
{
"cell_type": "raw",
"metadata": {
"vscode": {
"languageId": "raw"
}
},
"source": [
"---\n",
"sidebar_label: Databricks\n",
"---"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# ChatDatabricks\n",
"\n",
"> [Databricks](https://www.databricks.com/) Lakehouse Platform unifies data, analytics, and AI on one platform. \n",
"\n",
"This notebook provides a quick overview for getting started with Databricks [chat models](/docs/concepts/#chat-models). For detailed documentation of all ChatDatabricks features and configurations head to the [API reference](https://api.python.langchain.com/en/latest/chat_models/langchain_community.chat_models.databricks.ChatDatabricks.html).\n",
"\n",
"## Overview\n",
"\n",
"The `ChatDatabricks` class wraps a chat model endpoint hosted on [Databricks Model Serving](https://docs.databricks.com/en/machine-learning/model-serving/index.html). This example notebook shows how to wrap your serving endpoint and use it as a chat model in your LangChain application.\n",
"\n",
"### Integration details\n",
"\n",
"| Class | Package | Local | Serializable | Package downloads | Package latest |\n",
"| :--- | :--- | :---: | :---: | :---: | :---: |\n",
"| [ChatDatabricks](https://api.python.langchain.com/en/latest/chat_models/langchain_community.chat_models.databricks.ChatDatabricks.html) | [langchain-community](https://api.python.langchain.com/en/latest/community_api_reference.html) | ❌ | beta |  |  |\n",
"\n",
"### Model features\n",
"| [Tool calling](/docs/how_to/tool_calling/) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
"| ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ✅ | ✅ | ✅ | ❌ | \n",
"\n",
"### Supported Methods\n",
"\n",
"`ChatDatabricks` supports all methods of `ChatModel` including async APIs.\n",
"\n",
"\n",
"### Endpoint Requirement\n",
"\n",
"The serving endpoint `ChatDatabricks` wraps must have an OpenAI-compatible chat input/output format ([reference](https://mlflow.org/docs/latest/llms/deployments/index.html#chat)). As long as the input format is compatible, `ChatDatabricks` can be used for any endpoint type hosted on [Databricks Model Serving](https://docs.databricks.com/en/machine-learning/model-serving/index.html):\n",
"\n",
|
||||
"1. Foundation Models - Curated list of state-of-the-art foundation models such as DRBX, Llama3, Mixtral-8x7B, and etc. These endpoint are ready to use in your Databricks workspace without any set up.\n",
|
||||
"2. Custom Models - You can also deploy custom models to a serving endpoint via MLflow with\n",
|
||||
"your choice of framework such as LangChain, Pytorch, Transformers, etc.\n",
|
||||
"3. External Models - Databricks endpoints can serve models that are hosted outside Databricks as a proxy, such as proprietary model service like OpenAI GPT4.\n"
|
||||
]
},
{
"cell_type": "markdown",
"metadata": {
"vscode": {
"languageId": "plaintext"
}
},
"source": [
"## Setup\n",
"\n",
"To access Databricks models you'll need to create a Databricks account, set up credentials (only if you are outside Databricks workspace), and install required packages.\n",
|
||||
"\n",
|
||||
"### Credentials (only if you are outside Databricks)\n",
|
||||
"\n",
|
||||
"If you are running LangChain app inside Databricks, you can skip this step.\n",
|
||||
"\n",
|
||||
"Otherwise, you need manually set the Databricks workspace hostname and personal access token to `DATABRICKS_HOST` and `DATABRICKS_TOKEN` environment variables, respectively. See [Authentication Documentation](https://docs.databricks.com/en/dev-tools/auth/index.html#databricks-personal-access-tokens) for how to get an access token."
|
||||
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Enter your Databricks access token: ········\n"
]
}
],
"source": [
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"DATABRICKS_HOST\"] = \"https://your-workspace.cloud.databricks.com\"\n",
"os.environ[\"DATABRICKS_TOKEN\"] = getpass.getpass(\"Enter your Databricks access token: \")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Installation\n",
"\n",
"The LangChain Databricks integration lives in the `langchain-community` package. Also, `mlflow >= 2.9 ` is required to run the code in this notebook."
|
||||
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain-community mlflow>=2.9.0"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We first demonstrates how to query DBRX-instruct model hosted as Foundation Models endpoint with `ChatDatabricks`.\n",
|
||||
"\n",
|
||||
"For other type of endpoints, there are some difference in how to set up the endpoint itself, however, once the endpoint is ready, there is no difference in how to query it with `ChatDatabricks`. Please refer to the bottom of this notebook for the examples with other type of endpoints."
|
||||
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Instantiation\n"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.chat_models import ChatDatabricks\n",
"\n",
"chat_model = ChatDatabricks(\n",
" endpoint=\"databricks-dbrx-instruct\",\n",
" temperature=0.1,\n",
" max_tokens=256,\n",
" # See https://api.python.langchain.com/en/latest/chat_models/langchain_community.chat_models.databricks.ChatDatabricks.html for other supported parameters\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Invocation"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='MLflow is an open-source platform for managing end-to-end machine learning workflows. It was introduced by Databricks in 2018. MLflow provides tools for tracking experiments, packaging and sharing code, and deploying models. It is designed to work with any machine learning library and can be used in a variety of environments, including local machines, virtual machines, and cloud-based clusters. MLflow aims to streamline the machine learning development lifecycle, making it easier for data scientists and engineers to collaborate and deploy models into production.', response_metadata={'prompt_tokens': 229, 'completion_tokens': 104, 'total_tokens': 333}, id='run-d3fb4d06-3e10-4471-83c9-c282cc62b74d-0')"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chat_model.invoke(\"What is MLflow?\")"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='Databricks Model Serving is a feature of the Databricks platform that allows data scientists and engineers to easily deploy machine learning models into production. With Model Serving, you can host, manage, and serve machine learning models as APIs, making it easy to integrate them into applications and business processes. It supports a variety of popular machine learning frameworks, including TensorFlow, PyTorch, and scikit-learn, and provides tools for monitoring and managing the performance of deployed models. Model Serving is designed to be scalable, secure, and easy to use, making it a great choice for organizations that want to quickly and efficiently deploy machine learning models into production.', response_metadata={'prompt_tokens': 35, 'completion_tokens': 130, 'total_tokens': 165}, id='run-b3feea21-223e-4105-8627-41d647d5ccab-0')"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# You can also pass a list of messages\n",
"messages = [\n",
" (\"system\", \"You are a chatbot that can answer questions about Databricks.\"),\n",
" (\"user\", \"What is Databricks Model Serving?\"),\n",
"]\n",
"chat_model.invoke(messages)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Chaining\n",
"Similar to other chat models, `ChatDatabricks` can be used as a part of a complex chain."
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"Unity Catalog is a new data catalog feature in Databricks that allows you to discover, manage, and govern all your data assets across your data landscape, including data lakes, data warehouses, and data marts. It provides a centralized repository for storing and managing metadata, data lineage, and access controls for all your data assets. Unity Catalog enables data teams to easily discover and access the data they need, while ensuring compliance with data privacy and security regulations. It is designed to work seamlessly with Databricks' Lakehouse platform, providing a unified experience for managing and analyzing all your data.\", response_metadata={'prompt_tokens': 32, 'completion_tokens': 118, 'total_tokens': 150}, id='run-82d72624-f8df-4c0d-a976-919feec09a55-0')"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\n",
" \"system\",\n",
" \"You are a chatbot that can answer questions about {topic}.\",\n",
" ),\n",
" (\"user\", \"{question}\"),\n",
" ]\n",
")\n",
"\n",
"chain = prompt | chat_model\n",
"chain.invoke(\n",
" {\n",
" \"topic\": \"Databricks\",\n",
" \"question\": \"What is Unity Catalog?\",\n",
" }\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Invocation (streaming)\n",
"\n",
"`ChatDatabricks` supports streaming response by `stream` method since `langchain-community>=0.2.1`."
|
||||
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"I|'m| an| AI| and| don|'t| have| feelings|,| but| I|'m| here| and| ready| to| assist| you|.| How| can| I| help| you| today|?||"
]
}
],
"source": [
"for chunk in chat_model.stream(\"How are you?\"):\n",
" print(chunk.content, end=\"|\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Async Invocation"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import asyncio\n",
"\n",
"country = [\"Japan\", \"Italy\", \"Australia\"]\n",
"futures = [chat_model.ainvoke(f\"Where is the capital of {c}?\") for c in country]\n",
"await asyncio.gather(*futures)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Wrapping Custom Model Endpoint\n",
"\n",
"Prerequisites:\n",
"\n",
"* An LLM was registered and deployed to [a Databricks serving endpoint](https://docs.databricks.com/machine-learning/model-serving/index.html) via MLflow. The endpoint must have OpenAI-compatible chat input/output format ([reference](https://mlflow.org/docs/latest/llms/deployments/index.html#chat))\n",
"* You have [\"Can Query\" permission](https://docs.databricks.com/security/auth-authz/access-control/serving-endpoint-acl.html) to the endpoint.\n",
"\n",
"Once the endpoint is ready, the usage pattern is completely same as Foundation Models."
|
||||
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"chat_model_custom = ChatDatabricks(\n",
" endpoint=\"YOUR_ENDPOINT_NAME\",\n",
" temperature=0.1,\n",
" max_tokens=256,\n",
")\n",
"\n",
"chat_model_custom.invoke(\"How are you?\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Wrapping External Models"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Prerequisite: Create Proxy Endpoint\n",
"\n",
"First, create a new Databricks serving endpoint that proxies requests to the target external model. The endpoint creation should be fairy quick for proxying external models.\n",
|
||||
"\n",
|
||||
"This requires registering OpenAI API Key in Databricks secret manager with the following comment:\n",
|
||||
"```sh\n",
|
||||
"# Replace `<scope>` with your scope\n",
|
||||
"databricks secrets create-scope <scope>\n",
|
||||
"databricks secrets put-secret <scope> openai-api-key --string-value $OPENAI_API_KEY\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"For how to set up Databricks CLI and manage secrets, please refer to https://docs.databricks.com/en/security/secrets/secrets.html"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from mlflow.deployments import get_deploy_client\n",
|
||||
"\n",
|
||||
"client = get_deploy_client(\"databricks\")\n",
|
||||
"\n",
|
||||
"secret = \"secrets/<scope>/openai-api-key\" # replace `<scope>` with your scope\n",
|
||||
"endpoint_name = \"my-chat\" # rename this if my-chat already exists\n",
|
||||
"client.create_endpoint(\n",
|
||||
" name=endpoint_name,\n",
|
||||
" config={\n",
|
||||
" \"served_entities\": [\n",
|
||||
" {\n",
|
||||
" \"name\": \"my-chat\",\n",
|
||||
" \"external_model\": {\n",
|
||||
" \"name\": \"gpt-3.5-turbo\",\n",
|
||||
" \"provider\": \"openai\",\n",
|
||||
" \"task\": \"llm/v1/chat\",\n",
|
||||
" \"openai_config\": {\n",
|
||||
" \"openai_api_key\": \"{{\" + secret + \"}}\",\n",
|
||||
" },\n",
|
||||
" },\n",
|
||||
" }\n",
|
||||
" ],\n",
|
||||
" },\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Once the endpoint status has become \"Ready\", you can query the endpoint in the same way as other types of endpoints."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chat_model_external = ChatDatabricks(\n",
|
||||
" endpoint=endpoint_name,\n",
|
||||
" temperature=0.1,\n",
|
||||
" max_tokens=256,\n",
|
||||
")\n",
|
||||
"chat_model_external.invoke(\"How to use Databricks?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all ChatDatabricks features and configurations head to the API reference: https://api.python.langchain.com/en/latest/chat_models/langchain_community.chat_models.ChatDatabricks.html"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.12"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
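The "Endpoint Requirement" section of this new notebook hinges on the endpoint speaking the OpenAI-compatible chat format. As a hedged illustration (field names follow the OpenAI chat completions schema that MLflow's `llm/v1/chat` task mirrors; the concrete values here are made up), the wrapped endpoint exchanges payloads shaped like this:

```python
# Sketch of the request/response shape an OpenAI-compatible chat endpoint
# exchanges; the values are illustrative, not real output.
request = {
    "messages": [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "What is MLflow?"},
    ],
    "temperature": 0.1,
    "max_tokens": 256,
}

response = {
    "choices": [
        {
            "index": 0,
            "message": {"role": "assistant", "content": "MLflow is ..."},
            "finish_reason": "stop",
        }
    ],
    "usage": {"prompt_tokens": 20, "completion_tokens": 12, "total_tokens": 32},
}
```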
@@ -7,141 +7,112 @@
"source": [
"# Databricks\n",
"\n",
"The [Databricks](https://www.databricks.com/) Lakehouse Platform unifies data, analytics, and AI on one platform.\n",
"> [Databricks](https://www.databricks.com/) Lakehouse Platform unifies data, analytics, and AI on one platform.\n",
"\n",
"This example notebook shows how to wrap Databricks endpoints as LLMs in LangChain.\n",
"It supports two endpoint types:\n",
"\n",
"* Serving endpoint, recommended for production and development,\n",
"* Cluster driver proxy app, recommended for interactive development."
"This notebook provides a quick overview for getting started with Databricks [LLM models](https://python.langchain.com/v0.2/docs/concepts/#llms). For detailed documentation of all features and configurations head to the [API reference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.databricks.Databricks.html).\n",
"\n",
"## Overview\n",
"\n",
"`Databricks` LLM class wraps a completion endpoint hosted as either of these two endpoint types:\n",
"\n",
"* [Databricks Model Serving](https://docs.databricks.com/en/machine-learning/model-serving/index.html), recommended for production and development,\n",
"* Cluster driver proxy app, recommended for interactive development.\n",
"\n",
"This example notebook shows how to wrap your LLM endpoint and use it as an LLM in your LangChain application."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Installation\n",
"## Limitations\n",
"\n",
"`mlflow >= 2.9 ` is required to run the code in this notebook. If it's not installed, please install it using this command:\n",
"The `Databricks` LLM class is *legacy* implementation and has several limitations in the feature compatibility.\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"pip install mlflow>=2.9\n",
|
||||
"```\n",
|
||||
"* Only supports synchronous invocation. Streaming or async APIs are not supported.\n",
|
||||
"* `batch` API is not supported.\n",
|
||||
"\n",
|
||||
"Also, we need `dbutils` for this example.\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"pip install dbutils\n",
|
||||
"```\n"
|
||||
"To use those features, please use the new [ChatDatabricks](https://python.langchain.com/v0.2/docs/integrations/chat/databricks) class instead. `ChatDatabricks` supports all APIs of `ChatModel` including streaming, async, batch, etc.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Wrapping a serving endpoint: External model\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"Prerequisite: Register an OpenAI API key as a secret:\n",
|
||||
"To access Databricks models you'll need to create a Databricks account, set up credentials (only if you are outside Databricks workspace), and install required packages.\n",
|
||||
"\n",
|
||||
" ```bash\n",
|
||||
" databricks secrets create-scope <scope>\n",
|
||||
" databricks secrets put-secret <scope> openai-api-key --string-value $OPENAI_API_KEY\n",
|
||||
" ```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The following code creates a new serving endpoint with OpenAI's GPT-4 model for chat and generates a response using the endpoint."
|
||||
"### Credentials (only if you are outside Databricks)\n",
|
||||
"\n",
|
||||
"If you are running LangChain app inside Databricks, you can skip this step.\n",
|
||||
"\n",
|
||||
"Otherwise, you need manually set the Databricks workspace hostname and personal access token to `DATABRICKS_HOST` and `DATABRICKS_TOKEN` environment variables, respectively. See [Authentication Documentation](https://docs.databricks.com/en/dev-tools/auth/index.html#databricks-personal-access-tokens) for how to get an access token."
|
||||
]
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"content='Hello! How can I assist you today?'\n"
]
}
],
"outputs": [],
"source": [
"from langchain_community.chat_models import ChatDatabricks\n",
"from langchain_core.messages import HumanMessage\n",
"from mlflow.deployments import get_deploy_client\n",
"import getpass\n",
"import os\n",
"\n",
"client = get_deploy_client(\"databricks\")\n",
"\n",
"secret = \"secrets/<scope>/openai-api-key\" # replace `<scope>` with your scope\n",
"name = \"my-chat\" # rename this if my-chat already exists\n",
"client.create_endpoint(\n",
" name=name,\n",
" config={\n",
" \"served_entities\": [\n",
" {\n",
" \"name\": \"my-chat\",\n",
" \"external_model\": {\n",
" \"name\": \"gpt-4\",\n",
" \"provider\": \"openai\",\n",
" \"task\": \"llm/v1/chat\",\n",
" \"openai_config\": {\n",
" \"openai_api_key\": \"{{\" + secret + \"}}\",\n",
" },\n",
" },\n",
" }\n",
" ],\n",
" },\n",
")\n",
"\n",
"chat = ChatDatabricks(\n",
" target_uri=\"databricks\",\n",
" endpoint=name,\n",
" temperature=0.1,\n",
")\n",
"chat([HumanMessage(content=\"hello\")])"
"os.environ[\"DATABRICKS_HOST\"] = \"https://your-workspace.cloud.databricks.com\"\n",
"os.environ[\"DATABRICKS_TOKEN\"] = getpass.getpass(\"Enter your Databricks access token: \")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Wrapping a serving endpoint: Foundation model\n",
"\n",
"The following code uses the `databricks-bge-large-en` serving endpoint (no endpoint creation is required) to generate embeddings from input text."
"Alternatively, you can pass those parameters when initializing the `Databricks` class."
]
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[0.051055908203125, 0.007221221923828125, 0.003879547119140625]\n"
]
}
],
"outputs": [],
"source": [
"from langchain_community.embeddings import DatabricksEmbeddings\n",
"from langchain_community.llms import Databricks\n",
"\n",
"embeddings = DatabricksEmbeddings(endpoint=\"databricks-bge-large-en\")\n",
"embeddings.embed_query(\"hello\")[:3]"
"databricks = Databricks(\n",
" host=\"https://your-workspace.cloud.databricks.com\",\n",
" # We strongly recommend NOT to hardcode your access token in your code, instead use secret management tools\n",
" # or environment variables to store your access token securely. The following example uses Databricks Secrets\n",
" # to retrieve the access token that is available within the Databricks notebook.\n",
" token=dbutils.secrets.get(scope=\"YOUR_SECRET_SCOPE\", key=\"databricks-token\"), # noqa: F821\n",
")"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"## Wrapping a serving endpoint: Custom model\n",
"### Installation\n",
"\n",
"Prerequisites:\n",
"The LangChain Databricks integration lives in the `langchain-community` package. Also, `mlflow >= 2.9 ` is required to run the code in this notebook."
|
||||
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain-community mlflow>=2.9.0"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Wrapping Model Serving Endpoint\n",
"\n",
"### Prerequisites:\n",
"\n",
"* An LLM was registered and deployed to [a Databricks serving endpoint](https://docs.databricks.com/machine-learning/model-serving/index.html).\n",
"* You have [\"Can Query\" permission](https://docs.databricks.com/security/auth-authz/access-control/serving-endpoint-acl.html) to the endpoint.\n",
@@ -149,9 +120,14 @@
"The expected MLflow model signature is:\n",
"\n",
" * inputs: `[{\"name\": \"prompt\", \"type\": \"string\"}, {\"name\": \"stop\", \"type\": \"list[string]\"}]`\n",
" * outputs: `[{\"type\": \"string\"}]`\n",
"\n",
"If the model signature is incompatible or you want to insert extra configs, you can set `transform_input_fn` and `transform_output_fn` accordingly."
" * outputs: `[{\"type\": \"string\"}]`\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Invocation"
]
},
{
@@ -173,12 +149,8 @@
"source": [
"from langchain_community.llms import Databricks\n",
"\n",
"# If running a Databricks notebook attached to an interactive cluster in \"single user\"\n",
"# or \"no isolation shared\" mode, you only need to specify the endpoint name to create\n",
"# a `Databricks` instance to query a serving endpoint in the same workspace.\n",
"llm = Databricks(endpoint_name=\"dolly\")\n",
"\n",
"llm(\"How are you?\")"
"llm = Databricks(endpoint_name=\"YOUR_ENDPOINT_NAME\")\n",
"llm.invoke(\"How are you?\")"
]
},
{
@@ -198,245 +170,16 @@
}
],
"source": [
"llm(\"How are you?\", stop=[\".\"])"
"llm.invoke(\"How are you?\", stop=[\".\"])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'I am fine. Thank you!'"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Otherwise, you can manually specify the Databricks workspace hostname and personal access token\n",
"# or set `DATABRICKS_HOST` and `DATABRICKS_TOKEN` environment variables, respectively.\n",
"# See https://docs.databricks.com/dev-tools/auth.html#databricks-personal-access-tokens\n",
"# We strongly recommend not exposing the API token explicitly inside a notebook.\n",
"# You can use Databricks secret manager to store your API token securely.\n",
"# See https://docs.databricks.com/dev-tools/databricks-utils.html#secrets-utility-dbutilssecrets\n",
"\n",
"import os\n",
"\n",
"import dbutils\n",
"\n",
"os.environ[\"DATABRICKS_TOKEN\"] = dbutils.secrets.get(\"myworkspace\", \"api_token\")\n",
"\n",
"llm = Databricks(host=\"myworkspace.cloud.databricks.com\", endpoint_name=\"dolly\")\n",
"\n",
"llm(\"How are you?\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'I am fine.'"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# If the serving endpoint accepts extra parameters like `temperature`,\n",
"# you can set them in `model_kwargs`.\n",
"llm = Databricks(endpoint_name=\"dolly\", model_kwargs={\"temperature\": 0.1})\n",
"\n",
"llm(\"How are you?\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'I’m Excellent. You?'"
]
},
"execution_count": 24,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Use `transform_input_fn` and `transform_output_fn` if the serving endpoint\n",
"# expects a different input schema and does not return a JSON string,\n",
"# respectively, or you want to apply a prompt template on top.\n",
"\n",
"\n",
"def transform_input(**request):\n",
" full_prompt = f\"\"\"{request[\"prompt\"]}\n",
" Be Concise.\n",
" \"\"\"\n",
" request[\"prompt\"] = full_prompt\n",
" return request\n",
"\n",
"\n",
"llm = Databricks(endpoint_name=\"dolly\", transform_input_fn=transform_input)\n",
"\n",
"llm(\"How are you?\")"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"## Wrapping a cluster driver proxy app\n",
"### Transform Input and Output\n",
"\n",
"Prerequisites:\n",
"\n",
"* An LLM loaded on a Databricks interactive cluster in \"single user\" or \"no isolation shared\" mode.\n",
"* A local HTTP server running on the driver node to serve the model at `\"/\"` using HTTP POST with JSON input/output.\n",
"* It uses a port number between `[3000, 8000]` and listens to the driver IP address or simply `0.0.0.0` instead of localhost only.\n",
"* You have \"Can Attach To\" permission to the cluster.\n",
"\n",
"The expected server schema (using JSON schema) is:\n",
"\n",
"* inputs:\n",
" ```json\n",
" {\"type\": \"object\",\n",
" \"properties\": {\n",
" \"prompt\": {\"type\": \"string\"},\n",
" \"stop\": {\"type\": \"array\", \"items\": {\"type\": \"string\"}}},\n",
" \"required\": [\"prompt\"]}\n",
" ```\n",
"* outputs: `{\"type\": \"string\"}`\n",
"\n",
"If the server schema is incompatible or you want to insert extra configs, you can use `transform_input_fn` and `transform_output_fn` accordingly.\n",
"\n",
"The following is a minimal example for running a driver proxy app to serve an LLM:\n",
"\n",
"```python\n",
"from flask import Flask, request, jsonify\n",
"import torch\n",
"from transformers import pipeline, AutoTokenizer, StoppingCriteria\n",
"\n",
"model = \"databricks/dolly-v2-3b\"\n",
"tokenizer = AutoTokenizer.from_pretrained(model, padding_side=\"left\")\n",
"dolly = pipeline(model=model, tokenizer=tokenizer, trust_remote_code=True, device_map=\"auto\")\n",
"device = dolly.device\n",
"\n",
"class CheckStop(StoppingCriteria):\n",
" def __init__(self, stop=None):\n",
" super().__init__()\n",
" self.stop = stop or []\n",
" self.matched = \"\"\n",
" self.stop_ids = [tokenizer.encode(s, return_tensors='pt').to(device) for s in self.stop]\n",
" def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs):\n",
" for i, s in enumerate(self.stop_ids):\n",
" if torch.all((s == input_ids[0][-s.shape[1]:])).item():\n",
" self.matched = self.stop[i]\n",
" return True\n",
" return False\n",
"\n",
"def llm(prompt, stop=None, **kwargs):\n",
" check_stop = CheckStop(stop)\n",
" result = dolly(prompt, stopping_criteria=[check_stop], **kwargs)\n",
" return result[0][\"generated_text\"].rstrip(check_stop.matched)\n",
"\n",
"app = Flask(\"dolly\")\n",
"\n",
"@app.route('/', methods=['POST'])\n",
"def serve_llm():\n",
" resp = llm(**request.json)\n",
" return jsonify(resp)\n",
"\n",
"app.run(host=\"0.0.0.0\", port=\"7777\")\n",
"```\n",
"\n",
"Once the server is running, you can create a `Databricks` instance to wrap it as an LLM."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Hello, thank you for asking. It is wonderful to hear that you are well.'"
]
},
"execution_count": 32,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# If running a Databricks notebook attached to the same cluster that runs the app,\n",
"# you only need to specify the driver port to create a `Databricks` instance.\n",
"llm = Databricks(cluster_driver_port=\"7777\")\n",
"\n",
"llm(\"How are you?\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'I am well. You?'"
]
},
"execution_count": 40,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Otherwise, you can manually specify the cluster ID to use,\n",
"# as well as Databricks workspace hostname and personal access token.\n",
"\n",
"llm = Databricks(cluster_id=\"0000-000000-xxxxxxxx\", cluster_driver_port=\"7777\")\n",
"\n",
"llm(\"How are you?\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'I am very well. It is a pleasure to meet you.'"
]
},
"execution_count": 31,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# If the app accepts extra parameters like `temperature`,\n",
"# you can set them in `model_kwargs`.\n",
"llm = Databricks(cluster_driver_port=\"7777\", model_kwargs={\"temperature\": 0.1})\n",
"\n",
"llm(\"How are you?\")"
"Sometimes you may want to wrap a serving endpoint that has imcompatible model signature or you want to insert extra configs. You can use the `transform_input_fn` and `transform_output_fn` arguments to define additional pre/post process."
|
||||
]
},
{
@@ -456,7 +199,7 @@
}
],
"source": [
"# Use `transform_input_fn` and `transform_output_fn` if the app\n",
"# Use `transform_input_fn` and `transform_output_fn` if the serving endpoint\n",
"# expects a different input schema and does not return a JSON string,\n",
"# respectively, or you want to apply a prompt template on top.\n",
"\n",
@@ -474,12 +217,12 @@
"\n",
"\n",
"llm = Databricks(\n",
" cluster_driver_port=\"7777\",\n",
" endpoint_name=\"YOUR_ENDPOINT_NAME\",\n",
" transform_input_fn=transform_input,\n",
" transform_output_fn=transform_output,\n",
")\n",
"\n",
"llm(\"How are you?\")"
"llm.invoke(\"How are you?\")"
]
}
],
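The hunks above migrate the notebook's `transform_input_fn` example from the cluster driver proxy app to a named serving endpoint. A hedged end-to-end sketch of the pattern (the `"candidates"` response key is an assumption for illustration; match it to whatever your endpoint actually returns):

```python
from langchain_community.llms import Databricks


def transform_input(**request):
    # Prepend an instruction to every prompt before it reaches the endpoint.
    request["prompt"] = f"{request['prompt']}\nBe concise."
    return request


def transform_output(response):
    # Pull the generated text out of a hypothetical structured response.
    return response["candidates"][0].strip()


llm = Databricks(
    endpoint_name="YOUR_ENDPOINT_NAME",  # placeholder from the notebook
    transform_input_fn=transform_input,
    transform_output_fn=transform_output,
)
llm.invoke("How are you?")
```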
@@ -312,7 +312,7 @@ from langchain.retrievers import AzureAISearchRetriever

### Azure Container Apps dynamic sessions

We need to get the `POOL_MANAGEMENT_ENDPOINT` environment variable from the Azure Container Apps service.
See the instructions [here](https://python.langchain.com/v0.2/docs/integrations/tools/azure_dynamic_sessions/#setup).
See the instructions [here](/docs/integrations/tools/azure_dynamic_sessions/#setup).

We need to install a Python package.

@@ -326,6 +326,19 @@ See a [usage example](/docs/integrations/tools/azure_dynamic_sessions).

from langchain_azure_dynamic_sessions import SessionsPythonREPLTool
```
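A hedged usage sketch for the tool imported above, assuming `POOL_MANAGEMENT_ENDPOINT` has been exported as described:

```python
import os

from langchain_azure_dynamic_sessions import SessionsPythonREPLTool

# Executes Python in a sandboxed Azure Container Apps dynamic session.
tool = SessionsPythonREPLTool(
    pool_management_endpoint=os.environ["POOL_MANAGEMENT_ENDPOINT"]
)
print(tool.invoke("print(1 + 1)"))
```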

### Bing Search

Follow the documentation [here](/docs/integrations/tools/bing_search) for a detailed explanation and instructions for this tool.

The environment variables `BING_SUBSCRIPTION_KEY` and `BING_SEARCH_URL` are required; both come from the Bing Search resource.

```python
from langchain_community.tools.bing_search import BingSearchResults
from langchain_community.utilities import BingSearchAPIWrapper

api_wrapper = BingSearchAPIWrapper()
tool = BingSearchResults(api_wrapper=api_wrapper)
```
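A short usage sketch for the tool built above (assuming the two environment variables are set; the query string is arbitrary):

```python
# Returns a JSON-like string of search results.
results = tool.invoke("What is LangChain?")
print(results)
```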
## Toolkits
docs/docs/integrations/text_embedding/databricks.ipynb (new file, 199 lines)
@@ -0,0 +1,199 @@
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Databricks\n",
"\n",
"> [Databricks](https://www.databricks.com/) Lakehouse Platform unifies data, analytics, and AI on one platform.\n",
"\n",
"This notebook provides a quick overview for getting started with Databricks [embedding models](/docs/concepts/#embedding-models). For detailed documentation of all DatabricksEmbeddings features and configurations head to the [API reference](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.databricks.DatabricksEmbeddings.html).\n",
"\n",
"\n",
"\n",
"## Overview\n",
"\n",
"`DatabricksEmbeddings` class wraps an embedding model endpoint hosted on [Databricks Model Serving](https://docs.databricks.com/en/machine-learning/model-serving/index.html). This example notebook shows how to wrap your serving endpoint and use it as a embedding model in your LangChain application.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"### Supported Methods\n",
|
||||
"\n",
|
||||
"`DatabricksEmbeddings` supports all methods of `Embeddings` class including async APIs.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"### Endpoint Requirement\n",
|
||||
"\n",
|
||||
"The serving endpoint `DatabricksEmbeddings` wraps must have OpenAI-compatible embedding input/output format ([reference](https://mlflow.org/docs/latest/llms/deployments/index.html#embeddings)). As long as the input format is compatible, `DatabricksEmbeddings` can be used for any endpoint type hosted on [Databricks Model Serving](https://docs.databricks.com/en/machine-learning/model-serving/index.html):\n",
|
||||
"\n",
|
||||
"1. Foundation Models - Curated list of state-of-the-art foundation models such as BAAI General Embedding (BGE). These endpoint are ready to use in your Databricks workspace without any set up.\n",
|
||||
"2. Custom Models - You can also deploy custom embedding models to a serving endpoint via MLflow with\n",
|
||||
"your choice of framework such as LangChain, Pytorch, Transformers, etc.\n",
|
||||
"3. External Models - Databricks endpoints can serve models that are hosted outside Databricks as a proxy, such as proprietary model service like OpenAI text-embedding-3.\n"
|
||||
]
|
||||
},
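As a rough illustration of the "OpenAI-compatible embedding input/output format" mentioned above, the request and response bodies look approximately like the sketch below. The exact field values are assumptions; consult the MLflow deployments reference linked above for the authoritative schema.

```python
# Illustrative shapes only, not captured from a real endpoint.
request = {"input": ["Hello, world!"]}

response = {
    "object": "list",
    "data": [
        {
            "object": "embedding",
            "index": 0,
            "embedding": [0.051, 0.007, 0.003],  # truncated for readability
        }
    ],
    "model": "bge-large-en",
}
```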
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setup\n",
"\n",
"To access Databricks models you'll need to create a Databricks account, set up credentials (only if you are outside the Databricks workspace), and install required packages.\n",
"\n",
"### Credentials (only if you are outside Databricks)\n",
"\n",
"If you are running a LangChain app inside Databricks, you can skip this step.\n",
"\n",
"Otherwise, you need to manually set the Databricks workspace hostname and personal access token in the `DATABRICKS_HOST` and `DATABRICKS_TOKEN` environment variables, respectively. See [Authentication Documentation](https://docs.databricks.com/en/dev-tools/auth/index.html#databricks-personal-access-tokens) for how to get an access token."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"DATABRICKS_HOST\"] = \"https://your-workspace.cloud.databricks.com\"\n",
"os.environ[\"DATABRICKS_TOKEN\"] = getpass.getpass(\"Enter your Databricks access token: \")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Installation\n",
"\n",
"The LangChain Databricks integration lives in the `langchain-community` package. Also, `mlflow >= 2.9` is required to run the code in this notebook."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain-community mlflow>=2.9.0"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We first demonstrate how to query the BGE model hosted as a Foundation Models endpoint with `DatabricksEmbeddings`.\n",
"\n",
"For other types of endpoints, there are some differences in how to set up the endpoint itself; however, once the endpoint is ready, there is no difference in how to query it."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Instantiation"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.embeddings import DatabricksEmbeddings\n",
"\n",
"embeddings = DatabricksEmbeddings(\n",
"    endpoint=\"databricks-bge-large-en\",\n",
"    # Specify parameters for embedding queries and documents if needed\n",
"    # query_params={...},\n",
"    # document_params={...},\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Embed single text"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[0.051055908203125, 0.007221221923828125, 0.003879547119140625]\n"
]
}
],
"source": [
"embeddings.embed_query(\"hello\")[:3]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Embed documents"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"documents = [\"This is a dummy document.\", \"This is another dummy document.\"]\n",
"response = embeddings.embed_documents(documents)\n",
"print([e[:3] for e in response])  # Show first 3 elements of each embedding"
]
},
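Since the overview notes that `DatabricksEmbeddings` supports the async APIs of the `Embeddings` interface, a short usage sketch (run inside a notebook cell or an asyncio event loop):

```python
# Async variants mirror the sync methods shown above.
await embeddings.aembed_query("hello")
await embeddings.aembed_documents(["doc one", "doc two"])
```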
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Wrapping Other Types of Endpoints\n",
"\n",
"The example above uses an embedding model hosted as a Foundation Models API. To learn about how to use the other endpoint types, please refer to the documentation for `ChatDatabricks`. While the model type is different, the required steps are the same.\n",
"\n",
"* [Custom Model Endpoint](https://python.langchain.com/v0.2/docs/integrations/chat/databricks/#wrapping-custom-model-endpoint)\n",
"* [External Models](https://python.langchain.com/v0.2/docs/integrations/chat/databricks/#wrapping-external-models)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all DatabricksEmbeddings features and configurations head to the API reference: https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.databricks.DatabricksEmbeddings.html"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

@@ -6,7 +6,7 @@
"source": [
"# Bing Search\n",
"\n",
"[Microsoft Bing](https://www.bing.com/), commonly referred to as `Bing` or `Bing Search`, is a web search engine owned and operated by `Microsoft`."
"> [Bing Search](https://learn.microsoft.com/en-us/bing/search-apis/bing-web-search/) is an Azure service and enables safe, ad-free, location-aware search results, surfacing relevant information from billions of web documents. Help your users find what they're looking for from the world-wide-web by harnessing Bing's ability to comb billions of webpages, images, videos, and news with a single API call. "
]
},
{

@@ -21,6 +21,16 @@
"source": [
"# Build an Agent\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [Chat Models](/docs/concepts/#chat-models)\n",
"- [Tools](/docs/concepts/#tools)\n",
"- [Agents](/docs/concepts/#agents)\n",
"\n",
":::\n",
"\n",
"By themselves, language models can't take actions - they just output text.\n",
"A big use case for LangChain is creating **agents**.\n",
"Agents are systems that use LLMs as reasoning engines to determine which actions to take and the inputs to pass them.\n",
@@ -28,16 +38,6 @@
"\n",
"In this tutorial we will build an agent that can interact with a search engine. You will be able to ask this agent questions, watch it call the search tool, and have conversations with it.\n",
"\n",
"\n",
"## Concepts\n",
"\n",
"In following this tutorial, you will learn how to:\n",
"\n",
"- Use [language models](/docs/concepts/#chat-models), in particular their tool calling ability\n",
"- Use a Search [Tool](/docs/concepts/#tools) to look up information from the Internet\n",
"- Compose a [LangGraph Agent](/docs/concepts/#agents), which uses an LLM to determine actions and then execute them\n",
"- Debug and trace your application using [LangSmith](/docs/concepts/#langsmith)\n",
"\n",
"## End-to-end agent\n",
"\n",
"The code snippet below represents a fully functional agent that uses an LLM to decide which tools to use. It is equipped with a generic search tool. It has conversational memory - meaning that it can be used as a multi-turn chatbot.\n",

@@ -25,6 +25,16 @@
"cell_type": "markdown",
"metadata": {},
"source": [
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [Chat Models](/docs/concepts/#chat-models)\n",
"- [Prompt Templates](/docs/concepts/#prompt-templates)\n",
"- [Chat History](/docs/concepts/#chat-history)\n",
"\n",
":::\n",
"\n",
"## Overview\n",
"\n",
"We'll go over an example of how to design and implement an LLM-powered chatbot. \n",
@@ -39,18 +49,6 @@
"\n",
"This tutorial will cover the basics which will be helpful for those two more advanced topics, but feel free to skip directly to them if you choose.\n",
"\n",
"\n",
"## Concepts\n",
"\n",
"Here are a few of the high-level components we'll be working with:\n",
"\n",
"- [`Chat Models`](/docs/concepts/#chat-models). The chatbot interface is based around messages rather than raw text, and therefore is best suited to Chat Models rather than text LLMs.\n",
"- [`Prompt Templates`](/docs/concepts/#prompt-templates), which simplify the process of assembling prompts that combine default messages, user input, chat history, and (optionally) additional retrieved context.\n",
"- [`Chat History`](/docs/concepts/#chat-history), which allows a chatbot to \"remember\" past interactions and take them into account when responding to followup questions. \n",
"- Debugging and tracing your application using [LangSmith](/docs/concepts/#langsmith)\n",
"\n",
"We'll cover how to fit the above components together to create a powerful conversational chatbot.\n",
"\n",
"## Setup\n",
"\n",
"### Jupyter Notebook\n",

@@ -17,18 +17,21 @@
"source": [
"# Build an Extraction Chain\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [Chat Models](/docs/concepts/#chat-models)\n",
"- [Tools](/docs/concepts/#tools)\n",
"- [Tool calling](/docs/concepts/#function-tool-calling)\n",
"\n",
":::\n",
"\n",
"In this tutorial, we will build a chain to extract structured information from unstructured text. \n",
"\n",
":::{.callout-important}\n",
"This tutorial will only work with models that support **function/tool calling**\n",
":::\n",
"\n",
"## Concepts\n",
"\n",
"Concepts we will cover are:\n",
"- Using [language models](/docs/concepts/#chat-models)\n",
"- Using [function/tool calling](/docs/concepts/#function-tool-calling)\n",
"- Debugging and tracing your application using [LangSmith](/docs/concepts/#langsmith)\n"
"This tutorial will only work with models that support **tool calling**\n",
":::"
]
},
{

@@ -7,6 +7,18 @@
"source": [
"# Build a Local RAG Application\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [Chat Models](/docs/concepts/#chat-models)\n",
"- [Chaining runnables](/docs/how_to/sequence/)\n",
"- [Embeddings](/docs/concepts/#embedding-models)\n",
"- [Vector stores](/docs/concepts/#vector-stores)\n",
"- [Retrieval-augmented generation](/docs/tutorials/rag/)\n",
"\n",
":::\n",
"\n",
"The popularity of projects like [PrivateGPT](https://github.com/imartinez/privateGPT), [llama.cpp](https://github.com/ggerganov/llama.cpp), [GPT4All](https://github.com/nomic-ai/gpt4all), and [llamafile](https://github.com/Mozilla-Ocho/llamafile) underscores the importance of running LLMs locally.\n",
"\n",
"LangChain has [integrations](https://integrations.langchain.com/) with many open-source LLMs that can be run locally.\n",

@@ -19,6 +19,18 @@
"source": [
"# Build a PDF ingestion and Question/Answering system\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [Document loaders](/docs/concepts/#document-loaders)\n",
"- [Chat models](/docs/concepts/#chat-models)\n",
"- [Embeddings](/docs/concepts/#embedding-models)\n",
"- [Vector stores](/docs/concepts/#vector-stores)\n",
"- [Retrieval-augmented generation](/docs/tutorials/rag/)\n",
"\n",
":::\n",
"\n",
"PDF files often hold crucial unstructured data unavailable from other sources. They can be quite lengthy, and unlike plain text files, cannot generally be fed directly into the prompt of a language model.\n",
"\n",
"In this tutorial, you'll create a system that can answer questions about PDF files. More specifically, you'll use a [Document Loader](/docs/concepts/#document-loaders) to load text in a format usable by an LLM, then build a retrieval-augmented generation (RAG) pipeline to answer questions, including citations from the source material.\n",

@@ -17,6 +17,20 @@
"source": [
"# Conversational RAG\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [Chat history](/docs/concepts/#chat-history)\n",
"- [Chat models](/docs/concepts/#chat-models)\n",
"- [Embeddings](/docs/concepts/#embedding-models)\n",
"- [Vector stores](/docs/concepts/#vector-stores)\n",
"- [Retrieval-augmented generation](/docs/tutorials/rag/)\n",
"- [Tools](/docs/concepts/#tools)\n",
"- [Agents](/docs/concepts/#agents)\n",
"\n",
":::\n",
"\n",
"In many Q&A applications we want to allow the user to have a back-and-forth conversation, meaning the application needs some sort of \"memory\" of past questions and answers, and some logic for incorporating those into its current thinking.\n",
"\n",
"In this guide we focus on **adding logic for incorporating historical messages.** Further details on chat history management are [covered here](/docs/how_to/message_history).\n",

@@ -17,6 +17,18 @@
"source": [
"# Build a Query Analysis System\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [Document loaders](/docs/concepts/#document-loaders)\n",
"- [Chat models](/docs/concepts/#chat-models)\n",
"- [Embeddings](/docs/concepts/#embedding-models)\n",
"- [Vector stores](/docs/concepts/#vector-stores)\n",
"- [Retrieval](/docs/concepts/#retrieval)\n",
"\n",
":::\n",
"\n",
"This page will show how to use query analysis in a basic end-to-end example. This will cover creating a simple search engine, showing a failure mode that occurs when passing a raw user question to that search, and then an example of how query analysis can help address that issue. There are MANY different query analysis techniques and this end-to-end example will not show all of them.\n",
"\n",
"For the purpose of this example, we will do retrieval over the LangChain YouTube videos."

@@ -16,6 +16,9 @@
"LangSmith will become increasingly helpful as our application grows in\n",
"complexity.\n",
"\n",
"If you're already familiar with basic retrieval, you might also be interested in\n",
"this [high-level overview of different retrieval techniques](/docs/concepts/#retrieval).\n",
"\n",
"## What is RAG?\n",
"\n",
"RAG is a technique for augmenting LLM knowledge with additional data.\n",
@@ -36,7 +39,7 @@
"The most common full sequence from raw data to answer looks like:\n",
"\n",
"### Indexing\n",
"1. **Load**: First we need to load our data. This is done with [DocumentLoaders](/docs/concepts/#document-loaders).\n",
"1. **Load**: First we need to load our data. This is done with [Document Loaders](/docs/concepts/#document-loaders).\n",
"2. **Split**: [Text splitters](/docs/concepts/#text-splitters) break large `Documents` into smaller chunks. This is useful both for indexing data and for passing it in to a model, since large chunks are harder to search over and won't fit in a model's finite context window.\n",
"3. **Store**: We need somewhere to store and index our splits, so that they can later be searched over. This is often done using a [VectorStore](/docs/concepts/#vectorstores) and [Embeddings](/docs/concepts/#embedding-models) model.\n",
"\n",
@@ -930,14 +933,10 @@
"the above sections. Along with the **Go deeper** sources mentioned\n",
"above, good next steps include:\n",
"\n",
"- [Return\n",
"  sources](/docs/how_to/qa_sources): Learn\n",
"  how to return source documents\n",
"- [Streaming](/docs/how_to/streaming):\n",
"  Learn how to stream outputs and intermediate steps\n",
"- [Add chat\n",
"  history](/docs/how_to/message_history):\n",
"  Learn how to add chat history to your app"
"- [Return sources](/docs/how_to/qa_sources): Learn how to return source documents\n",
"- [Streaming](/docs/how_to/streaming): Learn how to stream outputs and intermediate steps\n",
"- [Add chat history](/docs/how_to/message_history): Learn how to add chat history to your app\n",
"- [Retrieval conceptual guide](/docs/concepts/#retrieval): A high-level overview of specific retrieval techniques"
]
}
],

@@ -6,6 +6,17 @@
"source": [
"# Build a Question/Answering system over SQL data\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [Chaining runnables](/docs/how_to/sequence/)\n",
"- [Chat models](/docs/concepts/#chat-models)\n",
"- [Tools](/docs/concepts/#tools)\n",
"- [Agents](/docs/concepts/#agents)\n",
"\n",
":::\n",
"\n",
"Enabling an LLM system to query structured data can be qualitatively different from unstructured text data. Whereas in the latter it is common to generate text that can be searched against a vector database, the approach for structured data is often for the LLM to write and execute queries in a DSL, such as SQL. In this guide we'll go over the basic ways to create a Q&A system over tabular data in databases. We will cover implementations using both chains and agents. These systems will allow us to ask a question about the data in a database and get back a natural language answer. The main difference between the two is that our agent can query the database in a loop as many times as it needs to answer the question.\n",
"\n",
"## ⚠️ Security note ⚠️\n",

@@ -36,6 +36,7 @@
"react": "^17.0.2",
"react-dom": "^17.0.2",
"typescript": "^5.1.3",
"uuid": "^9.0.0",
"webpack": "^5.75.0"
},
"devDependencies": {

@@ -2,6 +2,7 @@
import React, { useState, useEffect } from "react";
import { createClient } from "@supabase/supabase-js";
import useDocusaurusContext from "@docusaurus/useDocusaurusContext";
import { v4 as uuidv4 } from "uuid";

const useCookie = () => {
/**
@@ -110,7 +111,9 @@ const getIpAddress = async () => {

export default function Feedback() {
const { setCookie, checkCookie } = useCookie();
const [feedbackId, setFeedbackId] = useState(null);
const [feedbackSent, setFeedbackSent] = useState(false);
const [feedbackDetailsSent, setFeedbackDetailsSent] = useState(false);
const { siteConfig } = useDocusaurusContext();
const [pathname, setPathname] = useState("");

@@ -133,26 +136,22 @@ export default function Feedback() {
);
try {
const ipAddress = await getIpAddress();

/**
* "id" and "created_at" are automatically generated by Supabase
* @type {Omit<Database["public"]["Tables"]["feedback"]["Row"], "id" | "created_at">}
*/
const data = {
const rowId = uuidv4();
setFeedbackId(rowId);
const params = {
id: rowId,
is_good: feedback === "good",
url: window.location.pathname,
user_ip: ipAddress,
project: LANGCHAIN_PROJECT_NAME,
};

const { error } = await supabase.from("feedback").insert(data);
const { error } = await supabase.from("feedback").insert(params);
if (error) {
throw error;
}
} catch (e) {
console.error("Failed to send feedback", {
e,
});
console.error("Failed to send feedback", e);
return;
}

@@ -161,6 +160,33 @@ export default function Feedback() {
setFeedbackSent(true);
};

const handleFeedbackDetails = async (e) => {
e.preventDefault();
if (!feedbackId) {
setFeedbackDetailsSent(true);
return;
}
const details = e.target.elements
.namedItem("details")
?.value.slice(0, 1024);
if (!details) {
return;
}
const supabase = createClient(
siteConfig.customFields.supabaseUrl,
siteConfig.customFields.supabasePublicKey
);
const { error } = await supabase.from("feedback_details").insert({
feedback_id: feedbackId,
details,
});
if (error) {
console.error("Failed to add feedback details", error);
return;
}
setFeedbackDetailsSent(true);
};

useEffect(() => {
if (typeof window !== "undefined") {
// If the cookie exists, set feedback sent to
@@ -202,7 +228,31 @@ export default function Feedback() {
<div style={{ display: "flex", flexDirection: "column" }}>
<hr />
{feedbackSent ? (
<h4>Thanks for your feedback!</h4>
<>
<h4>Thanks for your feedback!</h4>
{!feedbackDetailsSent && feedbackId && (
<form
style={{ display: "flex", flexDirection: "column" }}
onSubmit={handleFeedbackDetails}
>
<h4>Do you have any specific comments?</h4>
<textarea
name="details"
style={{ width: "480px", height: "120px" }}
/>
<button
style={{
width: "72px",
marginLeft: "408px",
marginTop: "12px",
}}
type="submit"
>
Submit
</button>
</form>
)}
</>
) : (
<>
<h4>Was this page helpful?</h4>
@@ -248,7 +298,7 @@ export default function Feedback() {
)}
<br />
<h4>
You can leave detailed feedback{" "}
You can also leave detailed feedback{" "}
<a target="_blank" href={newGithubIssueURL}>
on GitHub
</a>

@@ -5990,6 +5990,7 @@ __metadata:
typedoc: ^0.24.4
typedoc-plugin-markdown: next
typescript: ^5.1.3
uuid: ^9.0.0
webpack: ^5.75.0
yaml-loader: ^0.8.0
languageName: unknown

@@ -15,17 +15,20 @@ coverage:
--cov-report term-missing:skip-covered \
$(TEST_FILE)

test tests integration_tests:
test tests:
poetry run pytest --disable-socket --allow-unix-socket $(TEST_FILE)

integration_tests:
poetry run pytest $(TEST_FILE)

test_watch:
poetry run ptw --snapshot-update --now . -- -vv -x tests/unit_tests
poetry run ptw --disable-socket --allow-unix-socket --snapshot-update --now . -- -vv -x tests/unit_tests

check_imports: $(shell find langchain_community -name '*.py')
poetry run python ./scripts/check_imports.py $^

extended_tests:
poetry run pytest --only-extended tests/unit_tests
poetry run pytest --disable-socket --allow-unix-socket --only-extended tests/unit_tests


######################

@@ -2,7 +2,10 @@
from typing import Dict

from langchain_core.pydantic_v1 import root_validator
from langchain_core.utils import get_from_dict_or_env
from langchain_core.utils import (
convert_to_secret_str,
get_from_dict_or_env,
)

from langchain_community.chat_models import ChatOpenAI
from langchain_community.llms.moonshot import MOONSHOT_SERVICE_URL_BASE, MoonshotCommon
@@ -28,8 +31,8 @@ class MoonshotChat(MoonshotCommon, ChatOpenAI):  # type: ignore[misc]
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the environment is set up correctly."""
values["moonshot_api_key"] = get_from_dict_or_env(
values, "moonshot_api_key", "MOONSHOT_API_KEY"
values["moonshot_api_key"] = convert_to_secret_str(
get_from_dict_or_env(values, "moonshot_api_key", "MOONSHOT_API_KEY")
)

try:

@@ -137,6 +137,7 @@ class AsyncHtmlLoader(BaseLoader):
url,
headers=self.session.headers,
ssl=None if self.session.verify else False,
**self.requests_kwargs,
) as response:
try:
text = await response.text()
@@ -144,7 +145,7 @@
logger.error(f"Failed to decode content from {url}")
text = ""
return text
except aiohttp.ClientConnectionError as e:
except (aiohttp.ClientConnectionError, TimeoutError) as e:
if i == retries - 1 and self.ignore_load_errors:
logger.warning(f"Error fetching {url} after {retries} retries.")
return ""

libs/community/tests/integration_tests/test_ovhcloud.py (new file, 8 lines)
@@ -0,0 +1,8 @@
from langchain_community.embeddings.ovhcloud import OVHCloudEmbeddings


def test_ovhcloud_embed_documents() -> None:
llm = OVHCloudEmbeddings(model_name="multilingual-e5-base")
docs = ["Hello", "World"]
output = llm.embed_documents(docs)
assert len(output) == len(docs)
@@ -23,10 +23,3 @@ def test_ovhcloud_empty_access_token_should_not_raise_error() -> None:
model_name="multilingual-e5-base", region="kepler", access_token=""
)
assert isinstance(llm, OVHCloudEmbeddings)


def test_ovhcloud_embed_documents() -> None:
llm = OVHCloudEmbeddings(model_name="multilingual-e5-base")
docs = ["Hello", "World"]
output = llm.embed_documents(docs)
assert len(output) == len(docs)

@@ -26,7 +26,6 @@ from langchain_core.messages import (
get_buffer_string,
)
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.runnables import run_in_executor


class BaseChatMessageHistory(ABC):
@@ -102,6 +101,8 @@ class BaseChatMessageHistory(ABC):
In general, fetching messages may involve IO to the underlying
persistence layer.
"""
from langchain_core.runnables.config import run_in_executor

return await run_in_executor(None, lambda: self.messages)

def add_user_message(self, message: Union[HumanMessage, str]) -> None:
@@ -172,6 +173,8 @@ class BaseChatMessageHistory(ABC):
Args:
messages: A list of BaseMessage objects to store.
"""
from langchain_core.runnables.config import run_in_executor

await run_in_executor(None, self.add_messages, messages)

@abstractmethod
@@ -180,6 +183,8 @@ class BaseChatMessageHistory(ABC):

async def aclear(self) -> None:
"""Remove all messages from the store"""
from langchain_core.runnables.config import run_in_executor

await run_in_executor(None, self.clear)

def __str__(self) -> str:

@@ -7,7 +7,19 @@ from langchain_core.pydantic_v1 import Field


class Document(Serializable):
"""Class for storing a piece of text and associated metadata."""
"""Class for storing a piece of text and associated metadata.

Example:

.. code-block:: python

from langchain_core.documents import Document

document = Document(
page_content="Hello, world!",
metadata={"source": "https://example.com"}
)
"""

page_content: str
"""String text."""

@@ -10,7 +10,20 @@ from langchain_core.runnables import run_in_executor


class BaseDocumentCompressor(BaseModel, ABC):
"""Base class for document compressors."""
"""Base class for document compressors.

This abstraction is primarily used for
post-processing of retrieved documents.

Documents matching a given query are first retrieved.
Then the list of documents can be further processed.

For example, one could re-rank the retrieved documents
using an LLM.

**Note** users should favor using a RunnableLambda
instead of sub-classing from this interface.
"""

@abstractmethod
def compress_documents(

@@ -6,7 +6,32 @@ from langchain_core.runnables.config import run_in_executor


class Embeddings(ABC):
"""Interface for embedding models."""
"""An interface for embedding models.

This is an interface meant for implementing text embedding models.

Text embedding models are used to map text to a vector (a point in n-dimensional
space).

Texts that are similar will usually be mapped to points that are close to each
other in this space. The exact details of what's considered "similar" and how
"distance" is measured in this space are dependent on the specific embedding model.

This abstraction contains a method for embedding a list of documents and a method
for embedding a query text. The embedding of a query text is expected to be a single
vector, while the embedding of a list of documents is expected to be a list of
vectors.

Usually the query embedding is identical to the document embedding, but the
abstraction allows treating them independently.

In addition to the synchronous methods, this interface also provides asynchronous
versions of the methods.

By default, the asynchronous methods are implemented using the synchronous methods;
however, implementations may choose to override the asynchronous methods with
an async native implementation for performance reasons.
"""

@abstractmethod
def embed_documents(self, texts: List[str]) -> List[List[float]]:
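A minimal sketch of implementing this interface: the two abstract methods are `embed_documents` and `embed_query`, and the character-code "model" below is a toy stand-in, not a real embedding model.

```python
# A toy Embeddings implementation; the vectorization scheme is illustrative only.
from typing import List

from langchain_core.embeddings import Embeddings


class CharCodeEmbeddings(Embeddings):
    """Toy embeddings: folded bag of character codes, fixed size."""

    def __init__(self, size: int = 8) -> None:
        self.size = size

    def _embed(self, text: str) -> List[float]:
        vector = [0.0] * self.size
        for i, ch in enumerate(text):
            vector[i % self.size] += ord(ch)
        return vector

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        return [self._embed(t) for t in texts]

    def embed_query(self, text: str) -> List[float]:
        return self._embed(text)
```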

@@ -1,3 +1,5 @@
"""Module contains a few fake embedding models for testing purposes."""
# Please do not add additional fake embedding model implementations here.
import hashlib
from typing import List

@@ -6,7 +8,21 @@ from langchain_core.pydantic_v1 import BaseModel


class FakeEmbeddings(Embeddings, BaseModel):
"""Fake embedding model."""
"""Fake embedding model for unit testing purposes.

This embedding model creates embeddings by sampling from a normal distribution.

Do not use this outside of testing, as it is not a real embedding model.

Example:

.. code-block:: python

from langchain_core.embeddings import FakeEmbeddings

fake_embeddings = FakeEmbeddings(size=100)
fake_embeddings.embed_documents(["hello world", "foo bar"])
"""

size: int
"""The size of the embedding vector."""
@@ -24,9 +40,21 @@ class FakeEmbeddings(Embeddings, BaseModel):


class DeterministicFakeEmbedding(Embeddings, BaseModel):
"""
Fake embedding model that always returns
the same embedding vector for the same text.
"""Deterministic fake embedding model for unit testing purposes.

This embedding model creates embeddings by sampling from a normal distribution
with a seed based on the hash of the text.

Do not use this outside of testing, as it is not a real embedding model.

Example:

.. code-block:: python

from langchain_core.embeddings import DeterministicFakeEmbedding

fake_embeddings = DeterministicFakeEmbedding(size=100)
fake_embeddings.embed_documents(["hello world", "foo bar"])
"""

size: int
@@ -40,9 +68,7 @@ class DeterministicFakeEmbedding(Embeddings, BaseModel):
return list(np.random.normal(size=self.size))

def _get_seed(self, text: str) -> int:
"""
Get a seed for the random generator, using the hash of the text.
"""
"""Get a seed for the random generator, using the hash of the text."""
return int(hashlib.sha256(text.encode("utf-8")).hexdigest(), 16) % 10**8

def embed_documents(self, texts: List[str]) -> List[List[float]]:

@@ -214,10 +214,18 @@ def index(
are not able to specify the uid of the document.

IMPORTANT:
if auto_cleanup is set to True, the loader should be returning
the entire dataset, and not just a subset of the dataset.
Otherwise, the auto_cleanup will remove documents that it is not
supposed to.
* if auto_cleanup is set to True, the loader should be returning
the entire dataset, and not just a subset of the dataset.
Otherwise, the auto_cleanup will remove documents that it is not
supposed to.
* In incremental mode, if documents associated with a particular
source id appear across different batches, the indexing API
will do some redundant work. This will still result in the
correct end state of the index, but will unfortunately not be
100% efficient. For example, if a given document is split into 15
chunks, and we index them using a batch size of 5, we'll have 3 batches
all with the same source id. In general, to avoid doing too much
redundant work select as big a batch size as possible.

Args:
docs_source: Data loader or iterable of documents to index.

@@ -5,7 +5,39 @@ from typing import List, Optional, Sequence


class RecordManager(ABC):
"""Abstract base class representing the interface for a record manager."""
"""Abstract base class representing the interface for a record manager.

The record manager abstraction is used by the langchain indexing API.

The record manager keeps track of which documents have been
written into a vectorstore and when they were written.

The indexing API computes hashes for each document and stores the hash
together with the write time and the source id in the record manager.

On subsequent indexing runs, the indexing API can check the record manager
to determine which documents have already been indexed and which have not.

This allows the indexing API to avoid re-indexing documents that have
already been indexed, and to only index new documents.

The main benefit of this abstraction is that it works across many vectorstores.
To be supported, a vectorstore needs to only support the ability to add and
delete documents by ID. Using the record manager, the indexing API will
be able to delete outdated documents and avoid redundant indexing of documents
that have already been indexed.

The main constraints of this abstraction are:

1. It relies on the time-stamps to determine which documents have been
indexed and which have not. This means that the time-stamps must be
monotonically increasing. The timestamp should be the timestamp
as measured by the server to minimize issues.
2. The record manager is currently implemented separately from the
vectorstore, which means that the overall system becomes distributed
and may create issues with consistency. For example, writing to
record manager succeeds but corresponding writing to vectorstore fails.
"""

def __init__(
self,
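A hedged usage sketch of how a record manager pairs with the indexing API described above. `SQLRecordManager` is one concrete implementation; the vector store, embedding model, and database URL here are illustrative choices, not requirements.

```python
# Illustrative sketch of the record-manager workflow, assuming langchain,
# langchain-community, langchain-openai, and faiss-cpu are installed.
from langchain.indexes import SQLRecordManager, index
from langchain_community.vectorstores import FAISS
from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings

record_manager = SQLRecordManager(
    "faiss/my_docs", db_url="sqlite:///record_manager_cache.sql"
)
record_manager.create_schema()

vectorstore = FAISS.from_texts(["placeholder"], OpenAIEmbeddings())
docs = [Document(page_content="hello", metadata={"source": "a.txt"})]

# Re-running this is a no-op for unchanged documents;
# stale documents from the same source are cleaned up.
index(docs, record_manager, vectorstore, cleanup="incremental", source_id_key="source")
```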

@@ -1,25 +1,40 @@
"""**Language Model** is a type of model that can generate text or complete
text prompts.

LangChain has two main classes to work with language models:
- **LLM** classes provide access to the large language model (**LLM**) APIs and services.
- **Chat Models** are a variation on language models.
LangChain has two main classes to work with language models: **Chat Models**
and "old-fashioned" **LLMs**.

**Class hierarchy:**
## Chat Models

.. code-block::
Language models that use a sequence of messages as inputs and return chat messages
as outputs (as opposed to using plain text). These are traditionally newer models (
older models are generally LLMs, see below). Chat models support the assignment of
distinct roles to conversation messages, helping to distinguish messages from the AI,
users, and instructions such as system messages.

BaseLanguageModel --> BaseLLM --> LLM --> <name>  # Examples: AI21, HuggingFaceHub, OpenAI
--> BaseChatModel --> <name>  # Examples: ChatOpenAI, ChatGooglePalm
The key abstraction for chat models is `BaseChatModel`. Implementations
should inherit from this class. Please see LangChain how-to guides with more
information on how to implement a custom chat model.

**Main helpers:**
To implement a custom Chat Model, inherit from `BaseChatModel`. See
the following guide for more information on how to implement a custom Chat Model:

.. code-block::
https://python.langchain.com/v0.2/docs/how_to/custom_chat_model/

LLMResult, PromptValue,
CallbackManagerForLLMRun, AsyncCallbackManagerForLLMRun,
CallbackManager, AsyncCallbackManager,
AIMessage, BaseMessage, HumanMessage
## LLMs

Language models that take a string as input and return a string.
These are traditionally older models (newer models generally are Chat Models, see above).

Although the underlying models are string in, string out, the LangChain wrappers
also allow these models to take messages as input. This gives them the same interface
as Chat Models. When messages are passed in as input, they will be formatted into a
string under the hood before being passed to the underlying model.

To implement a custom LLM, inherit from `BaseLLM` or `LLM`.
Please see the following guide for more information on how to implement a custom LLM:

https://python.langchain.com/v0.2/docs/how_to/custom_llm/
""" # noqa: E501

from langchain_core.language_models.base import (

@@ -115,7 +115,26 @@


class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
"""Base class for Chat models."""
"""Base class for Chat models.

Custom chat model implementations should inherit from this class.

Follow the guide for more information on how to implement a
custom Chat Model:
[Guide](https://python.langchain.com/v0.2/docs/how_to/custom_chat_model/).

Please reference the table below for information about which
methods and properties are required or optional for implementations.

| Method/Property | Description | Required/Optional |
|----------------------------------|--------------------------------------------------------------------|-------------------|
| `_generate` | Use to generate a chat result from a prompt | Required |
| `_llm_type` (property) | Used to uniquely identify the type of the model. Used for logging. | Required |
| `_identifying_params` (property) | Represent model parameterization for tracing purposes. | Optional |
| `_stream` | Use to implement streaming | Optional |
| `_agenerate` | Use to implement a native async method | Optional |
| `_astream` | Use to implement async version of `_stream` | Optional |
""" # noqa: E501

callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True)
"""[DEPRECATED] Callback manager to add to the run trace."""

@@ -952,7 +971,11 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):


class SimpleChatModel(BaseChatModel):
"""Simplified implementation for a chat model to inherit from."""
"""Simplified implementation for a chat model to inherit from.

**Note** This implementation is primarily here for backwards compatibility.
For new implementations, please use `BaseChatModel` directly.
"""

def _generate(
self,

@@ -15,8 +15,19 @@ class FakeListLLM(LLM):
"""Fake LLM for testing purposes."""

responses: List[str]
"""List of responses to return in order."""
# This parameter should be removed from FakeListLLM since
# it's only used by sub-classes.
sleep: Optional[float] = None
"""Sleep time in seconds between responses.

Ignored by FakeListLLM, but used by sub-classes.
"""
i: int = 0
"""Internally incremented after every model invocation.

Useful primarily for testing purposes.
"""

@property
def _llm_type(self) -> str:
@@ -59,9 +70,16 @@ class FakeListLLM(LLM):


class FakeStreamingListLLM(FakeListLLM):
"""Fake streaming list LLM for testing purposes."""
"""Fake streaming list LLM for testing purposes.

An LLM that will return responses from a list in order.

This model also supports optionally sleeping between successive
chunks in a streaming implementation.
"""

error_on_chunk_number: Optional[int] = None
"""If set, will raise an exception on the specified chunk number."""

def stream(
self,

@@ -17,8 +17,11 @@ class FakeMessagesListChatModel(BaseChatModel):
"""Fake ChatModel for testing purposes."""

responses: List[BaseMessage]
"""List of responses to **cycle** through in order."""
sleep: Optional[float] = None
"""Sleep time in seconds between responses."""
i: int = 0
"""Internally incremented after every model invocation."""

def _generate(
self,
@@ -43,10 +46,13 @@ class FakeMessagesListChatModel(BaseChatModel):
class FakeListChatModel(SimpleChatModel):
"""Fake ChatModel for testing purposes."""

responses: List
responses: List[str]
"""List of responses to **cycle** through in order."""
sleep: Optional[float] = None
i: int = 0
"""List of responses to **cycle** through in order."""
error_on_chunk_number: Optional[int] = None
"""Internally incremented after every model invocation."""

@property
def _llm_type(self) -> str:

@@ -1240,6 +1240,11 @@ class LLM(BaseLLM):
`astream` will use `_astream` if provided, otherwise it will implement
a fallback behavior that will use `_stream` if `_stream` is implemented,
and use `_acall` if `_stream` is not implemented.

Please see the following guide for more information on how to
implement a custom LLM:

https://python.langchain.com/v0.2/docs/how_to/custom_llm/
"""

@abstractmethod

@@ -14,7 +14,17 @@ def default(obj: Any) -> Any:


def dumps(obj: Any, *, pretty: bool = False, **kwargs: Any) -> str:
"""Return a json string representation of an object."""
"""Return a json string representation of an object.

Args:
obj: The object to dump
pretty: Whether to pretty print the json. If true, the json will be
indented with 2 spaces (if no indent is provided as part of kwargs)
**kwargs: Additional arguments to pass to json.dumps

Returns:
A json string representation of the object
"""
if "default" in kwargs:
raise ValueError("`default` should not be passed to dumps")
try:
@@ -25,11 +35,24 @@ def dumps(obj: Any, *, pretty: bool = False, **kwargs: Any) -> str:
return json.dumps(obj, default=default, **kwargs)
except TypeError:
if pretty:
indent = kwargs.pop("indent", 2)
return json.dumps(to_json_not_implemented(obj), indent=indent, **kwargs)
else:
return json.dumps(to_json_not_implemented(obj), **kwargs)


def dumpd(obj: Any) -> Dict[str, Any]:
"""Return a json dict representation of an object."""
"""Return a dict representation of an object.

Note:
Unfortunately this function is not as efficient as it could be
because it first dumps the object to a json string and then loads it
back into a dictionary.

Args:
obj: The object to dump.

Returns:
dictionary that can be serialized to json using json.dumps
"""
return json.loads(dumps(obj))
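A quick usage sketch of the two functions documented above, using a message object that is serializable out of the box:

```python
# dumps() yields a JSON string; dumpd() yields a plain dict that round-trips
# through JSON (at the cost of an extra dump/load pass, as noted above).
from langchain_core.load.dump import dumpd, dumps
from langchain_core.messages import AIMessage

msg = AIMessage(content="J'adore la programmation.")
as_json_str = dumps(msg, pretty=True)  # indented with 2 spaces by default
as_dict = dumpd(msg)
```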

@@ -1,3 +1,22 @@
"""
This file contains a mapping between the lc_namespace path for a given
subclass that inherits from Serializable to the namespace
where that class is actually located.

This mapping helps maintain the ability to serialize and deserialize
well-known LangChain objects even if they are moved around in the codebase
across different LangChain versions.

For example, the code for the AIMessage class is located in
langchain_core.messages.ai.AIMessage. This message is associated with the lc_namespace
["langchain", "schema", "messages", "AIMessage"],
because this code was originally in langchain.schema.messages.AIMessage.

The mapping allows us to deserialize an AIMessage created with an older
version of LangChain where the code was in a different location.
"""
from typing import Dict, Tuple

# First value is the value that it is serialized as

@@ -62,7 +62,26 @@ def try_neq_default(value: Any, key: str, model: BaseModel) -> bool:


class Serializable(BaseModel, ABC):
"""Serializable base class."""
"""Serializable base class.

This class is used to serialize objects to JSON.

It relies on the following methods and properties:

- `is_lc_serializable`: Is this class serializable?
By design even if a class inherits from Serializable, it is not serializable by
default. This is to prevent accidental serialization of objects that should not
be serialized.
- `get_lc_namespace`: Get the namespace of the langchain object.
During de-serialization this namespace is used to identify
the correct class to instantiate.
Please see the `Reviver` class in `langchain_core.load.load` for more details.
During de-serialization an additional mapping is used to handle
classes that have moved or been renamed across package versions.
- `lc_secrets`: A map of constructor argument names to secret ids.
- `lc_attributes`: List of additional attribute names that should be included
as part of the serialized representation.
"""

@classmethod
def is_lc_serializable(cls) -> bool:
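A sketch tying the methods and properties from the docstring above together in one subclass. The class, namespace, and secret names are illustrative, not taken from the codebase.

```python
# Hypothetical Serializable subclass demonstrating the hooks described above.
from typing import Dict, List

from langchain_core.load.serializable import Serializable


class MyComponent(Serializable):
    api_key: str
    tags: List[str] = []

    @classmethod
    def is_lc_serializable(cls) -> bool:
        # Opt in explicitly; subclasses are not serializable by default.
        return True

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        # Where the Reviver should look for this class when deserializing.
        return ["my_package", "components"]

    @property
    def lc_secrets(self) -> Dict[str, str]:
        # Serialize the secret as an id, never as the raw value.
        return {"api_key": "MY_API_KEY"}
```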

@@ -40,8 +40,9 @@ class AIMessage(BaseMessage):
"""Message from an AI."""

example: bool = False
"""Whether this Message is being passed in to the model as part of an example
conversation.
"""Use to denote that a message is part of an example conversation.

At the moment, this is ignored by most models. Usage is discouraged.
"""

tool_calls: List[ToolCall] = []

@@ -7,8 +7,9 @@ class HumanMessage(BaseMessage):
"""Message from a human."""

example: bool = False
"""Whether this Message is being passed in to the model as part of an example
conversation.
"""Use to denote that a message is part of an example conversation.

At the moment, this is ignored by most models. Usage is discouraged.
"""

type: Literal["human"] = "human"

@@ -25,12 +25,12 @@ from langchain_core.messages.function import FunctionMessage, FunctionMessageChu
from langchain_core.messages.human import HumanMessage, HumanMessageChunk
from langchain_core.messages.system import SystemMessage, SystemMessageChunk
from langchain_core.messages.tool import ToolMessage, ToolMessageChunk
from langchain_core.runnables import Runnable, RunnableLambda

if TYPE_CHECKING:
from langchain_text_splitters import TextSplitter

from langchain_core.language_models import BaseLanguageModel
from langchain_core.runnables.base import Runnable

AnyMessage = Union[
AIMessage, HumanMessage, ChatMessage, SystemMessage, FunctionMessage, ToolMessage
@@ -279,6 +279,8 @@ def _runnable_support(func: Callable) -> Callable:
List[BaseMessage],
Runnable[Sequence[MessageLikeRepresentation], List[BaseMessage]],
]:
from langchain_core.runnables.base import RunnableLambda

if messages is not None:
return func(messages, **kwargs)
else:
@@ -486,9 +488,7 @@ def trim_messages(
] = None,
include_system: bool = False,
text_splitter: Optional[Union[Callable[[str], List[str]], TextSplitter]] = None,
) -> Union[
List[BaseMessage], Runnable[Sequence[MessageLikeRepresentation], List[BaseMessage]]
]:
) -> List[BaseMessage]:
"""Trim messages to be below a token count.

Args:
@@ -734,53 +734,6 @@

""" # noqa: E501
if messages is not None:
return _trim_messages_helper(
messages,
max_tokens=max_tokens,
token_counter=token_counter,
strategy=strategy,
allow_partial=allow_partial,
end_on=end_on,
start_on=start_on,
include_system=include_system,
text_splitter=text_splitter,
)
else:
trimmer = partial(
_trim_messages_helper,
max_tokens=max_tokens,
token_counter=token_counter,
strategy=strategy,
allow_partial=allow_partial,
end_on=end_on,
start_on=start_on,
include_system=include_system,
text_splitter=text_splitter,
)
return RunnableLambda(trimmer)


def _trim_messages_helper(
messages: Sequence[MessageLikeRepresentation],
*,
max_tokens: int,
token_counter: Union[
Callable[[List[BaseMessage]], int],
Callable[[BaseMessage], int],
BaseLanguageModel,
],
strategy: Literal["first", "last"] = "last",
allow_partial: bool = False,
end_on: Optional[
Union[str, Type[BaseMessage], Sequence[Union[str, Type[BaseMessage]]]]
] = None,
start_on: Optional[
Union[str, Type[BaseMessage], Sequence[Union[str, Type[BaseMessage]]]]
] = None,
include_system: bool = False,
text_splitter: Optional[Union[Callable[[str], List[str]], TextSplitter]] = None,
) -> List[BaseMessage]:
from langchain_core.language_models import BaseLanguageModel

if start_on and strategy == "first":

@@ -36,6 +36,7 @@ from langchain_core.runnables.config import (
run_in_executor,
)
from langchain_core.runnables.fallbacks import RunnableWithFallbacks
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_core.runnables.passthrough import (
RunnableAssign,
RunnablePassthrough,
@@ -78,6 +79,7 @@ __all__ = [
"RunnablePick",
"RunnableSequence",
"RunnableWithFallbacks",
"RunnableWithMessageHistory",
"get_config_list",
"aadd",
"add",

@@ -11,6 +11,7 @@ from typing import (
List,
NamedTuple,
Optional,
Protocol,
Tuple,
Type,
TypedDict,
@@ -25,6 +26,11 @@ if TYPE_CHECKING:
from langchain_core.runnables.base import Runnable as RunnableType


class Stringifiable(Protocol):
def __str__(self) -> str:
...


class LabelsDict(TypedDict):
"""Dictionary of labels for nodes and edges in a graph."""

@@ -55,7 +61,7 @@ class Edge(NamedTuple):

source: str
target: str
data: Optional[str] = None
data: Optional[Stringifiable] = None
conditional: bool = False


@@ -253,7 +259,7 @@ class Graph:
self,
source: Node,
target: Node,
data: Optional[str] = None,
data: Optional[Stringifiable] = None,
conditional: bool = False,
) -> Edge:
"""Add an edge to the graph and return it."""

@@ -81,7 +81,7 @@ def draw_mermaid(
# Add BR every wrap_label_n_words words
if edge.data is not None:
edge_data = edge.data
words = edge_data.split()  # Split the string into words
words = str(edge_data).split()  # Split the string into words
# Group words into chunks of wrap_label_n_words size
if len(words) > wrap_label_n_words:
edge_data = "<br>".join(

@@ -160,8 +160,8 @@ class PngDrawer:
self.add_node(viz, node)

def add_edges(self, viz: Any, graph: Graph) -> None:
for start, end, label, cond in graph.edges:
self.add_edge(viz, start, end, label, cond)
for start, end, data, cond in graph.edges:
self.add_edge(viz, start, end, str(data), cond)

def update_styles(self, viz: Any, graph: Graph) -> None:
if first := graph.first_node():

@@ -25,8 +25,8 @@ from langchain_core.runnables.utils import (
)

if TYPE_CHECKING:
from langchain_core.language_models import LanguageModelLike
from langchain_core.messages import BaseMessage
from langchain_core.language_models.base import LanguageModelLike
from langchain_core.messages.base import BaseMessage
from langchain_core.runnables.config import RunnableConfig
from langchain_core.tracers.schemas import Run
@@ -26,6 +26,7 @@ EXPECTED_ALL = [
    "RunnablePick",
    "RunnableSequence",
    "RunnableWithFallbacks",
    "RunnableWithMessageHistory",
    "get_config_list",
    "aadd",
    "add",
@@ -306,7 +306,7 @@ def test_schemas(snapshot: SnapshotAssertion) -> None:
        "definitions": {
            "Document": {
                "title": "Document",
                "description": "Class for storing a piece of text and associated metadata.",  # noqa: E501
                "description": AnyStr(),
                "type": "object",
                "properties": {
                    "page_content": {"title": "Page Content", "type": "string"},
@@ -396,7 +396,7 @@ def test_schemas(snapshot: SnapshotAssertion) -> None:
            },
            "AIMessage": {
                "title": "AIMessage",
                "description": "Message from an AI.",
                "description": AnyStr(),
                "type": "object",
                "properties": {
                    "content": {
@@ -450,7 +450,7 @@ def test_schemas(snapshot: SnapshotAssertion) -> None:
            },
            "HumanMessage": {
                "title": "HumanMessage",
                "description": "Message from a human.",
                "description": AnyStr(),
                "type": "object",
                "properties": {
                    "content": {
@@ -491,7 +491,7 @@ def test_schemas(snapshot: SnapshotAssertion) -> None:
            },
            "ChatMessage": {
                "title": "ChatMessage",
                "description": "Message that can be assigned an arbitrary speaker (i.e. role).",  # noqa: E501
                "description": AnyStr(),
                "type": "object",
                "properties": {
                    "content": {
@@ -528,7 +528,7 @@ def test_schemas(snapshot: SnapshotAssertion) -> None:
            },
            "SystemMessage": {
                "title": "SystemMessage",
                "description": "Message for priming AI behavior, usually passed in as the first of a sequence\nof input messages.",  # noqa: E501
                "description": AnyStr(),
                "type": "object",
                "properties": {
                    "content": {
@@ -564,7 +564,7 @@ def test_schemas(snapshot: SnapshotAssertion) -> None:
            },
            "FunctionMessage": {
                "title": "FunctionMessage",
                "description": "Message for passing the result of executing a function back to a model.",  # noqa: E501
                "description": AnyStr(),
                "type": "object",
                "properties": {
                    "content": {
@@ -600,7 +600,7 @@ def test_schemas(snapshot: SnapshotAssertion) -> None:
            },
            "ToolMessage": {
                "title": "ToolMessage",
                "description": "Message for passing the result of executing a tool back to a model.",  # noqa: E501
                "description": AnyStr(),
                "type": "object",
                "properties": {
                    "content": {
@@ -1,7 +1,10 @@
import glob
import importlib
import subprocess
from pathlib import Path

import pytest


def test_importable_all() -> None:
    for path in glob.glob("../core/langchain_core/*"):
@@ -13,3 +16,12 @@ def test_importable_all() -> None:
        all_ = getattr(module, "__all__", [])
        for cls_ in all_:
            getattr(module, cls_)

        # Test import in isolation
        # Note: ImportErrors due to circular imports can be raised
        # for one sequence of imports but not another.
        result = subprocess.run(
            ["python", "-c", f"import langchain_core.{module_name}"],
        )
        if result.returncode != 0:
            pytest.fail(f"Failed to import {module_name}.")
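The point of the subprocess here is that a circular import can be masked when the test process has already imported half the package; a fresh interpreter starts from an empty module cache. A standalone sketch of the same check (module name chosen only for illustration):

import subprocess
import sys

# Importing in a brand-new interpreter means no previously cached modules
# can hide an import-order-dependent failure.
result = subprocess.run(
    [sys.executable, "-c", "import langchain_core.runnables"],
)
if result.returncode != 0:
    raise SystemExit("isolated import of langchain_core.runnables failed")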
@@ -9,6 +9,8 @@ _DEFAULT_TIMEOUT_SEC = 300


class AI21Base(BaseModel):
    """Base class for AI21 models."""

    class Config:
        arbitrary_types_allowed = True
@@ -14,8 +14,8 @@ _ROLE_TYPE = Union[str, RoleType]


class ChatAdapter(ABC):
    """
    Provides a common interface for the different Chat models available in AI21.
    """Common interface for the different Chat models available in AI21.

    It converts LangChain messages to AI21 messages.
    Calls the appropriate AI21 model API with the converted messages.
    """
@@ -77,6 +77,8 @@ class ChatAdapter(ABC):


class J2ChatAdapter(ChatAdapter):
    """Adapter for J2Chat models."""

    def convert_messages(self, messages: List[BaseMessage]) -> Dict[str, Any]:
        system_message = ""
        converted_messages = []  # type: ignore
@@ -107,6 +109,8 @@ class J2ChatAdapter(ChatAdapter):


class JambaChatCompletionsAdapter(ChatAdapter):
    """Adapter for Jamba Chat Completions."""

    def convert_messages(self, messages: List[BaseMessage]) -> Dict[str, Any]:
        return {
            "messages": [
@@ -6,6 +6,14 @@ from langchain_ai21.chat.chat_adapter import (


def create_chat_adapter(model: str) -> ChatAdapter:
    """Create a chat adapter based on the model.

    Args:
        model: The model to create the chat adapter for.

    Returns:
        The chat adapter.
    """
    if "j2" in model:
        return J2ChatAdapter()
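A usage sketch of the factory dispatch shown above; the chat_factory module path is assumed from the import header, not confirmed by the diff:

from langchain_ai21.chat.chat_factory import create_chat_adapter

# "j2" appears in the model name, so this returns a J2ChatAdapter;
# other model names fall through to the remaining branches.
adapter = create_chat_adapter("j2-ultra")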
@@ -19,11 +19,15 @@ ContextType = Union[str, List[Union[Document, str]]]


class ContextualAnswerInput(TypedDict):
    """Input for the ContextualAnswers runnable."""

    context: ContextType
    question: str


class AI21ContextualAnswers(RunnableSerializable[ContextualAnswerInput, str], AI21Base):
    """Runnable for the AI21 Contextual Answers API."""

    class Config:
        """Configuration for this pydantic object."""
@@ -15,7 +15,8 @@ def _split_texts_into_batches(texts: List[str], batch_size: int) -> Iterator[Lis


class AI21Embeddings(Embeddings, AI21Base):
    """AI21 Embeddings embedding model.
    """AI21 embedding model.

    To use, you should have the 'AI21_API_KEY' environment variable set
    or pass as a named parameter to the constructor.
@@ -19,7 +19,7 @@ from langchain_ai21.ai21_base import AI21Base


class AI21LLM(BaseLLM, AI21Base):
    """AI21LLM large language models.
    """AI21 large language models.

    Example:
        .. code-block:: python
@@ -20,7 +20,7 @@ logger = logging.getLogger(__name__)

class AI21SemanticTextSplitter(TextSplitter):
    """Splitting text into coherent and readable units,
    based on distinct topics and lines
    based on distinct topics and lines.
    """

    def __init__(
libs/partners/ai21/poetry.lock (generated, 20 changed lines)
@@ -479,6 +479,7 @@ files = []
develop = true

[package.dependencies]
httpx = "^0.27.0"
langchain-core = ">=0.1.40,<0.3"
pytest = ">=7,<9"

@@ -679,25 +680,6 @@ files = [
dev = ["pre-commit", "tox"]
testing = ["pytest", "pytest-benchmark"]

[[package]]
name = "pydantic"
version = "2.7.3"
description = "Data validation using Python type hints"
optional = false
python-versions = ">=3.8"
files = [
    {file = "pydantic-2.7.3-py3-none-any.whl", hash = "sha256:ea91b002777bf643bb20dd717c028ec43216b24a6001a280f83877fd2655d0b4"},
    {file = "pydantic-2.7.3.tar.gz", hash = "sha256:c46c76a40bb1296728d7a8b99aa73dd70a48c3510111ff290034f860c99c419e"},
]

[package.dependencies]
annotated-types = ">=0.4.0"
pydantic-core = "2.18.4"
typing-extensions = ">=4.6.1"

[package.extras]
email = ["email-validator (>=2.0.0)"]

[[package]]
name = "pydantic"
version = "2.7.4"
@@ -231,7 +231,7 @@ def _format_messages(messages: List[BaseMessage]) -> Tuple[Optional[str], List[D


class ChatAnthropic(BaseChatModel):
    """Anthropic chat model integration.
    """Anthropic chat models.

    See https://docs.anthropic.com/en/docs/models-overview for a list of the latest models.
@@ -1008,6 +1008,8 @@ class ChatAnthropic(BaseChatModel):


class AnthropicTool(TypedDict):
    """Anthropic tool definition."""

    name: str
    description: str
    input_schema: Dict[str, Any]
@@ -1016,6 +1018,7 @@ class AnthropicTool(TypedDict):
def convert_to_anthropic_tool(
    tool: Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool],
) -> AnthropicTool:
    """Convert a tool-like object to an Anthropic tool definition."""
    # already in Anthropic tool format
    if isinstance(tool, dict) and all(
        k in tool for k in ("name", "description", "input_schema")
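For reference, a dict that already satisfies the three-key guard above passes through convert_to_anthropic_tool unchanged; the schema content here is purely illustrative:

anthropic_tool = {
    "name": "get_weather",
    "description": "Get the current weather for a location.",
    "input_schema": {
        "type": "object",
        "properties": {"location": {"type": "string"}},
        "required": ["location"],
    },
}
# All of "name", "description", and "input_schema" are present, so the
# isinstance/all check shown above treats this as native Anthropic format.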
@@ -55,6 +55,7 @@ def _get_type(parameter: Dict[str, Any]) -> str:


def get_system_message(tools: List[Dict]) -> str:
    """Generate a system message that describes the available tools."""
    tools_data: List[Dict] = [
        {
            "tool_name": tool["name"],
@@ -362,4 +362,6 @@ class AnthropicLLM(LLM, _AnthropicCommon):

@deprecated(since="0.1.0", removal="0.3.0", alternative="AnthropicLLM")
class Anthropic(AnthropicLLM):
    """Anthropic large language model."""

    pass
@@ -7,9 +7,14 @@ from langchain_core.pydantic_v1 import BaseModel


class ToolsOutputParser(BaseGenerationOutputParser):
    """Output parser for tool calls."""

    first_tool_only: bool = False
    """Whether to return only the first tool call."""
    args_only: bool = False
    """Whether to return only the arguments of the tool calls."""
    pydantic_schemas: Optional[List[Type[BaseModel]]] = None
    """Pydantic schemas to parse tool calls into."""

    class Config:
        extra = "forbid"
@@ -59,6 +64,7 @@ class ToolsOutputParser(BaseGenerationOutputParser):


def extract_tool_calls(content: List[dict]) -> List[ToolCall]:
    """Extract tool calls from a list of content blocks."""
    tool_calls = []
    for block in content:
        if block["type"] != "tool_use":
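A usage sketch for the parser fields documented above, assuming the langchain_anthropic.output_parsers module path:

from langchain_anthropic import ChatAnthropic
from langchain_anthropic.output_parsers import ToolsOutputParser
from langchain_core.pydantic_v1 import BaseModel


class GetWeather(BaseModel):
    """Get the current weather for a location."""

    location: str


llm = ChatAnthropic(model="claude-3-haiku-20240307").bind_tools([GetWeather])
# first_tool_only returns a single value instead of a list; pydantic_schemas
# parses each tool call into the matching model.
parser = ToolsOutputParser(first_tool_only=True, pydantic_schemas=[GetWeather])
chain = llm | parser
# chain.invoke("What's the weather in Paris?") -> GetWeather(location="Paris")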
libs/partners/anthropic/poetry.lock (generated, 135 changed lines)
@@ -511,7 +511,7 @@ files = [

[[package]]
name = "langchain-core"
version = "0.2.3"
version = "0.2.9"
description = "Building applications with LLMs through composability"
optional = false
python-versions = ">=3.8.1,<4.0"
@@ -520,14 +520,14 @@ develop = true

[package.dependencies]
jsonpatch = "^1.33"
langsmith = "^0.1.65"
packaging = "^23.2"
pydantic = ">=1,<3"
langsmith = "^0.1.75"
packaging = ">=23.2,<25"
pydantic = [
    {version = ">=1,<3", markers = "python_full_version < \"3.12.4\""},
    {version = ">=2.7.4,<3.0.0", markers = "python_full_version >= \"3.12.4\""},
]
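The new per-interpreter pydantic pin relies on standard PEP 508 environment markers; how such a marker evaluates can be checked with the packaging library (shown only as an illustration of the mechanism):

from packaging.markers import Marker

# True on Python >= 3.12.4, where the lockfile selects pydantic >=2.7.4,<3.
marker = Marker('python_full_version >= "3.12.4"')
print(marker.evaluate())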
PyYAML = ">=5.3"
tenacity = "^8.1.0"

[package.extras]
extended-testing = ["jinja2 (>=3,<4)"]
tenacity = "^8.1.0,!=8.4.0"

[package.source]
type = "directory"
@@ -535,7 +535,7 @@ url = "../../core"

[[package]]
name = "langchain-standard-tests"
version = "0.1.0"
version = "0.1.1"
description = "Standard tests for LangChain implementations"
optional = false
python-versions = ">=3.8.1,<4.0"
@@ -543,6 +543,7 @@ files = []
develop = true

[package.dependencies]
httpx = "^0.27.0"
langchain-core = ">=0.1.40,<0.3"
pytest = ">=7,<9"

@@ -552,13 +553,13 @@ url = "../../standard-tests"

[[package]]
name = "langsmith"
version = "0.1.67"
version = "0.1.80"
description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform."
optional = false
python-versions = "<4.0,>=3.8.1"
files = [
    {file = "langsmith-0.1.67-py3-none-any.whl", hash = "sha256:7eb2e1c1b375925ff47700ed8071e10c15e942e9d1d634b4a449a9060364071a"},
    {file = "langsmith-0.1.67.tar.gz", hash = "sha256:149558669a2ac4f21471cd964e61072687bba23b7c1ccb51f190a8f59b595b39"},
    {file = "langsmith-0.1.80-py3-none-any.whl", hash = "sha256:951fc29576b52afd8378d41f6db343090fea863e3620f0ca97e83b221f93c94d"},
    {file = "langsmith-0.1.80.tar.gz", hash = "sha256:a29b1dde27612308beee424f1388ad844c8e7e375bf2ac8bdf4da174013f279d"},
]

[package.dependencies]
@@ -724,6 +725,25 @@ typing-extensions = ">=4.6.1"

[package.extras]
email = ["email-validator (>=2.0.0)"]

[[package]]
name = "pydantic"
version = "2.7.4"
description = "Data validation using Python type hints"
optional = false
python-versions = ">=3.8"
files = [
    {file = "pydantic-2.7.4-py3-none-any.whl", hash = "sha256:ee8538d41ccb9c0a9ad3e0e5f07bf15ed8015b481ced539a1759d8cc89ae90d0"},
    {file = "pydantic-2.7.4.tar.gz", hash = "sha256:0c84efd9548d545f63ac0060c1e4d39bb9b14db8b3c0652338aecc07b5adec52"},
]

[package.dependencies]
annotated-types = ">=0.4.0"
pydantic-core = "2.18.4"
typing-extensions = ">=4.6.1"

[package.extras]
email = ["email-validator (>=2.0.0)"]

[[package]]
name = "pydantic-core"
version = "2.18.2"
@@ -815,6 +835,97 @@ files = [
[package.dependencies]
typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"

[[package]]
name = "pydantic-core"
version = "2.18.4"
description = "Core functionality for Pydantic validation and serialization"
optional = false
python-versions = ">=3.8"
files = [
    {file = "pydantic_core-2.18.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:f76d0ad001edd426b92233d45c746fd08f467d56100fd8f30e9ace4b005266e4"},
    {file = "pydantic_core-2.18.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:59ff3e89f4eaf14050c8022011862df275b552caef8082e37b542b066ce1ff26"},
    {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a55b5b16c839df1070bc113c1f7f94a0af4433fcfa1b41799ce7606e5c79ce0a"},
    {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4d0dcc59664fcb8974b356fe0a18a672d6d7cf9f54746c05f43275fc48636851"},
    {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8951eee36c57cd128f779e641e21eb40bc5073eb28b2d23f33eb0ef14ffb3f5d"},
    {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4701b19f7e3a06ea655513f7938de6f108123bf7c86bbebb1196eb9bd35cf724"},
    {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e00a3f196329e08e43d99b79b286d60ce46bed10f2280d25a1718399457e06be"},
    {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:97736815b9cc893b2b7f663628e63f436018b75f44854c8027040e05230eeddb"},
    {file = "pydantic_core-2.18.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6891a2ae0e8692679c07728819b6e2b822fb30ca7445f67bbf6509b25a96332c"},
    {file = "pydantic_core-2.18.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bc4ff9805858bd54d1a20efff925ccd89c9d2e7cf4986144b30802bf78091c3e"},
    {file = "pydantic_core-2.18.4-cp310-none-win32.whl", hash = "sha256:1b4de2e51bbcb61fdebd0ab86ef28062704f62c82bbf4addc4e37fa4b00b7cbc"},
    {file = "pydantic_core-2.18.4-cp310-none-win_amd64.whl", hash = "sha256:6a750aec7bf431517a9fd78cb93c97b9b0c496090fee84a47a0d23668976b4b0"},
    {file = "pydantic_core-2.18.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:942ba11e7dfb66dc70f9ae66b33452f51ac7bb90676da39a7345e99ffb55402d"},
    {file = "pydantic_core-2.18.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b2ebef0e0b4454320274f5e83a41844c63438fdc874ea40a8b5b4ecb7693f1c4"},
    {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a642295cd0c8df1b86fc3dced1d067874c353a188dc8e0f744626d49e9aa51c4"},
    {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f09baa656c904807e832cf9cce799c6460c450c4ad80803517032da0cd062e2"},
    {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:98906207f29bc2c459ff64fa007afd10a8c8ac080f7e4d5beff4c97086a3dabd"},
    {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:19894b95aacfa98e7cb093cd7881a0c76f55731efad31073db4521e2b6ff5b7d"},
    {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0fbbdc827fe5e42e4d196c746b890b3d72876bdbf160b0eafe9f0334525119c8"},
    {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f85d05aa0918283cf29a30b547b4df2fbb56b45b135f9e35b6807cb28bc47951"},
    {file = "pydantic_core-2.18.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e85637bc8fe81ddb73fda9e56bab24560bdddfa98aa64f87aaa4e4b6730c23d2"},
    {file = "pydantic_core-2.18.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2f5966897e5461f818e136b8451d0551a2e77259eb0f73a837027b47dc95dab9"},
    {file = "pydantic_core-2.18.4-cp311-none-win32.whl", hash = "sha256:44c7486a4228413c317952e9d89598bcdfb06399735e49e0f8df643e1ccd0558"},
    {file = "pydantic_core-2.18.4-cp311-none-win_amd64.whl", hash = "sha256:8a7164fe2005d03c64fd3b85649891cd4953a8de53107940bf272500ba8a788b"},
    {file = "pydantic_core-2.18.4-cp311-none-win_arm64.whl", hash = "sha256:4e99bc050fe65c450344421017f98298a97cefc18c53bb2f7b3531eb39bc7805"},
    {file = "pydantic_core-2.18.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:6f5c4d41b2771c730ea1c34e458e781b18cc668d194958e0112455fff4e402b2"},
    {file = "pydantic_core-2.18.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2fdf2156aa3d017fddf8aea5adfba9f777db1d6022d392b682d2a8329e087cef"},
    {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4748321b5078216070b151d5271ef3e7cc905ab170bbfd27d5c83ee3ec436695"},
    {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:847a35c4d58721c5dc3dba599878ebbdfd96784f3fb8bb2c356e123bdcd73f34"},
    {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c40d4eaad41f78e3bbda31b89edc46a3f3dc6e171bf0ecf097ff7a0ffff7cb1"},
    {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:21a5e440dbe315ab9825fcd459b8814bb92b27c974cbc23c3e8baa2b76890077"},
    {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01dd777215e2aa86dfd664daed5957704b769e726626393438f9c87690ce78c3"},
    {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4b06beb3b3f1479d32befd1f3079cc47b34fa2da62457cdf6c963393340b56e9"},
    {file = "pydantic_core-2.18.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:564d7922e4b13a16b98772441879fcdcbe82ff50daa622d681dd682175ea918c"},
    {file = "pydantic_core-2.18.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0eb2a4f660fcd8e2b1c90ad566db2b98d7f3f4717c64fe0a83e0adb39766d5b8"},
    {file = "pydantic_core-2.18.4-cp312-none-win32.whl", hash = "sha256:8b8bab4c97248095ae0c4455b5a1cd1cdd96e4e4769306ab19dda135ea4cdb07"},
    {file = "pydantic_core-2.18.4-cp312-none-win_amd64.whl", hash = "sha256:14601cdb733d741b8958224030e2bfe21a4a881fb3dd6fbb21f071cabd48fa0a"},
    {file = "pydantic_core-2.18.4-cp312-none-win_arm64.whl", hash = "sha256:c1322d7dd74713dcc157a2b7898a564ab091ca6c58302d5c7b4c07296e3fd00f"},
    {file = "pydantic_core-2.18.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:823be1deb01793da05ecb0484d6c9e20baebb39bd42b5d72636ae9cf8350dbd2"},
    {file = "pydantic_core-2.18.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ebef0dd9bf9b812bf75bda96743f2a6c5734a02092ae7f721c048d156d5fabae"},
    {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ae1d6df168efb88d7d522664693607b80b4080be6750c913eefb77e34c12c71a"},
    {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f9899c94762343f2cc2fc64c13e7cae4c3cc65cdfc87dd810a31654c9b7358cc"},
    {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:99457f184ad90235cfe8461c4d70ab7dd2680e28821c29eca00252ba90308c78"},
    {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18f469a3d2a2fdafe99296a87e8a4c37748b5080a26b806a707f25a902c040a8"},
    {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7cdf28938ac6b8b49ae5e92f2735056a7ba99c9b110a474473fd71185c1af5d"},
    {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:938cb21650855054dc54dfd9120a851c974f95450f00683399006aa6e8abb057"},
    {file = "pydantic_core-2.18.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:44cd83ab6a51da80fb5adbd9560e26018e2ac7826f9626bc06ca3dc074cd198b"},
    {file = "pydantic_core-2.18.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:972658f4a72d02b8abfa2581d92d59f59897d2e9f7e708fdabe922f9087773af"},
    {file = "pydantic_core-2.18.4-cp38-none-win32.whl", hash = "sha256:1d886dc848e60cb7666f771e406acae54ab279b9f1e4143babc9c2258213daa2"},
    {file = "pydantic_core-2.18.4-cp38-none-win_amd64.whl", hash = "sha256:bb4462bd43c2460774914b8525f79b00f8f407c945d50881568f294c1d9b4443"},
    {file = "pydantic_core-2.18.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:44a688331d4a4e2129140a8118479443bd6f1905231138971372fcde37e43528"},
    {file = "pydantic_core-2.18.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a2fdd81edd64342c85ac7cf2753ccae0b79bf2dfa063785503cb85a7d3593223"},
    {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:86110d7e1907ab36691f80b33eb2da87d780f4739ae773e5fc83fb272f88825f"},
    {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:46387e38bd641b3ee5ce247563b60c5ca098da9c56c75c157a05eaa0933ed154"},
    {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:123c3cec203e3f5ac7b000bd82235f1a3eced8665b63d18be751f115588fea30"},
    {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dc1803ac5c32ec324c5261c7209e8f8ce88e83254c4e1aebdc8b0a39f9ddb443"},
    {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:53db086f9f6ab2b4061958d9c276d1dbe3690e8dd727d6abf2321d6cce37fa94"},
    {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:abc267fa9837245cc28ea6929f19fa335f3dc330a35d2e45509b6566dc18be23"},
    {file = "pydantic_core-2.18.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a0d829524aaefdebccb869eed855e2d04c21d2d7479b6cada7ace5448416597b"},
    {file = "pydantic_core-2.18.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:509daade3b8649f80d4e5ff21aa5673e4ebe58590b25fe42fac5f0f52c6f034a"},
    {file = "pydantic_core-2.18.4-cp39-none-win32.whl", hash = "sha256:ca26a1e73c48cfc54c4a76ff78df3727b9d9f4ccc8dbee4ae3f73306a591676d"},
    {file = "pydantic_core-2.18.4-cp39-none-win_amd64.whl", hash = "sha256:c67598100338d5d985db1b3d21f3619ef392e185e71b8d52bceacc4a7771ea7e"},
    {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:574d92eac874f7f4db0ca653514d823a0d22e2354359d0759e3f6a406db5d55d"},
    {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1f4d26ceb5eb9eed4af91bebeae4b06c3fb28966ca3a8fb765208cf6b51102ab"},
    {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77450e6d20016ec41f43ca4a6c63e9fdde03f0ae3fe90e7c27bdbeaece8b1ed4"},
    {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d323a01da91851a4f17bf592faf46149c9169d68430b3146dcba2bb5e5719abc"},
    {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43d447dd2ae072a0065389092a231283f62d960030ecd27565672bd40746c507"},
    {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:578e24f761f3b425834f297b9935e1ce2e30f51400964ce4801002435a1b41ef"},
    {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:81b5efb2f126454586d0f40c4d834010979cb80785173d1586df845a632e4e6d"},
    {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ab86ce7c8f9bea87b9d12c7f0af71102acbf5ecbc66c17796cff45dae54ef9a5"},
    {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:90afc12421df2b1b4dcc975f814e21bc1754640d502a2fbcc6d41e77af5ec312"},
    {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:51991a89639a912c17bef4b45c87bd83593aee0437d8102556af4885811d59f5"},
    {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:293afe532740370aba8c060882f7d26cfd00c94cae32fd2e212a3a6e3b7bc15e"},
    {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b48ece5bde2e768197a2d0f6e925f9d7e3e826f0ad2271120f8144a9db18d5c8"},
    {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:eae237477a873ab46e8dd748e515c72c0c804fb380fbe6c85533c7de51f23a8f"},
    {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:834b5230b5dfc0c1ec37b2fda433b271cbbc0e507560b5d1588e2cc1148cf1ce"},
    {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e858ac0a25074ba4bce653f9b5d0a85b7456eaddadc0ce82d3878c22489fa4ee"},
    {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2fd41f6eff4c20778d717af1cc50eca52f5afe7805ee530a4fbd0bae284f16e9"},
    {file = "pydantic_core-2.18.4.tar.gz", hash = "sha256:ec3beeada09ff865c344ff3bc2f427f5e6c26401cc6113d77e372c3fdac73864"},
]

[package.dependencies]
typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"

[[package]]
name = "pytest"
version = "7.4.4"
@@ -16,3 +16,7 @@ class TestAnthropicStandard(ChatModelIntegrationTests):
    @property
    def chat_model_params(self) -> dict:
        return {"model": "claude-3-haiku-20240307"}

    @property
    def supports_image_inputs(self) -> bool:
        return True
libs/partners/fireworks/poetry.lock (generated, 122 changed lines)
@@ -572,7 +572,7 @@ files = [

[[package]]
name = "langchain-core"
version = "0.2.6"
version = "0.2.9"
description = "Building applications with LLMs through composability"
optional = false
python-versions = ">=3.8.1,<4.0"
@@ -583,9 +583,12 @@ develop = true
jsonpatch = "^1.33"
langsmith = "^0.1.75"
packaging = ">=23.2,<25"
pydantic = ">=1,<3"
pydantic = [
    {version = ">=1,<3", markers = "python_full_version < \"3.12.4\""},
    {version = ">=2.7.4,<3.0.0", markers = "python_full_version >= \"3.12.4\""},
]
PyYAML = ">=5.3"
tenacity = "^8.1.0"
tenacity = "^8.1.0,!=8.4.0"

[package.source]
type = "directory"
@@ -593,7 +596,7 @@ url = "../../core"

[[package]]
name = "langchain-standard-tests"
version = "0.1.0"
version = "0.1.1"
description = "Standard tests for LangChain implementations"
optional = false
python-versions = ">=3.8.1,<4.0"
@@ -601,6 +604,7 @@ files = []
develop = true

[package.dependencies]
httpx = "^0.27.0"
langchain-core = ">=0.1.40,<0.3"
pytest = ">=7,<9"

@@ -993,6 +997,25 @@ typing-extensions = ">=4.6.1"

[package.extras]
email = ["email-validator (>=2.0.0)"]

[[package]]
name = "pydantic"
version = "2.7.4"
description = "Data validation using Python type hints"
optional = false
python-versions = ">=3.8"
files = [
    {file = "pydantic-2.7.4-py3-none-any.whl", hash = "sha256:ee8538d41ccb9c0a9ad3e0e5f07bf15ed8015b481ced539a1759d8cc89ae90d0"},
    {file = "pydantic-2.7.4.tar.gz", hash = "sha256:0c84efd9548d545f63ac0060c1e4d39bb9b14db8b3c0652338aecc07b5adec52"},
]

[package.dependencies]
annotated-types = ">=0.4.0"
pydantic-core = "2.18.4"
typing-extensions = ">=4.6.1"

[package.extras]
email = ["email-validator (>=2.0.0)"]

[[package]]
name = "pydantic-core"
version = "2.18.2"
@@ -1084,6 +1107,97 @@ files = [
[package.dependencies]
typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"

[[package]]
name = "pydantic-core"
version = "2.18.4"
description = "Core functionality for Pydantic validation and serialization"
optional = false
python-versions = ">=3.8"
files = [
    {file = "pydantic_core-2.18.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:f76d0ad001edd426b92233d45c746fd08f467d56100fd8f30e9ace4b005266e4"},
    {file = "pydantic_core-2.18.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:59ff3e89f4eaf14050c8022011862df275b552caef8082e37b542b066ce1ff26"},
    {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a55b5b16c839df1070bc113c1f7f94a0af4433fcfa1b41799ce7606e5c79ce0a"},
    {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4d0dcc59664fcb8974b356fe0a18a672d6d7cf9f54746c05f43275fc48636851"},
    {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8951eee36c57cd128f779e641e21eb40bc5073eb28b2d23f33eb0ef14ffb3f5d"},
    {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4701b19f7e3a06ea655513f7938de6f108123bf7c86bbebb1196eb9bd35cf724"},
    {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e00a3f196329e08e43d99b79b286d60ce46bed10f2280d25a1718399457e06be"},
    {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:97736815b9cc893b2b7f663628e63f436018b75f44854c8027040e05230eeddb"},
    {file = "pydantic_core-2.18.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6891a2ae0e8692679c07728819b6e2b822fb30ca7445f67bbf6509b25a96332c"},
    {file = "pydantic_core-2.18.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bc4ff9805858bd54d1a20efff925ccd89c9d2e7cf4986144b30802bf78091c3e"},
    {file = "pydantic_core-2.18.4-cp310-none-win32.whl", hash = "sha256:1b4de2e51bbcb61fdebd0ab86ef28062704f62c82bbf4addc4e37fa4b00b7cbc"},
    {file = "pydantic_core-2.18.4-cp310-none-win_amd64.whl", hash = "sha256:6a750aec7bf431517a9fd78cb93c97b9b0c496090fee84a47a0d23668976b4b0"},
    {file = "pydantic_core-2.18.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:942ba11e7dfb66dc70f9ae66b33452f51ac7bb90676da39a7345e99ffb55402d"},
    {file = "pydantic_core-2.18.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b2ebef0e0b4454320274f5e83a41844c63438fdc874ea40a8b5b4ecb7693f1c4"},
    {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a642295cd0c8df1b86fc3dced1d067874c353a188dc8e0f744626d49e9aa51c4"},
    {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f09baa656c904807e832cf9cce799c6460c450c4ad80803517032da0cd062e2"},
    {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:98906207f29bc2c459ff64fa007afd10a8c8ac080f7e4d5beff4c97086a3dabd"},
    {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:19894b95aacfa98e7cb093cd7881a0c76f55731efad31073db4521e2b6ff5b7d"},
    {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0fbbdc827fe5e42e4d196c746b890b3d72876bdbf160b0eafe9f0334525119c8"},
    {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f85d05aa0918283cf29a30b547b4df2fbb56b45b135f9e35b6807cb28bc47951"},
    {file = "pydantic_core-2.18.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e85637bc8fe81ddb73fda9e56bab24560bdddfa98aa64f87aaa4e4b6730c23d2"},
    {file = "pydantic_core-2.18.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2f5966897e5461f818e136b8451d0551a2e77259eb0f73a837027b47dc95dab9"},
    {file = "pydantic_core-2.18.4-cp311-none-win32.whl", hash = "sha256:44c7486a4228413c317952e9d89598bcdfb06399735e49e0f8df643e1ccd0558"},
    {file = "pydantic_core-2.18.4-cp311-none-win_amd64.whl", hash = "sha256:8a7164fe2005d03c64fd3b85649891cd4953a8de53107940bf272500ba8a788b"},
    {file = "pydantic_core-2.18.4-cp311-none-win_arm64.whl", hash = "sha256:4e99bc050fe65c450344421017f98298a97cefc18c53bb2f7b3531eb39bc7805"},
    {file = "pydantic_core-2.18.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:6f5c4d41b2771c730ea1c34e458e781b18cc668d194958e0112455fff4e402b2"},
    {file = "pydantic_core-2.18.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2fdf2156aa3d017fddf8aea5adfba9f777db1d6022d392b682d2a8329e087cef"},
    {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4748321b5078216070b151d5271ef3e7cc905ab170bbfd27d5c83ee3ec436695"},
    {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:847a35c4d58721c5dc3dba599878ebbdfd96784f3fb8bb2c356e123bdcd73f34"},
    {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c40d4eaad41f78e3bbda31b89edc46a3f3dc6e171bf0ecf097ff7a0ffff7cb1"},
    {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:21a5e440dbe315ab9825fcd459b8814bb92b27c974cbc23c3e8baa2b76890077"},
    {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01dd777215e2aa86dfd664daed5957704b769e726626393438f9c87690ce78c3"},
    {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4b06beb3b3f1479d32befd1f3079cc47b34fa2da62457cdf6c963393340b56e9"},
    {file = "pydantic_core-2.18.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:564d7922e4b13a16b98772441879fcdcbe82ff50daa622d681dd682175ea918c"},
    {file = "pydantic_core-2.18.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0eb2a4f660fcd8e2b1c90ad566db2b98d7f3f4717c64fe0a83e0adb39766d5b8"},
    {file = "pydantic_core-2.18.4-cp312-none-win32.whl", hash = "sha256:8b8bab4c97248095ae0c4455b5a1cd1cdd96e4e4769306ab19dda135ea4cdb07"},
    {file = "pydantic_core-2.18.4-cp312-none-win_amd64.whl", hash = "sha256:14601cdb733d741b8958224030e2bfe21a4a881fb3dd6fbb21f071cabd48fa0a"},
    {file = "pydantic_core-2.18.4-cp312-none-win_arm64.whl", hash = "sha256:c1322d7dd74713dcc157a2b7898a564ab091ca6c58302d5c7b4c07296e3fd00f"},
    {file = "pydantic_core-2.18.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:823be1deb01793da05ecb0484d6c9e20baebb39bd42b5d72636ae9cf8350dbd2"},
    {file = "pydantic_core-2.18.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ebef0dd9bf9b812bf75bda96743f2a6c5734a02092ae7f721c048d156d5fabae"},
    {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ae1d6df168efb88d7d522664693607b80b4080be6750c913eefb77e34c12c71a"},
    {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f9899c94762343f2cc2fc64c13e7cae4c3cc65cdfc87dd810a31654c9b7358cc"},
    {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:99457f184ad90235cfe8461c4d70ab7dd2680e28821c29eca00252ba90308c78"},
    {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18f469a3d2a2fdafe99296a87e8a4c37748b5080a26b806a707f25a902c040a8"},
    {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7cdf28938ac6b8b49ae5e92f2735056a7ba99c9b110a474473fd71185c1af5d"},
    {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:938cb21650855054dc54dfd9120a851c974f95450f00683399006aa6e8abb057"},
    {file = "pydantic_core-2.18.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:44cd83ab6a51da80fb5adbd9560e26018e2ac7826f9626bc06ca3dc074cd198b"},
    {file = "pydantic_core-2.18.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:972658f4a72d02b8abfa2581d92d59f59897d2e9f7e708fdabe922f9087773af"},
    {file = "pydantic_core-2.18.4-cp38-none-win32.whl", hash = "sha256:1d886dc848e60cb7666f771e406acae54ab279b9f1e4143babc9c2258213daa2"},
    {file = "pydantic_core-2.18.4-cp38-none-win_amd64.whl", hash = "sha256:bb4462bd43c2460774914b8525f79b00f8f407c945d50881568f294c1d9b4443"},
    {file = "pydantic_core-2.18.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:44a688331d4a4e2129140a8118479443bd6f1905231138971372fcde37e43528"},
    {file = "pydantic_core-2.18.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a2fdd81edd64342c85ac7cf2753ccae0b79bf2dfa063785503cb85a7d3593223"},
    {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:86110d7e1907ab36691f80b33eb2da87d780f4739ae773e5fc83fb272f88825f"},
    {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:46387e38bd641b3ee5ce247563b60c5ca098da9c56c75c157a05eaa0933ed154"},
    {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:123c3cec203e3f5ac7b000bd82235f1a3eced8665b63d18be751f115588fea30"},
    {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dc1803ac5c32ec324c5261c7209e8f8ce88e83254c4e1aebdc8b0a39f9ddb443"},
    {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:53db086f9f6ab2b4061958d9c276d1dbe3690e8dd727d6abf2321d6cce37fa94"},
    {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:abc267fa9837245cc28ea6929f19fa335f3dc330a35d2e45509b6566dc18be23"},
    {file = "pydantic_core-2.18.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a0d829524aaefdebccb869eed855e2d04c21d2d7479b6cada7ace5448416597b"},
    {file = "pydantic_core-2.18.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:509daade3b8649f80d4e5ff21aa5673e4ebe58590b25fe42fac5f0f52c6f034a"},
    {file = "pydantic_core-2.18.4-cp39-none-win32.whl", hash = "sha256:ca26a1e73c48cfc54c4a76ff78df3727b9d9f4ccc8dbee4ae3f73306a591676d"},
    {file = "pydantic_core-2.18.4-cp39-none-win_amd64.whl", hash = "sha256:c67598100338d5d985db1b3d21f3619ef392e185e71b8d52bceacc4a7771ea7e"},
    {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:574d92eac874f7f4db0ca653514d823a0d22e2354359d0759e3f6a406db5d55d"},
    {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1f4d26ceb5eb9eed4af91bebeae4b06c3fb28966ca3a8fb765208cf6b51102ab"},
    {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77450e6d20016ec41f43ca4a6c63e9fdde03f0ae3fe90e7c27bdbeaece8b1ed4"},
    {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d323a01da91851a4f17bf592faf46149c9169d68430b3146dcba2bb5e5719abc"},
    {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43d447dd2ae072a0065389092a231283f62d960030ecd27565672bd40746c507"},
    {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:578e24f761f3b425834f297b9935e1ce2e30f51400964ce4801002435a1b41ef"},
    {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:81b5efb2f126454586d0f40c4d834010979cb80785173d1586df845a632e4e6d"},
    {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ab86ce7c8f9bea87b9d12c7f0af71102acbf5ecbc66c17796cff45dae54ef9a5"},
    {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:90afc12421df2b1b4dcc975f814e21bc1754640d502a2fbcc6d41e77af5ec312"},
    {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:51991a89639a912c17bef4b45c87bd83593aee0437d8102556af4885811d59f5"},
    {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:293afe532740370aba8c060882f7d26cfd00c94cae32fd2e212a3a6e3b7bc15e"},
    {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b48ece5bde2e768197a2d0f6e925f9d7e3e826f0ad2271120f8144a9db18d5c8"},
    {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:eae237477a873ab46e8dd748e515c72c0c804fb380fbe6c85533c7de51f23a8f"},
    {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:834b5230b5dfc0c1ec37b2fda433b271cbbc0e507560b5d1588e2cc1148cf1ce"},
    {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e858ac0a25074ba4bce653f9b5d0a85b7456eaddadc0ce82d3878c22489fa4ee"},
    {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2fd41f6eff4c20778d717af1cc50eca52f5afe7805ee530a4fbd0bae284f16e9"},
    {file = "pydantic_core-2.18.4.tar.gz", hash = "sha256:ec3beeada09ff865c344ff3bc2f427f5e6c26401cc6113d77e372c3fdac73864"},
]

[package.dependencies]
typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"

[[package]]
name = "pytest"
version = "7.4.4"
libs/partners/groq/poetry.lock (generated, 124 changed lines)
@@ -323,7 +323,7 @@ files = [

[[package]]
name = "langchain-core"
version = "0.2.5"
version = "0.2.9"
description = "Building applications with LLMs through composability"
optional = false
python-versions = ">=3.8.1,<4.0"
@@ -333,10 +333,13 @@ develop = true
[package.dependencies]
jsonpatch = "^1.33"
langsmith = "^0.1.75"
packaging = "^23.2"
pydantic = ">=1,<3"
packaging = ">=23.2,<25"
pydantic = [
    {version = ">=1,<3", markers = "python_full_version < \"3.12.4\""},
    {version = ">=2.7.4,<3.0.0", markers = "python_full_version >= \"3.12.4\""},
]
PyYAML = ">=5.3"
tenacity = "^8.1.0"
tenacity = "^8.1.0,!=8.4.0"

[package.source]
type = "directory"
@@ -344,7 +347,7 @@ url = "../../core"

[[package]]
name = "langchain-standard-tests"
version = "0.1.0"
version = "0.1.1"
description = "Standard tests for LangChain implementations"
optional = false
python-versions = ">=3.8.1,<4.0"
@@ -352,6 +355,7 @@ files = []
develop = true

[package.dependencies]
httpx = "^0.27.0"
langchain-core = ">=0.1.40,<0.3"
pytest = ">=7,<9"

@@ -536,6 +540,25 @@ typing-extensions = ">=4.6.1"

[package.extras]
email = ["email-validator (>=2.0.0)"]

[[package]]
name = "pydantic"
version = "2.7.4"
description = "Data validation using Python type hints"
optional = false
python-versions = ">=3.8"
files = [
    {file = "pydantic-2.7.4-py3-none-any.whl", hash = "sha256:ee8538d41ccb9c0a9ad3e0e5f07bf15ed8015b481ced539a1759d8cc89ae90d0"},
    {file = "pydantic-2.7.4.tar.gz", hash = "sha256:0c84efd9548d545f63ac0060c1e4d39bb9b14db8b3c0652338aecc07b5adec52"},
]

[package.dependencies]
annotated-types = ">=0.4.0"
pydantic-core = "2.18.4"
typing-extensions = ">=4.6.1"

[package.extras]
email = ["email-validator (>=2.0.0)"]

[[package]]
name = "pydantic-core"
version = "2.18.2"
@@ -627,6 +650,97 @@ files = [
[package.dependencies]
typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"

[[package]]
name = "pydantic-core"
version = "2.18.4"
description = "Core functionality for Pydantic validation and serialization"
optional = false
python-versions = ">=3.8"
files = [
    {file = "pydantic_core-2.18.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:f76d0ad001edd426b92233d45c746fd08f467d56100fd8f30e9ace4b005266e4"},
    {file = "pydantic_core-2.18.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:59ff3e89f4eaf14050c8022011862df275b552caef8082e37b542b066ce1ff26"},
    {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a55b5b16c839df1070bc113c1f7f94a0af4433fcfa1b41799ce7606e5c79ce0a"},
    {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4d0dcc59664fcb8974b356fe0a18a672d6d7cf9f54746c05f43275fc48636851"},
    {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8951eee36c57cd128f779e641e21eb40bc5073eb28b2d23f33eb0ef14ffb3f5d"},
    {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4701b19f7e3a06ea655513f7938de6f108123bf7c86bbebb1196eb9bd35cf724"},
    {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e00a3f196329e08e43d99b79b286d60ce46bed10f2280d25a1718399457e06be"},
    {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:97736815b9cc893b2b7f663628e63f436018b75f44854c8027040e05230eeddb"},
    {file = "pydantic_core-2.18.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6891a2ae0e8692679c07728819b6e2b822fb30ca7445f67bbf6509b25a96332c"},
    {file = "pydantic_core-2.18.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bc4ff9805858bd54d1a20efff925ccd89c9d2e7cf4986144b30802bf78091c3e"},
    {file = "pydantic_core-2.18.4-cp310-none-win32.whl", hash = "sha256:1b4de2e51bbcb61fdebd0ab86ef28062704f62c82bbf4addc4e37fa4b00b7cbc"},
    {file = "pydantic_core-2.18.4-cp310-none-win_amd64.whl", hash = "sha256:6a750aec7bf431517a9fd78cb93c97b9b0c496090fee84a47a0d23668976b4b0"},
    {file = "pydantic_core-2.18.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:942ba11e7dfb66dc70f9ae66b33452f51ac7bb90676da39a7345e99ffb55402d"},
    {file = "pydantic_core-2.18.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b2ebef0e0b4454320274f5e83a41844c63438fdc874ea40a8b5b4ecb7693f1c4"},
    {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a642295cd0c8df1b86fc3dced1d067874c353a188dc8e0f744626d49e9aa51c4"},
    {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f09baa656c904807e832cf9cce799c6460c450c4ad80803517032da0cd062e2"},
    {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:98906207f29bc2c459ff64fa007afd10a8c8ac080f7e4d5beff4c97086a3dabd"},
    {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:19894b95aacfa98e7cb093cd7881a0c76f55731efad31073db4521e2b6ff5b7d"},
    {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0fbbdc827fe5e42e4d196c746b890b3d72876bdbf160b0eafe9f0334525119c8"},
    {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f85d05aa0918283cf29a30b547b4df2fbb56b45b135f9e35b6807cb28bc47951"},
    {file = "pydantic_core-2.18.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e85637bc8fe81ddb73fda9e56bab24560bdddfa98aa64f87aaa4e4b6730c23d2"},
    {file = "pydantic_core-2.18.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2f5966897e5461f818e136b8451d0551a2e77259eb0f73a837027b47dc95dab9"},
    {file = "pydantic_core-2.18.4-cp311-none-win32.whl", hash = "sha256:44c7486a4228413c317952e9d89598bcdfb06399735e49e0f8df643e1ccd0558"},
    {file = "pydantic_core-2.18.4-cp311-none-win_amd64.whl", hash = "sha256:8a7164fe2005d03c64fd3b85649891cd4953a8de53107940bf272500ba8a788b"},
    {file = "pydantic_core-2.18.4-cp311-none-win_arm64.whl", hash = "sha256:4e99bc050fe65c450344421017f98298a97cefc18c53bb2f7b3531eb39bc7805"},
    {file = "pydantic_core-2.18.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:6f5c4d41b2771c730ea1c34e458e781b18cc668d194958e0112455fff4e402b2"},
    {file = "pydantic_core-2.18.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2fdf2156aa3d017fddf8aea5adfba9f777db1d6022d392b682d2a8329e087cef"},
    {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4748321b5078216070b151d5271ef3e7cc905ab170bbfd27d5c83ee3ec436695"},
    {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:847a35c4d58721c5dc3dba599878ebbdfd96784f3fb8bb2c356e123bdcd73f34"},
    {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c40d4eaad41f78e3bbda31b89edc46a3f3dc6e171bf0ecf097ff7a0ffff7cb1"},
    {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:21a5e440dbe315ab9825fcd459b8814bb92b27c974cbc23c3e8baa2b76890077"},
    {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01dd777215e2aa86dfd664daed5957704b769e726626393438f9c87690ce78c3"},
    {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4b06beb3b3f1479d32befd1f3079cc47b34fa2da62457cdf6c963393340b56e9"},
    {file = "pydantic_core-2.18.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:564d7922e4b13a16b98772441879fcdcbe82ff50daa622d681dd682175ea918c"},
    {file = "pydantic_core-2.18.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0eb2a4f660fcd8e2b1c90ad566db2b98d7f3f4717c64fe0a83e0adb39766d5b8"},
    {file = "pydantic_core-2.18.4-cp312-none-win32.whl", hash = "sha256:8b8bab4c97248095ae0c4455b5a1cd1cdd96e4e4769306ab19dda135ea4cdb07"},
    {file = "pydantic_core-2.18.4-cp312-none-win_amd64.whl", hash = "sha256:14601cdb733d741b8958224030e2bfe21a4a881fb3dd6fbb21f071cabd48fa0a"},
    {file = "pydantic_core-2.18.4-cp312-none-win_arm64.whl", hash = "sha256:c1322d7dd74713dcc157a2b7898a564ab091ca6c58302d5c7b4c07296e3fd00f"},
    {file = "pydantic_core-2.18.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:823be1deb01793da05ecb0484d6c9e20baebb39bd42b5d72636ae9cf8350dbd2"},
    {file = "pydantic_core-2.18.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ebef0dd9bf9b812bf75bda96743f2a6c5734a02092ae7f721c048d156d5fabae"},
    {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ae1d6df168efb88d7d522664693607b80b4080be6750c913eefb77e34c12c71a"},
    {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f9899c94762343f2cc2fc64c13e7cae4c3cc65cdfc87dd810a31654c9b7358cc"},
    {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:99457f184ad90235cfe8461c4d70ab7dd2680e28821c29eca00252ba90308c78"},
    {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18f469a3d2a2fdafe99296a87e8a4c37748b5080a26b806a707f25a902c040a8"},
    {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7cdf28938ac6b8b49ae5e92f2735056a7ba99c9b110a474473fd71185c1af5d"},
    {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:938cb21650855054dc54dfd9120a851c974f95450f00683399006aa6e8abb057"},
    {file = "pydantic_core-2.18.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:44cd83ab6a51da80fb5adbd9560e26018e2ac7826f9626bc06ca3dc074cd198b"},
    {file = "pydantic_core-2.18.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:972658f4a72d02b8abfa2581d92d59f59897d2e9f7e708fdabe922f9087773af"},
    {file = "pydantic_core-2.18.4-cp38-none-win32.whl", hash = "sha256:1d886dc848e60cb7666f771e406acae54ab279b9f1e4143babc9c2258213daa2"},
    {file = "pydantic_core-2.18.4-cp38-none-win_amd64.whl", hash = "sha256:bb4462bd43c2460774914b8525f79b00f8f407c945d50881568f294c1d9b4443"},
    {file = "pydantic_core-2.18.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:44a688331d4a4e2129140a8118479443bd6f1905231138971372fcde37e43528"},
    {file = "pydantic_core-2.18.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a2fdd81edd64342c85ac7cf2753ccae0b79bf2dfa063785503cb85a7d3593223"},
    {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:86110d7e1907ab36691f80b33eb2da87d780f4739ae773e5fc83fb272f88825f"},
    {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:46387e38bd641b3ee5ce247563b60c5ca098da9c56c75c157a05eaa0933ed154"},
    {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:123c3cec203e3f5ac7b000bd82235f1a3eced8665b63d18be751f115588fea30"},
    {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dc1803ac5c32ec324c5261c7209e8f8ce88e83254c4e1aebdc8b0a39f9ddb443"},
    {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:53db086f9f6ab2b4061958d9c276d1dbe3690e8dd727d6abf2321d6cce37fa94"},
    {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:abc267fa9837245cc28ea6929f19fa335f3dc330a35d2e45509b6566dc18be23"},
    {file = "pydantic_core-2.18.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a0d829524aaefdebccb869eed855e2d04c21d2d7479b6cada7ace5448416597b"},
    {file = "pydantic_core-2.18.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:509daade3b8649f80d4e5ff21aa5673e4ebe58590b25fe42fac5f0f52c6f034a"},
    {file = "pydantic_core-2.18.4-cp39-none-win32.whl", hash = "sha256:ca26a1e73c48cfc54c4a76ff78df3727b9d9f4ccc8dbee4ae3f73306a591676d"},
    {file = "pydantic_core-2.18.4-cp39-none-win_amd64.whl", hash = "sha256:c67598100338d5d985db1b3d21f3619ef392e185e71b8d52bceacc4a7771ea7e"},
    {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:574d92eac874f7f4db0ca653514d823a0d22e2354359d0759e3f6a406db5d55d"},
    {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1f4d26ceb5eb9eed4af91bebeae4b06c3fb28966ca3a8fb765208cf6b51102ab"},
    {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77450e6d20016ec41f43ca4a6c63e9fdde03f0ae3fe90e7c27bdbeaece8b1ed4"},
    {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d323a01da91851a4f17bf592faf46149c9169d68430b3146dcba2bb5e5719abc"},
    {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43d447dd2ae072a0065389092a231283f62d960030ecd27565672bd40746c507"},
    {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:578e24f761f3b425834f297b9935e1ce2e30f51400964ce4801002435a1b41ef"},
    {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:81b5efb2f126454586d0f40c4d834010979cb80785173d1586df845a632e4e6d"},
    {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ab86ce7c8f9bea87b9d12c7f0af71102acbf5ecbc66c17796cff45dae54ef9a5"},
    {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:90afc12421df2b1b4dcc975f814e21bc1754640d502a2fbcc6d41e77af5ec312"},
    {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:51991a89639a912c17bef4b45c87bd83593aee0437d8102556af4885811d59f5"},
    {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:293afe532740370aba8c060882f7d26cfd00c94cae32fd2e212a3a6e3b7bc15e"},
    {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b48ece5bde2e768197a2d0f6e925f9d7e3e826f0ad2271120f8144a9db18d5c8"},
    {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:eae237477a873ab46e8dd748e515c72c0c804fb380fbe6c85533c7de51f23a8f"},
    {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:834b5230b5dfc0c1ec37b2fda433b271cbbc0e507560b5d1588e2cc1148cf1ce"},
    {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e858ac0a25074ba4bce653f9b5d0a85b7456eaddadc0ce82d3878c22489fa4ee"},
    {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2fd41f6eff4c20778d717af1cc50eca52f5afe7805ee530a4fbd0bae284f16e9"},
    {file = "pydantic_core-2.18.4.tar.gz", hash = "sha256:ec3beeada09ff865c344ff3bc2f427f5e6c26401cc6113d77e372c3fdac73864"},
]

[package.dependencies]
typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"

[[package]]
name = "pytest"
version = "7.4.4"
@@ -15,6 +15,8 @@ from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env


class WatsonxEmbeddings(BaseModel, LangChainEmbeddings):
    """IBM WatsonX.ai embedding models."""

    model_id: str = ""
    """Type of model to use."""
@@ -15,7 +15,7 @@ logger = logging.getLogger(__name__)


class WatsonxLLM(BaseLLM):
    """
    IBM watsonx.ai large language models.
    IBM WatsonX.ai large language models.

    To use, you should have ``langchain_ibm`` python package installed,
    and the environment variable ``WATSONX_APIKEY`` set with your API key, or pass
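For reference, a minimal usage sketch of the class this hunk documents. Only WATSONX_APIKEY and model_id are confirmed by the hunk itself; the url/project_id argument names and the Granite model choice are assumptions for illustration.

# Hedged sketch: treat the argument names below as assumptions, not
# something this diff confirms.
import os

from langchain_ibm import WatsonxLLM

os.environ["WATSONX_APIKEY"] = "<your-api-key>"  # read from the env, per the docstring above

llm = WatsonxLLM(
    model_id="ibm/granite-13b-instruct-v2",   # hypothetical model choice
    url="https://us-south.ml.cloud.ibm.com",  # hypothetical region endpoint
    project_id="<your-project-id>",
)
print(llm.invoke("What is IBM watsonx.ai?"))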
@@ -13,7 +13,7 @@ license = "MIT"

[tool.poetry.dependencies]
python = ">=3.8.1,<4.0"
langchain-core = "^0.2.0"
pymilvus = "^2.4.3"
pymilvus = "^2.4.3" #
scipy = "^1.7"

[tool.poetry.group.test]
124 libs/partners/mistralai/poetry.lock (generated)
@@ -392,7 +392,7 @@ files = [

[[package]]
name = "langchain-core"
version = "0.2.5"
version = "0.2.9"
description = "Building applications with LLMs through composability"
optional = false
python-versions = ">=3.8.1,<4.0"

@@ -402,10 +402,13 @@ develop = true
[package.dependencies]
jsonpatch = "^1.33"
langsmith = "^0.1.75"
packaging = "^23.2"
pydantic = ">=1,<3"
packaging = ">=23.2,<25"
pydantic = [
    {version = ">=1,<3", markers = "python_full_version < \"3.12.4\""},
    {version = ">=2.7.4,<3.0.0", markers = "python_full_version >= \"3.12.4\""},
]
PyYAML = ">=5.3"
tenacity = "^8.1.0"
tenacity = "^8.1.0,!=8.4.0"

[package.source]
type = "directory"

@@ -413,7 +416,7 @@ url = "../../core"

[[package]]
name = "langchain-standard-tests"
version = "0.1.0"
version = "0.1.1"
description = "Standard tests for LangChain implementations"
optional = false
python-versions = ">=3.8.1,<4.0"

@@ -421,6 +424,7 @@ files = []
develop = true

[package.dependencies]
httpx = "^0.27.0"
langchain-core = ">=0.1.40,<0.3"
pytest = ">=7,<9"

@@ -605,6 +609,25 @@ typing-extensions = ">=4.6.1"
[package.extras]
email = ["email-validator (>=2.0.0)"]

[[package]]
name = "pydantic"
version = "2.7.4"
description = "Data validation using Python type hints"
optional = false
python-versions = ">=3.8"
files = [
    {file = "pydantic-2.7.4-py3-none-any.whl", hash = "sha256:ee8538d41ccb9c0a9ad3e0e5f07bf15ed8015b481ced539a1759d8cc89ae90d0"},
    {file = "pydantic-2.7.4.tar.gz", hash = "sha256:0c84efd9548d545f63ac0060c1e4d39bb9b14db8b3c0652338aecc07b5adec52"},
]

[package.dependencies]
annotated-types = ">=0.4.0"
pydantic-core = "2.18.4"
typing-extensions = ">=4.6.1"

[package.extras]
email = ["email-validator (>=2.0.0)"]

[[package]]
name = "pydantic-core"
version = "2.18.2"

@@ -696,6 +719,97 @@ files = [
[package.dependencies]
typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"

[[package]]
name = "pydantic-core"
version = "2.18.4"
description = "Core functionality for Pydantic validation and serialization"
optional = false
python-versions = ">=3.8"
files = [
{file = "pydantic_core-2.18.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:f76d0ad001edd426b92233d45c746fd08f467d56100fd8f30e9ace4b005266e4"},
|
||||
{file = "pydantic_core-2.18.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:59ff3e89f4eaf14050c8022011862df275b552caef8082e37b542b066ce1ff26"},
|
||||
{file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a55b5b16c839df1070bc113c1f7f94a0af4433fcfa1b41799ce7606e5c79ce0a"},
|
||||
{file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4d0dcc59664fcb8974b356fe0a18a672d6d7cf9f54746c05f43275fc48636851"},
|
||||
{file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8951eee36c57cd128f779e641e21eb40bc5073eb28b2d23f33eb0ef14ffb3f5d"},
|
||||
{file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4701b19f7e3a06ea655513f7938de6f108123bf7c86bbebb1196eb9bd35cf724"},
|
||||
{file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e00a3f196329e08e43d99b79b286d60ce46bed10f2280d25a1718399457e06be"},
|
||||
{file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:97736815b9cc893b2b7f663628e63f436018b75f44854c8027040e05230eeddb"},
|
||||
{file = "pydantic_core-2.18.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6891a2ae0e8692679c07728819b6e2b822fb30ca7445f67bbf6509b25a96332c"},
|
||||
{file = "pydantic_core-2.18.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bc4ff9805858bd54d1a20efff925ccd89c9d2e7cf4986144b30802bf78091c3e"},
|
||||
{file = "pydantic_core-2.18.4-cp310-none-win32.whl", hash = "sha256:1b4de2e51bbcb61fdebd0ab86ef28062704f62c82bbf4addc4e37fa4b00b7cbc"},
|
||||
{file = "pydantic_core-2.18.4-cp310-none-win_amd64.whl", hash = "sha256:6a750aec7bf431517a9fd78cb93c97b9b0c496090fee84a47a0d23668976b4b0"},
|
||||
{file = "pydantic_core-2.18.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:942ba11e7dfb66dc70f9ae66b33452f51ac7bb90676da39a7345e99ffb55402d"},
|
||||
{file = "pydantic_core-2.18.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b2ebef0e0b4454320274f5e83a41844c63438fdc874ea40a8b5b4ecb7693f1c4"},
|
||||
{file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a642295cd0c8df1b86fc3dced1d067874c353a188dc8e0f744626d49e9aa51c4"},
|
||||
{file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f09baa656c904807e832cf9cce799c6460c450c4ad80803517032da0cd062e2"},
|
||||
{file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:98906207f29bc2c459ff64fa007afd10a8c8ac080f7e4d5beff4c97086a3dabd"},
|
||||
{file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:19894b95aacfa98e7cb093cd7881a0c76f55731efad31073db4521e2b6ff5b7d"},
|
||||
{file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0fbbdc827fe5e42e4d196c746b890b3d72876bdbf160b0eafe9f0334525119c8"},
|
||||
{file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f85d05aa0918283cf29a30b547b4df2fbb56b45b135f9e35b6807cb28bc47951"},
|
||||
{file = "pydantic_core-2.18.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e85637bc8fe81ddb73fda9e56bab24560bdddfa98aa64f87aaa4e4b6730c23d2"},
|
||||
{file = "pydantic_core-2.18.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2f5966897e5461f818e136b8451d0551a2e77259eb0f73a837027b47dc95dab9"},
|
||||
{file = "pydantic_core-2.18.4-cp311-none-win32.whl", hash = "sha256:44c7486a4228413c317952e9d89598bcdfb06399735e49e0f8df643e1ccd0558"},
|
||||
{file = "pydantic_core-2.18.4-cp311-none-win_amd64.whl", hash = "sha256:8a7164fe2005d03c64fd3b85649891cd4953a8de53107940bf272500ba8a788b"},
|
||||
{file = "pydantic_core-2.18.4-cp311-none-win_arm64.whl", hash = "sha256:4e99bc050fe65c450344421017f98298a97cefc18c53bb2f7b3531eb39bc7805"},
|
||||
{file = "pydantic_core-2.18.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:6f5c4d41b2771c730ea1c34e458e781b18cc668d194958e0112455fff4e402b2"},
|
||||
{file = "pydantic_core-2.18.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2fdf2156aa3d017fddf8aea5adfba9f777db1d6022d392b682d2a8329e087cef"},
|
||||
{file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4748321b5078216070b151d5271ef3e7cc905ab170bbfd27d5c83ee3ec436695"},
|
||||
{file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:847a35c4d58721c5dc3dba599878ebbdfd96784f3fb8bb2c356e123bdcd73f34"},
|
||||
{file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c40d4eaad41f78e3bbda31b89edc46a3f3dc6e171bf0ecf097ff7a0ffff7cb1"},
|
||||
{file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:21a5e440dbe315ab9825fcd459b8814bb92b27c974cbc23c3e8baa2b76890077"},
|
||||
{file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01dd777215e2aa86dfd664daed5957704b769e726626393438f9c87690ce78c3"},
|
||||
{file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4b06beb3b3f1479d32befd1f3079cc47b34fa2da62457cdf6c963393340b56e9"},
|
||||
{file = "pydantic_core-2.18.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:564d7922e4b13a16b98772441879fcdcbe82ff50daa622d681dd682175ea918c"},
|
||||
{file = "pydantic_core-2.18.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0eb2a4f660fcd8e2b1c90ad566db2b98d7f3f4717c64fe0a83e0adb39766d5b8"},
|
||||
{file = "pydantic_core-2.18.4-cp312-none-win32.whl", hash = "sha256:8b8bab4c97248095ae0c4455b5a1cd1cdd96e4e4769306ab19dda135ea4cdb07"},
|
||||
{file = "pydantic_core-2.18.4-cp312-none-win_amd64.whl", hash = "sha256:14601cdb733d741b8958224030e2bfe21a4a881fb3dd6fbb21f071cabd48fa0a"},
|
||||
{file = "pydantic_core-2.18.4-cp312-none-win_arm64.whl", hash = "sha256:c1322d7dd74713dcc157a2b7898a564ab091ca6c58302d5c7b4c07296e3fd00f"},
|
||||
{file = "pydantic_core-2.18.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:823be1deb01793da05ecb0484d6c9e20baebb39bd42b5d72636ae9cf8350dbd2"},
|
||||
{file = "pydantic_core-2.18.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ebef0dd9bf9b812bf75bda96743f2a6c5734a02092ae7f721c048d156d5fabae"},
|
||||
{file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ae1d6df168efb88d7d522664693607b80b4080be6750c913eefb77e34c12c71a"},
|
||||
{file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f9899c94762343f2cc2fc64c13e7cae4c3cc65cdfc87dd810a31654c9b7358cc"},
|
||||
{file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:99457f184ad90235cfe8461c4d70ab7dd2680e28821c29eca00252ba90308c78"},
|
||||
{file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18f469a3d2a2fdafe99296a87e8a4c37748b5080a26b806a707f25a902c040a8"},
|
||||
{file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7cdf28938ac6b8b49ae5e92f2735056a7ba99c9b110a474473fd71185c1af5d"},
|
||||
{file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:938cb21650855054dc54dfd9120a851c974f95450f00683399006aa6e8abb057"},
|
||||
{file = "pydantic_core-2.18.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:44cd83ab6a51da80fb5adbd9560e26018e2ac7826f9626bc06ca3dc074cd198b"},
|
||||
{file = "pydantic_core-2.18.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:972658f4a72d02b8abfa2581d92d59f59897d2e9f7e708fdabe922f9087773af"},
|
||||
{file = "pydantic_core-2.18.4-cp38-none-win32.whl", hash = "sha256:1d886dc848e60cb7666f771e406acae54ab279b9f1e4143babc9c2258213daa2"},
|
||||
{file = "pydantic_core-2.18.4-cp38-none-win_amd64.whl", hash = "sha256:bb4462bd43c2460774914b8525f79b00f8f407c945d50881568f294c1d9b4443"},
|
||||
{file = "pydantic_core-2.18.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:44a688331d4a4e2129140a8118479443bd6f1905231138971372fcde37e43528"},
|
||||
{file = "pydantic_core-2.18.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a2fdd81edd64342c85ac7cf2753ccae0b79bf2dfa063785503cb85a7d3593223"},
|
||||
{file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:86110d7e1907ab36691f80b33eb2da87d780f4739ae773e5fc83fb272f88825f"},
|
||||
{file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:46387e38bd641b3ee5ce247563b60c5ca098da9c56c75c157a05eaa0933ed154"},
|
||||
{file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:123c3cec203e3f5ac7b000bd82235f1a3eced8665b63d18be751f115588fea30"},
|
||||
{file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dc1803ac5c32ec324c5261c7209e8f8ce88e83254c4e1aebdc8b0a39f9ddb443"},
|
||||
{file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:53db086f9f6ab2b4061958d9c276d1dbe3690e8dd727d6abf2321d6cce37fa94"},
|
||||
{file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:abc267fa9837245cc28ea6929f19fa335f3dc330a35d2e45509b6566dc18be23"},
|
||||
{file = "pydantic_core-2.18.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a0d829524aaefdebccb869eed855e2d04c21d2d7479b6cada7ace5448416597b"},
|
||||
{file = "pydantic_core-2.18.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:509daade3b8649f80d4e5ff21aa5673e4ebe58590b25fe42fac5f0f52c6f034a"},
|
||||
{file = "pydantic_core-2.18.4-cp39-none-win32.whl", hash = "sha256:ca26a1e73c48cfc54c4a76ff78df3727b9d9f4ccc8dbee4ae3f73306a591676d"},
|
||||
{file = "pydantic_core-2.18.4-cp39-none-win_amd64.whl", hash = "sha256:c67598100338d5d985db1b3d21f3619ef392e185e71b8d52bceacc4a7771ea7e"},
|
||||
{file = "pydantic_core-2.18.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:574d92eac874f7f4db0ca653514d823a0d22e2354359d0759e3f6a406db5d55d"},
|
||||
{file = "pydantic_core-2.18.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1f4d26ceb5eb9eed4af91bebeae4b06c3fb28966ca3a8fb765208cf6b51102ab"},
|
||||
{file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77450e6d20016ec41f43ca4a6c63e9fdde03f0ae3fe90e7c27bdbeaece8b1ed4"},
|
||||
{file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d323a01da91851a4f17bf592faf46149c9169d68430b3146dcba2bb5e5719abc"},
|
||||
{file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43d447dd2ae072a0065389092a231283f62d960030ecd27565672bd40746c507"},
|
||||
{file = "pydantic_core-2.18.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:578e24f761f3b425834f297b9935e1ce2e30f51400964ce4801002435a1b41ef"},
|
||||
{file = "pydantic_core-2.18.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:81b5efb2f126454586d0f40c4d834010979cb80785173d1586df845a632e4e6d"},
|
||||
{file = "pydantic_core-2.18.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ab86ce7c8f9bea87b9d12c7f0af71102acbf5ecbc66c17796cff45dae54ef9a5"},
|
||||
{file = "pydantic_core-2.18.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:90afc12421df2b1b4dcc975f814e21bc1754640d502a2fbcc6d41e77af5ec312"},
|
||||
{file = "pydantic_core-2.18.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:51991a89639a912c17bef4b45c87bd83593aee0437d8102556af4885811d59f5"},
|
||||
{file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:293afe532740370aba8c060882f7d26cfd00c94cae32fd2e212a3a6e3b7bc15e"},
|
||||
{file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b48ece5bde2e768197a2d0f6e925f9d7e3e826f0ad2271120f8144a9db18d5c8"},
|
||||
{file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:eae237477a873ab46e8dd748e515c72c0c804fb380fbe6c85533c7de51f23a8f"},
|
||||
{file = "pydantic_core-2.18.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:834b5230b5dfc0c1ec37b2fda433b271cbbc0e507560b5d1588e2cc1148cf1ce"},
|
||||
{file = "pydantic_core-2.18.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e858ac0a25074ba4bce653f9b5d0a85b7456eaddadc0ce82d3878c22489fa4ee"},
|
||||
{file = "pydantic_core-2.18.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2fd41f6eff4c20778d717af1cc50eca52f5afe7805ee530a4fbd0bae284f16e9"},
|
||||
{file = "pydantic_core-2.18.4.tar.gz", hash = "sha256:ec3beeada09ff865c344ff3bc2f427f5e6c26401cc6113d77e372c3fdac73864"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"
|
||||
|
||||
[[package]]
|
||||
name = "pytest"
|
||||
version = "7.4.4"
|
||||
|
||||
@@ -443,11 +443,12 @@ class BaseChatOpenAI(BaseChatModel):
            "stream": self.streaming,
            "n": self.n,
            "temperature": self.temperature,
            "stop": self.stop,
            **self.model_kwargs,
        }
        if self.max_tokens is not None:
            params["max_tokens"] = self.max_tokens
        if self.stop:
            params["stop"] = self.stop
        return params

    def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
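The hunk moves "stop" out of the unconditional defaults and into a guarded assignment, so an unset value is no longer serialized into the request payload as a null. A self-contained sketch of the same pattern (names here are illustrative, not taken from the upstream class):

# Standalone sketch: only include optional request parameters when they are
# actually set, instead of sending nulls to the API.
from typing import Any, Dict, List, Optional

def build_params(
    temperature: float,
    stop: Optional[List[str]] = None,
    max_tokens: Optional[int] = None,
) -> Dict[str, Any]:
    params: Dict[str, Any] = {"temperature": temperature}
    if max_tokens is not None:
        params["max_tokens"] = max_tokens
    if stop:  # omit the key entirely when no stop sequences are configured
        params["stop"] = stop
    return params

assert build_params(0.7) == {"temperature": 0.7}  # no "stop": None in the payload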
@@ -12,3 +12,11 @@ class TestOpenAIStandard(ChatModelIntegrationTests):
    @property
    def chat_model_class(self) -> Type[BaseChatModel]:
        return ChatOpenAI

    @property
    def chat_model_params(self) -> dict:
        return {"model": "gpt-4o"}

    @property
    def supports_image_inputs(self) -> bool:
        return True
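The standard-tests suite is configured entirely through properties like these. A hedged sketch of how another integration could reuse the same shape (the ChatAnthropic specifics and the exact import path are assumptions; only the property names come from the hunk above):

# Hypothetical subclass mirroring the pattern shown above.
from typing import Type

from langchain_anthropic import ChatAnthropic
from langchain_core.language_models import BaseChatModel
from langchain_standard_tests.integration_tests import ChatModelIntegrationTests

class TestAnthropicStandard(ChatModelIntegrationTests):
    @property
    def chat_model_class(self) -> Type[BaseChatModel]:
        return ChatAnthropic

    @property
    def chat_model_params(self) -> dict:
        return {"model": "claude-3-haiku-20240307"}  # hypothetical model choice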
@@ -14,10 +14,14 @@ T = TypeVar("T")


class SimpleModel(BaseModel, Generic[T]):
    """Simple model for a single item."""

    item: T


class PropertySettings(BaseModel):
    """Property settings for a prompty model."""

    model_config = ConfigDict(arbitrary_types_allowed=True)
    type: Literal["string", "number", "array", "object", "boolean"]
    default: Union[str, int, float, List, Dict, bool] = Field(default=None)

@@ -25,6 +29,8 @@ class PropertySettings(BaseModel):


class ModelSettings(BaseModel):
    """Model settings for a prompty model."""

    api: str = Field(default="")
    configuration: dict = Field(default={})
    parameters: dict = Field(default={})

@@ -40,11 +46,15 @@ class ModelSettings(BaseModel):


class TemplateSettings(BaseModel):
    """Template settings for a prompty model."""

    type: str = Field(default="mustache")
    parser: str = Field(default="")


class Prompty(BaseModel):
    """Base Prompty model."""

    # metadata
    name: str = Field(default="")
    description: str = Field(default="")

@@ -147,6 +157,16 @@ class Prompty(BaseModel):
def param_hoisting(
    top: Dict[str, Any], bottom: Dict[str, Any], top_key: Any = None
) -> Dict[str, Any]:
    """Merge two dictionaries with hoisting of parameters from bottom to top.

    Args:
        top: The top dictionary.
        bottom: The bottom dictionary.
        top_key: The key to hoist from the bottom to the top.

    Returns:
        The merged dictionary.
    """
    if top_key:
        new_dict = {**top[top_key]} if top_key in top else {}
    else:

@@ -158,6 +178,8 @@ def param_hoisting(


class Invoker(abc.ABC):
    """Base class for all invokers."""

    def __init__(self, prompty: Prompty) -> None:
        self.prompty = prompty

@@ -170,11 +192,15 @@ class Invoker(abc.ABC):


class NoOpParser(Invoker):
    """NoOp parser for invokers."""

    def invoke(self, data: BaseModel) -> BaseModel:
        return data


class InvokerFactory(object):
    """Factory for creating invokers."""

    _instance = None
    _renderers: Dict[str, Type[Invoker]] = {}
    _parsers: Dict[str, Type[Invoker]] = {}

@@ -259,6 +285,8 @@ class InvokerFactory(object):


class Frontmatter:
    """Class for reading frontmatter from a string or file."""

    _yaml_delim = r"(?:---|\+\+\+)"
    _yaml = r"(.*?)"
    _content = r"\s*(.+)$"
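The param_hoisting hunk above cuts off mid-body. A self-contained sketch of the merge semantics its docstring describes (top values win, bottom values are hoisted in as defaults); this is an illustration of the documented behavior, not the verbatim upstream implementation:

from typing import Any, Dict

def param_hoisting(
    top: Dict[str, Any], bottom: Dict[str, Any], top_key: Any = None
) -> Dict[str, Any]:
    # Illustrative reconstruction based on the docstring; the diff truncates
    # before the merge loop, so the loop below is an assumption.
    if top_key:
        new_dict = {**top[top_key]} if top_key in top else {}
    else:
        new_dict = {**top}
    for key, value in bottom.items():
        new_dict.setdefault(key, value)  # hoist bottom values only as defaults
    return new_dict

assert param_hoisting({"a": 1}, {"a": 0, "b": 2}) == {"a": 1, "b": 2}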
@@ -9,6 +9,8 @@ from .utils import load, prepare

def create_chat_prompt(
    path: str, input_name_agent_scratchpad: str = "agent_scratchpad"
) -> Runnable[Dict[str, Any], ChatPromptTemplate]:
    """Create a chat prompt from a Langchain schema."""

    def runnable_chat_lambda(inputs: Dict[str, Any]) -> ChatPromptTemplate:
        p = load(path)
        parsed = prepare(p, inputs)
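A hedged usage sketch for the function above; the .prompty path and the input key are hypothetical, and only the signature comes from the hunk:

from langchain_prompty import create_chat_prompt

# Returns a Runnable that loads and prepares the prompty file on each call.
prompt_runnable = create_chat_prompt("basic_chat.prompty")  # hypothetical path
prompt = prompt_runnable.invoke({"question": "What is a prompty file?"})
print(prompt)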
@@ -8,6 +8,8 @@ from .core import Invoker, Prompty, SimpleModel


class PromptyChatParser(Invoker):
    """Parse a chat prompt into a list of messages."""

    def __init__(self, prompty: Prompty) -> None:
        self.prompty = prompty
        self.roles = ["assistant", "function", "system", "user", "human", "ai"]

@@ -5,6 +5,8 @@ from .core import Invoker, Prompty, SimpleModel


class MustacheRenderer(Invoker):
    """Render a mustache template."""

    def __init__(self, prompty: Prompty) -> None:
        self.prompty = prompty
@@ -15,6 +15,15 @@ from .core import (


def load(prompt_path: str, configuration: str = "default") -> Prompty:
    """Load a prompty file and return a Prompty object.

    Args:
        prompt_path: The path to the prompty file.
        configuration: The configuration to use. Defaults to "default".

    Returns:
        The Prompty object.
    """
    file_path = Path(prompt_path)
    if not file_path.is_absolute():
        # get caller's path (take into account trace frame)

@@ -113,6 +122,15 @@ def prepare(
    prompt: Prompty,
    inputs: Dict[str, Any] = {},
) -> Any:
    """Prepare the inputs for the prompty.

    Args:
        prompt: The Prompty object.
        inputs: The inputs to the prompty. Defaults to {}.

    Returns:
        The prepared inputs.
    """
    invoker = InvokerFactory()

    inputs = param_hoisting(inputs, prompt.sample)

@@ -153,6 +171,18 @@ def run(
    parameters: Dict[str, Any] = {},
    raw: bool = False,
) -> Any:
    """Run the prompty.

    Args:
        prompt: The Prompty object.
        content: The content to run the prompty on.
        configuration: The configuration to use. Defaults to {}.
        parameters: The parameters to use. Defaults to {}.
        raw: Whether to return the raw output. Defaults to False.

    Returns:
        The result of running the prompty.
    """
    invoker = InvokerFactory()

    if configuration != {}:

@@ -195,6 +225,21 @@ def execute(
    raw: bool = False,
    connection: str = "default",
) -> Any:
    """Execute a prompty.

    Args:
        prompt: The prompt to execute.
            Can be a path to a prompty file or a Prompty object.
        configuration: The configuration to use. Defaults to {}.
        parameters: The parameters to use. Defaults to {}.
        inputs: The inputs to the prompty. Defaults to {}.
        raw: Whether to return the raw output. Defaults to False.
        connection: The connection to use. Defaults to "default".

    Returns:
        The result of executing the prompty.
    """

    if isinstance(prompt, str):
        prompt = load(prompt, connection)
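Taken together, these docstrings describe a load -> prepare -> run pipeline: parse the .prompty file, hoist caller inputs over the file's sample defaults, then render. A hedged sketch of the first two steps (the file name and input key are hypothetical; the function signatures come from the hunks above):

from langchain_prompty.utils import load, prepare

p = load("example.prompty")                # hypothetical file; returns a Prompty object
messages = prepare(p, {"question": "hi"})  # inputs hoisted over p.sample defaults
print(messages)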
264 libs/partners/prompty/poetry.lock (generated)
@@ -1,4 +1,4 @@
# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand.
# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.

[[package]]
name = "aiohttp"

@@ -292,21 +292,6 @@ files = [
    {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
]

[[package]]
name = "dataclasses-json"
version = "0.6.6"
description = "Easily serialize dataclasses to and from JSON."
optional = false
python-versions = "<4.0,>=3.7"
files = [
    {file = "dataclasses_json-0.6.6-py3-none-any.whl", hash = "sha256:e54c5c87497741ad454070ba0ed411523d46beb5da102e221efb873801b0ba85"},
    {file = "dataclasses_json-0.6.6.tar.gz", hash = "sha256:0c09827d26fffda27f1be2fed7a7a01a29c5ddcd2eb6393ad5ebf9d77e9deae8"},
]

[package.dependencies]
marshmallow = ">=3.18.0,<4.0.0"
typing-inspect = ">=0.4.0,<1"

[[package]]
name = "exceptiongroup"
version = "1.2.1"

@@ -541,7 +526,7 @@ files = [

[[package]]
name = "langchain"
version = "0.2.0rc2"
version = "0.2.5"
description = "Building applications with LLMs through composability"
optional = false
python-versions = ">=3.8.1,<4.0"

@@ -551,31 +536,18 @@ develop = true
[package.dependencies]
aiohttp = "^3.8.3"
async-timeout = {version = "^4.0.0", markers = "python_version < \"3.11\""}
dataclasses-json = ">= 0.5.7, < 0.7"
langchain-core = ">=0.1.52,<0.3"
langchain-text-splitters = ">=0.0.1,<0.1"
langchain-core = "^0.2.7"
langchain-text-splitters = "^0.2.0"
langsmith = "^0.1.17"
numpy = "^1"
numpy = [
    {version = ">=1,<2", markers = "python_version < \"3.12\""},
    {version = ">=1.26.0,<2.0.0", markers = "python_version >= \"3.12\""},
]
pydantic = ">=1,<3"
PyYAML = ">=5.3"
requests = "^2"
SQLAlchemy = ">=1.4,<3"
tenacity = "^8.1.0"

[package.extras]
all = []
azure = ["azure-ai-formrecognizer (>=3.2.1,<4.0.0)", "azure-ai-textanalytics (>=5.3.0,<6.0.0)", "azure-cognitiveservices-speech (>=1.28.0,<2.0.0)", "azure-core (>=1.26.4,<2.0.0)", "azure-cosmos (>=4.4.0b1,<5.0.0)", "azure-identity (>=1.12.0,<2.0.0)", "azure-search-documents (==11.4.0b8)", "openai (<2)"]
clarifai = ["clarifai (>=9.1.0)"]
cli = ["typer (>=0.9.0,<0.10.0)"]
cohere = ["cohere (>=4,<6)"]
docarray = ["docarray[hnswlib] (>=0.32.0,<0.33.0)"]
embeddings = ["sentence-transformers (>=2,<3)"]
extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.3.11,<0.4.0)", "arxiv (>=1.4,<2.0)", "assemblyai (>=0.17.0,<0.18.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.1.0,<0.2.0)", "chardet (>=5.1.0,<6.0.0)", "cohere (>=4,<6)", "couchbase (>=4.1.9,<5.0.0)", "dashvector (>=1.0.1,<2.0.0)", "databricks-vectorsearch (>=0.21,<0.22)", "datasets (>=2.15.0,<3.0.0)", "dgml-utils (>=0.3.0,<0.4.0)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "feedparser (>=6.0.10,<7.0.0)", "fireworks-ai (>=0.9.0,<0.10.0)", "geopandas (>=0.13.1,<0.14.0)", "gitpython (>=3.1.32,<4.0.0)", "google-cloud-documentai (>=2.20.1,<3.0.0)", "gql (>=3.4.1,<4.0.0)", "hologres-vector (>=0.0.6,<0.0.7)", "html2text (>=2020.1.16,<2021.0.0)", "javelin-sdk (>=0.1.8,<0.2.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "jsonschema (>1)", "langchain-openai (>=0.1,<0.2)", "lxml (>=4.9.3,<6.0)", "markdownify (>=0.11.6,<0.12.0)", "motor (>=3.3.1,<4.0.0)", "msal (>=1.25.0,<2.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "numexpr (>=2.8.6,<3.0.0)", "openai (<2)", "openai (<2)", "openapi-pydantic (>=0.3.2,<0.4.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "praw (>=7.7.1,<8.0.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "rapidocr-onnxruntime (>=1.3.2,<2.0.0)", "rdflib (==7.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "rspace_client (>=2.5.0,<3.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "sqlite-vss (>=0.1.2,<0.2.0)", "streamlit (>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "timescale-vector (>=0.0.1,<0.0.2)", "tqdm (>=4.48.0)", "upstash-redis (>=0.15.0,<0.16.0)", "xata (>=1.0.0a7,<2.0.0)", "xmltodict (>=0.13.0,<0.14.0)"]
javascript = ["esprima (>=4.0.1,<5.0.0)"]
llms = ["clarifai (>=9.1.0)", "cohere (>=4,<6)", "huggingface_hub (>=0,<1)", "manifest-ml (>=0.0.1,<0.0.2)", "nlpcloud (>=1,<2)", "openai (<2)", "openlm (>=0.0.5,<0.0.6)", "torch (>=1,<3)", "transformers (>=4,<5)"]
openai = ["openai (<2)", "tiktoken (>=0.3.2,<0.6.0)"]
qdrant = ["qdrant-client (>=1.3.1,<2.0.0)"]
text-helpers = ["chardet (>=5.1.0,<6.0.0)"]
tenacity = "^8.1.0,!=8.4.0"

[package.source]
type = "directory"

@@ -583,7 +555,7 @@ url = "../../langchain"

[[package]]
name = "langchain-core"
version = "0.2.0rc1"
version = "0.2.9"
description = "Building applications with LLMs through composability"
optional = false
python-versions = ">=3.8.1,<4.0"

@@ -592,14 +564,14 @@ develop = true

[package.dependencies]
jsonpatch = "^1.33"
langsmith = "^0.1.0"
packaging = "^23.2"
pydantic = ">=1,<3"
langsmith = "^0.1.75"
packaging = ">=23.2,<25"
pydantic = [
    {version = ">=1,<3", markers = "python_full_version < \"3.12.4\""},
    {version = ">=2.7.4,<3.0.0", markers = "python_full_version >= \"3.12.4\""},
]
PyYAML = ">=5.3"
tenacity = "^8.1.0"

[package.extras]
extended-testing = ["jinja2 (>=3,<4)"]
tenacity = "^8.1.0,!=8.4.0"

[package.source]
type = "directory"

@@ -607,7 +579,7 @@ url = "../../core"

[[package]]
name = "langchain-text-splitters"
version = "0.0.2"
version = "0.2.1"
description = "LangChain text splitting utilities"
optional = false
python-versions = ">=3.8.1,<4.0"

@@ -615,10 +587,7 @@ files = []
develop = true

[package.dependencies]
langchain-core = ">=0.1.28,<0.3"

[package.extras]
extended-testing = ["beautifulsoup4 (>=4.12.3,<5.0.0)", "lxml (>=4.9.3,<6.0)"]
langchain-core = "^0.2.0"

[package.source]
type = "directory"

@@ -626,13 +595,13 @@ url = "../../text-splitters"

[[package]]
name = "langsmith"
version = "0.1.58"
version = "0.1.80"
description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform."
optional = false
python-versions = "<4.0,>=3.8.1"
files = [
    {file = "langsmith-0.1.58-py3-none-any.whl", hash = "sha256:1148cc836ec99d1b2f37cd2fa3014fcac213bb6bad798a2b21bb9111c18c9768"},
    {file = "langsmith-0.1.58.tar.gz", hash = "sha256:a5060933c1fb3006b498ec849677993329d7e6138bdc2ec044068ab806e09c39"},
    {file = "langsmith-0.1.80-py3-none-any.whl", hash = "sha256:951fc29576b52afd8378d41f6db343090fea863e3620f0ca97e83b221f93c94d"},
    {file = "langsmith-0.1.80.tar.gz", hash = "sha256:a29b1dde27612308beee424f1388ad844c8e7e375bf2ac8bdf4da174013f279d"},
]

[package.dependencies]

@@ -640,25 +609,6 @@ orjson = ">=3.9.14,<4.0.0"
pydantic = ">=1,<3"
requests = ">=2,<3"

[[package]]
name = "marshmallow"
version = "3.21.2"
description = "A lightweight library for converting complex datatypes to and from native Python datatypes."
optional = false
python-versions = ">=3.8"
files = [
    {file = "marshmallow-3.21.2-py3-none-any.whl", hash = "sha256:70b54a6282f4704d12c0a41599682c5c5450e843b9ec406308653b47c59648a1"},
    {file = "marshmallow-3.21.2.tar.gz", hash = "sha256:82408deadd8b33d56338d2182d455db632c6313aa2af61916672146bb32edc56"},
]

[package.dependencies]
packaging = ">=17.0"

[package.extras]
dev = ["marshmallow[tests]", "pre-commit (>=3.5,<4.0)", "tox"]
docs = ["alabaster (==0.7.16)", "autodocsumm (==0.2.12)", "sphinx (==7.3.7)", "sphinx-issues (==4.1.0)", "sphinx-version-warning (==1.1.2)"]
tests = ["pytest", "pytz", "simplejson"]

[[package]]
name = "multidict"
version = "6.0.5"

@@ -856,6 +806,51 @@ files = [
    {file = "numpy-1.24.4.tar.gz", hash = "sha256:80f5e3a4e498641401868df4208b74581206afbee7cf7b8329daae82676d9463"},
]

[[package]]
name = "numpy"
version = "1.26.4"
description = "Fundamental package for array computing in Python"
optional = false
python-versions = ">=3.9"
files = [
|
||||
{file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"},
|
||||
{file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"},
|
||||
{file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d209d8969599b27ad20994c8e41936ee0964e6da07478d6c35016bc386b66ad4"},
|
||||
{file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffa75af20b44f8dba823498024771d5ac50620e6915abac414251bd971b4529f"},
|
||||
{file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:62b8e4b1e28009ef2846b4c7852046736bab361f7aeadeb6a5b89ebec3c7055a"},
|
||||
{file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a4abb4f9001ad2858e7ac189089c42178fcce737e4169dc61321660f1a96c7d2"},
|
||||
{file = "numpy-1.26.4-cp310-cp310-win32.whl", hash = "sha256:bfe25acf8b437eb2a8b2d49d443800a5f18508cd811fea3181723922a8a82b07"},
|
||||
{file = "numpy-1.26.4-cp310-cp310-win_amd64.whl", hash = "sha256:b97fe8060236edf3662adfc2c633f56a08ae30560c56310562cb4f95500022d5"},
|
||||
{file = "numpy-1.26.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4c66707fabe114439db9068ee468c26bbdf909cac0fb58686a42a24de1760c71"},
|
||||
{file = "numpy-1.26.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:edd8b5fe47dab091176d21bb6de568acdd906d1887a4584a15a9a96a1dca06ef"},
|
||||
{file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ab55401287bfec946ced39700c053796e7cc0e3acbef09993a9ad2adba6ca6e"},
|
||||
{file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:666dbfb6ec68962c033a450943ded891bed2d54e6755e35e5835d63f4f6931d5"},
|
||||
{file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:96ff0b2ad353d8f990b63294c8986f1ec3cb19d749234014f4e7eb0112ceba5a"},
|
||||
{file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:60dedbb91afcbfdc9bc0b1f3f402804070deed7392c23eb7a7f07fa857868e8a"},
|
||||
{file = "numpy-1.26.4-cp311-cp311-win32.whl", hash = "sha256:1af303d6b2210eb850fcf03064d364652b7120803a0b872f5211f5234b399f20"},
|
||||
{file = "numpy-1.26.4-cp311-cp311-win_amd64.whl", hash = "sha256:cd25bcecc4974d09257ffcd1f098ee778f7834c3ad767fe5db785be9a4aa9cb2"},
|
||||
{file = "numpy-1.26.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b3ce300f3644fb06443ee2222c2201dd3a89ea6040541412b8fa189341847218"},
|
||||
{file = "numpy-1.26.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:03a8c78d01d9781b28a6989f6fa1bb2c4f2d51201cf99d3dd875df6fbd96b23b"},
|
||||
{file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fad7dcb1aac3c7f0584a5a8133e3a43eeb2fe127f47e3632d43d677c66c102b"},
|
||||
{file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675d61ffbfa78604709862923189bad94014bef562cc35cf61d3a07bba02a7ed"},
|
||||
{file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ab47dbe5cc8210f55aa58e4805fe224dac469cde56b9f731a4c098b91917159a"},
|
||||
{file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1dda2e7b4ec9dd512f84935c5f126c8bd8b9f2fc001e9f54af255e8c5f16b0e0"},
|
||||
{file = "numpy-1.26.4-cp312-cp312-win32.whl", hash = "sha256:50193e430acfc1346175fcbdaa28ffec49947a06918b7b92130744e81e640110"},
|
||||
{file = "numpy-1.26.4-cp312-cp312-win_amd64.whl", hash = "sha256:08beddf13648eb95f8d867350f6a018a4be2e5ad54c8d8caed89ebca558b2818"},
|
||||
{file = "numpy-1.26.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7349ab0fa0c429c82442a27a9673fc802ffdb7c7775fad780226cb234965e53c"},
|
||||
{file = "numpy-1.26.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:52b8b60467cd7dd1e9ed082188b4e6bb35aa5cdd01777621a1658910745b90be"},
|
||||
{file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5241e0a80d808d70546c697135da2c613f30e28251ff8307eb72ba696945764"},
|
||||
{file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f870204a840a60da0b12273ef34f7051e98c3b5961b61b0c2c1be6dfd64fbcd3"},
|
||||
{file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:679b0076f67ecc0138fd2ede3a8fd196dddc2ad3254069bcb9faf9a79b1cebcd"},
|
||||
{file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:47711010ad8555514b434df65f7d7b076bb8261df1ca9bb78f53d3b2db02e95c"},
|
||||
{file = "numpy-1.26.4-cp39-cp39-win32.whl", hash = "sha256:a354325ee03388678242a4d7ebcd08b5c727033fcff3b2f536aea978e15ee9e6"},
|
||||
{file = "numpy-1.26.4-cp39-cp39-win_amd64.whl", hash = "sha256:3373d5d70a5fe74a2c1bb6d2cfd9609ecf686d47a2d7b1d37a8f3b6bf6003aea"},
|
||||
{file = "numpy-1.26.4-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:afedb719a9dcfc7eaf2287b839d8198e06dcd4cb5d276a3df279231138e83d30"},
|
||||
{file = "numpy-1.26.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95a7476c59002f2f6c590b9b7b998306fba6a5aa646b1e22ddfeaf8f78c3a29c"},
|
||||
{file = "numpy-1.26.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7e50d0a0cc3189f9cb0aeb3a6a6af18c16f59f004b866cd2be1c14b36134a4a0"},
|
||||
{file = "numpy-1.26.4.tar.gz", hash = "sha256:2a02aba9ed12e4ac4eb3ea9421c420301a0c6460d9830d74a9df87efa4912010"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "orjson"
|
||||
version = "3.10.3"
|
||||
@@ -956,6 +951,25 @@ typing-extensions = ">=4.6.1"
|
||||
[package.extras]
|
||||
email = ["email-validator (>=2.0.0)"]
|
||||
|
||||
[[package]]
|
||||
name = "pydantic"
|
||||
version = "2.7.4"
|
||||
description = "Data validation using Python type hints"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
files = [
|
||||
{file = "pydantic-2.7.4-py3-none-any.whl", hash = "sha256:ee8538d41ccb9c0a9ad3e0e5f07bf15ed8015b481ced539a1759d8cc89ae90d0"},
|
||||
{file = "pydantic-2.7.4.tar.gz", hash = "sha256:0c84efd9548d545f63ac0060c1e4d39bb9b14db8b3c0652338aecc07b5adec52"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
annotated-types = ">=0.4.0"
|
||||
pydantic-core = "2.18.4"
|
||||
typing-extensions = ">=4.6.1"
|
||||
|
||||
[package.extras]
|
||||
email = ["email-validator (>=2.0.0)"]
|
||||
|
||||
[[package]]
|
||||
name = "pydantic-core"
|
||||
version = "2.18.2"
|
||||
@@ -1047,6 +1061,97 @@ files = [
|
||||
[package.dependencies]
|
||||
typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"
|
||||
|
||||
[[package]]
|
||||
name = "pydantic-core"
|
||||
version = "2.18.4"
|
||||
description = "Core functionality for Pydantic validation and serialization"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
files = [
|
||||
{file = "pydantic_core-2.18.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:f76d0ad001edd426b92233d45c746fd08f467d56100fd8f30e9ace4b005266e4"},
|
||||
{file = "pydantic_core-2.18.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:59ff3e89f4eaf14050c8022011862df275b552caef8082e37b542b066ce1ff26"},
|
||||
{file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a55b5b16c839df1070bc113c1f7f94a0af4433fcfa1b41799ce7606e5c79ce0a"},
|
||||
{file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4d0dcc59664fcb8974b356fe0a18a672d6d7cf9f54746c05f43275fc48636851"},
|
||||
{file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8951eee36c57cd128f779e641e21eb40bc5073eb28b2d23f33eb0ef14ffb3f5d"},
|
||||
{file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4701b19f7e3a06ea655513f7938de6f108123bf7c86bbebb1196eb9bd35cf724"},
|
||||
{file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e00a3f196329e08e43d99b79b286d60ce46bed10f2280d25a1718399457e06be"},
|
||||
{file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:97736815b9cc893b2b7f663628e63f436018b75f44854c8027040e05230eeddb"},
|
||||
{file = "pydantic_core-2.18.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6891a2ae0e8692679c07728819b6e2b822fb30ca7445f67bbf6509b25a96332c"},
|
||||
{file = "pydantic_core-2.18.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bc4ff9805858bd54d1a20efff925ccd89c9d2e7cf4986144b30802bf78091c3e"},
|
||||
{file = "pydantic_core-2.18.4-cp310-none-win32.whl", hash = "sha256:1b4de2e51bbcb61fdebd0ab86ef28062704f62c82bbf4addc4e37fa4b00b7cbc"},
|
||||
{file = "pydantic_core-2.18.4-cp310-none-win_amd64.whl", hash = "sha256:6a750aec7bf431517a9fd78cb93c97b9b0c496090fee84a47a0d23668976b4b0"},
|
||||
{file = "pydantic_core-2.18.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:942ba11e7dfb66dc70f9ae66b33452f51ac7bb90676da39a7345e99ffb55402d"},
|
||||
{file = "pydantic_core-2.18.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b2ebef0e0b4454320274f5e83a41844c63438fdc874ea40a8b5b4ecb7693f1c4"},
|
||||
{file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a642295cd0c8df1b86fc3dced1d067874c353a188dc8e0f744626d49e9aa51c4"},
|
||||
{file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f09baa656c904807e832cf9cce799c6460c450c4ad80803517032da0cd062e2"},
|
||||
{file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:98906207f29bc2c459ff64fa007afd10a8c8ac080f7e4d5beff4c97086a3dabd"},
|
||||
{file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:19894b95aacfa98e7cb093cd7881a0c76f55731efad31073db4521e2b6ff5b7d"},
|
||||
{file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0fbbdc827fe5e42e4d196c746b890b3d72876bdbf160b0eafe9f0334525119c8"},
|
||||
{file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f85d05aa0918283cf29a30b547b4df2fbb56b45b135f9e35b6807cb28bc47951"},
|
||||
{file = "pydantic_core-2.18.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e85637bc8fe81ddb73fda9e56bab24560bdddfa98aa64f87aaa4e4b6730c23d2"},
|
||||
{file = "pydantic_core-2.18.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2f5966897e5461f818e136b8451d0551a2e77259eb0f73a837027b47dc95dab9"},
|
||||
{file = "pydantic_core-2.18.4-cp311-none-win32.whl", hash = "sha256:44c7486a4228413c317952e9d89598bcdfb06399735e49e0f8df643e1ccd0558"},
|
||||
{file = "pydantic_core-2.18.4-cp311-none-win_amd64.whl", hash = "sha256:8a7164fe2005d03c64fd3b85649891cd4953a8de53107940bf272500ba8a788b"},
|
||||
{file = "pydantic_core-2.18.4-cp311-none-win_arm64.whl", hash = "sha256:4e99bc050fe65c450344421017f98298a97cefc18c53bb2f7b3531eb39bc7805"},
|
||||
{file = "pydantic_core-2.18.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:6f5c4d41b2771c730ea1c34e458e781b18cc668d194958e0112455fff4e402b2"},
|
||||
{file = "pydantic_core-2.18.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2fdf2156aa3d017fddf8aea5adfba9f777db1d6022d392b682d2a8329e087cef"},
|
||||
{file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4748321b5078216070b151d5271ef3e7cc905ab170bbfd27d5c83ee3ec436695"},
|
||||
{file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:847a35c4d58721c5dc3dba599878ebbdfd96784f3fb8bb2c356e123bdcd73f34"},
|
||||
{file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c40d4eaad41f78e3bbda31b89edc46a3f3dc6e171bf0ecf097ff7a0ffff7cb1"},
|
||||
{file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:21a5e440dbe315ab9825fcd459b8814bb92b27c974cbc23c3e8baa2b76890077"},
|
||||
{file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01dd777215e2aa86dfd664daed5957704b769e726626393438f9c87690ce78c3"},
|
||||
{file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4b06beb3b3f1479d32befd1f3079cc47b34fa2da62457cdf6c963393340b56e9"},
|
||||
{file = "pydantic_core-2.18.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:564d7922e4b13a16b98772441879fcdcbe82ff50daa622d681dd682175ea918c"},
|
||||
{file = "pydantic_core-2.18.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0eb2a4f660fcd8e2b1c90ad566db2b98d7f3f4717c64fe0a83e0adb39766d5b8"},
|
||||
{file = "pydantic_core-2.18.4-cp312-none-win32.whl", hash = "sha256:8b8bab4c97248095ae0c4455b5a1cd1cdd96e4e4769306ab19dda135ea4cdb07"},
|
||||
{file = "pydantic_core-2.18.4-cp312-none-win_amd64.whl", hash = "sha256:14601cdb733d741b8958224030e2bfe21a4a881fb3dd6fbb21f071cabd48fa0a"},
|
||||
{file = "pydantic_core-2.18.4-cp312-none-win_arm64.whl", hash = "sha256:c1322d7dd74713dcc157a2b7898a564ab091ca6c58302d5c7b4c07296e3fd00f"},
|
||||
{file = "pydantic_core-2.18.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:823be1deb01793da05ecb0484d6c9e20baebb39bd42b5d72636ae9cf8350dbd2"},
|
||||
{file = "pydantic_core-2.18.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ebef0dd9bf9b812bf75bda96743f2a6c5734a02092ae7f721c048d156d5fabae"},
|
||||
{file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ae1d6df168efb88d7d522664693607b80b4080be6750c913eefb77e34c12c71a"},
|
||||
{file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f9899c94762343f2cc2fc64c13e7cae4c3cc65cdfc87dd810a31654c9b7358cc"},
|
||||
{file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:99457f184ad90235cfe8461c4d70ab7dd2680e28821c29eca00252ba90308c78"},
|
||||
{file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18f469a3d2a2fdafe99296a87e8a4c37748b5080a26b806a707f25a902c040a8"},
|
||||
{file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7cdf28938ac6b8b49ae5e92f2735056a7ba99c9b110a474473fd71185c1af5d"},
|
||||
{file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:938cb21650855054dc54dfd9120a851c974f95450f00683399006aa6e8abb057"},
|
||||
{file = "pydantic_core-2.18.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:44cd83ab6a51da80fb5adbd9560e26018e2ac7826f9626bc06ca3dc074cd198b"},
|
||||
{file = "pydantic_core-2.18.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:972658f4a72d02b8abfa2581d92d59f59897d2e9f7e708fdabe922f9087773af"},
|
||||
{file = "pydantic_core-2.18.4-cp38-none-win32.whl", hash = "sha256:1d886dc848e60cb7666f771e406acae54ab279b9f1e4143babc9c2258213daa2"},
|
||||
{file = "pydantic_core-2.18.4-cp38-none-win_amd64.whl", hash = "sha256:bb4462bd43c2460774914b8525f79b00f8f407c945d50881568f294c1d9b4443"},
|
||||
{file = "pydantic_core-2.18.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:44a688331d4a4e2129140a8118479443bd6f1905231138971372fcde37e43528"},
|
||||
{file = "pydantic_core-2.18.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a2fdd81edd64342c85ac7cf2753ccae0b79bf2dfa063785503cb85a7d3593223"},
|
||||
{file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:86110d7e1907ab36691f80b33eb2da87d780f4739ae773e5fc83fb272f88825f"},
|
||||
{file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:46387e38bd641b3ee5ce247563b60c5ca098da9c56c75c157a05eaa0933ed154"},
|
||||
{file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:123c3cec203e3f5ac7b000bd82235f1a3eced8665b63d18be751f115588fea30"},
|
||||
{file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dc1803ac5c32ec324c5261c7209e8f8ce88e83254c4e1aebdc8b0a39f9ddb443"},
|
||||
{file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:53db086f9f6ab2b4061958d9c276d1dbe3690e8dd727d6abf2321d6cce37fa94"},
|
||||
{file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:abc267fa9837245cc28ea6929f19fa335f3dc330a35d2e45509b6566dc18be23"},
|
||||
{file = "pydantic_core-2.18.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a0d829524aaefdebccb869eed855e2d04c21d2d7479b6cada7ace5448416597b"},
|
||||
{file = "pydantic_core-2.18.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:509daade3b8649f80d4e5ff21aa5673e4ebe58590b25fe42fac5f0f52c6f034a"},
|
||||
{file = "pydantic_core-2.18.4-cp39-none-win32.whl", hash = "sha256:ca26a1e73c48cfc54c4a76ff78df3727b9d9f4ccc8dbee4ae3f73306a591676d"},
|
||||
{file = "pydantic_core-2.18.4-cp39-none-win_amd64.whl", hash = "sha256:c67598100338d5d985db1b3d21f3619ef392e185e71b8d52bceacc4a7771ea7e"},
|
||||
{file = "pydantic_core-2.18.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:574d92eac874f7f4db0ca653514d823a0d22e2354359d0759e3f6a406db5d55d"},
|
||||
{file = "pydantic_core-2.18.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1f4d26ceb5eb9eed4af91bebeae4b06c3fb28966ca3a8fb765208cf6b51102ab"},
|
||||
{file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77450e6d20016ec41f43ca4a6c63e9fdde03f0ae3fe90e7c27bdbeaece8b1ed4"},
|
||||
{file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d323a01da91851a4f17bf592faf46149c9169d68430b3146dcba2bb5e5719abc"},
|
||||
{file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43d447dd2ae072a0065389092a231283f62d960030ecd27565672bd40746c507"},
|
||||
{file = "pydantic_core-2.18.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:578e24f761f3b425834f297b9935e1ce2e30f51400964ce4801002435a1b41ef"},
|
||||
{file = "pydantic_core-2.18.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:81b5efb2f126454586d0f40c4d834010979cb80785173d1586df845a632e4e6d"},
|
||||
{file = "pydantic_core-2.18.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ab86ce7c8f9bea87b9d12c7f0af71102acbf5ecbc66c17796cff45dae54ef9a5"},
|
||||
{file = "pydantic_core-2.18.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:90afc12421df2b1b4dcc975f814e21bc1754640d502a2fbcc6d41e77af5ec312"},
|
||||
{file = "pydantic_core-2.18.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:51991a89639a912c17bef4b45c87bd83593aee0437d8102556af4885811d59f5"},
|
||||
{file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:293afe532740370aba8c060882f7d26cfd00c94cae32fd2e212a3a6e3b7bc15e"},
|
||||
{file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b48ece5bde2e768197a2d0f6e925f9d7e3e826f0ad2271120f8144a9db18d5c8"},
|
||||
{file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:eae237477a873ab46e8dd748e515c72c0c804fb380fbe6c85533c7de51f23a8f"},
|
||||
{file = "pydantic_core-2.18.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:834b5230b5dfc0c1ec37b2fda433b271cbbc0e507560b5d1588e2cc1148cf1ce"},
|
||||
{file = "pydantic_core-2.18.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e858ac0a25074ba4bce653f9b5d0a85b7456eaddadc0ce82d3878c22489fa4ee"},
|
||||
{file = "pydantic_core-2.18.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2fd41f6eff4c20778d717af1cc50eca52f5afe7805ee530a4fbd0bae284f16e9"},
|
||||
{file = "pydantic_core-2.18.4.tar.gz", hash = "sha256:ec3beeada09ff865c344ff3bc2f427f5e6c26401cc6113d77e372c3fdac73864"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"
|
||||
|
||||
[[package]]
|
||||
name = "pytest"
|
||||
version = "7.4.4"
|
||||
@@ -1400,21 +1505,6 @@ files = [
|
||||
{file = "typing_extensions-4.11.0.tar.gz", hash = "sha256:83f085bd5ca59c80295fc2a82ab5dac679cbe02b9f33f7d83af68e241bea51b0"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "typing-inspect"
|
||||
version = "0.9.0"
|
||||
description = "Runtime inspection utilities for typing module."
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
files = [
|
||||
{file = "typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f"},
|
||||
{file = "typing_inspect-0.9.0.tar.gz", hash = "sha256:b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
mypy-extensions = ">=0.3.0"
|
||||
typing-extensions = ">=3.7.4"
|
||||
|
||||
[[package]]
|
||||
name = "urllib3"
|
||||
version = "2.2.1"
|
||||
|
||||
133 libs/partners/together/poetry.lock (generated)
@@ -110,6 +110,17 @@ files = [
[package.dependencies]
frozenlist = ">=1.1.0"

[[package]]
name = "annotated-types"
version = "0.7.0"
description = "Reusable constraint types to use with typing.Annotated"
optional = false
python-versions = ">=3.8"
files = [
{file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"},
{file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"},
]

[[package]]
name = "anyio"
version = "4.3.0"
@@ -566,7 +577,7 @@ files = [

[[package]]
name = "langchain-core"
-version = "0.2.7"
+version = "0.2.9"
description = "Building applications with LLMs through composability"
optional = false
python-versions = ">=3.8.1,<4.0"
@@ -577,9 +588,12 @@ develop = true
jsonpatch = "^1.33"
langsmith = "^0.1.75"
packaging = ">=23.2,<25"
-pydantic = ">=1,<3"
+pydantic = [
+    {version = ">=1,<3", markers = "python_full_version < \"3.12.4\""},
+    {version = ">=2.7.4,<3.0.0", markers = "python_full_version >= \"3.12.4\""},
+]
PyYAML = ">=5.3"
-tenacity = "^8.1.0"
+tenacity = "^8.1.0,!=8.4.0"

[package.source]
type = "directory"
@@ -613,6 +627,7 @@ files = []
develop = true

[package.dependencies]
+httpx = "^0.27.0"
langchain-core = ">=0.1.40,<0.3"
pytest = ">=7,<9"

@@ -1069,6 +1084,116 @@ typing-extensions = ">=4.2.0"
dotenv = ["python-dotenv (>=0.10.4)"]
email = ["email-validator (>=1.0.3)"]

[[package]]
name = "pydantic"
version = "2.7.4"
description = "Data validation using Python type hints"
optional = false
python-versions = ">=3.8"
files = [
{file = "pydantic-2.7.4-py3-none-any.whl", hash = "sha256:ee8538d41ccb9c0a9ad3e0e5f07bf15ed8015b481ced539a1759d8cc89ae90d0"},
{file = "pydantic-2.7.4.tar.gz", hash = "sha256:0c84efd9548d545f63ac0060c1e4d39bb9b14db8b3c0652338aecc07b5adec52"},
]

[package.dependencies]
annotated-types = ">=0.4.0"
pydantic-core = "2.18.4"
typing-extensions = ">=4.6.1"

[package.extras]
email = ["email-validator (>=2.0.0)"]

[[package]]
name = "pydantic-core"
version = "2.18.4"
description = "Core functionality for Pydantic validation and serialization"
optional = false
python-versions = ">=3.8"
files = [
{file = "pydantic_core-2.18.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:f76d0ad001edd426b92233d45c746fd08f467d56100fd8f30e9ace4b005266e4"},
{file = "pydantic_core-2.18.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:59ff3e89f4eaf14050c8022011862df275b552caef8082e37b542b066ce1ff26"},
{file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a55b5b16c839df1070bc113c1f7f94a0af4433fcfa1b41799ce7606e5c79ce0a"},
{file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4d0dcc59664fcb8974b356fe0a18a672d6d7cf9f54746c05f43275fc48636851"},
{file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8951eee36c57cd128f779e641e21eb40bc5073eb28b2d23f33eb0ef14ffb3f5d"},
{file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4701b19f7e3a06ea655513f7938de6f108123bf7c86bbebb1196eb9bd35cf724"},
{file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e00a3f196329e08e43d99b79b286d60ce46bed10f2280d25a1718399457e06be"},
{file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:97736815b9cc893b2b7f663628e63f436018b75f44854c8027040e05230eeddb"},
{file = "pydantic_core-2.18.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6891a2ae0e8692679c07728819b6e2b822fb30ca7445f67bbf6509b25a96332c"},
{file = "pydantic_core-2.18.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bc4ff9805858bd54d1a20efff925ccd89c9d2e7cf4986144b30802bf78091c3e"},
{file = "pydantic_core-2.18.4-cp310-none-win32.whl", hash = "sha256:1b4de2e51bbcb61fdebd0ab86ef28062704f62c82bbf4addc4e37fa4b00b7cbc"},
{file = "pydantic_core-2.18.4-cp310-none-win_amd64.whl", hash = "sha256:6a750aec7bf431517a9fd78cb93c97b9b0c496090fee84a47a0d23668976b4b0"},
{file = "pydantic_core-2.18.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:942ba11e7dfb66dc70f9ae66b33452f51ac7bb90676da39a7345e99ffb55402d"},
{file = "pydantic_core-2.18.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b2ebef0e0b4454320274f5e83a41844c63438fdc874ea40a8b5b4ecb7693f1c4"},
{file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a642295cd0c8df1b86fc3dced1d067874c353a188dc8e0f744626d49e9aa51c4"},
{file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f09baa656c904807e832cf9cce799c6460c450c4ad80803517032da0cd062e2"},
{file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:98906207f29bc2c459ff64fa007afd10a8c8ac080f7e4d5beff4c97086a3dabd"},
{file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:19894b95aacfa98e7cb093cd7881a0c76f55731efad31073db4521e2b6ff5b7d"},
{file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0fbbdc827fe5e42e4d196c746b890b3d72876bdbf160b0eafe9f0334525119c8"},
{file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f85d05aa0918283cf29a30b547b4df2fbb56b45b135f9e35b6807cb28bc47951"},
{file = "pydantic_core-2.18.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e85637bc8fe81ddb73fda9e56bab24560bdddfa98aa64f87aaa4e4b6730c23d2"},
{file = "pydantic_core-2.18.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2f5966897e5461f818e136b8451d0551a2e77259eb0f73a837027b47dc95dab9"},
{file = "pydantic_core-2.18.4-cp311-none-win32.whl", hash = "sha256:44c7486a4228413c317952e9d89598bcdfb06399735e49e0f8df643e1ccd0558"},
{file = "pydantic_core-2.18.4-cp311-none-win_amd64.whl", hash = "sha256:8a7164fe2005d03c64fd3b85649891cd4953a8de53107940bf272500ba8a788b"},
{file = "pydantic_core-2.18.4-cp311-none-win_arm64.whl", hash = "sha256:4e99bc050fe65c450344421017f98298a97cefc18c53bb2f7b3531eb39bc7805"},
{file = "pydantic_core-2.18.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:6f5c4d41b2771c730ea1c34e458e781b18cc668d194958e0112455fff4e402b2"},
{file = "pydantic_core-2.18.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2fdf2156aa3d017fddf8aea5adfba9f777db1d6022d392b682d2a8329e087cef"},
{file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4748321b5078216070b151d5271ef3e7cc905ab170bbfd27d5c83ee3ec436695"},
{file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:847a35c4d58721c5dc3dba599878ebbdfd96784f3fb8bb2c356e123bdcd73f34"},
{file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c40d4eaad41f78e3bbda31b89edc46a3f3dc6e171bf0ecf097ff7a0ffff7cb1"},
{file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:21a5e440dbe315ab9825fcd459b8814bb92b27c974cbc23c3e8baa2b76890077"},
{file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01dd777215e2aa86dfd664daed5957704b769e726626393438f9c87690ce78c3"},
{file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4b06beb3b3f1479d32befd1f3079cc47b34fa2da62457cdf6c963393340b56e9"},
{file = "pydantic_core-2.18.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:564d7922e4b13a16b98772441879fcdcbe82ff50daa622d681dd682175ea918c"},
{file = "pydantic_core-2.18.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0eb2a4f660fcd8e2b1c90ad566db2b98d7f3f4717c64fe0a83e0adb39766d5b8"},
{file = "pydantic_core-2.18.4-cp312-none-win32.whl", hash = "sha256:8b8bab4c97248095ae0c4455b5a1cd1cdd96e4e4769306ab19dda135ea4cdb07"},
{file = "pydantic_core-2.18.4-cp312-none-win_amd64.whl", hash = "sha256:14601cdb733d741b8958224030e2bfe21a4a881fb3dd6fbb21f071cabd48fa0a"},
{file = "pydantic_core-2.18.4-cp312-none-win_arm64.whl", hash = "sha256:c1322d7dd74713dcc157a2b7898a564ab091ca6c58302d5c7b4c07296e3fd00f"},
{file = "pydantic_core-2.18.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:823be1deb01793da05ecb0484d6c9e20baebb39bd42b5d72636ae9cf8350dbd2"},
{file = "pydantic_core-2.18.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ebef0dd9bf9b812bf75bda96743f2a6c5734a02092ae7f721c048d156d5fabae"},
{file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ae1d6df168efb88d7d522664693607b80b4080be6750c913eefb77e34c12c71a"},
{file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f9899c94762343f2cc2fc64c13e7cae4c3cc65cdfc87dd810a31654c9b7358cc"},
{file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:99457f184ad90235cfe8461c4d70ab7dd2680e28821c29eca00252ba90308c78"},
{file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18f469a3d2a2fdafe99296a87e8a4c37748b5080a26b806a707f25a902c040a8"},
{file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7cdf28938ac6b8b49ae5e92f2735056a7ba99c9b110a474473fd71185c1af5d"},
{file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:938cb21650855054dc54dfd9120a851c974f95450f00683399006aa6e8abb057"},
{file = "pydantic_core-2.18.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:44cd83ab6a51da80fb5adbd9560e26018e2ac7826f9626bc06ca3dc074cd198b"},
{file = "pydantic_core-2.18.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:972658f4a72d02b8abfa2581d92d59f59897d2e9f7e708fdabe922f9087773af"},
{file = "pydantic_core-2.18.4-cp38-none-win32.whl", hash = "sha256:1d886dc848e60cb7666f771e406acae54ab279b9f1e4143babc9c2258213daa2"},
{file = "pydantic_core-2.18.4-cp38-none-win_amd64.whl", hash = "sha256:bb4462bd43c2460774914b8525f79b00f8f407c945d50881568f294c1d9b4443"},
{file = "pydantic_core-2.18.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:44a688331d4a4e2129140a8118479443bd6f1905231138971372fcde37e43528"},
{file = "pydantic_core-2.18.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a2fdd81edd64342c85ac7cf2753ccae0b79bf2dfa063785503cb85a7d3593223"},
{file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:86110d7e1907ab36691f80b33eb2da87d780f4739ae773e5fc83fb272f88825f"},
{file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:46387e38bd641b3ee5ce247563b60c5ca098da9c56c75c157a05eaa0933ed154"},
{file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:123c3cec203e3f5ac7b000bd82235f1a3eced8665b63d18be751f115588fea30"},
{file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dc1803ac5c32ec324c5261c7209e8f8ce88e83254c4e1aebdc8b0a39f9ddb443"},
{file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:53db086f9f6ab2b4061958d9c276d1dbe3690e8dd727d6abf2321d6cce37fa94"},
{file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:abc267fa9837245cc28ea6929f19fa335f3dc330a35d2e45509b6566dc18be23"},
{file = "pydantic_core-2.18.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a0d829524aaefdebccb869eed855e2d04c21d2d7479b6cada7ace5448416597b"},
{file = "pydantic_core-2.18.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:509daade3b8649f80d4e5ff21aa5673e4ebe58590b25fe42fac5f0f52c6f034a"},
{file = "pydantic_core-2.18.4-cp39-none-win32.whl", hash = "sha256:ca26a1e73c48cfc54c4a76ff78df3727b9d9f4ccc8dbee4ae3f73306a591676d"},
{file = "pydantic_core-2.18.4-cp39-none-win_amd64.whl", hash = "sha256:c67598100338d5d985db1b3d21f3619ef392e185e71b8d52bceacc4a7771ea7e"},
{file = "pydantic_core-2.18.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:574d92eac874f7f4db0ca653514d823a0d22e2354359d0759e3f6a406db5d55d"},
{file = "pydantic_core-2.18.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1f4d26ceb5eb9eed4af91bebeae4b06c3fb28966ca3a8fb765208cf6b51102ab"},
{file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77450e6d20016ec41f43ca4a6c63e9fdde03f0ae3fe90e7c27bdbeaece8b1ed4"},
{file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d323a01da91851a4f17bf592faf46149c9169d68430b3146dcba2bb5e5719abc"},
{file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43d447dd2ae072a0065389092a231283f62d960030ecd27565672bd40746c507"},
{file = "pydantic_core-2.18.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:578e24f761f3b425834f297b9935e1ce2e30f51400964ce4801002435a1b41ef"},
{file = "pydantic_core-2.18.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:81b5efb2f126454586d0f40c4d834010979cb80785173d1586df845a632e4e6d"},
{file = "pydantic_core-2.18.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ab86ce7c8f9bea87b9d12c7f0af71102acbf5ecbc66c17796cff45dae54ef9a5"},
{file = "pydantic_core-2.18.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:90afc12421df2b1b4dcc975f814e21bc1754640d502a2fbcc6d41e77af5ec312"},
{file = "pydantic_core-2.18.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:51991a89639a912c17bef4b45c87bd83593aee0437d8102556af4885811d59f5"},
{file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:293afe532740370aba8c060882f7d26cfd00c94cae32fd2e212a3a6e3b7bc15e"},
{file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b48ece5bde2e768197a2d0f6e925f9d7e3e826f0ad2271120f8144a9db18d5c8"},
{file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:eae237477a873ab46e8dd748e515c72c0c804fb380fbe6c85533c7de51f23a8f"},
{file = "pydantic_core-2.18.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:834b5230b5dfc0c1ec37b2fda433b271cbbc0e507560b5d1588e2cc1148cf1ce"},
{file = "pydantic_core-2.18.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e858ac0a25074ba4bce653f9b5d0a85b7456eaddadc0ce82d3878c22489fa4ee"},
{file = "pydantic_core-2.18.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2fd41f6eff4c20778d717af1cc50eca52f5afe7805ee530a4fbd0bae284f16e9"},
{file = "pydantic_core-2.18.4.tar.gz", hash = "sha256:ec3beeada09ff865c344ff3bc2f427f5e6c26401cc6113d77e372c3fdac73864"},
]

[package.dependencies]
typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"

[[package]]
name = "pygments"
version = "2.18.0"
@@ -1721,4 +1846,4 @@ multidict = ">=4.0"
[metadata]
lock-version = "2.0"
python-versions = ">=3.8.1,<4.0"
-content-hash = "8a868382f8f3b693dccc1ce99428cdf9d6f8b6f77b3403c342c2bcc7b8526db9"
+content-hash = "a7b827cdc4ecd3543b7aa86c9f153345314d9be05fadba64d27259c804ee69a1"
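
Editor's note: the split `pydantic` requirement above selects its bound via PEP 508 environment markers on `python_full_version`. As a minimal sketch (an editorial illustration, not part of this diff), the third-party `packaging` library that Poetry builds on can evaluate such a marker against the running interpreter:

```python
# Sketch: evaluate the marker used in the langchain-core pydantic constraint.
# Calling evaluate() with no arguments checks against the current interpreter.
from packaging.markers import Marker

marker = Marker('python_full_version < "3.12.4"')
print(marker.evaluate())  # True on e.g. CPython 3.11.9, so pydantic >=1,<3 applies
```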

@@ -30,7 +30,6 @@ pytest-asyncio = "^0.21.1"
langchain-openai = { path = "../openai", develop = true }
langchain-core = { path = "../../core", develop = true }
docarray = "^0.32.1"
-pydantic = "^1.10.9"
langchain-standard-tests = { path = "../../standard-tests", develop = true }

[tool.poetry.group.codespell]

@@ -1,5 +1,7 @@
+import base64
import json

+import httpx
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessage, AIMessageChunk, HumanMessage, ToolMessage
@@ -176,10 +178,7 @@ class ChatModelIntegrationTests(ChatModelTests):
        result_list_content = model_with_tools.invoke(messages_list_content)
        assert isinstance(result_list_content, AIMessage)

-    def test_structured_few_shot_examples(
-        self,
-        model: BaseChatModel,
-    ) -> None:
+    def test_structured_few_shot_examples(self, model: BaseChatModel) -> None:
        """
        Test that model can process few-shot examples with tool calls.
        """
@@ -212,3 +211,19 @@ class ChatModelIntegrationTests(ChatModelTests):
        ]
        result_string_content = model_with_tools.invoke(messages_string_content)
        assert isinstance(result_string_content, AIMessage)

    def test_image_inputs(self, model: BaseChatModel) -> None:
        if not self.supports_image_inputs:
            return
        image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
        image_data = base64.b64encode(httpx.get(image_url).content).decode("utf-8")
        message = HumanMessage(
            content=[
                {"type": "text", "text": "describe the weather in this image"},
                {
                    "type": "image_url",
                    "image_url": {"url": f"data:image/jpeg;base64,{image_data}"},
                },
            ],
        )
        model.invoke([message])

libs/standard-tests/poetry.lock (generated, 93 changed lines)

@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand.

[[package]]
name = "annotated-types"
@@ -14,6 +14,28 @@ files = [
[package.dependencies]
typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""}

[[package]]
name = "anyio"
version = "4.4.0"
description = "High level compatibility layer for multiple asynchronous event loop implementations"
optional = false
python-versions = ">=3.8"
files = [
{file = "anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7"},
{file = "anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94"},
]

[package.dependencies]
exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""}
idna = ">=2.8"
sniffio = ">=1.1"
typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""}

[package.extras]
doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"]
test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"]
trio = ["trio (>=0.23)"]

[[package]]
name = "certifi"
version = "2024.2.2"
@@ -166,6 +188,62 @@ files = [
[package.extras]
test = ["pytest (>=6)"]

[[package]]
name = "h11"
version = "0.14.0"
description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
optional = false
python-versions = ">=3.7"
files = [
{file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"},
{file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"},
]

[[package]]
name = "httpcore"
version = "1.0.5"
description = "A minimal low-level HTTP client."
optional = false
python-versions = ">=3.8"
files = [
{file = "httpcore-1.0.5-py3-none-any.whl", hash = "sha256:421f18bac248b25d310f3cacd198d55b8e6125c107797b609ff9b7a6ba7991b5"},
{file = "httpcore-1.0.5.tar.gz", hash = "sha256:34a38e2f9291467ee3b44e89dd52615370e152954ba21721378a87b2960f7a61"},
]

[package.dependencies]
certifi = "*"
h11 = ">=0.13,<0.15"

[package.extras]
asyncio = ["anyio (>=4.0,<5.0)"]
http2 = ["h2 (>=3,<5)"]
socks = ["socksio (==1.*)"]
trio = ["trio (>=0.22.0,<0.26.0)"]

[[package]]
name = "httpx"
version = "0.27.0"
description = "The next generation HTTP client."
optional = false
python-versions = ">=3.8"
files = [
{file = "httpx-0.27.0-py3-none-any.whl", hash = "sha256:71d5465162c13681bff01ad59b2cc68dd838ea1f10e51574bac27103f00c91a5"},
{file = "httpx-0.27.0.tar.gz", hash = "sha256:a0cb88a46f32dc874e04ee956e4c2764aba2aa228f650b06788ba6bda2962ab5"},
]

[package.dependencies]
anyio = "*"
certifi = "*"
httpcore = "==1.*"
idna = "*"
sniffio = "*"

[package.extras]
brotli = ["brotli", "brotlicffi"]
cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"]
http2 = ["h2 (>=3,<5)"]
socks = ["socksio (==1.*)"]

[[package]]
name = "idna"
version = "3.7"
@@ -634,6 +712,17 @@ files = [
{file = "ruff-0.1.15.tar.gz", hash = "sha256:f6dfa8c1b21c913c326919056c390966648b680966febcb796cc9d1aaab8564e"},
]

[[package]]
name = "sniffio"
version = "1.3.1"
description = "Sniff out which async library your code is running under"
optional = false
python-versions = ">=3.7"
files = [
{file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"},
{file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"},
]

[[package]]
name = "tenacity"
version = "8.3.0"
@@ -691,4 +780,4 @@ zstd = ["zstandard (>=0.18.0)"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.8.1,<4.0"
-content-hash = "1c457c23b53cf97bcf624fa86fd6d18078c3978fe24fda98c2136c73791c1acb"
+content-hash = "a27025e6afa0275f153a9fd98c890c16187f9d01f5ca0b60aae23cee1a7d9dcc"

@@ -14,6 +14,7 @@ license = "MIT"
python = ">=3.8.1,<4.0"
langchain-core = ">=0.1.40,<0.3"
pytest = ">=7,<9"
+httpx = "^0.27.0"

[tool.poetry.group.test]
optional = true

@@ -55,7 +55,7 @@ class RecursiveJsonSplitter:
        Split json into maximum size dictionaries while preserving structure.
        """
        current_path = current_path or []
-        chunks = chunks or [{}]
+        chunks = chunks if chunks is not None else [{}]
        if isinstance(data, dict):
            for key, value in data.items():
                new_path = current_path + [key]
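
Editor's note: the `is not None` guard above matters because `chunks or [{}]` would also discard a falsy value such as an explicitly passed empty list, and the previous code let accumulator state leak between calls (see `test_split_json_many_calls` later in this diff). A minimal standalone sketch of the pitfall; `accumulate_or` and `accumulate_is_none` are hypothetical helpers, not the library's API:

```python
# Hypothetical helpers illustrating the two guard styles; not langchain code.
from typing import Dict, List, Optional


def accumulate_or(data: Dict, chunks: List[Dict] = [{}]) -> List[Dict]:
    # Mutable default: every call made without `chunks` shares this one list.
    chunks[-1].update(data)
    return chunks


def accumulate_is_none(data: Dict, chunks: Optional[List[Dict]] = None) -> List[Dict]:
    chunks = chunks if chunks is not None else [{}]  # fresh list per call
    chunks[-1].update(data)
    return chunks


a = accumulate_or({"a": 1})
accumulate_or({"b": 2})
print(a)  # [{'a': 1, 'b': 2}] -- the first result was silently mutated

c = accumulate_is_none({"a": 1})
accumulate_is_none({"b": 2})
print(c)  # [{'a': 1}] -- independent results, the behavior the fix restores
```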

@@ -1,6 +1,7 @@
from __future__ import annotations

-from typing import Any, Dict, List, Tuple, TypedDict
+import re
+from typing import Any, Dict, List, Tuple, TypedDict, Union

from langchain_core.documents import Document

@@ -221,3 +222,161 @@ class HeaderType(TypedDict):
    level: int
    name: str
    data: str


class ExperimentalMarkdownSyntaxTextSplitter:
    """
    An experimental text splitter for handling Markdown syntax.

    This splitter aims to retain the exact whitespace of the original text while
    extracting structured metadata, such as headers. It is a re-implementation of the
    MarkdownHeaderTextSplitter with notable changes to the approach and
    additional features.

    Key Features:
    - Retains the original whitespace and formatting of the Markdown text.
    - Extracts headers, code blocks, and horizontal rules as metadata.
    - Splits out code blocks and includes the language in the "Code" metadata key.
    - Splits text on horizontal rules (`---`) as well.
    - Defaults to sensible splitting behavior, which can be overridden using the
      `headers_to_split_on` parameter.

    Parameters:
    ----------
    headers_to_split_on : List[Tuple[str, str]], optional
        Headers to split on, defaulting to common Markdown headers if not specified.
    return_each_line : bool, optional
        When set to True, returns each line as a separate chunk. Default is False.

    Usage example:
    --------------
    >>> headers_to_split_on = [
    >>>     ("#", "Header 1"),
    >>>     ("##", "Header 2"),
    >>> ]
    >>> splitter = ExperimentalMarkdownSyntaxTextSplitter(
    >>>     headers_to_split_on=headers_to_split_on
    >>> )
    >>> chunks = splitter.split_text(text)
    >>> for chunk in chunks:
    >>>     print(chunk)

    This class is currently experimental and subject to change based on feedback and
    further development.
    """

    DEFAULT_HEADER_KEYS = {
        "#": "Header 1",
        "##": "Header 2",
        "###": "Header 3",
        "####": "Header 4",
        "#####": "Header 5",
        "######": "Header 6",
    }

    def __init__(
        self,
        headers_to_split_on: Union[List[Tuple[str, str]], None] = None,
        return_each_line: bool = False,
        strip_headers: bool = True,
    ):
        self.chunks: List[Document] = []
        self.current_chunk = Document(page_content="")
        self.current_header_stack: List[Tuple[int, str]] = []
        self.strip_headers = strip_headers
        if headers_to_split_on:
            self.splittable_headers = dict(headers_to_split_on)
        else:
            self.splittable_headers = self.DEFAULT_HEADER_KEYS

        self.return_each_line = return_each_line

    def split_text(self, text: str) -> List[Document]:
        raw_lines = text.splitlines(keepends=True)

        while raw_lines:
            raw_line = raw_lines.pop(0)
            header_match = self._match_header(raw_line)
            code_match = self._match_code(raw_line)
            horz_match = self._match_horz(raw_line)
            if header_match:
                self._complete_chunk_doc()

                if not self.strip_headers:
                    self.current_chunk.page_content += raw_line

                # add the header to the stack
                header_depth = len(header_match.group(1))
                header_text = header_match.group(2)
                self._resolve_header_stack(header_depth, header_text)
            elif code_match:
                self._complete_chunk_doc()
                self.current_chunk.page_content = self._resolve_code_chunk(
                    raw_line, raw_lines
                )
                self.current_chunk.metadata["Code"] = code_match.group(1)
                self._complete_chunk_doc()
            elif horz_match:
                self._complete_chunk_doc()
            else:
                self.current_chunk.page_content += raw_line

        self._complete_chunk_doc()
        # I don't see why `return_each_line` is a necessary feature of this splitter.
        # It's easy enough to do outside of the class and the caller can have more
        # control over it.
        if self.return_each_line:
            return [
                Document(page_content=line, metadata=chunk.metadata)
                for chunk in self.chunks
                for line in chunk.page_content.splitlines()
                if line and not line.isspace()
            ]
        return self.chunks

    def _resolve_header_stack(self, header_depth: int, header_text: str) -> None:
        for i, (depth, _) in enumerate(self.current_header_stack):
            if depth == header_depth:
                self.current_header_stack[i] = (header_depth, header_text)
                self.current_header_stack = self.current_header_stack[: i + 1]
                return
        self.current_header_stack.append((header_depth, header_text))

    def _resolve_code_chunk(self, current_line: str, raw_lines: List[str]) -> str:
        chunk = current_line
        while raw_lines:
            raw_line = raw_lines.pop(0)
            chunk += raw_line
            if self._match_code(raw_line):
                return chunk
        return ""

    def _complete_chunk_doc(self) -> None:
        chunk_content = self.current_chunk.page_content
        # Discard any empty documents
        if chunk_content and not chunk_content.isspace():
            # Apply the header stack as metadata
            for depth, value in self.current_header_stack:
                header_key = self.splittable_headers.get("#" * depth)
                self.current_chunk.metadata[header_key] = value
            self.chunks.append(self.current_chunk)
        # Reset the current chunk
        self.current_chunk = Document(page_content="")

    # Match methods
    def _match_header(self, line: str) -> Union[re.Match, None]:
        match = re.match(r"^(#{1,6}) (.*)", line)
        # Only matches on the configured headers
        if match and match.group(1) in self.splittable_headers:
            return match
        return None

    def _match_code(self, line: str) -> Union[re.Match, None]:
        matches = [re.match(rule, line) for rule in [r"^```(.*)", r"^~~~(.*)"]]
        return next((match for match in matches if match), None)

    def _match_horz(self, line: str) -> Union[re.Match, None]:
        matches = [
            re.match(rule, line) for rule in [r"^\*\*\*+\n", r"^---+\n", r"^___+\n"]
        ]
        return next((match for match in matches if match), None)
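
Editor's note: a short usage sketch of the class added above, assuming a build of `langchain_text_splitters` that includes this change; header names follow the `DEFAULT_HEADER_KEYS` mapping:

```python
from langchain_text_splitters.markdown import ExperimentalMarkdownSyntaxTextSplitter

text = (
    "# Title\n"
    "Body text under the title\n"
    "---\n"
    "A second chunk that keeps the same header metadata\n"
)

splitter = ExperimentalMarkdownSyntaxTextSplitter()
for doc in splitter.split_text(text):
    # Each chunk is a Document; active headers live in metadata, and the
    # original whitespace of the body text is preserved.
    print(doc.metadata, repr(doc.page_content))
# {'Header 1': 'Title'} 'Body text under the title\n'
# {'Header 1': 'Title'} 'A second chunk that keeps the same header metadata\n'
```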

@@ -19,7 +19,10 @@ from langchain_text_splitters.base import split_text_on_tokens
from langchain_text_splitters.character import CharacterTextSplitter
from langchain_text_splitters.html import HTMLHeaderTextSplitter, HTMLSectionSplitter
from langchain_text_splitters.json import RecursiveJsonSplitter
-from langchain_text_splitters.markdown import MarkdownHeaderTextSplitter
+from langchain_text_splitters.markdown import (
+    ExperimentalMarkdownSyntaxTextSplitter,
+    MarkdownHeaderTextSplitter,
+)
from langchain_text_splitters.python import PythonCodeTextSplitter

FAKE_PYTHON_TEXT = """
@@ -1296,6 +1299,210 @@ def test_md_header_text_splitter_with_invisible_characters(characters: str) -> N
    assert output == expected_output


EXPERIMENTAL_MARKDOWN_DOCUMENT = (
    "# My Header 1\n"
    "Content for header 1\n"
    "## Header 2\n"
    "Content for header 2\n"
    "```python\n"
    "def func_definition():\n"
    "    print('Keep the whitespace consistent')\n"
    "```\n"
    "# Header 1 again\n"
    "We should also split on the horizontal line\n"
    "----\n"
    "This will be a new doc but with the same header metadata\n\n"
    "And it includes a new paragraph"
)


def test_experimental_markdown_syntax_text_splitter() -> None:
    """Test experimental markdown syntax splitter."""

    markdown_splitter = ExperimentalMarkdownSyntaxTextSplitter()
    output = markdown_splitter.split_text(EXPERIMENTAL_MARKDOWN_DOCUMENT)

    expected_output = [
        Document(
            page_content="Content for header 1\n",
            metadata={"Header 1": "My Header 1"},
        ),
        Document(
            page_content="Content for header 2\n",
            metadata={"Header 1": "My Header 1", "Header 2": "Header 2"},
        ),
        Document(
            page_content=(
                "```python\ndef func_definition():\n    "
                "print('Keep the whitespace consistent')\n```\n"
            ),
            metadata={
                "Code": "python",
                "Header 1": "My Header 1",
                "Header 2": "Header 2",
            },
        ),
        Document(
            page_content="We should also split on the horizontal line\n",
            metadata={"Header 1": "Header 1 again"},
        ),
        Document(
            page_content=(
                "This will be a new doc but with the same header metadata\n\n"
                "And it includes a new paragraph"
            ),
            metadata={"Header 1": "Header 1 again"},
        ),
    ]

    assert output == expected_output


def test_experimental_markdown_syntax_text_splitter_header_configuration() -> None:
    """Test experimental markdown syntax splitter."""

    headers_to_split_on = [("#", "Encabezamiento 1")]

    markdown_splitter = ExperimentalMarkdownSyntaxTextSplitter(
        headers_to_split_on=headers_to_split_on
    )
    output = markdown_splitter.split_text(EXPERIMENTAL_MARKDOWN_DOCUMENT)

    expected_output = [
        Document(
            page_content="Content for header 1\n## Header 2\nContent for header 2\n",
            metadata={"Encabezamiento 1": "My Header 1"},
        ),
        Document(
            page_content=(
                "```python\ndef func_definition():\n    "
                "print('Keep the whitespace consistent')\n```\n"
            ),
            metadata={"Code": "python", "Encabezamiento 1": "My Header 1"},
        ),
        Document(
            page_content="We should also split on the horizontal line\n",
            metadata={"Encabezamiento 1": "Header 1 again"},
        ),
        Document(
            page_content=(
                "This will be a new doc but with the same header metadata\n\n"
                "And it includes a new paragraph"
            ),
            metadata={"Encabezamiento 1": "Header 1 again"},
        ),
    ]

    assert output == expected_output


def test_experimental_markdown_syntax_text_splitter_with_headers() -> None:
    """Test experimental markdown syntax splitter."""

    markdown_splitter = ExperimentalMarkdownSyntaxTextSplitter(strip_headers=False)
    output = markdown_splitter.split_text(EXPERIMENTAL_MARKDOWN_DOCUMENT)

    expected_output = [
        Document(
            page_content="# My Header 1\nContent for header 1\n",
            metadata={"Header 1": "My Header 1"},
        ),
        Document(
            page_content="## Header 2\nContent for header 2\n",
            metadata={"Header 1": "My Header 1", "Header 2": "Header 2"},
        ),
        Document(
            page_content=(
                "```python\ndef func_definition():\n    "
                "print('Keep the whitespace consistent')\n```\n"
            ),
            metadata={
                "Code": "python",
                "Header 1": "My Header 1",
                "Header 2": "Header 2",
            },
        ),
        Document(
            page_content=(
                "# Header 1 again\nWe should also split on the horizontal line\n"
            ),
            metadata={"Header 1": "Header 1 again"},
        ),
        Document(
            page_content=(
                "This will be a new doc but with the same header metadata\n\n"
                "And it includes a new paragraph"
            ),
            metadata={"Header 1": "Header 1 again"},
        ),
    ]

    assert output == expected_output


def test_experimental_markdown_syntax_text_splitter_split_lines() -> None:
    """Test experimental markdown syntax splitter."""

    markdown_splitter = ExperimentalMarkdownSyntaxTextSplitter(return_each_line=True)
    output = markdown_splitter.split_text(EXPERIMENTAL_MARKDOWN_DOCUMENT)

    expected_output = [
        Document(
            page_content="Content for header 1", metadata={"Header 1": "My Header 1"}
        ),
        Document(
            page_content="Content for header 2",
            metadata={"Header 1": "My Header 1", "Header 2": "Header 2"},
        ),
        Document(
            page_content="```python",
            metadata={
                "Code": "python",
                "Header 1": "My Header 1",
                "Header 2": "Header 2",
            },
        ),
        Document(
            page_content="def func_definition():",
            metadata={
                "Code": "python",
                "Header 1": "My Header 1",
                "Header 2": "Header 2",
            },
        ),
        Document(
            page_content="    print('Keep the whitespace consistent')",
            metadata={
                "Code": "python",
                "Header 1": "My Header 1",
                "Header 2": "Header 2",
            },
        ),
        Document(
            page_content="```",
            metadata={
                "Code": "python",
                "Header 1": "My Header 1",
                "Header 2": "Header 2",
            },
        ),
        Document(
            page_content="We should also split on the horizontal line",
            metadata={"Header 1": "Header 1 again"},
        ),
        Document(
            page_content="This will be a new doc but with the same header metadata",
            metadata={"Header 1": "Header 1 again"},
        ),
        Document(
            page_content="And it includes a new paragraph",
            metadata={"Header 1": "Header 1 again"},
        ),
    ]

    assert output == expected_output


def test_solidity_code_splitter() -> None:
    splitter = RecursiveCharacterTextSplitter.from_language(
        Language.SOL, chunk_size=CHUNK_SIZE, chunk_overlap=0
@@ -1746,3 +1953,24 @@ def test_split_json_with_lists() -> None:
    texts_list = splitter.split_text(json_data=test_data_list, convert_lists=True)

    assert len(texts_list) >= len(texts)


def test_split_json_many_calls() -> None:
    x = {"a": 1, "b": 2}
    y = {"c": 3, "d": 4}

    splitter = RecursiveJsonSplitter()
    chunk0 = splitter.split_json(x)
    assert chunk0 == [{"a": 1, "b": 2}]

    chunk1 = splitter.split_json(y)
    assert chunk1 == [{"c": 3, "d": 4}]

    # chunk0 is not altered by creating chunk1
    assert chunk0 == [{"a": 1, "b": 2}]

    chunk0_output = [{"a": 1, "b": 2}]
    chunk1_output = [{"c": 3, "d": 4}]

    assert chunk0 == chunk0_output
    assert chunk1 == chunk1_output