mirror of https://github.com/hwchase17/langchain.git
synced 2026-02-09 10:41:52 +00:00

Comparing v0.0.333 ... wfh/ossinv — 3 commits:

- 6ae65b94a5
- 331915f9f0
- ab5bede903
File diff suppressed because one or more lines are too long
@@ -38,7 +38,6 @@ Notebook | Description
[multiagent_bidding.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/multiagent_bidding.ipynb) | Implement a multi-agent simulation where agents bid to speak, with the highest bidder speaking next, demonstrated through a fictitious presidential debate example.
[myscale_vector_sql.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/myscale_vector_sql.ipynb) | Access and interact with the MyScale integrated vector database, which can enhance the performance of language model (LLM) applications.
[openai_functions_retrieval_qa....](https://github.com/langchain-ai/langchain/tree/master/cookbook/openai_functions_retrieval_qa.ipynb) | Structure response output in a question-answering system by incorporating OpenAI functions into a retrieval pipeline.
[openai_v1_cookbook.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/openai_v1_cookbook.ipynb) | Explore new functionality released alongside the V1 release of the OpenAI Python library.
[petting_zoo.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/petting_zoo.ipynb) | Create multi-agent simulations with simulated environments using the PettingZoo library.
[plan_and_execute_agent.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/plan_and_execute_agent.ipynb) | Create plan-and-execute agents that accomplish objectives by planning tasks with a language model (LLM) and executing them with a separate agent.
[press_releases.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/press_releases.ipynb) | Retrieve and query company press release data powered by [Kay.ai](https://kay.ai).
@@ -1,210 +0,0 @@

# Extraction with OpenAI Tools

Performing extraction has never been easier! OpenAI's tool-calling ability is a perfect fit for this, because it allows for extracting multiple elements of different types from a single text.

Models after 1106 use tools and support "parallel function calling", which makes this very easy.

```python
from typing import List, Optional

from langchain.chains.openai_tools import create_extraction_chain_pydantic
from langchain.chat_models import ChatOpenAI
from langchain.pydantic_v1 import BaseModel
```

```python
# Make sure to use a recent model that supports tools
model = ChatOpenAI(model="gpt-3.5-turbo-1106")
```

```python
# Pydantic is an easy way to define a schema
class Person(BaseModel):
    """Information about people to extract."""

    name: str
    age: Optional[int] = None
```

```python
chain = create_extraction_chain_pydantic(Person, model)
```

```python
chain.invoke({"input": "jane is 2 and bob is 3"})
```

```
[Person(name='jane', age=2), Person(name='bob', age=3)]
```

```python
# Let's define another element
class Class(BaseModel):
    """Information about classes to extract."""

    teacher: str
    students: List[str]
```

```python
chain = create_extraction_chain_pydantic([Person, Class], model)
```

```python
chain.invoke({"input": "jane is 2 and bob is 3 and they are in Mrs Sampson's class"})
```

```
[Person(name='jane', age=2),
 Person(name='bob', age=3),
 Class(teacher='Mrs Sampson', students=['jane', 'bob'])]
```

## Under the hood

Under the hood, this is a simple chain:

```python
from typing import List, Optional, Type, Union

from langchain.output_parsers import PydanticToolsParser
from langchain.prompts import ChatPromptTemplate
from langchain.pydantic_v1 import BaseModel
from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.runnable import Runnable
from langchain.utils.openai_functions import convert_pydantic_to_openai_function

_EXTRACTION_TEMPLATE = """Extract and save the relevant entities mentioned \
in the following passage together with their properties.

If a property is not present and is not required in the function parameters, do not include it in the output."""  # noqa: E501


def create_extraction_chain_pydantic(
    pydantic_schemas: Union[List[Type[BaseModel]], Type[BaseModel]],
    llm: BaseLanguageModel,
    system_message: Optional[str] = _EXTRACTION_TEMPLATE,
) -> Runnable:
    if not isinstance(pydantic_schemas, list):
        pydantic_schemas = [pydantic_schemas]
    # from_messages takes a list of (role, template) tuples
    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", system_message),
            ("user", "{input}"),
        ]
    )
    # Convert each Pydantic schema into an OpenAI function, then wrap it in
    # the tools format and bind the tools to the model
    functions = [convert_pydantic_to_openai_function(p) for p in pydantic_schemas]
    tools = [{"type": "function", "function": d} for d in functions]
    model = llm.bind(tools=tools)
    return prompt | model | PydanticToolsParser(tools=pydantic_schemas)
```
File diff suppressed because one or more lines are too long
@@ -1,445 +0,0 @@

# Exploring OpenAI V1 functionality

On November 6, 2023, OpenAI released a number of new features and, along with them, bumped their Python SDK to 1.0.0. This notebook shows off the new features and how to use them with LangChain.

```python
# need openai>=1.1.0, langchain>=0.0.332, langchain-experimental>=0.0.39
!pip install -U openai langchain langchain-experimental
```

```python
from langchain.chat_models import ChatOpenAI
from langchain.schema.messages import HumanMessage, SystemMessage
```

## [Vision](https://platform.openai.com/docs/guides/vision)

OpenAI released multi-modal models, which can take a sequence of text and images as input.

```python
chat = ChatOpenAI(model="gpt-4-vision-preview", max_tokens=256)
chat.invoke(
    [
        HumanMessage(
            content=[
                {"type": "text", "text": "What is this image showing"},
                {
                    "type": "image_url",
                    "image_url": {
                        "url": "https://raw.githubusercontent.com/langchain-ai/langchain/master/docs/static/img/langchain_stack.png",
                        "detail": "auto",
                    },
                },
            ]
        )
    ]
)
```

```
AIMessage(content='The image appears to be a diagram representing the architecture or components of a software system or framework related to language processing, possibly named LangChain or associated with a project or product called LangChain, based on the prominent appearance of that term. The diagram is organized into several layers or aspects, each containing various elements or modules:\n\n1. **Protocol**: This may be the foundational layer, which includes "LCEL" and terms like parallelization, fallbacks, tracing, batching, streaming, async, and composition. These seem related to communication and execution protocols for the system.\n\n2. **Integrations Components**: This layer includes "Model I/O" with elements such as the model, output parser, prompt, and example selector. It also has a "Retrieval" section with a document loader, retriever, embedding model, vector store, and text splitter. Lastly, there\'s an "Agent Tooling" section. These components likely deal with the interaction with external data, models, and tools.\n\n3. **Application**: The application layer features "LangChain" with chains, agents, agent executors, and common application logic. This suggests that the system uses a modular approach with chains and agents to process language tasks.\n\n4. **Deployment**: This contains "Lang')
```

## [OpenAI assistants](https://platform.openai.com/docs/assistants/overview)

> The Assistants API allows you to build AI assistants within your own applications. An Assistant has instructions and can leverage models, tools, and knowledge to respond to user queries. The Assistants API currently supports three types of tools: Code Interpreter, Retrieval, and Function calling.

You can interact with OpenAI Assistants using OpenAI tools or custom tools. When using exclusively OpenAI tools, you can invoke the assistant directly and get final answers. When using custom tools, you can run the assistant-and-tool-execution loop using the built-in AgentExecutor, or easily write your own executor.

Below we show the different ways to interact with Assistants. As a simple example, let's build a math tutor that can write and run code.

### Using only OpenAI tools

```python
from langchain_experimental.openai_assistant import OpenAIAssistantRunnable
```

```python
interpreter_assistant = OpenAIAssistantRunnable.create_assistant(
    name="langchain assistant",
    instructions="You are a personal math tutor. Write and run code to answer math questions.",
    tools=[{"type": "code_interpreter"}],
    model="gpt-4-1106-preview",
)
output = interpreter_assistant.invoke({"content": "What's 10 - 4 raised to the 2.7"})
output
```

```
[ThreadMessage(id='msg_g9OJv0rpPgnc3mHmocFv7OVd', assistant_id='asst_hTwZeNMMphxzSOqJ01uBMsJI', content=[MessageContentText(text=Text(annotations=[], value='The result of \\(10 - 4^{2.7}\\) is approximately \\(-32.224\\).'), type='text')], created_at=1699460600, file_ids=[], metadata={}, object='thread.message', role='assistant', run_id='run_nBIT7SiAwtUfSCTrQNSPLOfe', thread_id='thread_14n4GgXwxgNL0s30WJW5F6p0')]
```

### As a LangChain agent with arbitrary tools

Now let's recreate this functionality using our own tools. For this example we'll use the [E2B sandbox runtime tool](https://e2b.dev/docs?ref=landing-page-get-started).

```python
!pip install e2b duckduckgo-search
```

```python
from langchain.tools import DuckDuckGoSearchRun, E2BDataAnalysisTool

tools = [E2BDataAnalysisTool(api_key="..."), DuckDuckGoSearchRun()]
```

```python
agent = OpenAIAssistantRunnable.create_assistant(
    name="langchain assistant e2b tool",
    instructions="You are a personal math tutor. Write and run code to answer math questions. You can also search the internet.",
    tools=tools,
    model="gpt-4-1106-preview",
    as_agent=True,
)
```

#### Using AgentExecutor

```python
from langchain.agents import AgentExecutor

agent_executor = AgentExecutor(agent=agent, tools=tools)
agent_executor.invoke({"content": "What's the weather in SF today divided by 2.7"})
```

```
{'content': "What's the weather in SF today divided by 2.7",
 'output': "The weather in San Francisco today is reported to have temperatures as high as 66 °F. To get the temperature divided by 2.7, we will calculate that:\n\n66 °F / 2.7 = 24.44 °F\n\nSo, when the high temperature of 66 °F is divided by 2.7, the result is approximately 24.44 °F. Please note that this doesn't have a meteorological meaning; it's purely a mathematical operation based on the given temperature."}
```

#### Custom execution

```python
agent = OpenAIAssistantRunnable.create_assistant(
    name="langchain assistant e2b tool",
    instructions="You are a personal math tutor. Write and run code to answer math questions.",
    tools=tools,
    model="gpt-4-1106-preview",
    as_agent=True,
)
```

```python
from langchain.schema.agent import AgentFinish


def execute_agent(agent, tools, input):
    tool_map = {tool.name: tool for tool in tools}
    response = agent.invoke(input)
    while not isinstance(response, AgentFinish):
        tool_outputs = []
        for action in response:
            tool_output = tool_map[action.tool].invoke(action.tool_input)
            print(action.tool, action.tool_input, tool_output, end="\n\n")
            tool_outputs.append(
                {"output": tool_output, "tool_call_id": action.tool_call_id}
            )
        response = agent.invoke(
            {
                "tool_outputs": tool_outputs,
                "run_id": action.run_id,
                "thread_id": action.thread_id,
            }
        )
    return response
```

```python
response = execute_agent(agent, tools, {"content": "What's 10 - 4 raised to the 2.7"})
print(response.return_values["output"])
```

```
e2b_data_analysis {'python_code': 'print(10 - 4 ** 2.7)'} {"stdout": "-32.22425314473263", "stderr": "", "artifacts": []}

\( 10 - 4^{2.7} \) is approximately \(-32.22425314473263\).
```

```python
next_response = execute_agent(
    agent, tools, {"content": "now add 17.241", "thread_id": response.thread_id}
)
print(next_response.return_values["output"])
```

```
e2b_data_analysis {'python_code': 'result = 10 - 4 ** 2.7\nprint(result + 17.241)'} {"stdout": "-14.983253144732629", "stderr": "", "artifacts": []}

When you add \( 17.241 \) to \( 10 - 4^{2.7} \), the result is approximately \( -14.98325314473263 \).
```

## [JSON mode](https://platform.openai.com/docs/guides/text-generation/json-mode)

Constrain the model to generate only valid JSON. Note that you must include a system message with instructions to use JSON for this mode to work, and it only works with certain models.

```python
chat = ChatOpenAI(model="gpt-3.5-turbo-1106").bind(
    response_format={"type": "json_object"}
)

output = chat.invoke(
    [
        SystemMessage(
            content="Extract the 'name' and 'origin' of any companies mentioned in the following statement. Return a JSON list."
        ),
        HumanMessage(
            content="Google was founded in the USA, while Deepmind was founded in the UK"
        ),
    ]
)
print(output.content)
```

```python
import json

json.loads(output.content)
```

## [System fingerprint](https://platform.openai.com/docs/guides/text-generation/reproducible-outputs)

OpenAI sometimes changes model configurations in a way that impacts outputs. Whenever this happens, the `system_fingerprint` associated with a generation will change.

```python
chat = ChatOpenAI(model="gpt-3.5-turbo-1106")
output = chat.generate(
    [
        [
            SystemMessage(
                content="Extract the 'name' and 'origin' of any companies mentioned in the following statement. Return a JSON list."
            ),
            HumanMessage(
                content="Google was founded in the USA, while Deepmind was founded in the UK"
            ),
        ]
    ]
)
print(output.llm_output)
```

## Breaking changes to Azure classes

OpenAI V1 rewrote their clients and separated the Azure and OpenAI clients. This has led to some changes in LangChain interfaces when using OpenAI V1.

BREAKING CHANGES:
- To use Azure embeddings with OpenAI V1, you'll need to use the new `AzureOpenAIEmbeddings` instead of the existing `OpenAIEmbeddings`. `OpenAIEmbeddings` continues to work when using Azure with `openai<1`.
```python
from langchain.embeddings import AzureOpenAIEmbeddings
```

RECOMMENDED CHANGES:
- When using `AzureChatOpenAI`, if passing in an Azure endpoint (e.g. https://example-resource.azure.openai.com/), it should be specified via the `azure_endpoint` parameter or the `AZURE_OPENAI_ENDPOINT` environment variable. We're maintaining backwards compatibility for now with specifying it via `openai_api_base`/`base_url` or the env var `OPENAI_API_BASE`, but this shouldn't be relied upon.
- When using Azure chat or embedding models, pass in API keys either via the `openai_api_key` parameter or the `AZURE_OPENAI_API_KEY` environment variable. We're maintaining backwards compatibility for now with specifying this via `OPENAI_API_KEY`, but this shouldn't be relied upon. A minimal example combining these recommendations follows.
@@ -35,7 +35,7 @@
   "tags": []
  },
  "source": [
   "### API keys and other secrets\n",
   "### API keys and other secrats\n",
   "\n",
   "We use an `.ini` file, like this: \n",
   "```\n",
@@ -15,7 +15,7 @@ poetry run python scripts/model_feat_table.py
poetry run nbdoc_build --srcdir docs
cp ../cookbook/README.md src/pages/cookbook.mdx
cp ../.github/CONTRIBUTING.md docs/contributing.md
wget https://raw.githubusercontent.com/langchain-ai/langserve/main/README.md -O docs/langserve.md
wget https://raw.githubusercontent.com/langchain-ai/langserve/main/README.md -O docs/guides/deployments/langserve.md
poetry run python scripts/generate_api_reference_links.py
yarn install
yarn start
File diff suppressed because one or more lines are too long
@@ -12,19 +12,6 @@
"Suppose we have a simple prompt + model sequence:"

The standalone imports cell (execution_count 1, id 950297ed) is dropped from this position:

```python
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.schema import StrOutputParser
from langchain.schema.runnable import RunnablePassthrough
```

@@ -50,6 +37,11 @@
The imports are folded into the first example cell instead:

```python
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.schema import StrOutputParser
from langchain.schema.runnable import RunnablePassthrough

prompt = ChatPromptTemplate.from_messages(
    [
        (
```

@@ -113,29 +105,31 @@
In cell f66a0fe4 (execution_count 3 → 14), the single `function = {...}` dict (same fields) becomes a `functions = [...]` list:

```python
functions = [
    {
        "name": "solver",
        "description": "Formulates and solves an equation",
        "parameters": {
            "type": "object",
            "properties": {
                "equation": {
                    "type": "string",
                    "description": "The algebraic expression of the equation",
                },
                "solution": {
                    "type": "string",
                    "description": "The solution to the equation",
                },
            },
            "required": ["equation", "solution"],
        },
    }
]
```

@@ -167,70 +161,19 @@
The binding call changes from `functions=[function]` to `functions=functions`:

```python
model = ChatOpenAI(model="gpt-4", temperature=0).bind(
    function_call={"name": "solver"}, functions=functions
)
runnable = {"equation_statement": RunnablePassthrough()} | prompt | model
runnable.invoke("x raised to the third plus seven equals 12")
```

In the "Attaching OpenAI tools" section, the tools definition cell keeps its source (execution_count 5 → null):

```python
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_current_weather",
            "description": "Get the current weather in a given location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. San Francisco, CA",
                    },
                    "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
                },
                "required": ["location"],
            },
        },
    }
]
```

and the invocation cell (execution_count 9, id 2b65beab) is emptied, removing:

```python
model = ChatOpenAI(model="gpt-3.5-turbo-1106").bind(tools=tools)
model.invoke("What's the weather in SF, NYC and LA?")
```

with its output:

```
AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_zHN0ZHwrxM7nZDdqTp6dkPko', 'function': {'arguments': '{"location": "San Francisco, CA", "unit": "celsius"}', 'name': 'get_current_weather'}, 'type': 'function'}, {'id': 'call_aqdMm9HBSlFW9c9rqxTa7eQv', 'function': {'arguments': '{"location": "New York, NY", "unit": "celsius"}', 'name': 'get_current_weather'}, 'type': 'function'}, {'id': 'call_cx8E567zcLzYV2WSWVgO63f1', 'function': {'arguments': '{"location": "Los Angeles, CA", "unit": "celsius"}', 'name': 'get_current_weather'}, 'type': 'function'}]})
```

(the hunk ends at the file's `"metadata": {` block)
@@ -8,31 +8,11 @@ sidebar_position: 0
- **Are context-aware**: connect a language model to sources of context (prompt instructions, few-shot examples, content to ground its response in, etc.)
- **Reason**: rely on a language model to reason (about how to answer based on provided context, what actions to take, etc.)

Removed overview:

This framework consists of several parts.
You can see how the parts interact with each other below:

![LangChain Diagram](/img/langchain_stack.png "LangChain Framework Overview")

These parts include:

- **[LangChain Packages]**: The Python and JavaScript packages. Contains interfaces and integrations for a myriad of components, a basic runtime for combining these components into chains and agents, and off-the-shelf implementations of chains and agents.
- **[LangChain Templates](https://github.com/langchain-ai/langchain/tree/master/templates)**: A collection of easily deployable reference architectures for a wide variety of tasks.
- **[LangServe](https://github.com/langchain-ai/langserve)**: A library for deploying LangChain chains as a REST API.
- **[LangSmith](https://smith.langchain.com/)**: A developer platform that lets you debug, test, evaluate, and monitor chains built on any LLM framework, and seamlessly integrates with LangChain.

Together, these products simplify the entire application lifecycle:
- **Develop**: Write your applications in LangChain/LangChain.js. Hit the ground running using Templates for reference.
- **Productionize**: Use LangSmith to inspect, test, and monitor your chains, so that you can constantly improve and deploy with confidence.
- **Deploy**: Turn any chain into an API with LangServe.

## LangChain Packages

The main value props of the LangChain packages are:
1. **Components**: composable tools and integrations for working with language models. Components are modular and easy to use, whether you are using the rest of the LangChain framework or not
2. **Off-the-shelf chains**: built-in assemblages of components for accomplishing higher-level tasks

Off-the-shelf chains make it easy to get started. Components make it easy to customize existing chains and build new ones.

Replacement overview:

The main value props of LangChain are:
1. **Components**: abstractions for working with language models, along with a collection of implementations for each abstraction. Components are modular and easy to use, whether you are using the rest of the LangChain framework or not
2. **Off-the-shelf chains**: a structured assembly of components for accomplishing specific higher-level tasks

Off-the-shelf chains make it easy to get started. For complex applications, components make it easy to customize existing chains and build new ones.

## Get started

@@ -40,9 +20,11 @@ Off-the-shelf chains make it easy to get started. Components make it easy to cus

We recommend following our [Quickstart](/docs/get_started/quickstart) guide to familiarize yourself with the framework by building your first LangChain application.

_**Note**: These docs are for the LangChain [Python package](https://github.com/langchain-ai/langchain). For documentation on [LangChain.js](https://github.com/langchain-ai/langchainjs), the JS/TS version, [head here](https://js.langchain.com/docs)._

## Modules

LangChain provides standard, extendable interfaces and integrations for the following modules, listed from least to most complex:
LangChain provides standard, extendable interfaces and external integrations for the following modules, listed from least to most complex:

#### [Model I/O](/docs/modules/model_io/)
Interface with language models

@@ -59,18 +41,21 @@ Log and stream intermediate steps of any chain

## Examples, ecosystem, and resources
### [Use cases](/docs/use_cases/question_answering/)
Walkthroughs and techniques for common end-to-end use cases, like:
Walkthroughs and best practices for common end-to-end use cases, like:
- [Document question answering](/docs/use_cases/question_answering/)
- [Chatbots](/docs/use_cases/chatbots/)
- [Analyzing structured data](/docs/use_cases/qa_structured/sql/)
- and much more...

### [Guides](/docs/guides/adapters/openai)
Best practices for developing with LangChain.
### [Guides](/docs/guides/)
Learn best practices for developing with LangChain.

### [Ecosystem](/docs/integrations/providers/)
LangChain is part of a rich ecosystem of tools that integrate with our framework and build on top of it. Check out our growing list of [integrations](/docs/integrations/providers/) and [dependent repos](/docs/additional_resources/dependents).

### [Additional resources](/docs/additional_resources/)
Our community is full of prolific developers, creative builders, and fantastic teachers. Check out [YouTube tutorials](/docs/additional_resources/youtube) for great tutorials from folks in the community, and [Gallery](https://github.com/kyrolabs/awesome-langchain) for a list of awesome LangChain projects, compiled by the folks at [KyroLabs](https://kyrolabs.com).

### [Community](/docs/community)
Head to the [Community navigator](/docs/community) to find places to ask questions, share feedback, meet other developers, and dream about the future of LLMs.
@@ -44,24 +44,11 @@ from langchain.llms import OpenAI
llm = OpenAI(openai_api_key="...")
```

Removed section:

## LangSmith Setup

Many of the applications you build with LangChain will contain multiple steps with multiple invocations of LLM calls.
As these applications get more and more complex, it becomes crucial to be able to inspect what exactly is going on inside your chain or agent.
The best way to do this is with [LangSmith](https://smith.langchain.com).

Note that LangSmith is not needed, but it is helpful.
If you do want to use LangSmith, after you sign up at the link above, make sure to set your environment variables to start logging traces:

```shell
export LANGCHAIN_TRACING_V2="true"
export LANGCHAIN_API_KEY=...
```

## Building an application

Now we can start building our language model application. LangChain provides many modules that can be used to build language model applications.
Modules can be used as standalones in simple applications and they can be combined for more complex use cases.
Modules can be used as stand-alones in simple applications and they can be combined for more complex use cases.

The most common and most important chain that LangChain helps create contains three things:
- LLM: The language model is the core reasoning engine here. In order to work with LangChain, you need to understand the different types of language models and how to work with them.
Image — Before: 766 KiB, After: 766 KiB
Image — Before: 815 KiB, After: 815 KiB
@@ -1,13 +1,11 @@
---
sidebar_class_name: hidden
---

# LangSmith

import DocCardList from "@theme/DocCardList";

[LangSmith](https://smith.langchain.com) helps you trace and evaluate your language model applications and intelligent agents to help you
move from prototype to production.

Check out the [interactive walkthrough](/docs/guides/langsmith/walkthrough) to get started.
Check out the [interactive walkthrough](/docs/guides/langsmith/walkthrough) below to get started.

For more information, please refer to the [LangSmith documentation](https://docs.smith.langchain.com/).

@@ -20,3 +18,5 @@ check out the [LangSmith Cookbook](https://github.com/langchain-ai/langsmith-coo
- How to fine-tune an LLM on real usage data ([link](https://github.com/langchain-ai/langsmith-cookbook/blob/main/fine-tuning-examples/export-to-openai/fine-tuning-on-chat-runs.ipynb)).
- How to use the [LangChain Hub](https://smith.langchain.com/hub) to version your prompts ([link](https://github.com/langchain-ai/langsmith-cookbook/blob/main/hub-examples/retrieval-qa-chain/retrieval-qa.ipynb))

<DocCardList />
@@ -140,7 +140,7 @@
In the imports (the two `format_scratchpad` lines are the two sides of the change):

```python
from langchain import hub
from langchain.agents import AgentExecutor
from langchain.agents.format_scratchpad import format_to_openai_function_messages
from langchain.agents.format_scratchpad import format_to_openai_functions
from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
from langchain.chat_models import ChatOpenAI
from langchain.tools import DuckDuckGoSearchResults
```

@@ -165,7 +165,7 @@
The same rename applies in the agent definition:

```python
runnable_agent = (
    {
        "input": lambda x: x["input"],
        "agent_scratchpad": lambda x: format_to_openai_function_messages(
        "agent_scratchpad": lambda x: format_to_openai_functions(
            x["intermediate_steps"]
        ),
    }
```

@@ -335,7 +335,7 @@
```python
from langchain.chat_models import ChatOpenAI
from langchain.agents import AgentType, initialize_agent, load_tools, AgentExecutor
from langchain.agents.format_scratchpad import format_to_openai_function_messages
from langchain.agents.format_scratchpad import format_to_openai_functions
from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
from langchain.tools.render import format_tool_to_openai_function
from langchain import hub
```

@@ -351,7 +351,7 @@
```python
    runnable_agent = (
        {
            "input": lambda x: x["input"],
            "agent_scratchpad": lambda x: format_to_openai_function_messages(
            "agent_scratchpad": lambda x: format_to_openai_functions(
                x["intermediate_steps"]
            ),
        }
```

@@ -790,7 +790,7 @@
In the kernel metadata, the Python version changes from 3.9.1 to 3.11.2 (nbformat 4).
@@ -5,20 +5,18 @@
The title cell (id 38f26d7a) and intro change; changed lines appear as adjacent pairs:

# Azure OpenAI
# Azure

This notebook goes over how to connect to an Azure hosted OpenAI endpoint. We recommend having version `openai>=1` installed.
This notebook goes over how to connect to an Azure hosted OpenAI endpoint

The imports cell (id 96164b42, execution_count 3 → 2):

```python
import os

from langchain.chat_models import AzureChatOpenAI
from langchain.schema import HumanMessage
```

@@ -26,51 +24,57 @@
An environment-variable setup cell (id cbe4bb58) on one side of the diff:

```python
os.environ["AZURE_OPENAI_API_KEY"] = "..."
os.environ["AZURE_OPENAI_ENDPOINT"] = "https://<your-endpoint>.openai.azure.com/"
```

The model-construction cell (id 8161278f, execution_count 14) mixes both sides of the change — `azure_deployment="your-deployment-name"` on one side, the `BASE_URL`/`DEPLOYMENT_NAME`/`openai_api_key`/`openai_api_type` style on the other:

```python
BASE_URL = "https://${TODO}.openai.azure.com"
API_KEY = "..."
DEPLOYMENT_NAME = "chat"
model = AzureChatOpenAI(
    openai_api_base=BASE_URL,
    openai_api_version="2023-05-15",
    azure_deployment="your-deployment-name",
    deployment_name=DEPLOYMENT_NAME,
    openai_api_key=API_KEY,
    openai_api_type="azure",
)
```

The translation cell (id 99509140, execution_count 15 → 5), with its two output variants:

```
AIMessage(content="J'adore la programmation.")
AIMessage(content="\n\nJ'aime programmer.", additional_kwargs={})
```

```python
message = HumanMessage(
    content="Translate this sentence from English to French. I love programming."
)
model([message])
```

```python
model(
    [
        HumanMessage(
            content="Translate this sentence from English to French. I love programming."
        )
    ]
)
```

An empty code cell (id 3b6e9376) follows.

@@ -84,7 +88,7 @@
A code cell (id 0531798a) has its execution count reset (8 → null).

@@ -94,19 +98,48 @@
A setup cell (id 3fd97dfc, execution_count null → 14):

```python
BASE_URL = "https://{endpoint}.openai.azure.com"
API_KEY = "..."
DEPLOYMENT_NAME = "gpt-35-turbo"  # in Azure, this deployment has version 0613 - input and output tokens are counted separately
```

The callback cell (id aceddb72, execution_count 15) gains a recorded output:

```
Total Cost (USD): $0.000054
```

with source (again, the `azure_deployment=...`/`deployment_name=...` and `model([message])`/expanded-call lines are the two sides of the change):

```python
model = AzureChatOpenAI(
    openai_api_base=BASE_URL,
    openai_api_version="2023-05-15",
    azure_deployment="gpt-35-turbo",  # in Azure, this deployment has version 0613 - input and output tokens are counted separately
    deployment_name=DEPLOYMENT_NAME,
    openai_api_key=API_KEY,
    openai_api_type="azure",
)
with get_openai_callback() as cb:
    model([message])
    model(
        [
            HumanMessage(
                content="Translate this sentence from English to French. I love programming."
            )
        ]
    )
    print(
        f"Total Cost (USD): ${format(cb.total_cost, '.6f')}"
    )  # without specifying the model version, flat-rate 0.002 USD per 1k input and output tokens is used
```

@@ -136,12 +169,21 @@
In the `model_version` example, `deployment_name="gpt-35-turbo` (note the missing closing quote) is paired with `deployment_name=DEPLOYMENT_NAME`, and the call is expanded:

```python
model0613 = AzureChatOpenAI(
    openai_api_base=BASE_URL,
    openai_api_version="2023-05-15",
    deployment_name=DEPLOYMENT_NAME,
    openai_api_key=API_KEY,
    openai_api_type="azure",
    model_version="0613",
)
with get_openai_callback() as cb:
    model0613(
        [
            HumanMessage(
                content="Translate this sentence from English to French. I love programming."
            )
        ]
    )
    print(f"Total Cost (USD): ${format(cb.total_cost, '.6f')}")
```

@@ -170,7 +212,7 @@
In the kernel metadata, the Python version changes from 3.9.1 to 3.8.10 (nbformat 4).
@@ -65,7 +65,9 @@
In the markdown cell 359565a7:

## 2. Define chat loader

becomes

## 2. Define chat loader

LangChain currently does not support 
@@ -41,9 +41,7 @@
The section heading and text are simplified:

### Model Loading

Models can be loaded by specifying the model parameters using the `from_model_id` method.

becomes

### Load the model

@@ -55,12 +53,12 @@
The import path, variable name, and model change:

```python
from langchain.llms import HuggingFacePipeline

llm = HuggingFacePipeline.from_model_id(
    model_id="bigscience/bloom-1b7",
    task="text-generation",
    model_kwargs={"temperature": 0, "max_length": 64},
)
```

(previously: `from langchain.llms.huggingface_pipeline import HuggingFacePipeline`, with `hf = HuggingFacePipeline.from_model_id(model_id="gpt2", ..., pipeline_kwargs={"max_new_tokens": 10})`)

@@ -68,29 +66,6 @@
The "pass in an existing `transformers` pipeline" example is removed:

They can also be loaded by passing in an existing `transformers` pipeline directly

```python
from langchain.llms.huggingface_pipeline import HuggingFacePipeline
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

model_id = "gpt2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=10)
hf = HuggingFacePipeline(pipeline=pipe)
```

### Create Chain

@@ -112,7 +87,7 @@
The chain now uses `llm` instead of `hf`:

```python
Answer: Let's think step by step."""
prompt = PromptTemplate.from_template(template)

chain = prompt | llm

question = "What is electroencephalography?"
```

@@ -123,40 +98,6 @@
The "GPU Inference" section is removed:

### GPU Inference

When running on a machine with GPU, you can specify the `device=n` parameter to put the model on the specified device.
Defaults to `-1` for CPU inference.

If you have multiple GPUs and/or the model is too large for a single GPU, you can specify `device_map="auto"`, which requires and uses the [Accelerate](https://huggingface.co/docs/accelerate/index) library to automatically determine how to load the model weights.

*Note*: `device` and `device_map` should not be specified together, as that can lead to unexpected behavior.

```python
gpu_llm = HuggingFacePipeline.from_model_id(
    model_id="gpt2",
    task="text-generation",
    device=0,  # replace with device_map="auto" to use the accelerate library.
    pipeline_kwargs={"max_new_tokens": 10},
)

gpu_chain = prompt | gpu_llm

question = "What is electroencephalography?"

print(gpu_chain.invoke({"question": question}))
```

### Batch GPU Inference
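The body of this section is not shown in the hunk; as an illustration only, batched generation with the GPU pipeline above might look like the following sketch (assuming the `prompt` and `gpu_llm` objects from the removed GPU example):

```python
# Illustrative sketch only -- the notebook's actual batch example is not
# shown in this hunk. Assumes `prompt` and `gpu_llm` as defined above.
gpu_chain = prompt | gpu_llm

questions = [
    "What is electroencephalography?",
    "What is a convolutional neural network?",
]

# Runnable sequences expose .batch(), which runs the inputs through the
# pipeline together rather than one invoke() call at a time.
answers = gpu_chain.batch([{"question": q} for q in questions])
for answer in answers:
    print(answer)
```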
@@ -206,7 +147,7 @@
In the kernel metadata (`"name": "python"`, `"nbconvert_exporter": "python"`, `"pygments_lexer": "ipython3"`), the Python version changes from 3.10.5 to 3.8.10 (nbformat 4).
@@ -85,11 +85,11 @@ model.convert_prompt(prompt_value)

This produces the following formatted string (the two lines are the two sides of the change):

```
'\n\nYou are a helpful chatbot\n\nHuman: Tell me a joke about bears\n\nAssistant:'
'\n\nHuman: <admin>You are a helpful chatbot</admin>\n\nHuman: Tell me a joke about bears\n\nAssistant:'
```

We can see that under the hood LangChain is not appending any prefix/suffix to `SystemMessage`s. This is because Anthropic has no concept of `SystemMessage`.
Anthropic requires all prompts to end with assistant messages. This means if the last message is not an assistant message, the suffix `Assistant:` will automatically be inserted.

becomes

We can see that under the hood LangChain is representing `SystemMessage`s with `Human: <admin>...</admin>`,
and is appending an assistant message to the end IF the last message is NOT already an assistant message.
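As a minimal pure-Python sketch of the formatting rules just described — this is illustrative only, not LangChain's actual implementation:

```python
# Illustrative sketch of the formatting described above, not LangChain's
# actual implementation. Messages are (role, content) pairs.
def format_anthropic_prompt(messages: list) -> str:
    parts = []
    for role, content in messages:
        if role == "system":
            # SystemMessages are rendered as a Human turn wrapped in <admin> tags.
            parts.append(f"\n\nHuman: <admin>{content}</admin>")
        elif role == "human":
            parts.append(f"\n\nHuman: {content}")
        else:
            parts.append(f"\n\nAssistant: {content}")
    if not messages or messages[-1][0] != "assistant":
        # The prompt must end with an Assistant turn, so append one if missing.
        parts.append("\n\nAssistant:")
    return "".join(parts)


print(repr(format_anthropic_prompt([
    ("system", "You are a helpful chatbot"),
    ("human", "Tell me a joke about bears"),
])))
# '\n\nHuman: <admin>You are a helpful chatbot</admin>\n\nHuman: Tell me a joke about bears\n\nAssistant:'
```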
If you decide instead to use a normal PromptTemplate (one that just works on a single string), let's take a look at what happens:
@@ -2,7 +2,7 @@

All functionality related to `Microsoft Azure` and other `Microsoft` products.

## Chat Models
## LLM
### Azure OpenAI

>[Microsoft Azure](https://en.wikipedia.org/wiki/Microsoft_Azure), often referred to as `Azure`, is a cloud computing platform run by `Microsoft`, which offers access, management, and development of applications and services through global data centers. It provides a range of capabilities, including software as a service (SaaS), platform as a service (PaaS), and infrastructure as a service (IaaS). `Microsoft Azure` supports many programming languages, tools, and frameworks, including Microsoft-specific and third-party software and systems.

@@ -18,15 +18,16 @@ Set the environment variables to get access to the `Azure OpenAI` service.

```python
import os

os.environ["AZURE_OPENAI_ENDPOINT"] = "https://<your-endpoint>.openai.azure.com/"
os.environ["AZURE_OPENAI_API_KEY"] = "your AzureOpenAI key"
```

becomes

```python
import os

os.environ["OPENAI_API_TYPE"] = "azure"
os.environ["OPENAI_API_BASE"] = "https://<your-endpoint>.openai.azure.com/"
os.environ["OPENAI_API_KEY"] = "your AzureOpenAI key"
os.environ["OPENAI_API_VERSION"] = "2023-05-15"
```

See a [usage example](/docs/integrations/chat/azure_chat_openai)
See a [usage example](/docs/integrations/llms/azure_openai_example).

```python
from langchain.chat_models import AzureChatOpenAI
from langchain.llms import AzureOpenAI
```

## Text Embedding Models

@@ -35,16 +36,16 @@ from langchain.chat_models import AzureChatOpenAI
See a [usage example](/docs/integrations/text_embedding/azureopenai)

```python
from langchain.embeddings import AzureOpenAIEmbeddings
from langchain.embeddings import OpenAIEmbeddings
```

## LLMs
## Chat Models
### Azure OpenAI

See a [usage example](/docs/integrations/llms/azure_openai_example).
See a [usage example](/docs/integrations/chat/azure_chat_openai)

```python
from langchain.llms import AzureOpenAI
from langchain.chat_models import AzureChatOpenAI
```

## Document loaders
@@ -1,85 +0,0 @@

# Astra DB

This page lists the integrations available with [Astra DB](https://docs.datastax.com/en/astra/home/astra.html) and [Apache Cassandra®](https://cassandra.apache.org/).

### Setup

Install the following Python package:

```bash
pip install "astrapy>=0.5.3"
```

## Astra DB

> DataStax [Astra DB](https://docs.datastax.com/en/astra/home/astra.html) is a serverless vector-capable database built on Cassandra and made conveniently available
> through an easy-to-use JSON API.

### Vector Store

```python
from langchain.vectorstores import AstraDB

vector_store = AstraDB(
    embedding=my_embedding,
    collection_name="my_store",
    api_endpoint="...",
    token="...",
)
```

Learn more in the [example notebook](/docs/integrations/vectorstores/astradb).

## Apache Cassandra and Astra DB through CQL

> [Cassandra](https://cassandra.apache.org/) is a NoSQL, row-oriented, highly scalable and highly available database.
> Starting with version 5.0, the database ships with [vector search capabilities](https://cassandra.apache.org/doc/trunk/cassandra/vector-search/overview.html).
> DataStax [Astra DB through CQL](https://docs.datastax.com/en/astra-serverless/docs/vector-search/quickstart.html) is a managed serverless database built on Cassandra, offering the same interface and strengths.

These databases use the CQL protocol (Cassandra Query Language).
Hence, a different set of connectors, outlined below, must be used.

### Vector Store

```python
from langchain.vectorstores import Cassandra

vector_store = Cassandra(
    embedding=my_embedding,
    table_name="my_store",
)
```

Learn more in the [example notebook](/docs/integrations/vectorstores/astradb) (scroll down to the CQL-specific section).

### Memory

```python
from langchain.memory import CassandraChatMessageHistory

message_history = CassandraChatMessageHistory(session_id="my-session")
```

Learn more in the [example notebook](/docs/integrations/memory/cassandra_chat_message_history).

### LLM Cache

```python
import langchain
from langchain.cache import CassandraCache

langchain.llm_cache = CassandraCache()
```

Learn more in the [example notebook](/docs/integrations/llms/llm_caching) (scroll to the Cassandra section).

### Semantic LLM Cache

```python
from langchain.cache import CassandraSemanticCache

cassSemanticCache = CassandraSemanticCache(
    embedding=my_embedding,
    table_name="my_store",
)
```

Learn more in the [example notebook](/docs/integrations/llms/llm_caching) (scroll to the appropriate section).
35 docs/docs/integrations/providers/cassandra.mdx Normal file
@@ -0,0 +1,35 @@
# Cassandra

>[Apache Cassandra®](https://cassandra.apache.org/) is a free and open-source, distributed, wide-column
> store, NoSQL database management system designed to handle large amounts of data across many commodity servers,
> providing high availability with no single point of failure. Cassandra offers support for clusters spanning
> multiple datacenters, with asynchronous masterless replication allowing low latency operations for all clients.
> Cassandra was designed to implement a combination of _Amazon's Dynamo_ distributed storage and replication
> techniques combined with _Google's Bigtable_ data and storage engine model.

## Installation and Setup

```bash
pip install cassandra-driver
pip install cassio
```

## Vector Store

See a [usage example](/docs/integrations/vectorstores/cassandra).

```python
from langchain.vectorstores import Cassandra
```
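A minimal instantiation sketch, mirroring the deleted Astra DB provider page above (`my_embedding` is a placeholder for any LangChain embeddings object, and a CassIO session/keyspace is assumed to be initialized already):

```python
from langchain.vectorstores import Cassandra

vector_store = Cassandra(
    embedding=my_embedding,  # placeholder: any LangChain embeddings object
    table_name="my_store",
)
```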

## Memory

See a [usage example](/docs/integrations/memory/cassandra_chat_message_history).

```python
from langchain.memory import CassandraChatMessageHistory
```
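A minimal usage sketch, taken from the deleted Astra DB provider page above:

```python
from langchain.memory import CassandraChatMessageHistory

message_history = CassandraChatMessageHistory(session_id="my-session")
```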
File diff suppressed because one or more lines are too long
@@ -5,100 +5,9 @@
"id": "c3852491",
"metadata": {},
"source": [
"# Azure OpenAI\n",
"# AzureOpenAI\n",
"\n",
"Let's load the Azure OpenAI Embedding class with environment variables set to use Azure endpoints."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "8a6ed30d-806f-4800-b5fd-d04126be9060",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"os.environ[\"AZURE_OPENAI_API_KEY\"] = \"...\"\n",
"os.environ[\"AZURE_OPENAI_ENDPOINT\"] = \"https://<your-endpoint>.openai.azure.com/\""
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "20179bc7-3f71-4909-be12-d38bce009b18",
"metadata": {},
"outputs": [],
"source": [
"from langchain.embeddings import AzureOpenAIEmbeddings\n",
"\n",
"embeddings = AzureOpenAIEmbeddings(\n",
"    azure_deployment=\"<your-embeddings-deployment-name>\",\n",
"    openai_api_version=\"2023-05-15\"\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "f8cb9dca-738b-450f-9986-5c3efd3c6eb3",
"metadata": {},
"outputs": [],
"source": [
"text = \"this is a test document\""
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "0fae0295-b117-4a5a-8b98-500c79306551",
"metadata": {},
"outputs": [],
"source": [
"query_result = embeddings.embed_query(text)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "65a01ddd-0bbf-444f-a87f-93af25ef902c",
"metadata": {},
"outputs": [],
"source": [
"doc_result = embeddings.embed_documents([text])"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "45771052-68ca-4e03-9c4f-a0c7796d9442",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[-0.012222584727053133,\n",
" 0.0072103982392216145,\n",
" -0.014818063280923775,\n",
" -0.026444746872933557,\n",
" -0.0034330499700826883]"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"doc_result[0][:5]"
]
},
{
"cell_type": "markdown",
"id": "e66ec1f2-6768-4ee5-84bf-a2d76adc20c8",
"metadata": {},
"source": [
"## [Legacy] When using `openai<1`"
"Let's load the OpenAI Embedding class with environment variables set to use Azure endpoints."
]
},
{
@@ -170,9 +79,9 @@
],
"metadata": {
"kernelspec": {
"display_name": "poetry-venv",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "poetry-venv"
"name": "python3"
},
"language_info": {
"codemirror_mode": {
File diff suppressed because one or more lines are too long
@@ -34,7 +34,7 @@
"source": [
"## Using `ZERO_SHOT_REACT_DESCRIPTION`\n",
"\n",
"This shows how to initialize the agent using the `ZERO_SHOT_REACT_DESCRIPTION` agent type."
"This shows how to initialize the agent using the `ZERO_SHOT_REACT_DESCRIPTION` agent type. Note that this is an alternative to the above."
]
},
{
@@ -14,7 +14,7 @@
"E2B Data Analysis sandbox allows you to:\n",
"- Run Python code\n",
"- Generate charts via matplotlib\n",
"- Install Python packages dynamically during runtime\n",
"- Install Python packages dynamically durint runtime\n",
"- Install system packages dynamically during runtime\n",
"- Run shell commands\n",
"- Upload and download files\n",
@@ -1,202 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Memorize\n",
"\n",
"Fine-tuning an LLM to memorize information using unsupervised learning.\n",
"\n",
"This tool requires LLMs that support fine-tuning. Currently, only `langchain.llms.GradientLLM` is supported."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Imports"
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"from langchain.llms import GradientLLM\n",
"from langchain.chains import LLMChain\n",
"from langchain.agents import AgentExecutor, AgentType, initialize_agent, load_tools\n",
"from langchain.memory import ConversationBufferMemory"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Set the Environment API Key\n",
"Make sure to get your API key from Gradient AI. You are given $10 in free credits to test and fine-tune different models."
]
},
{
"cell_type": "code",
"execution_count": 17,
"metadata": {},
"outputs": [],
"source": [
"from getpass import getpass\n",
"\n",
"\n",
"if not os.environ.get(\"GRADIENT_ACCESS_TOKEN\", None):\n",
"    # Access token under https://auth.gradient.ai/select-workspace\n",
"    os.environ[\"GRADIENT_ACCESS_TOKEN\"] = getpass(\"gradient.ai access token:\")\n",
"if not os.environ.get(\"GRADIENT_WORKSPACE_ID\", None):\n",
"    # `ID` listed in `$ gradient workspace list`\n",
"    # also displayed after login at https://auth.gradient.ai/select-workspace\n",
"    os.environ[\"GRADIENT_WORKSPACE_ID\"] = getpass(\"gradient.ai workspace id:\")\n",
"if not os.environ.get(\"GRADIENT_MODEL_ID\", None):\n",
"    # `ID` listed in `$ gradient model list --workspace-id \"$GRADIENT_WORKSPACE_ID\"`\n",
"    os.environ[\"GRADIENT_MODEL_ID\"] = getpass(\"gradient.ai model id:\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Optional: Validate your environment variables `GRADIENT_ACCESS_TOKEN` and `GRADIENT_WORKSPACE_ID` to get currently deployed models."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create the `GradientLLM` instance\n",
"You can specify different parameters such as the model name, max tokens generated, temperature, etc."
]
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {},
"outputs": [],
"source": [
"llm = GradientLLM(\n",
"    model_id=os.environ['GRADIENT_MODEL_ID'],\n",
"    # optional: set new credentials; they default to environment variables\n",
"    # gradient_workspace_id=os.environ[\"GRADIENT_WORKSPACE_ID\"],\n",
"    # gradient_access_token=os.environ[\"GRADIENT_ACCESS_TOKEN\"],\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Load tools"
]
},
{
"cell_type": "code",
"execution_count": 19,
"metadata": {},
"outputs": [],
"source": [
"tools = load_tools([\"memorize\"], llm=llm)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Initialize the Agent"
]
},
{
"cell_type": "code",
"execution_count": 20,
"metadata": {},
"outputs": [],
"source": [
"agent = initialize_agent(\n",
"    tools,\n",
"    llm,\n",
"    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n",
"    verbose=True,\n",
"    # memory=ConversationBufferMemory(memory_key=\"chat_history\", return_messages=True),\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Run the agent\n",
"Ask the agent to memorize a piece of text."
]
},
{
"cell_type": "code",
"execution_count": 21,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mI should memorize this fact.\n",
"Action: Memorize\n",
"Action Input: Zara T\u001b[0m\n",
"Observation: \u001b[36;1m\u001b[1;3mTrain complete. Loss: 1.6853971333333335\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3mI now know the final answer.\n",
"Final Answer: Zara Tubikova set a world\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"'Zara Tubikova set a world'"
]
},
"execution_count": 21,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"agent.run(\"Please remember the fact in detail:\\nWith astonishing dexterity, Zara Tubikova set a world record by solving a 4x4 Rubik's Cube variation blindfolded in under 20 seconds, employing only their feet.\")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.6"
},
"vscode": {
"interpreter": {
"hash": "a0a0263b650d907a3bfe41c0f8d6a63a071b884df3cfdc1579f00cdc1aed6b03"
}
}
},
"nbformat": 4,
"nbformat_minor": 4
}
@@ -1,756 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "d2d6ca14-fb7e-4172-9aa0-a3119a064b96",
"metadata": {},
"source": [
"# Astra DB\n",
"\n",
"This page provides a quickstart for using [Astra DB](https://docs.datastax.com/en/astra/home/astra.html) and [Apache Cassandra®](https://cassandra.apache.org/) as a Vector Store.\n",
"\n",
"_Note: in addition to access to the database, an OpenAI API Key is required to run the full example._"
]
},
{
"cell_type": "markdown",
"id": "bb9be7ce-8c70-4d46-9f11-71c42a36e928",
"metadata": {},
"source": [
"### Setup and general dependencies"
]
},
{
"cell_type": "markdown",
"id": "dbe7c156-0413-47e3-9237-4769c4248869",
"metadata": {},
"source": [
"Use of the integration requires the following Python package."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8d00fcf4-9798-4289-9214-d9734690adfc",
"metadata": {},
"outputs": [],
"source": [
"!pip install --quiet \"astrapy>=0.5.3\""
]
},
{
"cell_type": "markdown",
"id": "2453d83a-bc8f-41e1-a692-befe4dd90156",
"metadata": {},
"source": [
"_Note: depending on your LangChain setup, you may need to install/upgrade other dependencies needed for this demo_\n",
"_(specifically, recent versions of `datasets`, `openai`, `pypdf` and `tiktoken` are required)._"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b06619af-fea2-4863-8149-7f239a8c9c82",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"from getpass import getpass\n",
"\n",
"from datasets import load_dataset  # if not present yet, run: pip install \"datasets==2.14.6\"\n",
"\n",
"from langchain.schema import Document\n",
"from langchain.embeddings import OpenAIEmbeddings\n",
"from langchain.document_loaders import PyPDFLoader\n",
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.schema.runnable import RunnablePassthrough\n",
"from langchain.schema.output_parser import StrOutputParser"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1983f1da-0ae7-4a9b-bf4c-4ade328f7a3a",
"metadata": {},
"outputs": [],
"source": [
"os.environ[\"OPENAI_API_KEY\"] = getpass(\"OPENAI_API_KEY = \")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c656df06-e938-4bc5-b570-440b8b7a0189",
"metadata": {},
"outputs": [],
"source": [
"embe = OpenAIEmbeddings()"
]
},
{
"cell_type": "markdown",
"id": "dd8caa76-bc41-429e-a93b-989ba13aff01",
"metadata": {},
"source": [
"_Keep reading to connect with Astra DB. For usage with Apache Cassandra and Astra DB through CQL, scroll to the section below._"
]
},
{
"cell_type": "markdown",
"id": "22866f09-e10d-4f05-a24b-b9420129462e",
"metadata": {},
"source": [
"## Astra DB"
]
},
{
"cell_type": "markdown",
"id": "5fba47cc-3533-42fc-84b7-9dc14cd68b2b",
"metadata": {},
"source": [
"DataStax [Astra DB](https://docs.datastax.com/en/astra/home/astra.html) is a serverless vector-capable database built on Cassandra and made conveniently available through an easy-to-use JSON API."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0b32730d-176e-414c-9d91-fd3644c54211",
"metadata": {},
"outputs": [],
"source": [
"from langchain.vectorstores import AstraDB"
]
},
{
"cell_type": "markdown",
"id": "68f61b01-3e09-47c1-9d67-5d6915c86626",
"metadata": {},
"source": [
"### Astra DB connection parameters\n",
"\n",
"- the API Endpoint looks like `https://01234567-89ab-cdef-0123-456789abcdef-us-east1.apps.astra.datastax.com`\n",
"- the Token looks like `AstraCS:6gBhNmsk135....`"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d78af8ed-cff9-4f14-aa5d-016f99ab547c",
"metadata": {},
"outputs": [],
"source": [
"ASTRA_DB_API_ENDPOINT = input(\"ASTRA_DB_API_ENDPOINT = \")\n",
"ASTRA_DB_TOKEN = getpass(\"ASTRA_DB_TOKEN = \")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8b77553b-8bb5-4949-b87b-8c6abac56a26",
"metadata": {},
"outputs": [],
"source": [
"vstore = AstraDB(\n",
"    embedding=embe,\n",
"    collection_name=\"astra_vector_demo\",\n",
"    api_endpoint=ASTRA_DB_API_ENDPOINT,\n",
"    token=ASTRA_DB_TOKEN,\n",
")"
]
},
{
"cell_type": "markdown",
"id": "9a348678-b2f6-46ca-9a0d-2eb4cc6b66b1",
"metadata": {},
"source": [
"### Load a dataset"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3a1f532f-ad63-4256-9730-a183841bd8e9",
"metadata": {},
"outputs": [],
"source": [
"philo_dataset = load_dataset(\"datastax/philosopher-quotes\")[\"train\"]\n",
"\n",
"docs = []\n",
"for entry in philo_dataset:\n",
"    metadata = {\"author\": entry[\"author\"]}\n",
"    doc = Document(page_content=entry[\"quote\"], metadata=metadata)\n",
"    docs.append(doc)\n",
"\n",
"inserted_ids = vstore.add_documents(docs)\n",
"print(f\"\\nInserted {len(inserted_ids)} documents.\")"
]
},
{
"cell_type": "markdown",
"id": "084d8802-ab39-4262-9a87-42eafb746f92",
"metadata": {},
"source": [
"Add some more entries, this time with `add_texts`:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b6b157f5-eb31-4907-a78e-2e2b06893936",
"metadata": {},
"outputs": [],
"source": [
"texts = [\"I think, therefore I am.\", \"To the things themselves!\"]\n",
"metadatas = [{\"author\": \"descartes\"}, {\"author\": \"husserl\"}]\n",
"ids = [\"desc_01\", \"huss_xy\"]\n",
"\n",
"inserted_ids_2 = vstore.add_texts(texts=texts, metadatas=metadatas, ids=ids)\n",
"print(f\"\\nInserted {len(inserted_ids_2)} documents.\")"
]
},
{
"cell_type": "markdown",
"id": "c031760a-1fc5-4855-adf2-02ed52fe2181",
"metadata": {},
"source": [
"### Run simple searches"
]
},
{
"cell_type": "markdown",
"id": "02a77d8e-1aae-4054-8805-01c77947c49f",
"metadata": {},
"source": [
"This section demonstrates metadata filtering and getting the similarity scores back:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1761806a-1afd-4491-867c-25a80d92b9fe",
"metadata": {},
"outputs": [],
"source": [
"results = vstore.similarity_search(\"Our life is what we make of it\", k=3)\n",
"for res in results:\n",
"    print(f\"* {res.page_content} [{res.metadata}]\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "eebc4f7c-f61a-438e-b3c8-17e6888d8a0b",
"metadata": {},
"outputs": [],
"source": [
"results_filtered = vstore.similarity_search(\n",
"    \"Our life is what we make of it\",\n",
"    k=3,\n",
"    filter={\"author\": \"plato\"},\n",
")\n",
"for res in results_filtered:\n",
"    print(f\"* {res.page_content} [{res.metadata}]\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "11bbfe64-c0cd-40c6-866a-a5786538450e",
"metadata": {},
"outputs": [],
"source": [
"results = vstore.similarity_search_with_score(\"Our life is what we make of it\", k=3)\n",
"for res, score in results:\n",
"    print(f\"* [SIM={score:3f}] {res.page_content} [{res.metadata}]\")"
]
},
{
"cell_type": "markdown",
"id": "b14ea558-bfbe-41ce-807e-d70670060ada",
"metadata": {},
"source": [
"### MMR (Maximal-marginal-relevance) search"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "76381ce8-780a-4e3b-97b1-056d6782d7d5",
"metadata": {},
"outputs": [],
"source": [
"results = vstore.max_marginal_relevance_search(\n",
"    \"Our life is what we make of it\",\n",
"    k=3,\n",
"    filter={\"author\": \"aristotle\"},\n",
")\n",
"for res in results:\n",
"    print(f\"* {res.page_content} [{res.metadata}]\")"
]
},
{
"cell_type": "markdown",
"id": "1cc86edd-692b-4495-906c-ccfd13b03c23",
"metadata": {},
"source": [
"### Deleting stored documents"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "38a70ec4-b522-4d32-9ead-c642864fca37",
"metadata": {},
"outputs": [],
"source": [
"delete_1 = vstore.delete(inserted_ids[:3])\n",
"print(f\"all_succeed={delete_1}\")  # True, all documents deleted"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d4cf49ed-9d29-4ed9-bdab-51a308c41b8e",
"metadata": {},
"outputs": [],
"source": [
"delete_2 = vstore.delete(inserted_ids[2:5])\n",
"print(f\"some_succeeds={delete_2}\")  # True, though some IDs were gone already"
]
},
{
"cell_type": "markdown",
"id": "847181ba-77d1-4a17-b7f9-9e2c3d8efd13",
"metadata": {},
"source": [
"### A minimal RAG chain"
]
},
{
"cell_type": "markdown",
"id": "cd64b844-846f-43c5-a7dd-c26b9ed417d0",
"metadata": {},
"source": [
"The next cells will implement a simple RAG pipeline:\n",
"- download a sample PDF file and load it onto the store;\n",
"- create a RAG chain with LCEL (LangChain Expression Language), with the vector store at its heart;\n",
"- run the question-answering chain."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5cbc4dba-0d5e-4038-8fc5-de6cadd1c2a9",
"metadata": {},
"outputs": [],
"source": [
"!curl -L \\\n",
"    \"https://github.com/awesome-astra/datasets/blob/main/demo-resources/what-is-philosophy/what-is-philosophy.pdf?raw=true\" \\\n",
"    -o \"what-is-philosophy.pdf\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "459385be-5e9c-47ff-ba53-2b7ae6166b09",
"metadata": {},
"outputs": [],
"source": [
"pdf_loader = PyPDFLoader(\"what-is-philosophy.pdf\")\n",
"splitter = RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=64)\n",
"docs_from_pdf = pdf_loader.load_and_split(text_splitter=splitter)\n",
"\n",
"print(f\"Documents from PDF: {len(docs_from_pdf)}.\")\n",
"inserted_ids_from_pdf = vstore.add_documents(docs_from_pdf)\n",
"print(f\"Inserted {len(inserted_ids_from_pdf)} documents.\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5010a66c-4298-4e32-82b5-2da0d36a5c70",
"metadata": {},
"outputs": [],
"source": [
"retriever = vstore.as_retriever(search_kwargs={'k': 3})\n",
"\n",
"philo_template = \"\"\"\n",
"You are a philosopher that draws inspiration from great thinkers of the past\n",
"to craft well-thought answers to user questions. Use the provided context as the basis\n",
"for your answers and do not make up new reasoning paths - just mix-and-match what you are given.\n",
"Your answers must be concise and to the point, and refrain from answering about other topics than philosophy.\n",
"\n",
"CONTEXT:\n",
"{context}\n",
"\n",
"QUESTION: {question}\n",
"\n",
"YOUR ANSWER:\"\"\"\n",
"\n",
"philo_prompt = ChatPromptTemplate.from_template(philo_template)\n",
"\n",
"llm = ChatOpenAI()\n",
"\n",
"chain = (\n",
"    {\"context\": retriever, \"question\": RunnablePassthrough()}\n",
"    | philo_prompt\n",
"    | llm\n",
"    | StrOutputParser()\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "fcbc1296-6c7c-478b-b55b-533ba4e54ddb",
"metadata": {},
"outputs": [],
"source": [
"chain.invoke(\"How does Russel elaborate on Peirce's idea of the security blanket?\")"
]
},
{
"cell_type": "markdown",
"id": "869ab448-a029-4692-aefc-26b85513314d",
"metadata": {},
"source": [
"For more, check out a complete RAG template using Astra DB [here](https://github.com/langchain-ai/langchain/tree/master/templates/rag-astradb)."
]
},
{
"cell_type": "markdown",
"id": "177610c7-50d0-4b7b-8634-b03338054c8e",
"metadata": {},
"source": [
"### Cleanup"
]
},
{
"cell_type": "markdown",
"id": "0da4d19f-9878-4d3d-82c9-09cafca20322",
"metadata": {},
"source": [
"If you want to completely delete the collection from your Astra DB instance, run this.\n",
"\n",
"_(You will lose the data you stored in it.)_"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "fd405a13-6f71-46fa-87e6-167238e9c25e",
"metadata": {},
"outputs": [],
"source": [
"vstore.delete_collection()"
]
},
{
"cell_type": "markdown",
"id": "94ebaab1-7cbf-4144-a147-7b0e32c43069",
"metadata": {},
"source": [
"## Apache Cassandra and Astra DB through CQL"
]
},
{
"cell_type": "markdown",
"id": "bc3931b4-211d-4f84-bcc0-51c127e3027c",
"metadata": {},
"source": [
"[Cassandra](https://cassandra.apache.org/) is a NoSQL, row-oriented, highly scalable and highly available database. Starting with version 5.0, the database ships with [vector search capabilities](https://cassandra.apache.org/doc/trunk/cassandra/vector-search/overview.html).\n",
"\n",
"DataStax [Astra DB through CQL](https://docs.datastax.com/en/astra-serverless/docs/vector-search/quickstart.html) is a managed serverless database built on Cassandra, offering the same interface and strengths."
]
},
{
"cell_type": "markdown",
"id": "a0055fbf-448d-4e46-9c40-28d43df25ca3",
"metadata": {},
"source": [
"#### What sets this case apart from \"Astra DB\" above?\n",
"\n",
"Thanks to LangChain having a standardized `VectorStore` interface, most of the \"Astra DB\" section above applies to this case as well. However, this time the database uses the CQL protocol, which means you'll use a _different_ class and instantiate it in another way.\n",
"\n",
"The cells below show how you should get your `vstore` object in this case and how you can clean up the database resources at the end: for the rest, i.e. the actual usage of the vector store, you will be able to run the very code that was shown above.\n",
"\n",
"In other words, running this demo in full with Cassandra or Astra DB through CQL means:\n",
"\n",
"- **initialization as shown below**\n",
"- \"Load a dataset\", _see above section_\n",
"- \"Run simple searches\", _see above section_\n",
"- \"MMR search\", _see above section_\n",
"- \"Deleting stored documents\", _see above section_\n",
"- \"A minimal RAG chain\", _see above section_\n",
"- **cleanup as shown below**"
]
},
{
"cell_type": "markdown",
"id": "23d12be2-745f-4e72-a82c-334a887bc7cd",
"metadata": {},
"source": [
"### Initialization"
]
},
{
"cell_type": "markdown",
"id": "e3212542-79be-423e-8e1f-b8d725e3cda8",
"metadata": {},
"source": [
"The class to use is the following:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "941af73e-a090-4fba-b23c-595757d470eb",
"metadata": {},
"outputs": [],
"source": [
"from langchain.vectorstores import Cassandra"
]
},
{
"cell_type": "markdown",
"id": "414d1e72-f7c9-4b6d-bf6f-16075712c7e3",
"metadata": {},
"source": [
"Now, depending on whether you connect to a Cassandra cluster or to Astra DB through CQL, you will provide different parameters when creating the vector store object."
]
},
{
"cell_type": "markdown",
"id": "48ecca56-71a4-4a91-b198-29384c44ce27",
"metadata": {},
"source": [
"#### Initialization (Cassandra cluster)"
]
},
{
"cell_type": "markdown",
"id": "55ebe958-5654-43e0-9aed-d607ffd3fa48",
"metadata": {},
"source": [
"In this case, you first need to create a `cassandra.cluster.Session` object, as described in the [Cassandra driver documentation](https://docs.datastax.com/en/developer/python-driver/latest/api/cassandra/cluster/#module-cassandra.cluster). The details vary (e.g. with network settings and authentication), but this might be something like:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4642dafb-a065-4063-b58c-3d276f5ad07e",
"metadata": {},
"outputs": [],
"source": [
"from cassandra.cluster import Cluster\n",
"\n",
"cluster = Cluster([\"127.0.0.1\"])\n",
"session = cluster.connect()"
]
},
{
"cell_type": "markdown",
"id": "624c93bf-fb46-4350-bcfa-09ca09dc068f",
"metadata": {},
"source": [
"You can now set the session, along with your desired keyspace name, as a global CassIO parameter:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "92a4ab28-1c4f-4dad-9671-d47e0b1dde7b",
"metadata": {},
"outputs": [],
"source": [
"import cassio\n",
"\n",
"CASSANDRA_KEYSPACE = input(\"CASSANDRA_KEYSPACE = \")\n",
"\n",
"cassio.init(session=session, keyspace=CASSANDRA_KEYSPACE)"
]
},
{
"cell_type": "markdown",
"id": "3b87a824-36f1-45b4-b54c-efec2a2de216",
"metadata": {},
"source": [
"Now you can create the vector store:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "853a2a88-a565-4e24-8789-d78c213954a6",
"metadata": {},
"outputs": [],
"source": [
"vstore = Cassandra(\n",
"    embedding=embe,\n",
"    table_name=\"cassandra_vector_demo\",\n",
"    # session=None, keyspace=None  # Uncomment on older versions of LangChain\n",
")"
]
},
{
"cell_type": "markdown",
"id": "768ddf7a-0c3e-4134-ad38-25ac53c3da7a",
"metadata": {},
"source": [
"#### Initialization (Astra DB through CQL)"
]
},
{
"cell_type": "markdown",
"id": "4ed4269a-b7e7-4503-9e66-5a11335c7681",
"metadata": {},
"source": [
"In this case you initialize CassIO with the following connection parameters:\n",
"\n",
"- the Database ID, e.g. `01234567-89ab-cdef-0123-456789abcdef`\n",
"- the Token, e.g. `AstraCS:6gBhNmsk135....` (it must be a \"Database Administrator\" token)\n",
"- Optionally a Keyspace name (if omitted, the default one for the database will be used)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5fa6bd74-d4b2-45c5-9757-96dddc6242fb",
"metadata": {},
"outputs": [],
"source": [
"ASTRA_DB_ID = input(\"ASTRA_DB_ID = \")\n",
"ASTRA_DB_TOKEN = getpass(\"ASTRA_DB_TOKEN = \")\n",
"\n",
"desired_keyspace = input(\"ASTRA_DB_KEYSPACE (optional, can be left empty) = \")\n",
"if desired_keyspace:\n",
"    ASTRA_DB_KEYSPACE = desired_keyspace\n",
"else:\n",
"    ASTRA_DB_KEYSPACE = None"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "add6e585-17ff-452e-8ef6-7e485ead0b06",
"metadata": {},
"outputs": [],
"source": [
"import cassio\n",
"\n",
"cassio.init(\n",
"    database_id=ASTRA_DB_ID,\n",
"    token=ASTRA_DB_TOKEN,\n",
"    keyspace=ASTRA_DB_KEYSPACE,\n",
")"
]
},
{
"cell_type": "markdown",
"id": "b305823c-bc98-4f3d-aabb-d7eb663ea421",
"metadata": {},
"source": [
"Now you can create the vector store:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f45f3038-9d59-41cc-8b43-774c6aa80295",
"metadata": {},
"outputs": [],
"source": [
"vstore = Cassandra(\n",
"    embedding=embe,\n",
"    table_name=\"cassandra_vector_demo\",\n",
"    # session=None, keyspace=None  # Uncomment on older versions of LangChain\n",
")"
]
},
{
"cell_type": "markdown",
"id": "39284918-cf8a-49bb-a2d3-aef285bb2ffa",
"metadata": {},
"source": [
"### Usage of the vector store"
]
},
{
"cell_type": "markdown",
"id": "3cc1aead-d6ec-48a3-affe-1d0cffa955a9",
"metadata": {},
"source": [
"_See the sections \"Load a dataset\" through \"A minimal RAG chain\" above._\n",
"\n",
"Speaking of the latter, you can check out a full RAG template for Astra DB through CQL [here](https://github.com/langchain-ai/langchain/tree/master/templates/cassandra-entomology-rag)."
]
},
{
"cell_type": "markdown",
"id": "096397d8-6622-4685-9f9d-7e238beca467",
"metadata": {},
"source": [
"### Cleanup"
]
},
{
"cell_type": "markdown",
"id": "cc1e74f9-5500-41aa-836f-235b1ed5f20c",
"metadata": {},
"source": [
"The following essentially retrieves the `Session` object from CassIO and runs a CQL `DROP TABLE` statement with it:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b5b82c33-0e77-4a37-852c-8d50edbdd991",
"metadata": {},
"outputs": [],
"source": [
"cassio.config.resolve_session().execute(\n",
"    f\"DROP TABLE {cassio.config.resolve_keyspace()}.cassandra_vector_demo;\"\n",
")"
]
},
{
"cell_type": "markdown",
"id": "c10ece4d-ae06-42ab-baf4-4d0ac2051743",
"metadata": {},
"source": [
"### Learn more"
]
},
{
"cell_type": "markdown",
"id": "51ea8b69-7e15-458f-85aa-9fa199f95f9c",
"metadata": {},
"source": [
"For more information, extended quickstarts, and additional usage examples, please visit the [CassIO documentation](https://cassio.org/frameworks/langchain/about/) on using the LangChain `Cassandra` vector store."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
@@ -1,165 +0,0 @@
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Baidu Cloud ElasticSearch VectorSearch\n",
"\n",
">[Baidu Cloud VectorSearch](https://cloud.baidu.com/doc/BES/index.html?from=productToDoc) is a fully managed, enterprise-level distributed search and analysis service which is 100% compatible with open source. Baidu Cloud VectorSearch provides a low-cost, high-performance, and reliable retrieval and analysis platform for structured/unstructured data. As a vector database, it supports multiple index types and similarity distance methods.\n",
"\n",
">`Baidu Cloud ElasticSearch` provides a privilege management mechanism that lets you configure cluster privileges freely, so as to further ensure data security.\n",
"\n",
"This notebook shows how to use functionality related to the `Baidu Cloud ElasticSearch VectorStore`.\n",
"To run, you should have a [Baidu Cloud ElasticSearch](https://cloud.baidu.com/product/bes.html) instance up and running:\n",
"\n",
"Read the [help document](https://cloud.baidu.com/doc/BES/s/8llyn0hh4) to quickly get familiar with and configure a Baidu Cloud ElasticSearch instance."
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"After the instance is up and running, follow these steps to split documents, get embeddings, connect to the Baidu Cloud ElasticSearch instance, index documents, and perform vector retrieval."
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"We need to install the following Python packages first."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#!pip install elasticsearch==7.11.0"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"First, we want to use `QianfanEmbeddings`, so we have to get the Qianfan AK and SK. Details on Qianfan are available at [Baidu Qianfan Workshop](https://cloud.baidu.com/product/wenxinworkshop)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import getpass\n",
"\n",
"os.environ[\"QIANFAN_AK\"] = getpass.getpass(\"Your Qianfan AK:\")\n",
"os.environ[\"QIANFAN_SK\"] = getpass.getpass(\"Your Qianfan SK:\")"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"Secondly, split documents and get embeddings."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders import TextLoader\n",
"from langchain.text_splitter import CharacterTextSplitter\n",
"\n",
"loader = TextLoader(\"../../../state_of_the_union.txt\")\n",
"documents = loader.load()\n",
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
"docs = text_splitter.split_documents(documents)\n",
"\n",
"from langchain.embeddings import QianfanEmbeddingsEndpoint\n",
"\n",
"embeddings = QianfanEmbeddingsEndpoint()"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"Then, create an accessible Baidu ElasticSearch instance."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Create a bes instance and index docs.\n",
"from langchain.vectorstores import BESVectorStore\n",
"\n",
"bes = BESVectorStore.from_documents(\n",
"    documents=docs,\n",
"    embedding=embeddings,\n",
"    bes_url=\"your bes cluster url\",\n",
"    index_name=\"your vector index\",\n",
")\n",
"bes.client.indices.refresh(index=\"your vector index\")"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"Finally, query and retrieve data."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
"docs = bes.similarity_search(query)\n",
"print(docs[0].page_content)"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"Please feel free to contact <liuboyao@baidu.com> if you encounter any problems during use, and we will do our best to support you."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"name": "python",
"version": "3.9.17"
},
"orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "aee8b7b246df8f9039afb4144a1f6fd8d2ca17a180786b69acc140d282b71a49"
}
}
},
"nbformat": 4,
"nbformat_minor": 2
}
326 docs/docs/integrations/vectorstores/cassandra.ipynb Normal file
@@ -0,0 +1,326 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "683953b3",
"metadata": {},
"source": [
"# Cassandra\n",
"\n",
">[Apache Cassandra®](https://cassandra.apache.org) is a NoSQL, row-oriented, highly scalable and highly available database.\n",
"\n",
"Newest Cassandra releases natively [support](https://cwiki.apache.org/confluence/display/CASSANDRA/CEP-30%3A+Approximate+Nearest+Neighbor(ANN)+Vector+Search+via+Storage-Attached+Indexes) Vector Similarity Search.\n",
"\n",
"To run this notebook you need either a running Cassandra cluster equipped with Vector Search capabilities (in pre-release at the time of writing) or a DataStax Astra DB instance running in the cloud (you can get one for free at [datastax.com](https://astra.datastax.com)). Check [cassio.org](https://cassio.org/start_here/) for more information."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b4c41cad-08ef-4f72-a545-2151e4598efe",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"!pip install \"cassio>=0.1.0\""
]
},
{
"cell_type": "markdown",
"id": "b7e46bb0",
"metadata": {},
"source": [
"### Please provide database connection parameters and secrets:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "36128a32",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import getpass\n",
"\n",
"database_mode = (input(\"\\n(C)assandra or (A)stra DB? \")).upper()\n",
"\n",
"keyspace_name = input(\"\\nKeyspace name? \")\n",
"\n",
"if database_mode == \"A\":\n",
"    ASTRA_DB_APPLICATION_TOKEN = getpass.getpass('\\nAstra DB Token (\"AstraCS:...\") ')\n",
"    #\n",
"    ASTRA_DB_SECURE_BUNDLE_PATH = input(\"Full path to your Secure Connect Bundle? \")\n",
"elif database_mode == \"C\":\n",
"    CASSANDRA_CONTACT_POINTS = input(\n",
"        \"Contact points? (comma-separated, empty for localhost) \"\n",
"    ).strip()"
]
},
{
"cell_type": "markdown",
"id": "4f22aac2",
"metadata": {},
"source": [
"#### Depending on whether the database is a local Cassandra or a cloud-based Astra DB, create the corresponding connection \"Session\" object"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "677f8576",
"metadata": {},
"outputs": [],
"source": [
"from cassandra.cluster import Cluster\n",
"from cassandra.auth import PlainTextAuthProvider\n",
"\n",
"if database_mode == \"C\":\n",
"    if CASSANDRA_CONTACT_POINTS:\n",
"        cluster = Cluster(\n",
"            [cp.strip() for cp in CASSANDRA_CONTACT_POINTS.split(\",\") if cp.strip()]\n",
"        )\n",
"    else:\n",
"        cluster = Cluster()\n",
"    session = cluster.connect()\n",
"elif database_mode == \"A\":\n",
"    ASTRA_DB_CLIENT_ID = \"token\"\n",
"    cluster = Cluster(\n",
"        cloud={\n",
"            \"secure_connect_bundle\": ASTRA_DB_SECURE_BUNDLE_PATH,\n",
"        },\n",
"        auth_provider=PlainTextAuthProvider(\n",
"            ASTRA_DB_CLIENT_ID,\n",
"            ASTRA_DB_APPLICATION_TOKEN,\n",
"        ),\n",
"    )\n",
"    session = cluster.connect()\n",
"else:\n",
"    raise NotImplementedError"
]
},
{
"cell_type": "markdown",
"id": "320af802-9271-46ee-948f-d2453933d44b",
"metadata": {},
"source": [
"### Please provide OpenAI access key\n",
"\n",
"We want to use `OpenAIEmbeddings` so we have to get the OpenAI API Key."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ffea66e4-bc23-46a9-9580-b348dfe7b7a7",
"metadata": {},
"outputs": [],
"source": [
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")"
]
},
{
"cell_type": "markdown",
"id": "e98a139b",
"metadata": {},
"source": [
"### Creation and usage of the Vector Store"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "aac9563e",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
"from langchain.text_splitter import CharacterTextSplitter\n",
"from langchain.vectorstores import Cassandra\n",
"from langchain.document_loaders import TextLoader"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a3c3999a",
"metadata": {},
"outputs": [],
"source": [
"SOURCE_FILE_NAME = \"../../modules/state_of_the_union.txt\"\n",
"\n",
"loader = TextLoader(SOURCE_FILE_NAME)\n",
"documents = loader.load()\n",
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
"docs = text_splitter.split_documents(documents)\n",
"\n",
"embedding_function = OpenAIEmbeddings()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6e104aee",
"metadata": {},
"outputs": [],
"source": [
"table_name = \"my_vector_db_table\"\n",
"\n",
"docsearch = Cassandra.from_documents(\n",
"    documents=docs,\n",
"    embedding=embedding_function,\n",
"    session=session,\n",
"    keyspace=keyspace_name,\n",
"    table_name=table_name,\n",
")\n",
"\n",
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
"docs = docsearch.similarity_search(query)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f509ee02",
"metadata": {},
"outputs": [],
"source": [
"## if you already have an index, you can load it and use it like this:\n",
"\n",
"# docsearch_preexisting = Cassandra(\n",
"#     embedding=embedding_function,\n",
"#     session=session,\n",
"#     keyspace=keyspace_name,\n",
"#     table_name=table_name,\n",
"# )\n",
"\n",
"# docs = docsearch_preexisting.similarity_search(query, k=2)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9c608226",
"metadata": {},
"outputs": [],
"source": [
"print(docs[0].page_content)"
]
},
{
"cell_type": "markdown",
"id": "d46d1452",
"metadata": {},
"source": [
"### Maximal Marginal Relevance Searches\n",
"\n",
"In addition to using similarity search in the retriever object, you can also use `mmr` as the retriever.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a359ed74",
"metadata": {},
"outputs": [],
"source": [
"retriever = docsearch.as_retriever(search_type=\"mmr\")\n",
"matched_docs = retriever.get_relevant_documents(query)\n",
"for i, d in enumerate(matched_docs):\n",
"    print(f\"\\n## Document {i}\\n\")\n",
"    print(d.page_content)"
]
},
{
"cell_type": "markdown",
"id": "7c477287",
"metadata": {},
"source": [
"Or use `max_marginal_relevance_search` directly:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9ca82740",
"metadata": {},
"outputs": [],
"source": [
"found_docs = docsearch.max_marginal_relevance_search(query, k=2, fetch_k=10)\n",
"for i, doc in enumerate(found_docs):\n",
"    print(f\"{i + 1}.\", doc.page_content, \"\\n\")"
]
},
{
"cell_type": "markdown",
"id": "da791c5f",
"metadata": {},
"source": [
"### Metadata filtering\n",
"\n",
"You can specify filtering on metadata when running searches in the vector store. By default, when inserting documents, the only metadata is the `\"source\"` (but you can customize the metadata at insertion time).\n",
"\n",
"Since only one file was inserted, this is just a demonstration of how filters are passed:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "93f132fa",
"metadata": {},
"outputs": [],
"source": [
"filter = {\"source\": SOURCE_FILE_NAME}\n",
"filtered_docs = docsearch.similarity_search(query, filter=filter, k=5)\n",
"print(f\"{len(filtered_docs)} documents retrieved.\")\n",
"print(f\"{filtered_docs[0].page_content[:64]} ...\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1b413ec4",
"metadata": {},
"outputs": [],
"source": [
"filter = {\"source\": \"nonexisting_file.txt\"}\n",
"filtered_docs2 = docsearch.similarity_search(query, filter=filter)\n",
"print(f\"{len(filtered_docs2)} documents retrieved.\")"
]
},
{
"cell_type": "markdown",
"id": "a0fea764",
"metadata": {},
"source": [
"Please visit the [CassIO documentation](https://cassio.org/frameworks/langchain/about/) for more on using vector stores with LangChain."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
@@ -104,14 +104,11 @@
"source": [
"from dingodb import DingoDB\n",
"\n",
"index_name = \"langchain_demo\"\n",
"index_name = \"langchain-demo\"\n",
"\n",
"dingo_client = DingoDB(user=\"\", password=\"\", host=[\"127.0.0.1:13000\"])\n",
"# First, check if our index already exists. If it doesn't, we create it\n",
"if (\n",
"    index_name not in dingo_client.get_index()\n",
"    and index_name.upper() not in dingo_client.get_index()\n",
"):\n",
"if index_name not in dingo_client.get_index():\n",
"    # we create a new index, modify to your own\n",
"    dingo_client.create_index(\n",
"        index_name=index_name, dimension=1536, metric_type=\"cosine\", auto_id=False\n",

@@ -143,7 +143,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents.format_scratchpad import format_to_openai_function_messages\n",
"from langchain.agents.format_scratchpad import format_to_openai_functions\n",
"from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser"
]
},
@@ -157,7 +157,7 @@
"agent = (\n",
"    {\n",
"        \"input\": lambda x: x[\"input\"],\n",
"        \"agent_scratchpad\": lambda x: format_to_openai_function_messages(\n",
"        \"agent_scratchpad\": lambda x: format_to_openai_functions(\n",
"            x[\"intermediate_steps\"]\n",
"        ),\n",
"    }\n",

@@ -115,7 +115,9 @@
"cell_type": "code",
"execution_count": 6,
"id": "ba8e4cbe",
"metadata": {},
"metadata": {
"scrolled": false
},
"outputs": [
{
"name": "stdout",
@@ -252,7 +254,9 @@
"cell_type": "code",
"execution_count": 19,
"id": "4362ebc7",
"metadata": {},
"metadata": {
"scrolled": false
},
"outputs": [
{
"name": "stdout",
@@ -454,7 +458,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
"version": "3.10.1"
}
},
"nbformat": 4,
@@ -1,222 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e10aa932",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# OpenAI tools\n",
|
||||
"\n",
|
||||
"With LCEL we can easily construc"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "ec89be68",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# ! pip install -U openai duckduckgo-search"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "82787d8d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Initialize tools\n",
|
||||
"\n",
|
||||
"We will first create some tools we can use"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "b812b982",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents import initialize_agent, AgentType, Tool\n",
|
||||
"from langchain.agents.format_scratchpad.openai_tools import format_to_openai_tool_messages\n",
|
||||
"from langchain.agents.output_parsers.openai_tools import OpenAIToolsAgentOutputParser\n",
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
|
||||
"from langchain.tools import DuckDuckGoSearchRun\n",
|
||||
"from langchain.tools.render import format_tool_to_openai_tool"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "23fc0aa6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = ChatOpenAI(temperature=0, model=\"gpt-3.5-turbo-1106\")\n",
|
||||
"tools = [DuckDuckGoSearchRun()]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "39c3ba21",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Using LCEL\n",
|
||||
"\n",
|
||||
"We will first use LangChain Expression Language to create this agent"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "55292bed",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\"system\", \"You are a helpful assistant\"),\n",
|
||||
" (\"user\", \"{input}\"),\n",
|
||||
" MessagesPlaceholder(variable_name=\"agent_scratchpad\"),\n",
|
||||
" ]\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "552421b3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm_with_tools = llm.bind(tools=[format_tool_to_openai_tool(t)for t in tools])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "bf514eb4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent = (\n",
|
||||
" {\n",
|
||||
" \"input\": lambda x: x[\"input\"],\n",
|
||||
" \"agent_scratchpad\": lambda x: format_to_openai_tool_messages(\n",
|
||||
" x[\"intermediate_steps\"]\n",
|
||||
" ),\n",
|
||||
" }\n",
|
||||
" | prompt\n",
|
||||
" | llm_with_tools\n",
|
||||
" | OpenAIToolsAgentOutputParser()\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "5125573e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents import AgentExecutor"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "bdc7e506",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "2cd65218",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m\n",
|
||||
"Invoking: `duckduckgo_search` with `weather in Los Angeles today`\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[0m\u001b[36;1m\u001b[1;3m60 °F like 60° Clear N 0 Today's temperature is forecast to be NEARLY THE SAME as yesterday. Radar Satellite WunderMap |Nexrad Today Wed 11/08 High 78 °F 0% Precip. / 0.00 in Sunny. High 78F.... Current Conditions Radar Forecasts Rivers and Lakes Climate and Past Weather Local Programs Fire Weather Show Caption Click a location below for detailed forecast. Last Map Update: Tue, Nov. 7, 2023 at 5:03:23 pm PST Watches, Warnings & Advisories Zoom Out Gale Warning Small Craft Advisory Wind Advisory Fire Weather Watch LOS ANGELES (KABC) -- Southern California will see mostly clear skies and mild temperatures on Monday. Los Angeles and Orange counties will see a few clouds in the morning, but they'll clear up... Storm No. 1: The first storm on Saturday will move quickly through Southern California, with most of the rain falling Saturday night. Light rain is possible late Friday for the Ventura County... Weather in Los Angeles today, California. Friday, 13 October 2023 . Day 1:00 PM +77°F . Broken cloud sky\u001b[0m\u001b[32;1m\u001b[1;3m\n",
|
||||
"Invoking: `duckduckgo_search` with `weather in New York City today`\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[0m\u001b[36;1m\u001b[1;3mToday Hourly 10-Day Calendar History Wundermap access_time 11:38 PM EDT on October 31, 2023 (GMT -4) | Updated 22 hours ago --° | 49° 56 °F like 56° Rain Shower N 5 Gusts 7mph Radar Satellite... 49°F 9°C More Information: Local Forecast Office More Local Wx 3 Day History Mobile Weather Hourly Weather Forecast Extended Forecast for New York NY Similar City Names Overnight Mostly Cloudy Low: 48 °F Saturday Partly Sunny High: 58 °F Saturday Night Mostly Cloudy Low: 48 °F Sunday Mostly Sunny High: 64 °F Sunday Night Mostly Clear Low: 45 °F 13°C Get Detailed info Tonight Partly Cloudy Low: 47°F Sunday Mostly Sunny High: 62°F change location New York, NY Weather Forecast Office NWS Forecast Office New York, NY Weather.gov > New York, NY Current Hazards Current Conditions Radar Forecasts Rivers and Lakes Climate and Past Weather Local Programs The weather today in New York City will be comfortable with temperatures around 68°F. During the evening and night time the temperatures will drop to 48 ° F. For deep dive information check out our hourly weather forecast for today down the page. Temperature. 68 ° / 48 °. Chance of rain. 0. %. Today's Weather - New York, NY Nov 02, 2023 3:32 AM Churchill School -- Feels like -- Hi -- Lo -- -- Live Radar Weather Radar Map WEATHER DETAILS New York, NY Windchill -- Daily Rain -- Dew Point -- Monthly Rain -- Humidity -- Avg. Wind -- Pressure -- Wind Gust -- Sunrise -- Moon -- Sunset -- UV Index Low WEATHER FORECAST New York, NY\u001b[0m\u001b[32;1m\u001b[1;3m\n",
|
||||
"Invoking: `duckduckgo_search` with `weather in San Francisco today`\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[0m\u001b[36;1m\u001b[1;3mWeather Underground provides local & long-range weather forecasts, weatherreports, maps & tropical weather conditions for the San Francisco area. ... Today Wed 11/08 High 67 ... Radar Forecasts Rivers and Lakes Climate and Past Weather Local Programs Click a location below for detailed forecast. Last Map Update: Wed, Nov. 8, 2023 at 5:03:31 am PST Watches, Warnings & Advisories Zoom Out Small Craft Advisory Frost Advisory Text Product Selector (Selected product opens in current window) Radar Forecasts Rivers and Lakes Climate and Past Weather Local Programs Rain Continues Monday Show Caption Click a location below for detailed forecast. Last Map Update: Tue, Nov. 7, 2023 at 1:05:27 am PST Watches, Warnings & Advisories Zoom Out Winter Weather Advisory Small Craft Advisory Gale Watch Today's and tonight's San Francisco, CA weather forecast, weather conditions and Doppler radar from The Weather Channel and Weather.com Nov 8, 2023. The National Weather Service forecast for the greater San Francisco Bay Area on Wednesday calls for sunny skies, with clouds expected to increase over some areas around the bay during ...\u001b[0m\u001b[32;1m\u001b[1;3mHere's the weather for today in Los Angeles, New York City, and San Francisco:\n",
|
||||
"\n",
|
||||
"- Los Angeles: The temperature is around 60°F with clear skies and mild temperatures. There are no precipitation forecasts for today.\n",
|
||||
"- New York City: The temperature is around 68°F during the day and will drop to 48°F in the evening and night. There is no chance of rain forecasted for today.\n",
|
||||
"- San Francisco: The temperature is expected to reach a high of 67°F with sunny skies. Clouds are expected to increase over some areas around the bay during the day.\n",
|
||||
"\n",
|
||||
"If you need more detailed information, feel free to ask!\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'input': \"What's the weather in LA, NYC, and SF today\",\n",
|
||||
" 'output': \"Here's the weather for today in Los Angeles, New York City, and San Francisco:\\n\\n- Los Angeles: The temperature is around 60°F with clear skies and mild temperatures. There are no precipitation forecasts for today.\\n- New York City: The temperature is around 68°F during the day and will drop to 48°F in the evening and night. There is no chance of rain forecasted for today.\\n- San Francisco: The temperature is expected to reach a high of 67°F with sunny skies. Clouds are expected to increase over some areas around the bay during the day.\\n\\nIf you need more detailed information, feel free to ask!\"}"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent_executor.invoke(\n",
|
||||
" {\n",
|
||||
" \"input\": \"What's the weather in LA, NYC, and SF today\"\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c750177f-af6c-4617-b2aa-69e3f1153a3e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -205,7 +205,7 @@
|
||||
"\n",
|
||||
"- prompt: a simple prompt with placeholders for the user's question and then the `agent_scratchpad` (any intermediate steps)\n",
|
||||
"- tools: we can attach the tools and `Response` format to the LLM as functions\n",
|
||||
"- format scratchpad: in order to format the `agent_scratchpad` from intermediate steps, we will use the standard `format_to_openai_function_messages`. This takes intermediate steps and formats them as AIMessages and FunctionMessages.\n",
|
||||
"- format scratchpad: in order to format the `agent_scratchpad` from intermediate steps, we will use the standard `format_to_openai_functions`. This takes intermediate steps and formats them as AIMessages and FunctionMessages.\n",
|
||||
"- output parser: we will use our custom parser above to parse the response of the LLM\n",
|
||||
"- AgentExecutor: we will use the standard AgentExecutor to run the loop of agent-tool-agent-tool..."
|
||||
]
|
||||
@@ -220,7 +220,7 @@
|
||||
"from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.tools.render import format_tool_to_openai_function\n",
|
||||
"from langchain.agents.format_scratchpad import format_to_openai_function_messages\n",
|
||||
"from langchain.agents.format_scratchpad import format_to_openai_functions\n",
|
||||
"from langchain.agents import AgentExecutor"
|
||||
]
|
||||
},
|
||||
@@ -278,7 +278,7 @@
|
||||
" {\n",
|
||||
" \"input\": lambda x: x[\"input\"],\n",
|
||||
" # Format agent scratchpad from intermediate steps\n",
|
||||
" \"agent_scratchpad\": lambda x: format_to_openai_function_messages(\n",
|
||||
" \"agent_scratchpad\": lambda x: format_to_openai_functions(\n",
|
||||
" x[\"intermediate_steps\"]\n",
|
||||
" ),\n",
|
||||
" }\n",
|
||||
|
||||
@@ -157,11 +157,11 @@ We will import two last utility functions: a component for formatting intermedia
|
||||
|
||||
|
||||
```python
|
||||
from langchain.agents.format_scratchpad import format_to_openai_function_messages
|
||||
from langchain.agents.format_scratchpad import format_to_openai_functions
|
||||
from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
|
||||
agent = {
|
||||
"input": lambda x: x["input"],
|
||||
"agent_scratchpad": lambda x: format_to_openai_function_messages(x['intermediate_steps'])
|
||||
"agent_scratchpad": lambda x: format_to_openai_functions(x['intermediate_steps'])
|
||||
} | prompt | llm_with_tools | OpenAIFunctionsAgentOutputParser()
|
||||
```
|
||||
|
||||
@@ -287,7 +287,7 @@ We can then put it all together!
|
||||
```python
|
||||
agent = {
|
||||
"input": lambda x: x["input"],
|
||||
"agent_scratchpad": lambda x: format_to_openai_function_messages(x['intermediate_steps']),
|
||||
"agent_scratchpad": lambda x: format_to_openai_functions(x['intermediate_steps']),
|
||||
"chat_history": lambda x: x["chat_history"]
|
||||
} | prompt | llm_with_tools | OpenAIFunctionsAgentOutputParser()
|
||||
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
|
||||
|
||||
@@ -58,9 +58,9 @@
|
||||
"1. Do not use with a store that has been pre-populated with content independently of the indexing API, as the record manager will not know that records have been inserted previously.\n",
|
||||
"2. Only works with LangChain `vectorstore`'s that support:\n",
|
||||
" * document addition by id (`add_documents` method with `ids` argument)\n",
|
||||
" * delete by id (`delete` method with `ids` argument)\n",
|
||||
" * delete by id (`delete` method with)\n",
|
||||
"\n",
|
||||
"Compatible Vectorstores: `AnalyticDB`, `AstraDB`, `AwaDB`, `Bagel`, `Cassandra`, `Chroma`, `DashVector`, `DeepLake`, `Dingo`, `ElasticVectorSearch`, `ElasticsearchStore`, `FAISS`, `MyScale`, `PGVector`, `Pinecone`, `Qdrant`, `Redis`, `ScaNN`, `SupabaseVectorStore`, `TimescaleVector`, `Vald`, `Vearch`, `VespaStore`, `Weaviate`, `ZepVectorStore`.\n",
|
||||
"Compatible Vectorstores: `AnalyticDB`, `AwaDB`, `Bagel`, `Cassandra`, `Chroma`, `DashVector`, `DeepLake`, `Dingo`, `ElasticVectorSearch`, `ElasticsearchStore`, `FAISS`, `MyScale`, `PGVector`, `Pinecone`, `Qdrant`, `Redis`, `ScaNN`, `SupabaseVectorStore`, `TimescaleVector`, `Vald`, `Vearch`, `VespaStore`, `Weaviate`, `ZepVectorStore`.\n",
|
||||
" \n",
|
||||
"## Caution\n",
|
||||
"\n",
|
||||
|
||||
@@ -524,6 +524,7 @@
|
||||
"from langchain.agents.agent_types import AgentType\n",
|
||||
"\n",
|
||||
"db = SQLDatabase.from_uri(\"sqlite:///Chinook.db\")\n",
|
||||
"llm = OpenAI(temperature=0, verbose=True)\n",
|
||||
"\n",
|
||||
"agent_executor = create_sql_agent(\n",
|
||||
" llm=OpenAI(temperature=0),\n",
|
||||
|
||||
@@ -159,12 +159,6 @@ const config = {
|
||||
sidebarId: "integrations",
|
||||
label: "Integrations",
|
||||
},
|
||||
{
|
||||
type: "docSidebar",
|
||||
position: "left",
|
||||
sidebarId: "guides",
|
||||
label: "Guides",
|
||||
},
|
||||
{
|
||||
href: "https://api.python.langchain.com",
|
||||
label: "API",
|
||||
@@ -198,31 +192,30 @@ const config = {
|
||||
{ label: "Gallery", href: "https://github.com/kyrolabs/awesome-langchain" }
|
||||
]
|
||||
},
|
||||
{
|
||||
href: "https://chat.langchain.com",
|
||||
label: "Chat our docs",
|
||||
position: "right",
|
||||
},
|
||||
{
|
||||
type: "dropdown",
|
||||
label: "Also by LangChain",
|
||||
position: "right",
|
||||
items: [
|
||||
{
|
||||
href: "https://smith.langchain.com",
|
||||
label: "LangSmith",
|
||||
href: "https://chat.langchain.com",
|
||||
label: "Chat our docs",
|
||||
},
|
||||
{
|
||||
href: "https://github.com/langchain-ai/langserve",
|
||||
label: "LangServe GitHub",
|
||||
href: "https://smith.langchain.com",
|
||||
label: "LangSmith",
|
||||
},
|
||||
{
|
||||
href: "https://smith.langchain.com/hub",
|
||||
label: "LangChain Hub",
|
||||
},
|
||||
{
|
||||
href: "https://js.langchain.com",
|
||||
label: "JS/TS Docs",
|
||||
href: "https://github.com/langchain-ai/langserve",
|
||||
label: "LangServe",
|
||||
},
|
||||
{
|
||||
href: "https://js.langchain.com/docs",
|
||||
label: "JS/TS",
|
||||
},
|
||||
]
|
||||
},
|
||||
|
||||
6
docs/package-lock.json
generated
6
docs/package-lock.json
generated
@@ -10693,9 +10693,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/postcss": {
|
||||
"version": "8.4.31",
|
||||
"resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz",
|
||||
"integrity": "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==",
|
||||
"version": "8.4.26",
|
||||
"resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.26.tgz",
|
||||
"integrity": "sha512-jrXHFF8iTloAenySjM/ob3gSj7pCu0Ji49hnjqzsgSRa50hkWCKD0HQ+gMNJkW38jBI68MpAAg7ZWwHwX8NMMw==",
|
||||
"funding": [
|
||||
{
|
||||
"type": "opencollective",
|
||||
|
||||
@@ -26,7 +26,7 @@ module.exports = {
|
||||
label: "Get started",
|
||||
collapsed: false,
|
||||
collapsible: false,
|
||||
items: [{ type: "autogenerated", dirName: "get_started" }, "security"],
|
||||
items: [{ type: "autogenerated", dirName: "get_started" }],
|
||||
link: {
|
||||
type: 'generated-index',
|
||||
description: 'Get started with LangChain',
|
||||
@@ -46,24 +46,29 @@ module.exports = {
|
||||
{
|
||||
type: "category",
|
||||
label: "Modules",
|
||||
collapsed: true,
|
||||
collapsed: false,
|
||||
items: [{ type: "autogenerated", dirName: "modules" } ],
|
||||
link: {
|
||||
type: 'doc',
|
||||
id: "modules/index"
|
||||
},
|
||||
},
|
||||
{type: "doc", id: "langserve", label: "LangServe"},
|
||||
{
|
||||
type: "doc",
|
||||
label: "Security",
|
||||
id: "security",
|
||||
},
|
||||
{
|
||||
type: "category",
|
||||
label: "LangSmith",
|
||||
label: "Guides",
|
||||
collapsed: true,
|
||||
items: [{ type: "autogenerated", dirName: "langsmith" } ],
|
||||
items: [{ type: "autogenerated", dirName: "guides" }],
|
||||
link: {
|
||||
type: 'doc',
|
||||
id: "langsmith/index"
|
||||
type: 'generated-index',
|
||||
description: 'Design guides for key parts of the development process',
|
||||
slug: "guides",
|
||||
},
|
||||
},
|
||||
}
|
||||
],
|
||||
integrations: [
|
||||
{
|
||||
@@ -106,7 +111,4 @@ module.exports = {
|
||||
use_cases: [
|
||||
{type: "autogenerated", dirName: "use_cases" }
|
||||
],
|
||||
guides: [
|
||||
{type: "autogenerated", dirName: "guides" }
|
||||
],
|
||||
};
|
||||
|
||||
BIN
docs/static/img/langchain_stack.png
vendored
BIN
docs/static/img/langchain_stack.png
vendored
Binary file not shown.
|
Before Width: | Height: | Size: 1.6 MiB |
@@ -1,13 +1,5 @@
|
||||
{
|
||||
"redirects": [
|
||||
{
|
||||
"source": "/docs/guides/langsmith(/?)",
|
||||
"destination": "/docs/langsmith/"
|
||||
},
|
||||
{
|
||||
"source": "/docs/guides/langsmith/walkthrough",
|
||||
"destination": "/docs/langsmith/walkthrough"
|
||||
},
|
||||
{
|
||||
"source": "/docs/modules/data_connection/retrievers/self_query/:path*",
|
||||
"destination": "/docs/integrations/retrievers/self_query/:path*"
|
||||
@@ -422,15 +414,7 @@
|
||||
},
|
||||
{
|
||||
"source": "/docs/integrations/cassandra",
|
||||
"destination": "/docs/integrations/providers/astradb"
|
||||
},
|
||||
{
|
||||
"source": "/docs/integrations/providers/cassandra",
|
||||
"destination": "/docs/integrations/providers/astradb"
|
||||
},
|
||||
{
|
||||
"source": "/docs/integrations/vectorstores/cassandra",
|
||||
"destination": "/docs/integrations/vectorstores/astradb"
|
||||
"destination": "/docs/integrations/providers/cassandra"
|
||||
},
|
||||
{
|
||||
"source": "/docs/integrations/cerebriumai",
|
||||
|
||||
@@ -50,5 +50,5 @@ python3.11 scripts/model_feat_table.py
|
||||
nbdoc_build --srcdir docs
|
||||
cp ../cookbook/README.md src/pages/cookbook.mdx
|
||||
cp ../.github/CONTRIBUTING.md docs/contributing.md
|
||||
wget https://raw.githubusercontent.com/langchain-ai/langserve/main/README.md -O docs/langserve.md
|
||||
wget https://raw.githubusercontent.com/langchain-ai/langserve/main/README.md -O docs/guides/deployments/langserve.md
|
||||
python3.11 scripts/generate_api_reference_links.py
|
||||
|
||||
@@ -5,9 +5,8 @@ from typing_extensions import Annotated
|
||||
|
||||
from langchain_cli.namespaces import app as app_namespace
|
||||
from langchain_cli.namespaces import template as template_namespace
|
||||
from langchain_cli.utils.packages import get_langserve_export, get_package_root
|
||||
|
||||
__version__ = "0.0.16"
|
||||
__version__ = "0.0.13"
|
||||
|
||||
app = typer.Typer(no_args_is_help=True, add_completion=False)
|
||||
app.add_typer(
|
||||
@@ -50,17 +49,11 @@ def serve(
|
||||
Start the LangServe app, whether it's a template or an app.
|
||||
"""
|
||||
|
||||
# see if is a template
|
||||
# try starting template package, if error, try langserve
|
||||
try:
|
||||
project_dir = get_package_root()
|
||||
pyproject = project_dir / "pyproject.toml"
|
||||
get_langserve_export(pyproject)
|
||||
except KeyError:
|
||||
# not a template
|
||||
app_namespace.serve(port=port, host=host)
|
||||
else:
|
||||
# is a template
|
||||
template_namespace.serve(port=port, host=host)
|
||||
except KeyError:
|
||||
app_namespace.serve(port=port, host=host)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
@@ -6,8 +6,9 @@ from typing import Sequence
|
||||
|
||||
from fastapi import FastAPI
|
||||
from langserve import add_routes
|
||||
from langserve.packages import get_langserve_export
|
||||
|
||||
from langchain_cli.utils.packages import get_langserve_export, get_package_root
|
||||
from langchain_cli.utils.packages import get_package_root
|
||||
|
||||
|
||||
def create_demo_server(
|
||||
|
||||
@@ -4,11 +4,11 @@ Manage LangChain apps
|
||||
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Optional, Tuple
|
||||
|
||||
import typer
|
||||
from langserve.packages import get_langserve_export
|
||||
from typing_extensions import Annotated
|
||||
|
||||
from langchain_cli.utils.events import create_events
|
||||
@@ -18,15 +18,7 @@ from langchain_cli.utils.git import (
|
||||
parse_dependencies,
|
||||
update_repo,
|
||||
)
|
||||
from langchain_cli.utils.packages import (
|
||||
LangServeExport,
|
||||
get_langserve_export,
|
||||
get_package_root,
|
||||
)
|
||||
from langchain_cli.utils.pyproject import (
|
||||
add_dependencies_to_pyproject_toml,
|
||||
remove_dependencies_from_pyproject_toml,
|
||||
)
|
||||
from langchain_cli.utils.packages import get_package_root
|
||||
|
||||
REPO_DIR = Path(typer.get_app_dir("langchain")) / "git_repos"
|
||||
|
||||
@@ -41,25 +33,10 @@ def new(
|
||||
Optional[List[str]],
|
||||
typer.Option(help="Packages to seed the project with"),
|
||||
] = None,
|
||||
pip: Annotated[
|
||||
Optional[bool],
|
||||
typer.Option(
|
||||
"--pip/--no-pip",
|
||||
help="Pip install the template(s) as editable dependencies",
|
||||
is_flag=True,
|
||||
),
|
||||
] = None,
|
||||
):
|
||||
"""
|
||||
Create a new LangServe application.
|
||||
"""
|
||||
has_packages = package is not None and len(package) > 0
|
||||
pip_bool = False
|
||||
if pip is None and has_packages:
|
||||
pip_bool = typer.confirm(
|
||||
"Would you like to `pip install -e` the template(s)?",
|
||||
default=False,
|
||||
)
|
||||
# copy over template from ../project_template
|
||||
project_template_dir = Path(__file__).parents[1] / "project_template"
|
||||
destination_dir = Path.cwd() / name if name != "." else Path.cwd()
|
||||
@@ -71,8 +48,8 @@ def new(
|
||||
readme.write_text(readme_contents.replace("__app_name__", app_name))
|
||||
|
||||
# add packages if specified
|
||||
if has_packages:
|
||||
add(package, project_dir=destination_dir, pip=pip_bool)
|
||||
if package is not None and len(package) > 0:
|
||||
add(package, project_dir=destination_dir)
|
||||
|
||||
|
||||
@app_cli.command()
|
||||
@@ -92,15 +69,6 @@ def add(
|
||||
branch: Annotated[
|
||||
List[str], typer.Option(help="Install templates from a specific branch")
|
||||
] = [],
|
||||
pip: Annotated[
|
||||
bool,
|
||||
typer.Option(
|
||||
"--pip/--no-pip",
|
||||
help="Pip install the template(s) as editable dependencies",
|
||||
is_flag=True,
|
||||
prompt="Would you like to `pip install -e` the template(s)?",
|
||||
),
|
||||
],
|
||||
):
|
||||
"""
|
||||
Adds the specified template to the current LangServe app.
|
||||
@@ -129,8 +97,7 @@ def add(
|
||||
grouped[key_tup] = lst
|
||||
|
||||
installed_destination_paths: List[Path] = []
|
||||
installed_destination_names: List[str] = []
|
||||
installed_exports: List[LangServeExport] = []
|
||||
installed_exports: List[Dict] = []
|
||||
|
||||
for (git, ref), group_deps in grouped.items():
|
||||
if len(group_deps) == 1:
|
||||
@@ -163,116 +130,76 @@ def add(
|
||||
copy_repo(source_path, destination_path)
|
||||
typer.echo(f" - Downloaded {dep['subdirectory']} to {inner_api_path}")
|
||||
installed_destination_paths.append(destination_path)
|
||||
installed_destination_names.append(inner_api_path)
|
||||
installed_exports.append(langserve_export)
|
||||
|
||||
if len(installed_destination_paths) == 0:
|
||||
typer.echo("No packages installed. Exiting.")
|
||||
return
|
||||
|
||||
try:
|
||||
add_dependencies_to_pyproject_toml(
|
||||
project_root / "pyproject.toml",
|
||||
zip(installed_destination_names, installed_destination_paths),
|
||||
)
|
||||
except Exception:
|
||||
# Can fail if user modified/removed pyproject.toml
|
||||
typer.echo("Failed to add dependencies to pyproject.toml, continuing...")
|
||||
cwd = Path.cwd()
|
||||
installed_desination_strs = [
|
||||
str(p.relative_to(cwd)) for p in installed_destination_paths
|
||||
]
|
||||
cmd = ["pip", "install", "-e"] + installed_desination_strs
|
||||
cmd_str = " \\\n ".join(installed_desination_strs)
|
||||
install_str = f"To install:\n\npip install -e \\\n {cmd_str}"
|
||||
typer.echo(install_str)
|
||||
|
||||
try:
|
||||
cwd = Path.cwd()
|
||||
installed_destination_strs = [
|
||||
str(p.relative_to(cwd)) for p in installed_destination_paths
|
||||
if typer.confirm("Run it?"):
|
||||
subprocess.run(cmd, cwd=cwd)
|
||||
|
||||
if typer.confirm("\nGenerate route code for these packages?", default=True):
|
||||
chain_names = []
|
||||
for e in installed_exports:
|
||||
original_candidate = f'{e["package_name"].replace("-", "_")}_chain'
|
||||
candidate = original_candidate
|
||||
i = 2
|
||||
while candidate in chain_names:
|
||||
candidate = original_candidate + "_" + str(i)
|
||||
i += 1
|
||||
chain_names.append(candidate)
|
||||
|
||||
api_paths = [
|
||||
str(Path("/") / path.relative_to(package_dir))
|
||||
for path in installed_destination_paths
|
||||
]
|
||||
except ValueError:
|
||||
# Can fail if the cwd is not a parent of the package
|
||||
typer.echo("Failed to print install command, continuing...")
|
||||
else:
|
||||
if pip:
|
||||
cmd = ["pip", "install", "-e"] + installed_destination_strs
|
||||
cmd_str = " \\\n ".join(installed_destination_strs)
|
||||
typer.echo(f"Running: pip install -e \\\n {cmd_str}")
|
||||
subprocess.run(cmd, cwd=cwd)
|
||||
|
||||
chain_names = []
|
||||
for e in installed_exports:
|
||||
original_candidate = f'{e["package_name"].replace("-", "_")}_chain'
|
||||
candidate = original_candidate
|
||||
i = 2
|
||||
while candidate in chain_names:
|
||||
candidate = original_candidate + "_" + str(i)
|
||||
i += 1
|
||||
chain_names.append(candidate)
|
||||
imports = [
|
||||
f"from {e['module']} import {e['attr']} as {name}"
|
||||
for e, name in zip(installed_exports, chain_names)
|
||||
]
|
||||
routes = [
|
||||
f'add_routes(app, {name}, path="{path}")'
|
||||
for name, path in zip(chain_names, api_paths)
|
||||
]
|
||||
|
||||
api_paths = [
|
||||
str(Path("/") / path.relative_to(package_dir))
|
||||
for path in installed_destination_paths
|
||||
]
|
||||
|
||||
imports = [
|
||||
f"from {e['module']} import {e['attr']} as {name}"
|
||||
for e, name in zip(installed_exports, chain_names)
|
||||
]
|
||||
routes = [
|
||||
f'add_routes(app, {name}, path="{path}")'
|
||||
for name, path in zip(chain_names, api_paths)
|
||||
]
|
||||
|
||||
t = (
|
||||
"this template"
|
||||
if len(chain_names) == 1
|
||||
else f"these {len(chain_names)} templates"
|
||||
)
|
||||
lines = (
|
||||
["", f"To use {t}, add the following to your app:\n\n```", ""]
|
||||
+ imports
|
||||
+ [""]
|
||||
+ routes
|
||||
+ ["```"]
|
||||
)
|
||||
typer.echo("\n".join(lines))
|
||||
lines = (
|
||||
["", "Great! Add the following to your app:\n\n```", ""]
|
||||
+ imports
|
||||
+ [""]
|
||||
+ routes
|
||||
+ ["```"]
|
||||
)
|
||||
typer.echo("\n".join(lines))
|
||||
|
||||
|
||||
@app_cli.command()
|
||||
def remove(
|
||||
api_paths: Annotated[List[str], typer.Argument(help="The API paths to remove")],
|
||||
*,
|
||||
project_dir: Annotated[
|
||||
Optional[Path], typer.Option(help="The project directory")
|
||||
] = None,
|
||||
):
|
||||
"""
|
||||
Removes the specified package from the current LangServe app.
|
||||
"""
|
||||
|
||||
project_root = get_package_root(project_dir)
|
||||
|
||||
project_pyproject = project_root / "pyproject.toml"
|
||||
|
||||
package_root = project_root / "packages"
|
||||
|
||||
remove_deps: List[str] = []
|
||||
|
||||
for api_path in api_paths:
|
||||
package_dir = package_root / api_path
|
||||
package_dir = Path.cwd() / "packages" / api_path
|
||||
if not package_dir.exists():
|
||||
typer.echo(f"Package {api_path} does not exist. Skipping...")
|
||||
typer.echo(f"Endpoint {api_path} does not exist. Skipping...")
|
||||
continue
|
||||
try:
|
||||
pyproject = package_dir / "pyproject.toml"
|
||||
langserve_export = get_langserve_export(pyproject)
|
||||
typer.echo(f"Removing {langserve_export['package_name']}...")
|
||||
pyproject = package_dir / "pyproject.toml"
|
||||
langserve_export = get_langserve_export(pyproject)
|
||||
typer.echo(f"Removing {langserve_export['package_name']}...")
|
||||
|
||||
shutil.rmtree(package_dir)
|
||||
remove_deps.append(api_path)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
try:
|
||||
remove_dependencies_from_pyproject_toml(project_pyproject, remove_deps)
|
||||
except Exception:
|
||||
# Can fail if user modified/removed pyproject.toml
|
||||
typer.echo("Failed to remove dependencies from pyproject.toml.")
|
||||
shutil.rmtree(package_dir)
|
||||
|
||||
|
||||
@app_cli.command()
|
||||
@@ -292,14 +219,9 @@ def serve(
|
||||
Starts the LangServe app.
|
||||
"""
|
||||
|
||||
# add current dir as first entry of path
|
||||
sys.path.append(str(Path.cwd()))
|
||||
|
||||
app_str = app if app is not None else "app.server:app"
|
||||
port_str = str(port) if port is not None else "8000"
|
||||
host_str = host if host is not None else "127.0.0.1"
|
||||
|
||||
import uvicorn
|
||||
|
||||
uvicorn.run(
|
||||
app_str, host=host_str, port=port if port is not None else 8000, reload=True
|
||||
)
|
||||
cmd = ["uvicorn", app_str, "--reload", "--port", port_str, "--host", host_str]
|
||||
subprocess.run(cmd)
|
||||
|
||||
@@ -9,9 +9,10 @@ from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
import typer
|
||||
from langserve.packages import get_langserve_export
|
||||
from typing_extensions import Annotated
|
||||
|
||||
from langchain_cli.utils.packages import get_langserve_export, get_package_root
|
||||
from langchain_cli.utils.packages import get_package_root
|
||||
|
||||
package_cli = typer.Typer(no_args_is_help=True, add_completion=False)
|
||||
|
||||
@@ -66,11 +67,6 @@ def new(
|
||||
package_dir = destination_dir / module_name
|
||||
shutil.move(destination_dir / "package_template", package_dir)
|
||||
|
||||
# update init
|
||||
init = package_dir / "__init__.py"
|
||||
init_contents = init.read_text()
|
||||
init.write_text(init_contents.replace("__module_name__", module_name))
|
||||
|
||||
# replace readme
|
||||
readme = destination_dir / "README.md"
|
||||
readme_contents = readme.read_text()
|
||||
@@ -112,6 +108,7 @@ def serve(
|
||||
# get langserve export - throws KeyError if invalid
|
||||
get_langserve_export(pyproject)
|
||||
|
||||
port_str = str(port) if port is not None else "8000"
|
||||
host_str = host if host is not None else "127.0.0.1"
|
||||
|
||||
script = (
|
||||
@@ -120,12 +117,14 @@ def serve(
|
||||
else "langchain_cli.dev_scripts:create_demo_server_configurable"
|
||||
)
|
||||
|
||||
import uvicorn
|
||||
|
||||
uvicorn.run(
|
||||
command = [
|
||||
"uvicorn",
|
||||
"--factory",
|
||||
script,
|
||||
factory=True,
|
||||
reload=True,
|
||||
port=port if port is not None else 8000,
|
||||
host=host_str,
|
||||
)
|
||||
"--reload",
|
||||
"--port",
|
||||
port_str,
|
||||
"--host",
|
||||
host_str,
|
||||
]
|
||||
subprocess.run(command)
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
__pycache__
|
||||
@@ -11,7 +11,7 @@ TODO: What environment variables need to be set (if any)
|
||||
To use this package, you should first have the LangChain CLI installed:
|
||||
|
||||
```shell
|
||||
pip install -U langchain-cli
|
||||
pip install -U "langchain-cli[serve]"
|
||||
```
|
||||
|
||||
To create a new LangChain project and install this as the only package, you can do:
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
from __module_name__.chain import chain
|
||||
from chain import chain
|
||||
|
||||
__all__ = ["chain"]
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
__pycache__
|
||||
@@ -5,7 +5,7 @@
|
||||
Install the LangChain CLI if you haven't yet
|
||||
|
||||
```bash
|
||||
pip install -U langchain-cli
|
||||
pip install -U "langchain-cli[serve]"
|
||||
```
|
||||
|
||||
## Adding packages
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
[tool.poetry]
|
||||
name = "__app_name__"
|
||||
name = "langservehub-template"
|
||||
version = "0.1.0"
|
||||
description = ""
|
||||
authors = ["Your Name <you@example.com>"]
|
||||
@@ -7,12 +7,16 @@ readme = "README.md"
|
||||
|
||||
[tool.poetry.dependencies]
|
||||
python = "^3.11"
|
||||
sse-starlette = "^1.6.5"
|
||||
tomli-w = "^1.0.0"
|
||||
uvicorn = "^0.23.2"
|
||||
langserve = {extras = ["server"], version = ">=0.0.22"}
|
||||
|
||||
fastapi = "^0.103.2"
|
||||
langserve = ">=0.0.16"
|
||||
|
||||
[tool.poetry.group.dev.dependencies]
|
||||
langchain-cli = ">=0.0.15"
|
||||
uvicorn = "^0.23.2"
|
||||
pygithub = "^2.1.1"
|
||||
|
||||
|
||||
[build-system]
|
||||
requires = ["poetry-core"]
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, Optional, Set, TypedDict
|
||||
|
||||
from tomlkit import load
|
||||
from typing import Optional, Set
|
||||
|
||||
|
||||
def get_package_root(cwd: Optional[Path] = None) -> Path:
|
||||
@@ -16,30 +14,3 @@ def get_package_root(cwd: Optional[Path] = None) -> Path:
|
||||
return package_root
|
||||
package_root = package_root.parent
|
||||
raise FileNotFoundError("No pyproject.toml found")
|
||||
|
||||
|
||||
class LangServeExport(TypedDict):
|
||||
"""
|
||||
Fields from pyproject.toml that are relevant to LangServe
|
||||
|
||||
Attributes:
|
||||
module: The module to import from, tool.langserve.export_module
|
||||
attr: The attribute to import from the module, tool.langserve.export_attr
|
||||
package_name: The name of the package, tool.poetry.name
|
||||
"""
|
||||
|
||||
module: str
|
||||
attr: str
|
||||
package_name: str
|
||||
|
||||
|
||||
def get_langserve_export(filepath: Path) -> LangServeExport:
|
||||
with open(filepath) as f:
|
||||
data: Dict[str, Any] = load(f)
|
||||
try:
|
||||
module = data["tool"]["langserve"]["export_module"]
|
||||
attr = data["tool"]["langserve"]["export_attr"]
|
||||
package_name = data["tool"]["poetry"]["name"]
|
||||
except KeyError as e:
|
||||
raise KeyError("Invalid LangServe PyProject.toml") from e
|
||||
return LangServeExport(module=module, attr=attr, package_name=package_name)
|
||||
|
||||
@@ -1,45 +0,0 @@
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, Iterable
|
||||
|
||||
from tomlkit import dump, inline_table, load
|
||||
from tomlkit.items import InlineTable
|
||||
|
||||
|
||||
def _get_dep_inline_table(path: Path) -> InlineTable:
|
||||
dep = inline_table()
|
||||
dep.update({"path": str(path), "develop": True})
|
||||
return dep
|
||||
|
||||
|
||||
def add_dependencies_to_pyproject_toml(
|
||||
pyproject_toml: Path, local_editable_dependencies: Iterable[tuple[str, Path]]
|
||||
) -> None:
|
||||
"""Add dependencies to pyproject.toml."""
|
||||
with open(pyproject_toml, encoding="utf-8") as f:
|
||||
# tomlkit types aren't amazing - treat as Dict instead
|
||||
pyproject: Dict[str, Any] = load(f)
|
||||
pyproject["tool"]["poetry"]["dependencies"].update(
|
||||
{
|
||||
name: _get_dep_inline_table(loc.relative_to(pyproject_toml.parent))
|
||||
for name, loc in local_editable_dependencies
|
||||
}
|
||||
)
|
||||
with open(pyproject_toml, "w", encoding="utf-8") as f:
|
||||
dump(pyproject, f)
|
||||
|
||||
|
||||
def remove_dependencies_from_pyproject_toml(
|
||||
pyproject_toml: Path, local_editable_dependencies: Iterable[str]
|
||||
) -> None:
|
||||
"""Remove dependencies from pyproject.toml."""
|
||||
with open(pyproject_toml, encoding="utf-8") as f:
|
||||
pyproject: Dict[str, Any] = load(f)
|
||||
# tomlkit types aren't amazing - treat as Dict instead
|
||||
dependencies = pyproject["tool"]["poetry"]["dependencies"]
|
||||
for name in local_editable_dependencies:
|
||||
try:
|
||||
del dependencies[name]
|
||||
except KeyError:
|
||||
pass
|
||||
with open(pyproject_toml, "w", encoding="utf-8") as f:
|
||||
dump(pyproject, f)
|
||||
401
libs/cli/poetry.lock
generated
401
libs/cli/poetry.lock
generated
@@ -4,7 +4,7 @@
|
||||
name = "aiohttp"
|
||||
version = "3.8.6"
|
||||
description = "Async http client/server framework (asyncio)"
|
||||
optional = false
|
||||
optional = true
|
||||
python-versions = ">=3.6"
|
||||
files = [
|
||||
{file = "aiohttp-3.8.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:41d55fc043954cddbbd82503d9cc3f4814a40bcef30b3569bc7b5e34130718c1"},
|
||||
@@ -112,7 +112,7 @@ speedups = ["Brotli", "aiodns", "cchardet"]
|
||||
name = "aiosignal"
|
||||
version = "1.3.1"
|
||||
description = "aiosignal: a list of registered asynchronous callbacks"
|
||||
optional = false
|
||||
optional = true
|
||||
python-versions = ">=3.7"
|
||||
files = [
|
||||
{file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"},
|
||||
@@ -147,7 +147,7 @@ trio = ["trio (<0.22)"]
|
||||
name = "async-timeout"
|
||||
version = "4.0.3"
|
||||
description = "Timeout context manager for asyncio programs"
|
||||
optional = false
|
||||
optional = true
|
||||
python-versions = ">=3.7"
|
||||
files = [
|
||||
{file = "async-timeout-4.0.3.tar.gz", hash = "sha256:4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f"},
|
||||
@@ -158,7 +158,7 @@ files = [
|
||||
name = "attrs"
|
||||
version = "23.1.0"
|
||||
description = "Classes Without Boilerplate"
|
||||
optional = false
|
||||
optional = true
|
||||
python-versions = ">=3.7"
|
||||
files = [
|
||||
{file = "attrs-23.1.0-py3-none-any.whl", hash = "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04"},
|
||||
@@ -176,7 +176,7 @@ tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pyte
|
||||
name = "certifi"
|
||||
version = "2023.7.22"
|
||||
description = "Python package for providing Mozilla's CA Bundle."
|
||||
optional = false
|
||||
optional = true
|
||||
python-versions = ">=3.6"
|
||||
files = [
|
||||
{file = "certifi-2023.7.22-py3-none-any.whl", hash = "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"},
|
||||
@@ -185,101 +185,101 @@ files = [
|
||||
|
||||
[[package]]
|
||||
name = "charset-normalizer"
|
||||
version = "3.3.2"
|
||||
version = "3.3.1"
|
||||
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
|
||||
optional = false
|
||||
optional = true
|
||||
python-versions = ">=3.7.0"
|
||||
files = [
|
||||
{file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"},
|
||||
{file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"},
|
||||
{file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"},
|
||||
{file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"},
|
||||
{file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"},
|
||||
{file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"},
|
||||
{file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"},
|
||||
{file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"},
|
||||
{file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"},
|
||||
{file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"},
|
||||
{file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"},
|
||||
{file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"},
|
||||
{file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"},
|
||||
{file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"},
|
||||
{file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"},
|
||||
{file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"},
|
||||
{file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"},
|
||||
{file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"},
|
||||
{file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"},
|
||||
{file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"},
|
||||
{file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"},
|
||||
{file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"},
|
||||
{file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"},
|
||||
{file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"},
|
||||
{file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"},
|
||||
{file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"},
|
||||
{file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"},
|
||||
{file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"},
|
||||
{file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"},
|
||||
{file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"},
|
||||
{file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"},
|
||||
{file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"},
|
||||
{file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"},
|
||||
{file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"},
|
||||
{file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"},
|
||||
{file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"},
|
||||
{file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"},
|
||||
{file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"},
|
||||
{file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"},
|
||||
{file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"},
|
||||
{file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"},
|
||||
{file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"},
|
||||
{file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"},
|
||||
{file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"},
|
||||
{file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"},
|
||||
{file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"},
|
||||
{file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"},
|
||||
{file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"},
|
||||
{file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"},
|
||||
{file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"},
|
||||
{file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"},
|
||||
{file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"},
|
||||
{file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"},
|
||||
{file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"},
|
||||
{file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"},
|
||||
{file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"},
|
||||
{file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"},
|
||||
{file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"},
|
||||
{file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"},
|
||||
{file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"},
|
||||
{file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"},
|
||||
{file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"},
|
||||
{file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"},
|
||||
{file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"},
|
||||
{file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"},
|
||||
{file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"},
|
||||
{file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"},
|
||||
{file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"},
|
||||
{file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"},
|
||||
{file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"},
|
||||
{file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"},
|
||||
{file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"},
|
||||
{file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"},
|
||||
{file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"},
|
||||
{file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"},
|
||||
{file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"},
|
||||
{file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"},
|
||||
{file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"},
|
||||
{file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"},
|
||||
{file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"},
|
||||
{file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"},
|
||||
{file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"},
|
||||
{file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"},
|
||||
{file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"},
|
||||
{file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"},
|
||||
{file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"},
|
||||
{file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"},
|
||||
{file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"},
|
||||
{file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"},
|
||||
{file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"},
|
||||
{file = "charset-normalizer-3.3.1.tar.gz", hash = "sha256:d9137a876020661972ca6eec0766d81aef8a5627df628b664b234b73396e727e"},
|
||||
{file = "charset_normalizer-3.3.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8aee051c89e13565c6bd366813c386939f8e928af93c29fda4af86d25b73d8f8"},
|
||||
{file = "charset_normalizer-3.3.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:352a88c3df0d1fa886562384b86f9a9e27563d4704ee0e9d56ec6fcd270ea690"},
|
||||
{file = "charset_normalizer-3.3.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:223b4d54561c01048f657fa6ce41461d5ad8ff128b9678cfe8b2ecd951e3f8a2"},
|
||||
{file = "charset_normalizer-3.3.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4f861d94c2a450b974b86093c6c027888627b8082f1299dfd5a4bae8e2292821"},
|
||||
{file = "charset_normalizer-3.3.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1171ef1fc5ab4693c5d151ae0fdad7f7349920eabbaca6271f95969fa0756c2d"},
|
||||
{file = "charset_normalizer-3.3.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28f512b9a33235545fbbdac6a330a510b63be278a50071a336afc1b78781b147"},
|
||||
{file = "charset_normalizer-3.3.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0e842112fe3f1a4ffcf64b06dc4c61a88441c2f02f373367f7b4c1aa9be2ad5"},
|
||||
{file = "charset_normalizer-3.3.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3f9bc2ce123637a60ebe819f9fccc614da1bcc05798bbbaf2dd4ec91f3e08846"},
|
||||
{file = "charset_normalizer-3.3.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:f194cce575e59ffe442c10a360182a986535fd90b57f7debfaa5c845c409ecc3"},
|
||||
{file = "charset_normalizer-3.3.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:9a74041ba0bfa9bc9b9bb2cd3238a6ab3b7618e759b41bd15b5f6ad958d17605"},
|
||||
{file = "charset_normalizer-3.3.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:b578cbe580e3b41ad17b1c428f382c814b32a6ce90f2d8e39e2e635d49e498d1"},
|
||||
{file = "charset_normalizer-3.3.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:6db3cfb9b4fcecb4390db154e75b49578c87a3b9979b40cdf90d7e4b945656e1"},
|
||||
{file = "charset_normalizer-3.3.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:debb633f3f7856f95ad957d9b9c781f8e2c6303ef21724ec94bea2ce2fcbd056"},
|
||||
{file = "charset_normalizer-3.3.1-cp310-cp310-win32.whl", hash = "sha256:87071618d3d8ec8b186d53cb6e66955ef2a0e4fa63ccd3709c0c90ac5a43520f"},
|
||||
{file = "charset_normalizer-3.3.1-cp310-cp310-win_amd64.whl", hash = "sha256:e372d7dfd154009142631de2d316adad3cc1c36c32a38b16a4751ba78da2a397"},
|
||||
{file = "charset_normalizer-3.3.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ae4070f741f8d809075ef697877fd350ecf0b7c5837ed68738607ee0a2c572cf"},
|
||||
{file = "charset_normalizer-3.3.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:58e875eb7016fd014c0eea46c6fa92b87b62c0cb31b9feae25cbbe62c919f54d"},
|
||||
{file = "charset_normalizer-3.3.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dbd95e300367aa0827496fe75a1766d198d34385a58f97683fe6e07f89ca3e3c"},
|
||||
{file = "charset_normalizer-3.3.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:de0b4caa1c8a21394e8ce971997614a17648f94e1cd0640fbd6b4d14cab13a72"},
|
||||
{file = "charset_normalizer-3.3.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:985c7965f62f6f32bf432e2681173db41336a9c2611693247069288bcb0c7f8b"},
|
||||
{file = "charset_normalizer-3.3.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a15c1fe6d26e83fd2e5972425a772cca158eae58b05d4a25a4e474c221053e2d"},
|
||||
{file = "charset_normalizer-3.3.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ae55d592b02c4349525b6ed8f74c692509e5adffa842e582c0f861751701a673"},
|
||||
{file = "charset_normalizer-3.3.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:be4d9c2770044a59715eb57c1144dedea7c5d5ae80c68fb9959515037cde2008"},
|
||||
{file = "charset_normalizer-3.3.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:851cf693fb3aaef71031237cd68699dded198657ec1e76a76eb8be58c03a5d1f"},
|
||||
{file = "charset_normalizer-3.3.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:31bbaba7218904d2eabecf4feec0d07469284e952a27400f23b6628439439fa7"},
|
||||
{file = "charset_normalizer-3.3.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:871d045d6ccc181fd863a3cd66ee8e395523ebfbc57f85f91f035f50cee8e3d4"},
|
||||
{file = "charset_normalizer-3.3.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:501adc5eb6cd5f40a6f77fbd90e5ab915c8fd6e8c614af2db5561e16c600d6f3"},
|
||||
{file = "charset_normalizer-3.3.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f5fb672c396d826ca16a022ac04c9dce74e00a1c344f6ad1a0fdc1ba1f332213"},
|
||||
{file = "charset_normalizer-3.3.1-cp311-cp311-win32.whl", hash = "sha256:bb06098d019766ca16fc915ecaa455c1f1cd594204e7f840cd6258237b5079a8"},
|
||||
{file = "charset_normalizer-3.3.1-cp311-cp311-win_amd64.whl", hash = "sha256:8af5a8917b8af42295e86b64903156b4f110a30dca5f3b5aedea123fbd638bff"},
|
||||
{file = "charset_normalizer-3.3.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:7ae8e5142dcc7a49168f4055255dbcced01dc1714a90a21f87448dc8d90617d1"},
|
||||
{file = "charset_normalizer-3.3.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5b70bab78accbc672f50e878a5b73ca692f45f5b5e25c8066d748c09405e6a55"},
|
||||
{file = "charset_normalizer-3.3.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5ceca5876032362ae73b83347be8b5dbd2d1faf3358deb38c9c88776779b2e2f"},
|
||||
{file = "charset_normalizer-3.3.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:34d95638ff3613849f473afc33f65c401a89f3b9528d0d213c7037c398a51296"},
|
||||
{file = "charset_normalizer-3.3.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9edbe6a5bf8b56a4a84533ba2b2f489d0046e755c29616ef8830f9e7d9cf5728"},
|
||||
{file = "charset_normalizer-3.3.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f6a02a3c7950cafaadcd46a226ad9e12fc9744652cc69f9e5534f98b47f3bbcf"},
|
||||
{file = "charset_normalizer-3.3.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10b8dd31e10f32410751b3430996f9807fc4d1587ca69772e2aa940a82ab571a"},
|
||||
{file = "charset_normalizer-3.3.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edc0202099ea1d82844316604e17d2b175044f9bcb6b398aab781eba957224bd"},
|
||||
{file = "charset_normalizer-3.3.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b891a2f68e09c5ef989007fac11476ed33c5c9994449a4e2c3386529d703dc8b"},
|
||||
{file = "charset_normalizer-3.3.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:71ef3b9be10070360f289aea4838c784f8b851be3ba58cf796262b57775c2f14"},
|
||||
{file = "charset_normalizer-3.3.1-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:55602981b2dbf8184c098bc10287e8c245e351cd4fdcad050bd7199d5a8bf514"},
|
||||
{file = "charset_normalizer-3.3.1-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:46fb9970aa5eeca547d7aa0de5d4b124a288b42eaefac677bde805013c95725c"},
|
||||
{file = "charset_normalizer-3.3.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:520b7a142d2524f999447b3a0cf95115df81c4f33003c51a6ab637cbda9d0bf4"},
|
||||
{file = "charset_normalizer-3.3.1-cp312-cp312-win32.whl", hash = "sha256:8ec8ef42c6cd5856a7613dcd1eaf21e5573b2185263d87d27c8edcae33b62a61"},
|
||||
{file = "charset_normalizer-3.3.1-cp312-cp312-win_amd64.whl", hash = "sha256:baec8148d6b8bd5cee1ae138ba658c71f5b03e0d69d5907703e3e1df96db5e41"},
|
||||
{file = "charset_normalizer-3.3.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:63a6f59e2d01310f754c270e4a257426fe5a591dc487f1983b3bbe793cf6bac6"},
|
||||
{file = "charset_normalizer-3.3.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d6bfc32a68bc0933819cfdfe45f9abc3cae3877e1d90aac7259d57e6e0f85b1"},
|
||||
{file = "charset_normalizer-3.3.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4f3100d86dcd03c03f7e9c3fdb23d92e32abbca07e7c13ebd7ddfbcb06f5991f"},
|
||||
{file = "charset_normalizer-3.3.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:39b70a6f88eebe239fa775190796d55a33cfb6d36b9ffdd37843f7c4c1b5dc67"},
|
||||
{file = "charset_normalizer-3.3.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e12f8ee80aa35e746230a2af83e81bd6b52daa92a8afaef4fea4a2ce9b9f4fa"},
|
||||
{file = "charset_normalizer-3.3.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7b6cefa579e1237ce198619b76eaa148b71894fb0d6bcf9024460f9bf30fd228"},
|
||||
{file = "charset_normalizer-3.3.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:61f1e3fb621f5420523abb71f5771a204b33c21d31e7d9d86881b2cffe92c47c"},
|
||||
{file = "charset_normalizer-3.3.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:4f6e2a839f83a6a76854d12dbebde50e4b1afa63e27761549d006fa53e9aa80e"},
|
||||
{file = "charset_normalizer-3.3.1-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:1ec937546cad86d0dce5396748bf392bb7b62a9eeb8c66efac60e947697f0e58"},
|
||||
{file = "charset_normalizer-3.3.1-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:82ca51ff0fc5b641a2d4e1cc8c5ff108699b7a56d7f3ad6f6da9dbb6f0145b48"},
|
||||
{file = "charset_normalizer-3.3.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:633968254f8d421e70f91c6ebe71ed0ab140220469cf87a9857e21c16687c034"},
|
||||
{file = "charset_normalizer-3.3.1-cp37-cp37m-win32.whl", hash = "sha256:c0c72d34e7de5604df0fde3644cc079feee5e55464967d10b24b1de268deceb9"},
|
||||
{file = "charset_normalizer-3.3.1-cp37-cp37m-win_amd64.whl", hash = "sha256:63accd11149c0f9a99e3bc095bbdb5a464862d77a7e309ad5938fbc8721235ae"},
|
||||
{file = "charset_normalizer-3.3.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5a3580a4fdc4ac05f9e53c57f965e3594b2f99796231380adb2baaab96e22761"},
|
||||
{file = "charset_normalizer-3.3.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2465aa50c9299d615d757c1c888bc6fef384b7c4aec81c05a0172b4400f98557"},
|
||||
{file = "charset_normalizer-3.3.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cb7cd68814308aade9d0c93c5bd2ade9f9441666f8ba5aa9c2d4b389cb5e2a45"},
|
||||
{file = "charset_normalizer-3.3.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:91e43805ccafa0a91831f9cd5443aa34528c0c3f2cc48c4cb3d9a7721053874b"},
|
||||
{file = "charset_normalizer-3.3.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:854cc74367180beb327ab9d00f964f6d91da06450b0855cbbb09187bcdb02de5"},
|
||||
{file = "charset_normalizer-3.3.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c15070ebf11b8b7fd1bfff7217e9324963c82dbdf6182ff7050519e350e7ad9f"},
|
||||
{file = "charset_normalizer-3.3.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c4c99f98fc3a1835af8179dcc9013f93594d0670e2fa80c83aa36346ee763d2"},
|
||||
{file = "charset_normalizer-3.3.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3fb765362688821404ad6cf86772fc54993ec11577cd5a92ac44b4c2ba52155b"},
|
||||
{file = "charset_normalizer-3.3.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:dced27917823df984fe0c80a5c4ad75cf58df0fbfae890bc08004cd3888922a2"},
|
||||
{file = "charset_normalizer-3.3.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a66bcdf19c1a523e41b8e9d53d0cedbfbac2e93c649a2e9502cb26c014d0980c"},
|
||||
{file = "charset_normalizer-3.3.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:ecd26be9f112c4f96718290c10f4caea6cc798459a3a76636b817a0ed7874e42"},
|
||||
{file = "charset_normalizer-3.3.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:3f70fd716855cd3b855316b226a1ac8bdb3caf4f7ea96edcccc6f484217c9597"},
|
||||
{file = "charset_normalizer-3.3.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:17a866d61259c7de1bdadef418a37755050ddb4b922df8b356503234fff7932c"},
|
||||
{file = "charset_normalizer-3.3.1-cp38-cp38-win32.whl", hash = "sha256:548eefad783ed787b38cb6f9a574bd8664468cc76d1538215d510a3cd41406cb"},
|
||||
{file = "charset_normalizer-3.3.1-cp38-cp38-win_amd64.whl", hash = "sha256:45f053a0ece92c734d874861ffe6e3cc92150e32136dd59ab1fb070575189c97"},
|
||||
{file = "charset_normalizer-3.3.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bc791ec3fd0c4309a753f95bb6c749ef0d8ea3aea91f07ee1cf06b7b02118f2f"},
|
||||
{file = "charset_normalizer-3.3.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0c8c61fb505c7dad1d251c284e712d4e0372cef3b067f7ddf82a7fa82e1e9a93"},
|
||||
{file = "charset_normalizer-3.3.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2c092be3885a1b7899cd85ce24acedc1034199d6fca1483fa2c3a35c86e43041"},
|
||||
{file = "charset_normalizer-3.3.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c2000c54c395d9e5e44c99dc7c20a64dc371f777faf8bae4919ad3e99ce5253e"},
|
||||
{file = "charset_normalizer-3.3.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4cb50a0335382aac15c31b61d8531bc9bb657cfd848b1d7158009472189f3d62"},
|
||||
{file = "charset_normalizer-3.3.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c30187840d36d0ba2893bc3271a36a517a717f9fd383a98e2697ee890a37c273"},
|
||||
{file = "charset_normalizer-3.3.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe81b35c33772e56f4b6cf62cf4aedc1762ef7162a31e6ac7fe5e40d0149eb67"},
|
||||
{file = "charset_normalizer-3.3.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d0bf89afcbcf4d1bb2652f6580e5e55a840fdf87384f6063c4a4f0c95e378656"},
|
||||
{file = "charset_normalizer-3.3.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:06cf46bdff72f58645434d467bf5228080801298fbba19fe268a01b4534467f5"},
|
||||
{file = "charset_normalizer-3.3.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:3c66df3f41abee950d6638adc7eac4730a306b022570f71dd0bd6ba53503ab57"},
|
||||
{file = "charset_normalizer-3.3.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:cd805513198304026bd379d1d516afbf6c3c13f4382134a2c526b8b854da1c2e"},
|
||||
{file = "charset_normalizer-3.3.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:9505dc359edb6a330efcd2be825fdb73ee3e628d9010597aa1aee5aa63442e97"},
|
||||
{file = "charset_normalizer-3.3.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:31445f38053476a0c4e6d12b047b08ced81e2c7c712e5a1ad97bc913256f91b2"},
|
||||
{file = "charset_normalizer-3.3.1-cp39-cp39-win32.whl", hash = "sha256:bd28b31730f0e982ace8663d108e01199098432a30a4c410d06fe08fdb9e93f4"},
|
||||
{file = "charset_normalizer-3.3.1-cp39-cp39-win_amd64.whl", hash = "sha256:555fe186da0068d3354cdf4bbcbc609b0ecae4d04c921cc13e209eece7720727"},
|
||||
{file = "charset_normalizer-3.3.1-py3-none-any.whl", hash = "sha256:800561453acdecedaac137bf09cd719c7a440b6800ec182f077bb8e7025fb708"},
|
||||
]
|
||||
|
||||
[[package]]
@@ -311,7 +311,7 @@ files = [
name = "dataclasses-json"
version = "0.6.1"
description = "Easily serialize dataclasses to and from JSON."
optional = false
optional = true
python-versions = ">=3.7,<4.0"
files = [
    {file = "dataclasses_json-0.6.1-py3-none-any.whl", hash = "sha256:1bd8418a61fe3d588bb0079214d7fb71d44937da40742b787256fd53b26b6c80"},
@@ -348,13 +348,13 @@ test = ["pytest (>=6)"]

[[package]]
name = "fastapi"
version = "0.104.1"
version = "0.104.0"
description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production"
optional = false
python-versions = ">=3.8"
files = [
    {file = "fastapi-0.104.1-py3-none-any.whl", hash = "sha256:752dc31160cdbd0436bb93bad51560b57e525cbb1d4bbf6f4904ceee75548241"},
    {file = "fastapi-0.104.1.tar.gz", hash = "sha256:e5e4540a7c5e1dcfbbcf5b903c234feddcdcd881f191977a1c5dfd917487e7ae"},
    {file = "fastapi-0.104.0-py3-none-any.whl", hash = "sha256:456482c1178fb7beb2814b88e1885bc49f9a81f079665016feffe3e1c6a7663e"},
    {file = "fastapi-0.104.0.tar.gz", hash = "sha256:9c44de45693ae037b0c6914727a29c49a40668432b67c859a87851fc6a7b74c6"},
]

[package.dependencies]
@@ -370,7 +370,7 @@ all = ["email-validator (>=2.0.0)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)"
name = "frozenlist"
version = "1.4.0"
description = "A list-like structure which implements collections.abc.MutableSequence"
optional = false
optional = true
python-versions = ">=3.8"
files = [
    {file = "frozenlist-1.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:764226ceef3125e53ea2cb275000e309c0aa5464d43bd72abd661e27fffc26ab"},
@@ -471,7 +471,7 @@ test = ["black", "coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock", "mypy", "pre
name = "greenlet"
version = "3.0.1"
description = "Lightweight in-process concurrent programming"
optional = false
optional = true
python-versions = ">=3.7"
files = [
    {file = "greenlet-3.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f89e21afe925fcfa655965ca8ea10f24773a1791400989ff32f467badfe4a064"},
@@ -550,40 +550,39 @@ files = [

[[package]]
name = "httpcore"
version = "1.0.1"
version = "0.18.0"
description = "A minimal low-level HTTP client."
optional = false
optional = true
python-versions = ">=3.8"
files = [
    {file = "httpcore-1.0.1-py3-none-any.whl", hash = "sha256:c5e97ef177dca2023d0b9aad98e49507ef5423e9f1d94ffe2cfe250aa28e63b0"},
    {file = "httpcore-1.0.1.tar.gz", hash = "sha256:fce1ddf9b606cfb98132ab58865c3728c52c8e4c3c46e2aabb3674464a186e92"},
    {file = "httpcore-0.18.0-py3-none-any.whl", hash = "sha256:adc5398ee0a476567bf87467063ee63584a8bce86078bf748e48754f60202ced"},
    {file = "httpcore-0.18.0.tar.gz", hash = "sha256:13b5e5cd1dca1a6636a6aaea212b19f4f85cd88c366a2b82304181b769aab3c9"},
]

[package.dependencies]
anyio = ">=3.0,<5.0"
certifi = "*"
h11 = ">=0.13,<0.15"
sniffio = "==1.*"

[package.extras]
asyncio = ["anyio (>=4.0,<5.0)"]
http2 = ["h2 (>=3,<5)"]
socks = ["socksio (==1.*)"]
trio = ["trio (>=0.22.0,<0.23.0)"]

[[package]]
name = "httpx"
version = "0.25.1"
version = "0.25.0"
description = "The next generation HTTP client."
optional = false
optional = true
python-versions = ">=3.8"
files = [
    {file = "httpx-0.25.1-py3-none-any.whl", hash = "sha256:fec7d6cc5c27c578a391f7e87b9aa7d3d8fbcd034f6399f9f79b45bcc12a866a"},
    {file = "httpx-0.25.1.tar.gz", hash = "sha256:ffd96d5cf901e63863d9f1b4b6807861dbea4d301613415d9e6e57ead15fc5d0"},
    {file = "httpx-0.25.0-py3-none-any.whl", hash = "sha256:181ea7f8ba3a82578be86ef4171554dd45fec26a02556a744db029a0a27b7100"},
    {file = "httpx-0.25.0.tar.gz", hash = "sha256:47ecda285389cb32bb2691cc6e069e3ab0205956f681c5b2ad2325719751d875"},
]

[package.dependencies]
anyio = "*"
certifi = "*"
httpcore = "*"
httpcore = ">=0.18.0,<0.19.0"
idna = "*"
sniffio = "*"

@@ -597,7 +596,7 @@ socks = ["socksio (==1.*)"]
name = "httpx-sse"
version = "0.3.1"
description = "Consume Server-Sent Event (SSE) messages with HTTPX."
optional = false
optional = true
python-versions = ">=3.7"
files = [
    {file = "httpx-sse-0.3.1.tar.gz", hash = "sha256:3bb3289b2867f50cbdb2fee3eeeefecb1e86653122e164faac0023f1ffc88aea"},
@@ -630,7 +629,7 @@ files = [
name = "jsonpatch"
version = "1.33"
description = "Apply JSON-Patches (RFC 6902)"
optional = false
optional = true
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*"
files = [
    {file = "jsonpatch-1.33-py2.py3-none-any.whl", hash = "sha256:0ae28c0cd062bbd8b8ecc26d7d164fbbea9652a1a3693f3b956c1eae5145dade"},
@@ -644,7 +643,7 @@ jsonpointer = ">=1.9"
name = "jsonpointer"
version = "2.4"
description = "Identify specific nodes in a JSON document (RFC 6901)"
optional = false
optional = true
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*"
files = [
    {file = "jsonpointer-2.4-py2.py3-none-any.whl", hash = "sha256:15d51bba20eea3165644553647711d150376234112651b4f1811022aecad7d7a"},
@@ -653,13 +652,13 @@ files = [

[[package]]
name = "langchain"
version = "0.0.331"
version = "0.0.325"
description = "Building applications with LLMs through composability"
optional = false
optional = true
python-versions = ">=3.8.1,<4.0"
files = [
    {file = "langchain-0.0.331-py3-none-any.whl", hash = "sha256:64e6e1a57b8deafc1c4e914820b2b8e22a5eed60d49432cadc3b8cca9d613694"},
    {file = "langchain-0.0.331.tar.gz", hash = "sha256:b1ac365faf7fe413d5aa38329f70f23589ed07152c1a1398a5f16319eb32beb6"},
    {file = "langchain-0.0.325-py3-none-any.whl", hash = "sha256:666bc3614cf323bd09046f0a50eb6bdb86c8b0f1c65edb7163384877f3423b82"},
    {file = "langchain-0.0.325.tar.gz", hash = "sha256:423bbbf6706486100e56b111206e5ba8d50b41b07e94ebf9946413d5fd038d9d"},
]

[package.dependencies]
@@ -677,14 +676,14 @@ SQLAlchemy = ">=1.4,<3"
tenacity = ">=8.1.0,<9.0.0"

[package.extras]
all = ["O365 (>=2.0.26,<3.0.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "amadeus (>=8.1.0)", "arxiv (>=1.4,<2.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "awadb (>=0.3.9,<0.4.0)", "azure-ai-formrecognizer (>=3.2.1,<4.0.0)", "azure-ai-vision (>=0.11.1b1,<0.12.0)", "azure-cognitiveservices-speech (>=1.28.0,<2.0.0)", "azure-cosmos (>=4.4.0b1,<5.0.0)", "azure-identity (>=1.12.0,<2.0.0)", "beautifulsoup4 (>=4,<5)", "clarifai (>=9.1.0)", "clickhouse-connect (>=0.5.14,<0.6.0)", "cohere (>=4,<5)", "deeplake (>=3.8.3,<4.0.0)", "docarray[hnswlib] (>=0.32.0,<0.33.0)", "duckduckgo-search (>=3.8.3,<4.0.0)", "elasticsearch (>=8,<9)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "google-api-python-client (==2.70.0)", "google-auth (>=2.18.1,<3.0.0)", "google-search-results (>=2,<3)", "gptcache (>=0.1.7)", "html2text (>=2020.1.16,<2021.0.0)", "huggingface_hub (>=0,<1)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "lancedb (>=0.1,<0.2)", "langkit (>=0.0.6,<0.1.0)", "lark (>=1.1.5,<2.0.0)", "librosa (>=0.10.0.post2,<0.11.0)", "lxml (>=4.9.2,<5.0.0)", "manifest-ml (>=0.0.1,<0.0.2)", "marqo (>=1.2.4,<2.0.0)", "momento (>=1.10.1,<2.0.0)", "nebula3-python (>=3.4.0,<4.0.0)", "neo4j (>=5.8.1,<6.0.0)", "networkx (>=2.6.3,<4)", "nlpcloud (>=1,<2)", "nltk (>=3,<4)", "nomic (>=1.0.43,<2.0.0)", "openai (>=0,<1)", "openlm (>=0.0.5,<0.0.6)", "opensearch-py (>=2.0.0,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pexpect (>=4.8.0,<5.0.0)", "pgvector (>=0.1.6,<0.2.0)", "pinecone-client (>=2,<3)", "pinecone-text (>=0.4.2,<0.5.0)", "psycopg2-binary (>=2.9.5,<3.0.0)", "pymongo (>=4.3.3,<5.0.0)", "pyowm (>=3.3.0,<4.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pytesseract (>=0.3.10,<0.4.0)", "python-arango (>=7.5.9,<8.0.0)", "pyvespa (>=0.33.0,<0.34.0)", "qdrant-client (>=1.3.1,<2.0.0)", "rdflib (>=6.3.2,<7.0.0)", "redis (>=4,<5)", "requests-toolbelt (>=1.0.0,<2.0.0)", "sentence-transformers (>=2,<3)", "singlestoredb (>=0.7.1,<0.8.0)", "tensorflow-text (>=2.11.0,<3.0.0)", "tigrisdb (>=1.0.0b6,<2.0.0)", "tiktoken (>=0.3.2,<0.6.0)", "torch (>=1,<3)", "transformers (>=4,<5)", "weaviate-client (>=3,<4)", "wikipedia (>=1,<2)", "wolframalpha (==5.0.0)"]
all = ["O365 (>=2.0.26,<3.0.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "amadeus (>=8.1.0)", "arxiv (>=1.4,<2.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "awadb (>=0.3.9,<0.4.0)", "azure-ai-formrecognizer (>=3.2.1,<4.0.0)", "azure-ai-vision (>=0.11.1b1,<0.12.0)", "azure-cognitiveservices-speech (>=1.28.0,<2.0.0)", "azure-cosmos (>=4.4.0b1,<5.0.0)", "azure-identity (>=1.12.0,<2.0.0)", "beautifulsoup4 (>=4,<5)", "clarifai (>=9.1.0)", "clickhouse-connect (>=0.5.14,<0.6.0)", "cohere (>=4,<5)", "deeplake (>=3.6.8,<4.0.0)", "docarray[hnswlib] (>=0.32.0,<0.33.0)", "duckduckgo-search (>=3.8.3,<4.0.0)", "elasticsearch (>=8,<9)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "google-api-python-client (==2.70.0)", "google-auth (>=2.18.1,<3.0.0)", "google-search-results (>=2,<3)", "gptcache (>=0.1.7)", "html2text (>=2020.1.16,<2021.0.0)", "huggingface_hub (>=0,<1)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "lancedb (>=0.1,<0.2)", "langkit (>=0.0.6,<0.1.0)", "lark (>=1.1.5,<2.0.0)", "libdeeplake (>=0.0.60,<0.0.61)", "librosa (>=0.10.0.post2,<0.11.0)", "lxml (>=4.9.2,<5.0.0)", "manifest-ml (>=0.0.1,<0.0.2)", "marqo (>=1.2.4,<2.0.0)", "momento (>=1.10.1,<2.0.0)", "nebula3-python (>=3.4.0,<4.0.0)", "neo4j (>=5.8.1,<6.0.0)", "networkx (>=2.6.3,<4)", "nlpcloud (>=1,<2)", "nltk (>=3,<4)", "nomic (>=1.0.43,<2.0.0)", "openai (>=0,<1)", "openlm (>=0.0.5,<0.0.6)", "opensearch-py (>=2.0.0,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pexpect (>=4.8.0,<5.0.0)", "pgvector (>=0.1.6,<0.2.0)", "pinecone-client (>=2,<3)", "pinecone-text (>=0.4.2,<0.5.0)", "psycopg2-binary (>=2.9.5,<3.0.0)", "pymongo (>=4.3.3,<5.0.0)", "pyowm (>=3.3.0,<4.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pytesseract (>=0.3.10,<0.4.0)", "python-arango (>=7.5.9,<8.0.0)", "pyvespa (>=0.33.0,<0.34.0)", "qdrant-client (>=1.3.1,<2.0.0)", "rdflib (>=6.3.2,<7.0.0)", "redis (>=4,<5)", "requests-toolbelt (>=1.0.0,<2.0.0)", "sentence-transformers (>=2,<3)", "singlestoredb (>=0.7.1,<0.8.0)", "tensorflow-text (>=2.11.0,<3.0.0)", "tigrisdb (>=1.0.0b6,<2.0.0)", "tiktoken (>=0.3.2,<0.6.0)", "torch (>=1,<3)", "transformers (>=4,<5)", "weaviate-client (>=3,<4)", "wikipedia (>=1,<2)", "wolframalpha (==5.0.0)"]
azure = ["azure-ai-formrecognizer (>=3.2.1,<4.0.0)", "azure-ai-vision (>=0.11.1b1,<0.12.0)", "azure-cognitiveservices-speech (>=1.28.0,<2.0.0)", "azure-core (>=1.26.4,<2.0.0)", "azure-cosmos (>=4.4.0b1,<5.0.0)", "azure-identity (>=1.12.0,<2.0.0)", "azure-search-documents (==11.4.0b8)", "openai (>=0,<1)"]
clarifai = ["clarifai (>=9.1.0)"]
cli = ["typer (>=0.9.0,<0.10.0)"]
cohere = ["cohere (>=4,<5)"]
docarray = ["docarray[hnswlib] (>=0.32.0,<0.33.0)"]
embeddings = ["sentence-transformers (>=2,<3)"]
extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.3.11,<0.4.0)", "arxiv (>=1.4,<2.0)", "assemblyai (>=0.17.0,<0.18.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.1.0,<0.2.0)", "chardet (>=5.1.0,<6.0.0)", "dashvector (>=1.0.1,<2.0.0)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "feedparser (>=6.0.10,<7.0.0)", "fireworks-ai (>=0.6.0,<0.7.0)", "geopandas (>=0.13.1,<0.14.0)", "gitpython (>=3.1.32,<4.0.0)", "google-cloud-documentai (>=2.20.1,<3.0.0)", "gql (>=3.4.1,<4.0.0)", "html2text (>=2020.1.16,<2021.0.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "jsonschema (>1)", "lxml (>=4.9.2,<5.0.0)", "markdownify (>=0.11.6,<0.12.0)", "motor (>=3.3.1,<4.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "numexpr (>=2.8.6,<3.0.0)", "openai (>=0,<1)", "openapi-pydantic (>=0.3.2,<0.4.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "rapidocr-onnxruntime (>=1.3.2,<2.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "rspace_client (>=2.5.0,<3.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "sqlite-vss (>=0.1.2,<0.2.0)", "streamlit (>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "timescale-vector (>=0.0.1,<0.0.2)", "tqdm (>=4.48.0)", "upstash-redis (>=0.15.0,<0.16.0)", "xata (>=1.0.0a7,<2.0.0)", "xmltodict (>=0.13.0,<0.14.0)"]
extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "amazon-textract-caller (<2)", "anthropic (>=0.3.11,<0.4.0)", "arxiv (>=1.4,<2.0)", "assemblyai (>=0.17.0,<0.18.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.1.0,<0.2.0)", "chardet (>=5.1.0,<6.0.0)", "dashvector (>=1.0.1,<2.0.0)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "feedparser (>=6.0.10,<7.0.0)", "geopandas (>=0.13.1,<0.14.0)", "gitpython (>=3.1.32,<4.0.0)", "google-cloud-documentai (>=2.20.1,<3.0.0)", "gql (>=3.4.1,<4.0.0)", "html2text (>=2020.1.16,<2021.0.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "jsonschema (>1)", "lxml (>=4.9.2,<5.0.0)", "markdownify (>=0.11.6,<0.12.0)", "motor (>=3.3.1,<4.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "numexpr (>=2.8.6,<3.0.0)", "openai (>=0,<1)", "openapi-pydantic (>=0.3.2,<0.4.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "rapidocr-onnxruntime (>=1.3.2,<2.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "rspace_client (>=2.5.0,<3.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "sqlite-vss (>=0.1.2,<0.2.0)", "streamlit (>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "timescale-vector (>=0.0.1,<0.0.2)", "tqdm (>=4.48.0)", "upstash-redis (>=0.15.0,<0.16.0)", "xata (>=1.0.0a7,<2.0.0)", "xmltodict (>=0.13.0,<0.14.0)"]
javascript = ["esprima (>=4.0.1,<5.0.0)"]
llms = ["clarifai (>=9.1.0)", "cohere (>=4,<5)", "huggingface_hub (>=0,<1)", "manifest-ml (>=0.0.1,<0.0.2)", "nlpcloud (>=1,<2)", "openai (>=0,<1)", "openlm (>=0.0.5,<0.0.6)", "torch (>=1,<3)", "transformers (>=4,<5)"]
openai = ["openai (>=0,<1)", "tiktoken (>=0.3.2,<0.6.0)"]
@@ -693,13 +692,13 @@ text-helpers = ["chardet (>=5.1.0,<6.0.0)"]

[[package]]
name = "langserve"
version = "0.0.23"
version = "0.0.20"
description = ""
optional = false
optional = true
python-versions = ">=3.8.1,<4.0.0"
files = [
    {file = "langserve-0.0.23-py3-none-any.whl", hash = "sha256:35f29ba83f7198dd0e6eff57b70b521a36d59d2133fd368d8b3161f55570f7bc"},
    {file = "langserve-0.0.23.tar.gz", hash = "sha256:4d4202fa3badc198aef884eb5dbfe4b0e9d27565872eae6a3e13d6c4a4aa0fd2"},
    {file = "langserve-0.0.20-py3-none-any.whl", hash = "sha256:7c0f0c9989a6423a72229c4b7d39d8c08d912a8a8b19e0bc0037ee24305bc13e"},
    {file = "langserve-0.0.20.tar.gz", hash = "sha256:2b16a33e647f107388d2da644373368ff627bbb96358a401523b9ef6effed834"},
]

[package.dependencies]
@@ -717,13 +716,13 @@ server = ["fastapi (>=0.90.1)", "sse-starlette (>=1.3.0,<2.0.0)"]

[[package]]
name = "langsmith"
version = "0.0.58"
version = "0.0.52"
description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform."
optional = false
optional = true
python-versions = ">=3.8.1,<4.0"
files = [
    {file = "langsmith-0.0.58-py3-none-any.whl", hash = "sha256:75a82744da2d2fa647d8d8d66a2b3791edc914a640516fa2f46cd5502b697380"},
    {file = "langsmith-0.0.58.tar.gz", hash = "sha256:b935b37b7ce09d998e572e5e3a25f022c283eee6a8de661d41cbda02ce09c6b5"},
    {file = "langsmith-0.0.52-py3-none-any.whl", hash = "sha256:d02a0ade5a53b36143084e57003ed38ccbdf5fc15a5a0eb14f8989ceaee0b807"},
    {file = "langsmith-0.0.52.tar.gz", hash = "sha256:1dc29082d257deea1859cb22c53d9481ca5c4a37f3af40c0f9d300fb8adc91db"},
]

[package.dependencies]
@@ -758,7 +757,7 @@ testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"]
name = "marshmallow"
version = "3.20.1"
description = "A lightweight library for converting complex datatypes to and from native Python datatypes."
optional = false
optional = true
python-versions = ">=3.8"
files = [
    {file = "marshmallow-3.20.1-py3-none-any.whl", hash = "sha256:684939db93e80ad3561392f47be0230743131560a41c5110684c16e21ade0a5c"},
@@ -789,7 +788,7 @@ files = [
name = "multidict"
version = "6.0.4"
description = "multidict implementation"
optional = false
optional = true
python-versions = ">=3.7"
files = [
    {file = "multidict-6.0.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0b1a97283e0c85772d613878028fec909f003993e1007eafa715b24b377cb9b8"},
@@ -872,7 +871,7 @@ files = [
name = "mypy-extensions"
version = "1.0.0"
description = "Type system extensions for programs checked with the mypy type checker."
optional = false
optional = true
python-versions = ">=3.5"
files = [
    {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"},
@@ -883,7 +882,7 @@ files = [
name = "numpy"
version = "1.24.4"
description = "Fundamental package for array computing in Python"
optional = false
optional = true
python-versions = ">=3.8"
files = [
    {file = "numpy-1.24.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c0bfb52d2169d58c1cdb8cc1f16989101639b34c7d3ce60ed70b19c63eba0b64"},
@@ -955,13 +954,13 @@ testing = ["pytest", "pytest-benchmark"]

[[package]]
name = "poethepoet"
version = "0.24.2"
version = "0.24.1"
description = "A task runner that works well with poetry."
optional = false
python-versions = ">=3.8"
files = [
    {file = "poethepoet-0.24.2-py3-none-any.whl", hash = "sha256:affaf7669542f54df05ed1e2be3d24028c9f4bd2ea514813fae7f01c5ca6e686"},
    {file = "poethepoet-0.24.2.tar.gz", hash = "sha256:f600ecdbf58b474f7dba273060b194242566ffccb41d75f5b0d1cb8f5aa8bf2e"},
    {file = "poethepoet-0.24.1-py3-none-any.whl", hash = "sha256:3afa44b4fc7327df0dd912eda012604a072af2bb4d243fb0e41e8eca8dabf9ed"},
    {file = "poethepoet-0.24.1.tar.gz", hash = "sha256:f5a386387c382f08890c273d13495938208a8ce91ab71536abf388c776c4f366"},
]

[package.dependencies]
@@ -1079,7 +1078,7 @@ watchdog = ">=0.6.0"
name = "pyyaml"
version = "6.0.1"
description = "YAML parser and emitter for Python"
optional = false
optional = true
python-versions = ">=3.6"
files = [
    {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"},
@@ -1128,7 +1127,7 @@ files = [
name = "requests"
version = "2.31.0"
description = "Python HTTP for Humans."
optional = false
optional = true
python-versions = ">=3.7"
files = [
    {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"},
@@ -1166,28 +1165,28 @@ jupyter = ["ipywidgets (>=7.5.1,<9)"]

[[package]]
name = "ruff"
version = "0.1.4"
description = "An extremely fast Python linter and code formatter, written in Rust."
version = "0.1.3"
description = "An extremely fast Python linter, written in Rust."
optional = false
python-versions = ">=3.7"
files = [
    {file = "ruff-0.1.4-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:864958706b669cce31d629902175138ad8a069d99ca53514611521f532d91495"},
    {file = "ruff-0.1.4-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:9fdd61883bb34317c788af87f4cd75dfee3a73f5ded714b77ba928e418d6e39e"},
    {file = "ruff-0.1.4-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b4eaca8c9cc39aa7f0f0d7b8fe24ecb51232d1bb620fc4441a61161be4a17539"},
    {file = "ruff-0.1.4-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a9a1301dc43cbf633fb603242bccd0aaa34834750a14a4c1817e2e5c8d60de17"},
    {file = "ruff-0.1.4-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:78e8db8ab6f100f02e28b3d713270c857d370b8d61871d5c7d1702ae411df683"},
    {file = "ruff-0.1.4-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:80fea754eaae06335784b8ea053d6eb8e9aac75359ebddd6fee0858e87c8d510"},
    {file = "ruff-0.1.4-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6bc02a480d4bfffd163a723698da15d1a9aec2fced4c06f2a753f87f4ce6969c"},
    {file = "ruff-0.1.4-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9862811b403063765b03e716dac0fda8fdbe78b675cd947ed5873506448acea4"},
    {file = "ruff-0.1.4-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58826efb8b3efbb59bb306f4b19640b7e366967a31c049d49311d9eb3a4c60cb"},
    {file = "ruff-0.1.4-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:fdfd453fc91d9d86d6aaa33b1bafa69d114cf7421057868f0b79104079d3e66e"},
    {file = "ruff-0.1.4-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:e8791482d508bd0b36c76481ad3117987301b86072158bdb69d796503e1c84a8"},
    {file = "ruff-0.1.4-py3-none-musllinux_1_2_i686.whl", hash = "sha256:01206e361021426e3c1b7fba06ddcb20dbc5037d64f6841e5f2b21084dc51800"},
    {file = "ruff-0.1.4-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:645591a613a42cb7e5c2b667cbefd3877b21e0252b59272ba7212c3d35a5819f"},
    {file = "ruff-0.1.4-py3-none-win32.whl", hash = "sha256:99908ca2b3b85bffe7e1414275d004917d1e0dfc99d497ccd2ecd19ad115fd0d"},
    {file = "ruff-0.1.4-py3-none-win_amd64.whl", hash = "sha256:1dfd6bf8f6ad0a4ac99333f437e0ec168989adc5d837ecd38ddb2cc4a2e3db8a"},
    {file = "ruff-0.1.4-py3-none-win_arm64.whl", hash = "sha256:d98ae9ebf56444e18a3e3652b3383204748f73e247dea6caaf8b52d37e6b32da"},
    {file = "ruff-0.1.4.tar.gz", hash = "sha256:21520ecca4cc555162068d87c747b8f95e1e95f8ecfcbbe59e8dd00710586315"},
    {file = "ruff-0.1.3-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:b46d43d51f7061652eeadb426a9e3caa1e0002470229ab2fc19de8a7b0766901"},
    {file = "ruff-0.1.3-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:b8afeb9abd26b4029c72adc9921b8363374f4e7edb78385ffaa80278313a15f9"},
    {file = "ruff-0.1.3-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca3cf365bf32e9ba7e6db3f48a4d3e2c446cd19ebee04f05338bc3910114528b"},
    {file = "ruff-0.1.3-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4874c165f96c14a00590dcc727a04dca0cfd110334c24b039458c06cf78a672e"},
    {file = "ruff-0.1.3-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eec2dd31eed114e48ea42dbffc443e9b7221976554a504767ceaee3dd38edeb8"},
    {file = "ruff-0.1.3-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:dc3ec4edb3b73f21b4aa51337e16674c752f1d76a4a543af56d7d04e97769613"},
    {file = "ruff-0.1.3-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2e3de9ed2e39160800281848ff4670e1698037ca039bda7b9274f849258d26ce"},
    {file = "ruff-0.1.3-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c595193881922cc0556a90f3af99b1c5681f0c552e7a2a189956141d8666fe8"},
    {file = "ruff-0.1.3-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f75e670d529aa2288cd00fc0e9b9287603d95e1536d7a7e0cafe00f75e0dd9d"},
    {file = "ruff-0.1.3-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:76dd49f6cd945d82d9d4a9a6622c54a994689d8d7b22fa1322983389b4892e20"},
    {file = "ruff-0.1.3-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:918b454bc4f8874a616f0d725590277c42949431ceb303950e87fef7a7d94cb3"},
    {file = "ruff-0.1.3-py3-none-musllinux_1_2_i686.whl", hash = "sha256:d8859605e729cd5e53aa38275568dbbdb4fe882d2ea2714c5453b678dca83784"},
    {file = "ruff-0.1.3-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:0b6c55f5ef8d9dd05b230bb6ab80bc4381ecb60ae56db0330f660ea240cb0d4a"},
    {file = "ruff-0.1.3-py3-none-win32.whl", hash = "sha256:3e7afcbdcfbe3399c34e0f6370c30f6e529193c731b885316c5a09c9e4317eef"},
    {file = "ruff-0.1.3-py3-none-win_amd64.whl", hash = "sha256:7a18df6638cec4a5bd75350639b2bb2a2366e01222825562c7346674bdceb7ea"},
    {file = "ruff-0.1.3-py3-none-win_arm64.whl", hash = "sha256:12fd53696c83a194a2db7f9a46337ce06445fb9aa7d25ea6f293cf75b21aca9f"},
    {file = "ruff-0.1.3.tar.gz", hash = "sha256:3ba6145369a151401d5db79f0a47d50e470384d0d89d0d6f7fab0b589ad07c34"},
]

[[package]]
@@ -1225,56 +1224,12 @@ files = [

[[package]]
name = "sqlalchemy"
version = "2.0.23"
version = "2.0.22"
description = "Database Abstraction Library"
optional = false
optional = true
python-versions = ">=3.7"
files = [
    {file = "SQLAlchemy-2.0.23-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:638c2c0b6b4661a4fd264f6fb804eccd392745c5887f9317feb64bb7cb03b3ea"},
    {file = "SQLAlchemy-2.0.23-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e3b5036aa326dc2df50cba3c958e29b291a80f604b1afa4c8ce73e78e1c9f01d"},
    {file = "SQLAlchemy-2.0.23-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c14eba45983d2f48f7546bb32b47937ee2cafae353646295f0e99f35b14286ab"},
    {file = "SQLAlchemy-2.0.23-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:89a01238fcb9a8af118eaad3ffcc5dedaacbd429dc6fdc43fe430d3a941ff965"},
    {file = "SQLAlchemy-2.0.23-cp310-cp310-win32.whl", hash = "sha256:cabafc7837b6cec61c0e1e5c6d14ef250b675fa9c3060ed8a7e38653bd732ff8"},
    {file = "SQLAlchemy-2.0.23-cp310-cp310-win_amd64.whl", hash = "sha256:87a3d6b53c39cd173990de2f5f4b83431d534a74f0e2f88bd16eabb5667e65c6"},
    {file = "SQLAlchemy-2.0.23-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d5578e6863eeb998980c212a39106ea139bdc0b3f73291b96e27c929c90cd8e1"},
    {file = "SQLAlchemy-2.0.23-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:62d9e964870ea5ade4bc870ac4004c456efe75fb50404c03c5fd61f8bc669a72"},
    {file = "SQLAlchemy-2.0.23-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c80c38bd2ea35b97cbf7c21aeb129dcbebbf344ee01a7141016ab7b851464f8e"},
    {file = "SQLAlchemy-2.0.23-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75eefe09e98043cff2fb8af9796e20747ae870c903dc61d41b0c2e55128f958d"},
    {file = "SQLAlchemy-2.0.23-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:bd45a5b6c68357578263d74daab6ff9439517f87da63442d244f9f23df56138d"},
    {file = "SQLAlchemy-2.0.23-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a86cb7063e2c9fb8e774f77fbf8475516d270a3e989da55fa05d08089d77f8c4"},
    {file = "SQLAlchemy-2.0.23-cp311-cp311-win32.whl", hash = "sha256:b41f5d65b54cdf4934ecede2f41b9c60c9f785620416e8e6c48349ab18643855"},
    {file = "SQLAlchemy-2.0.23-cp311-cp311-win_amd64.whl", hash = "sha256:9ca922f305d67605668e93991aaf2c12239c78207bca3b891cd51a4515c72e22"},
    {file = "SQLAlchemy-2.0.23-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d0f7fb0c7527c41fa6fcae2be537ac137f636a41b4c5a4c58914541e2f436b45"},
    {file = "SQLAlchemy-2.0.23-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7c424983ab447dab126c39d3ce3be5bee95700783204a72549c3dceffe0fc8f4"},
    {file = "SQLAlchemy-2.0.23-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f508ba8f89e0a5ecdfd3761f82dda2a3d7b678a626967608f4273e0dba8f07ac"},
    {file = "SQLAlchemy-2.0.23-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6463aa765cf02b9247e38b35853923edbf2f6fd1963df88706bc1d02410a5577"},
    {file = "SQLAlchemy-2.0.23-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:e599a51acf3cc4d31d1a0cf248d8f8d863b6386d2b6782c5074427ebb7803bda"},
    {file = "SQLAlchemy-2.0.23-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:fd54601ef9cc455a0c61e5245f690c8a3ad67ddb03d3b91c361d076def0b4c60"},
    {file = "SQLAlchemy-2.0.23-cp312-cp312-win32.whl", hash = "sha256:42d0b0290a8fb0165ea2c2781ae66e95cca6e27a2fbe1016ff8db3112ac1e846"},
    {file = "SQLAlchemy-2.0.23-cp312-cp312-win_amd64.whl", hash = "sha256:227135ef1e48165f37590b8bfc44ed7ff4c074bf04dc8d6f8e7f1c14a94aa6ca"},
    {file = "SQLAlchemy-2.0.23-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:14aebfe28b99f24f8a4c1346c48bc3d63705b1f919a24c27471136d2f219f02d"},
    {file = "SQLAlchemy-2.0.23-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e983fa42164577d073778d06d2cc5d020322425a509a08119bdcee70ad856bf"},
    {file = "SQLAlchemy-2.0.23-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e0dc9031baa46ad0dd5a269cb7a92a73284d1309228be1d5935dac8fb3cae24"},
    {file = "SQLAlchemy-2.0.23-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:5f94aeb99f43729960638e7468d4688f6efccb837a858b34574e01143cf11f89"},
    {file = "SQLAlchemy-2.0.23-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:63bfc3acc970776036f6d1d0e65faa7473be9f3135d37a463c5eba5efcdb24c8"},
    {file = "SQLAlchemy-2.0.23-cp37-cp37m-win32.whl", hash = "sha256:f48ed89dd11c3c586f45e9eec1e437b355b3b6f6884ea4a4c3111a3358fd0c18"},
    {file = "SQLAlchemy-2.0.23-cp37-cp37m-win_amd64.whl", hash = "sha256:1e018aba8363adb0599e745af245306cb8c46b9ad0a6fc0a86745b6ff7d940fc"},
    {file = "SQLAlchemy-2.0.23-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:64ac935a90bc479fee77f9463f298943b0e60005fe5de2aa654d9cdef46c54df"},
    {file = "SQLAlchemy-2.0.23-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c4722f3bc3c1c2fcc3702dbe0016ba31148dd6efcd2a2fd33c1b4897c6a19693"},
    {file = "SQLAlchemy-2.0.23-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4af79c06825e2836de21439cb2a6ce22b2ca129bad74f359bddd173f39582bf5"},
    {file = "SQLAlchemy-2.0.23-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:683ef58ca8eea4747737a1c35c11372ffeb84578d3aab8f3e10b1d13d66f2bc4"},
    {file = "SQLAlchemy-2.0.23-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:d4041ad05b35f1f4da481f6b811b4af2f29e83af253bf37c3c4582b2c68934ab"},
    {file = "SQLAlchemy-2.0.23-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aeb397de65a0a62f14c257f36a726945a7f7bb60253462e8602d9b97b5cbe204"},
    {file = "SQLAlchemy-2.0.23-cp38-cp38-win32.whl", hash = "sha256:42ede90148b73fe4ab4a089f3126b2cfae8cfefc955c8174d697bb46210c8306"},
    {file = "SQLAlchemy-2.0.23-cp38-cp38-win_amd64.whl", hash = "sha256:964971b52daab357d2c0875825e36584d58f536e920f2968df8d581054eada4b"},
    {file = "SQLAlchemy-2.0.23-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:616fe7bcff0a05098f64b4478b78ec2dfa03225c23734d83d6c169eb41a93e55"},
    {file = "SQLAlchemy-2.0.23-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0e680527245895aba86afbd5bef6c316831c02aa988d1aad83c47ffe92655e74"},
    {file = "SQLAlchemy-2.0.23-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4895a63e2c271ffc7a81ea424b94060f7b3b03b4ea0cd58ab5bb676ed02f4221"},
    {file = "SQLAlchemy-2.0.23-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:967c0b71156f793e6662dd839da54f884631755275ed71f1539c95bbada9aaab"},
    {file = "SQLAlchemy-2.0.23-cp39-cp39-win32.whl", hash = "sha256:0a8c6aa506893e25a04233bc721c6b6cf844bafd7250535abb56cb6cc1368884"},
    {file = "SQLAlchemy-2.0.23-cp39-cp39-win_amd64.whl", hash = "sha256:f3420d00d2cb42432c1d0e44540ae83185ccbbc67a6054dcc8ab5387add6620b"},
    {file = "SQLAlchemy-2.0.23-py3-none-any.whl", hash = "sha256:31952bbc527d633b9479f5f81e8b9dfada00b91d6baba021a869095f1a97006d"},
    {file = "SQLAlchemy-2.0.23.tar.gz", hash = "sha256:c1bda93cbbe4aa2aa0aa8655c5aeda505cd219ff3e8da91d1d329e143e4aff69"},
    {file = "SQLAlchemy-2.0.22.tar.gz", hash = "sha256:5434cc601aa17570d79e5377f5fd45ff92f9379e2abed0be5e8c2fba8d353d2b"},
]

[package.dependencies]
@@ -1283,8 +1238,7 @@ typing-extensions = ">=4.2.0"

[package.extras]
aiomysql = ["aiomysql (>=0.2.0)", "greenlet (!=0.4.17)"]
aioodbc = ["aioodbc", "greenlet (!=0.4.17)"]
aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing-extensions (!=3.10.0.1)"]
aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing_extensions (!=3.10.0.1)"]
asyncio = ["greenlet (!=0.4.17)"]
asyncmy = ["asyncmy (>=0.2.3,!=0.2.4,!=0.2.6)", "greenlet (!=0.4.17)"]
mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2,!=1.1.5)"]
@@ -1294,7 +1248,7 @@ mssql-pyodbc = ["pyodbc"]
mypy = ["mypy (>=0.910)"]
mysql = ["mysqlclient (>=1.4.0)"]
mysql-connector = ["mysql-connector-python"]
oracle = ["cx-oracle (>=8)"]
oracle = ["cx_oracle (>=7)"]
oracle-oracledb = ["oracledb (>=1.0.1)"]
postgresql = ["psycopg2 (>=2.7)"]
postgresql-asyncpg = ["asyncpg", "greenlet (!=0.4.17)"]
@@ -1304,13 +1258,13 @@ postgresql-psycopg2binary = ["psycopg2-binary"]
postgresql-psycopg2cffi = ["psycopg2cffi"]
postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"]
pymysql = ["pymysql"]
sqlcipher = ["sqlcipher3-binary"]
sqlcipher = ["sqlcipher3_binary"]

[[package]]
name = "sse-starlette"
version = "1.6.5"
description = "\"SSE plugin for Starlette\""
optional = false
optional = true
python-versions = ">=3.8"
files = [
    {file = "sse-starlette-1.6.5.tar.gz", hash = "sha256:819f2c421fb37067380fe3dcaba246c476b02651b7bb7601099a378ad802a0ac"},
@@ -1342,7 +1296,7 @@ full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart", "pyyam
name = "tenacity"
version = "8.2.3"
description = "Retry code until it succeeds"
optional = false
optional = true
python-versions = ">=3.7"
files = [
    {file = "tenacity-8.2.3-py3-none-any.whl", hash = "sha256:ce510e327a630c9e1beaf17d42e6ffacc88185044ad85cf74c0a8887c6a0f88c"},
@@ -1363,17 +1317,6 @@ files = [
    {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"},
]

[[package]]
name = "tomlkit"
version = "0.12.2"
description = "Style preserving TOML library"
optional = false
python-versions = ">=3.7"
files = [
    {file = "tomlkit-0.12.2-py3-none-any.whl", hash = "sha256:eeea7ac7563faeab0a1ed8fe12c2e5a51c61f933f2502f7e9db0241a65163ad0"},
    {file = "tomlkit-0.12.2.tar.gz", hash = "sha256:df32fab589a81f0d7dc525a4267b6d7a64ee99619cbd1eeb0fae32c1dd426977"},
]

[[package]]
name = "typer"
version = "0.9.0"
@@ -1413,7 +1356,7 @@ files = [
name = "typing-inspect"
version = "0.9.0"
description = "Runtime inspection utilities for typing module."
optional = false
optional = true
python-versions = "*"
files = [
    {file = "typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f"},
@@ -1428,7 +1371,7 @@ typing-extensions = ">=3.7.4"
name = "urllib3"
version = "2.0.7"
description = "HTTP library with thread-safe connection pooling, file post, and more."
optional = false
optional = true
python-versions = ">=3.7"
files = [
    {file = "urllib3-2.0.7-py3-none-any.whl", hash = "sha256:fdb6d215c776278489906c2f8916e6e7d4f5a9b602ccbcfdf7f016fc8da0596e"},
@@ -1503,7 +1446,7 @@ watchmedo = ["PyYAML (>=3.10)"]
name = "yarl"
version = "1.9.2"
description = "Yet another URL library"
optional = false
optional = true
python-versions = ">=3.7"
files = [
    {file = "yarl-1.9.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8c2ad583743d16ddbdf6bb14b5cd76bf43b0d0006e918809d5d4ddf7bde8dd82"},
@@ -1587,9 +1530,9 @@ idna = ">=2.0"
multidict = ">=4.0"

[extras]
serve = []
serve = ["langserve"]

[metadata]
lock-version = "2.0"
python-versions = ">=3.8.1,<4.0"
content-hash = "9aa00d7144f6e0dea9f8e5df25d0aa69117809ba4eea7d3d0664cd3fcccab2f4"
content-hash = "80e530d72b799e5d066f3f6e5f472e5b70a288aed9f3bf4ee6293e3bf5a9e987"

@@ -1,6 +1,6 @@
[tool.poetry]
name = "langchain-cli"
version = "0.0.16"
version = "0.0.13"
description = "CLI for interacting with LangChain"
authors = ["Erick Friis <erick@langchain.dev>"]
readme = "README.md"
@@ -8,10 +8,11 @@ readme = "README.md"
[tool.poetry.dependencies]
python = ">=3.8.1,<4.0"
typer = {extras = ["all"], version = "^0.9.0"}
tomli = "^2.0.1"
gitpython = "^3.1.40"
langserve = {extras = ["all"], version = ">=0.0.16"}
langserve = {extras = ["all"], version = ">=0.0.16", optional = true}
fastapi = "^0.104.0"
uvicorn = "^0.23.2"
tomlkit = "^0.12.2"

[tool.poetry.scripts]
langchain = "langchain_cli.cli:app"
@@ -31,7 +32,7 @@ ruff = "^0.1.3"

[tool.poetry.extras]
# For langserve
serve = []
serve = ["langserve"]

[tool.ruff]
select = [

@@ -1,3 +0,0 @@
|
||||
from langchain_experimental.openai_assistant.base import OpenAIAssistantRunnable
|
||||
|
||||
__all__ = ["OpenAIAssistantRunnable"]
|
||||
@@ -1,351 +0,0 @@
from __future__ import annotations

import json
from time import sleep
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Tuple, Union

from langchain.pydantic_v1 import Field
from langchain.schema.agent import AgentAction, AgentFinish
from langchain.schema.runnable import RunnableConfig, RunnableSerializable
from langchain.tools import format_tool_to_openai_function
from langchain.tools.base import BaseTool

if TYPE_CHECKING:
    import openai
    from openai.types.beta.threads import ThreadMessage
    from openai.types.beta.threads.required_action_function_tool_call import (
        RequiredActionFunctionToolCall,
    )


class OpenAIAssistantFinish(AgentFinish):
    """AgentFinish with run and thread metadata."""

    run_id: str
    thread_id: str


class OpenAIAssistantAction(AgentAction):
    """AgentAction with info needed to submit custom tool output to existing run."""

    tool_call_id: str
    run_id: str
    thread_id: str


def _get_openai_client() -> openai.OpenAI:
    try:
        import openai

        return openai.OpenAI()
    except ImportError as e:
        raise ImportError(
            "Unable to import openai, please install with `pip install openai`."
        ) from e
    except AttributeError as e:
        raise AttributeError(
            "Please make sure you are using a v1.1-compatible version of openai. You "
            'can install with `pip install "openai>=1.1"`.'
        ) from e


OutputType = Union[
    List[OpenAIAssistantAction],
    OpenAIAssistantFinish,
    List["ThreadMessage"],
    List["RequiredActionFunctionToolCall"],
]


class OpenAIAssistantRunnable(RunnableSerializable[Dict, OutputType]):
    """Run an OpenAI Assistant.

    Example using OpenAI tools:
        .. code-block:: python

            from langchain_experimental.openai_assistant import OpenAIAssistantRunnable

            interpreter_assistant = OpenAIAssistantRunnable.create_assistant(
                name="langchain assistant",
                instructions="You are a personal math tutor. Write and run code to answer math questions.",
                tools=[{"type": "code_interpreter"}],
                model="gpt-4-1106-preview"
            )
            output = interpreter_assistant.invoke({"content": "What's 10 - 4 raised to the 2.7"})

    Example using custom tools and AgentExecutor:
        .. code-block:: python

            from langchain_experimental.openai_assistant import OpenAIAssistantRunnable
            from langchain.agents import AgentExecutor
            from langchain.tools import E2BDataAnalysisTool


            tools = [E2BDataAnalysisTool(api_key="...")]
            agent = OpenAIAssistantRunnable.create_assistant(
                name="langchain assistant e2b tool",
                instructions="You are a personal math tutor. Write and run code to answer math questions.",
                tools=tools,
                model="gpt-4-1106-preview",
                as_agent=True
            )

            agent_executor = AgentExecutor(agent=agent, tools=tools)
            agent_executor.invoke({"content": "What's 10 - 4 raised to the 2.7"})


    Example using custom tools and custom execution:
        .. code-block:: python

            from langchain_experimental.openai_assistant import OpenAIAssistantRunnable
            from langchain.agents import AgentExecutor
            from langchain.schema.agent import AgentFinish
            from langchain.tools import E2BDataAnalysisTool


            tools = [E2BDataAnalysisTool(api_key="...")]
            agent = OpenAIAssistantRunnable.create_assistant(
                name="langchain assistant e2b tool",
                instructions="You are a personal math tutor. Write and run code to answer math questions.",
                tools=tools,
                model="gpt-4-1106-preview",
                as_agent=True
            )

            def execute_agent(agent, tools, input):
                tool_map = {tool.name: tool for tool in tools}
                response = agent.invoke(input)
                while not isinstance(response, AgentFinish):
                    tool_outputs = []
                    for action in response:
                        tool_output = tool_map[action.tool].invoke(action.tool_input)
                        print(action.tool, action.tool_input, tool_output, end="\n\n")
                        tool_outputs.append({"output": tool_output, "tool_call_id": action.tool_call_id})
                    response = agent.invoke(
                        {
                            "tool_outputs": tool_outputs,
                            "run_id": action.run_id,
                            "thread_id": action.thread_id
                        }
                    )

                return response

            response = execute_agent(agent, tools, {"content": "What's 10 - 4 raised to the 2.7"})
            next_response = execute_agent(agent, tools, {"content": "now add 17.241", "thread_id": response.thread_id})

    """  # noqa: E501

    client: openai.OpenAI = Field(default_factory=_get_openai_client)
    """OpenAI client."""
    assistant_id: str
    """OpenAI assistant id."""
    check_every_ms: float = 1_000.0
    """Frequency with which to check run progress in ms."""
    as_agent: bool = False
    """Use as a LangChain agent, compatible with the AgentExecutor."""

    @classmethod
    def create_assistant(
        cls,
        name: str,
        instructions: str,
        tools: Sequence[Union[BaseTool, dict]],
        model: str,
        *,
        client: Optional[openai.OpenAI] = None,
        **kwargs: Any,
    ) -> OpenAIAssistantRunnable:
        """Create an OpenAI Assistant and instantiate the Runnable.

        Args:
            name: Assistant name.
            instructions: Assistant instructions.
            tools: Assistant tools. Can be passed in OpenAI format or as BaseTools.
            model: Assistant model to use.
            client: OpenAI client. Will create default client if not specified.

        Returns:
            OpenAIAssistantRunnable configured to run using the created assistant.
        """
        client = client or _get_openai_client()
        openai_tools: List = []
        for tool in tools:
            if isinstance(tool, BaseTool):
                tool = {
                    "type": "function",
                    "function": format_tool_to_openai_function(tool),
                }
            openai_tools.append(tool)
        assistant = client.beta.assistants.create(
            name=name,
            instructions=instructions,
            tools=openai_tools,
            model=model,
        )
        return cls(assistant_id=assistant.id, **kwargs)

    def invoke(
        self, input: dict, config: Optional[RunnableConfig] = None
    ) -> OutputType:
        """Invoke assistant.

        Args:
            input: Runnable input dict that can have:
                content: User message when starting a new run.
                thread_id: Existing thread to use.
                run_id: Existing run to use. Should only be supplied when providing
                    the tool output for a required action after an initial invocation.
                file_ids: File ids to include in new run. Used for retrieval.
                message_metadata: Metadata to associate with new message.
                thread_metadata: Metadata to associate with new thread. Only relevant
                    when new thread being created.
                instructions: Additional run instructions.
                model: Override Assistant model for this run.
                tools: Override Assistant tools for this run.
                run_metadata: Metadata to associate with new run.
            config: Runnable config.

        Return:
            If self.as_agent, will return
                Union[List[OpenAIAssistantAction], OpenAIAssistantFinish]. Otherwise
                will return OpenAI types
                Union[List[ThreadMessage], List[RequiredActionFunctionToolCall]].
        """
        # Being run within AgentExecutor and there are tool outputs to submit.
        if self.as_agent and input.get("intermediate_steps"):
            tool_outputs = self._parse_intermediate_steps(input["intermediate_steps"])
            run = self.client.beta.threads.runs.submit_tool_outputs(**tool_outputs)
        # Starting a new thread and a new run.
        elif "thread_id" not in input:
            thread = {
                "messages": [
                    {
                        "role": "user",
                        "content": input["content"],
                        "file_ids": input.get("file_ids", []),
                        "metadata": input.get("message_metadata"),
                    }
                ],
                "metadata": input.get("thread_metadata"),
            }
            run = self._create_thread_and_run(input, thread)
        # Starting a new run in an existing thread.
        elif "run_id" not in input:
            _ = self.client.beta.threads.messages.create(
                input["thread_id"],
                content=input["content"],
                role="user",
                file_ids=input.get("file_ids", []),
                metadata=input.get("message_metadata"),
            )
            run = self._create_run(input)
        # Submitting tool outputs to an existing run, outside the AgentExecutor
        # framework.
        else:
            run = self.client.beta.threads.runs.submit_tool_outputs(**input)
        return self._get_response(run.id, run.thread_id)

    def _parse_intermediate_steps(
        self, intermediate_steps: List[Tuple[OpenAIAssistantAction, str]]
    ) -> dict:
        last_action, last_output = intermediate_steps[-1]
        run = self._wait_for_run(last_action.run_id, last_action.thread_id)
        required_tool_call_ids = {
            tc.id for tc in run.required_action.submit_tool_outputs.tool_calls
        }
        tool_outputs = [
            {"output": output, "tool_call_id": action.tool_call_id}
            for action, output in intermediate_steps
            if action.tool_call_id in required_tool_call_ids
        ]
        submit_tool_outputs = {
            "tool_outputs": tool_outputs,
            "run_id": last_action.run_id,
            "thread_id": last_action.thread_id,
        }
        return submit_tool_outputs

    def _create_run(self, input: dict) -> Any:
        params = {
            k: v
            for k, v in input.items()
            if k in ("instructions", "model", "tools", "run_metadata")
        }
        return self.client.beta.threads.runs.create(
            input["thread_id"],
            assistant_id=self.assistant_id,
            **params,
        )

    def _create_thread_and_run(self, input: dict, thread: dict) -> Any:
        params = {
            k: v
            for k, v in input.items()
            if k in ("instructions", "model", "tools", "run_metadata")
        }
        run = self.client.beta.threads.create_and_run(
            assistant_id=self.assistant_id,
            thread=thread,
            **params,
        )
        return run

    def _get_response(self, run_id: str, thread_id: str) -> Any:
        # TODO: Pagination
        import openai

        run = self._wait_for_run(run_id, thread_id)
        if run.status == "completed":
            messages = self.client.beta.threads.messages.list(thread_id, order="asc")
            new_messages = [msg for msg in messages if msg.run_id == run_id]
            if not self.as_agent:
                return new_messages
            answer: Any = [
                msg_content for msg in new_messages for msg_content in msg.content
            ]
            if all(
                isinstance(content, openai.types.beta.threads.MessageContentText)
                for content in answer
            ):
                answer = "\n".join(content.text.value for content in answer)
            return OpenAIAssistantFinish(
                return_values={"output": answer},
                log="",
                run_id=run_id,
                thread_id=thread_id,
            )
        elif run.status == "requires_action":
            if not self.as_agent:
                return run.required_action.submit_tool_outputs.tool_calls
            actions = []
            for tool_call in run.required_action.submit_tool_outputs.tool_calls:
                function = tool_call.function
                args = json.loads(function.arguments)
                if len(args) == 1 and "__arg1" in args:
                    args = args["__arg1"]
                actions.append(
                    OpenAIAssistantAction(
                        tool=function.name,
                        tool_input=args,
                        tool_call_id=tool_call.id,
                        log="",
                        run_id=run_id,
                        thread_id=thread_id,
                    )
                )
            return actions
        else:
            run_info = json.dumps(run.dict(), indent=2)
            raise ValueError(
                f"Unexpected run status: {run.status}. Full run info:\n\n{run_info}"
            )

    def _wait_for_run(self, run_id: str, thread_id: str) -> Any:
        in_progress = True
        while in_progress:
            run = self.client.beta.threads.runs.retrieve(run_id, thread_id=thread_id)
            in_progress = run.status in ("in_progress", "queued")
            if in_progress:
                sleep(self.check_every_ms / 1000)
        return run
libs/experimental/poetry.lock (generated, 956 lines changed)
File diff suppressed because it is too large
@@ -1,6 +1,6 @@
[tool.poetry]
name = "langchain-experimental"
version = "0.0.39"
version = "0.0.37"
description = "Building applications with LLMs through composability"
authors = []
license = "MIT"

@@ -25,7 +25,6 @@ from langchain.schema.messages import (
    FunctionMessage,
    HumanMessage,
    SystemMessage,
    ToolMessage,
)


@@ -55,18 +54,15 @@ def convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
        # Fix for azure
        # Also OpenAI returns None for tool invocations
        content = _dict.get("content", "") or ""
        additional_kwargs: Dict = {}
        if _dict.get("function_call"):
            additional_kwargs["function_call"] = dict(_dict["function_call"])
        if _dict.get("tool_calls"):
            additional_kwargs["tool_calls"] = _dict["tool_calls"]
            additional_kwargs = {"function_call": dict(_dict["function_call"])}
        else:
            additional_kwargs = {}
        return AIMessage(content=content, additional_kwargs=additional_kwargs)
    elif role == "system":
        return SystemMessage(content=_dict["content"])
    elif role == "function":
        return FunctionMessage(content=_dict["content"], name=_dict["name"])
    elif role == "tool":
        return ToolMessage(content=_dict["content"], tool_call_id=_dict["tool_call_id"])
    else:
        return ChatMessage(content=_dict["content"], role=role)
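For reference, a minimal sketch of how the newer side of this hunk handles a tool-calling response; the message dict below is a hypothetical OpenAI-style payload, not taken from this diff:

from langchain.adapters.openai import convert_dict_to_message

# Hypothetical assistant turn that only requests a tool call (content is None).
raw = {
    "role": "assistant",
    "content": None,
    "tool_calls": [
        {
            "id": "call_abc123",
            "type": "function",
            "function": {"name": "get_weather", "arguments": '{"city": "Paris"}'},
        }
    ],
}
msg = convert_dict_to_message(raw)
print(msg.content)            # "" -- None is normalized to an empty string
print(msg.additional_kwargs)  # {'tool_calls': [...]} on the newer side of the diff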
@@ -92,11 +88,6 @@ def convert_message_to_dict(message: BaseMessage) -> dict:
        # If function call only, content is None not empty string
        if message_dict["content"] == "":
            message_dict["content"] = None
        if "tool_calls" in message.additional_kwargs:
            message_dict["tool_calls"] = message.additional_kwargs["tool_calls"]
            # If tool calls only, content is None not empty string
            if message_dict["content"] == "":
                message_dict["content"] = None
    elif isinstance(message, SystemMessage):
        message_dict = {"role": "system", "content": message.content}
    elif isinstance(message, FunctionMessage):
@@ -105,12 +96,6 @@ def convert_message_to_dict(message: BaseMessage) -> dict:
            "content": message.content,
            "name": message.name,
        }
    elif isinstance(message, ToolMessage):
        message_dict = {
            "role": "tool",
            "content": message.content,
            "tool_call_id": message.tool_call_id,
        }
    else:
        raise TypeError(f"Got unknown type {message}")
    if "name" in message.additional_kwargs:
@@ -327,21 +327,10 @@ class AgentOutputParser(BaseOutputParser):
    """Parse text into agent action/finish."""


class MultiActionAgentOutputParser(BaseOutputParser):
    """Base class for parsing agent output into agent actions/finish."""

    @abstractmethod
    def parse(self, text: str) -> Union[List[AgentAction], AgentFinish]:
        """Parse text into agent actions/finish."""


class RunnableAgent(BaseMultiActionAgent):
class RunnableAgent(BaseSingleActionAgent):
    """Agent powered by runnables."""

    runnable: Union[
        Runnable[dict, Union[AgentAction, AgentFinish]],
        Runnable[dict, Union[List[AgentAction], AgentFinish]],
    ]
    runnable: Runnable[dict, Union[AgentAction, AgentFinish]]
    """Runnable to call to get agent action."""
    _input_keys: List[str] = []
    """Input keys."""
@@ -370,10 +359,7 @@ class RunnableAgent(BaseMultiActionAgent):
        intermediate_steps: List[Tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> Union[
        List[AgentAction],
        AgentFinish,
    ]:
    ) -> Union[AgentAction, AgentFinish]:
        """Given input, decide what to do.

        Args:
@@ -387,8 +373,6 @@ class RunnableAgent(BaseMultiActionAgent):
        """
        inputs = {**kwargs, **{"intermediate_steps": intermediate_steps}}
        output = self.runnable.invoke(inputs, config={"callbacks": callbacks})
        if isinstance(output, AgentAction):
            output = [output]
        return output

    async def aplan(
@@ -396,10 +380,7 @@ class RunnableAgent(BaseMultiActionAgent):
        intermediate_steps: List[Tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> Union[
        List[AgentAction],
        AgentFinish,
    ]:
    ) -> Union[AgentAction, AgentFinish]:
        """Given input, decide what to do.

        Args:
@@ -413,8 +394,6 @@ class RunnableAgent(BaseMultiActionAgent):
        """
        inputs = {**kwargs, **{"intermediate_steps": intermediate_steps}}
        output = await self.runnable.ainvoke(inputs, config={"callbacks": callbacks})
        if isinstance(output, AgentAction):
            output = [output]
        return output
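The multi-action variant above normalizes a lone AgentAction into a single-element list so the executor can iterate uniformly; a standalone sketch of that normalization (values are illustrative only):

from langchain.schema.agent import AgentAction

# A runnable may return one action or a list of actions.
output = AgentAction(tool="search", tool_input="langchain", log="")
if isinstance(output, AgentAction):
    output = [output]
assert isinstance(output, list)  # downstream code can now always iterate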
@@ -8,14 +8,12 @@ differently before passing them into the LLM.
from langchain.agents.format_scratchpad.log import format_log_to_str
from langchain.agents.format_scratchpad.log_to_messages import format_log_to_messages
from langchain.agents.format_scratchpad.openai_functions import (
    format_to_openai_function_messages,
    format_to_openai_functions,
)
from langchain.agents.format_scratchpad.xml import format_xml

__all__ = [
    "format_xml",
    "format_to_openai_function_messages",
    "format_to_openai_functions",
    "format_log_to_str",
    "format_log_to_messages",

@@ -49,17 +49,14 @@ def _create_function_message(
    )


def format_to_openai_function_messages(
def format_to_openai_functions(
    intermediate_steps: Sequence[Tuple[AgentAction, str]],
) -> List[BaseMessage]:
    """Convert (AgentAction, tool output) tuples into FunctionMessages.

    """Format intermediate steps.
    Args:
        intermediate_steps: Steps the LLM has taken to date, along with observations

    Returns:
        list of messages to send to the LLM for the next prediction

    """
    messages = []

@@ -67,7 +64,3 @@ def format_to_openai_function_messages(
        messages.extend(_convert_agent_action_to_messages(agent_action, observation))

    return messages


# Backwards compatibility
format_to_openai_functions = format_to_openai_function_messages
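A minimal usage sketch of the scratchpad formatter under the newer naming (the alias keeps the older name importable); the intermediate step shown is hypothetical:

from langchain.agents.format_scratchpad.openai_functions import (
    format_to_openai_function_messages,  # alias: format_to_openai_functions
)
from langchain.schema.agent import AgentAction

# One hypothetical tool call and its observation.
steps = [(AgentAction(tool="search", tool_input="langchain", log=""), "3 results")]
messages = format_to_openai_function_messages(steps)
# -> a list of chat messages representing the agent scratchpad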
@@ -1,57 +0,0 @@
import json
from typing import List, Sequence, Tuple

from langchain.agents.output_parsers.openai_tools import OpenAIToolAgentAction
from langchain.schema.agent import AgentAction
from langchain.schema.messages import (
    AIMessage,
    BaseMessage,
    ToolMessage,
)


def _create_tool_message(
    agent_action: OpenAIToolAgentAction, observation: str
) -> ToolMessage:
    """Convert agent action and observation into a tool message.

    Args:
        agent_action: the tool invocation request from the agent
        observation: the result of the tool invocation

    Returns:
        ToolMessage that corresponds to the original tool invocation
    """
    if not isinstance(observation, str):
        try:
            content = json.dumps(observation, ensure_ascii=False)
        except Exception:
            content = str(observation)
    else:
        content = observation
    return ToolMessage(
        tool_call_id=agent_action.tool_call_id,
        content=content,
    )


def format_to_openai_tool_messages(
    intermediate_steps: Sequence[Tuple[AgentAction, str]],
) -> List[BaseMessage]:
    """Convert (AgentAction, tool output) tuples into ToolMessages.

    Args:
        intermediate_steps: Steps the LLM has taken to date, along with observations

    Returns:
        list of messages to send to the LLM for the next prediction

    """
    messages = []
    for agent_action, observation in intermediate_steps:
        if isinstance(agent_action, OpenAIToolAgentAction):
            new_messages = list(agent_action.message_log) + [
                _create_tool_message(agent_action, observation)
            ]
            messages.extend([new for new in new_messages if new not in messages])
        else:
            messages.append(AIMessage(content=agent_action.log))
    return messages
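A usage sketch for the formatter above; the module path is assumed from context (the file sits next to format_scratchpad/openai_functions.py) and the action values are hypothetical:

from langchain.agents.format_scratchpad.openai_tools import (  # path assumed from context
    format_to_openai_tool_messages,
)
from langchain.agents.output_parsers.openai_tools import OpenAIToolAgentAction
from langchain.schema.messages import AIMessage

# Hypothetical action carrying the original AI message and its tool_call_id.
ai_msg = AIMessage(content="", additional_kwargs={"tool_calls": []})
action = OpenAIToolAgentAction(
    tool="multiply",
    tool_input={"a": 6, "b": 7},
    log="\nInvoking: `multiply` with `{'a': 6, 'b': 7}`\n\n",
    message_log=[ai_msg],
    tool_call_id="call_abc123",
)
messages = format_to_openai_tool_messages([(action, "42")])
# -> [ai_msg, ToolMessage(tool_call_id="call_abc123", content="42")]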
@@ -58,7 +58,6 @@ from langchain.tools.wolfram_alpha.tool import WolframAlphaQueryRun
from langchain.tools.openweathermap.tool import OpenWeatherMapQueryRun
from langchain.tools.dataforseo_api_search import DataForSeoAPISearchRun
from langchain.tools.dataforseo_api_search import DataForSeoAPISearchResults
from langchain.tools.memorize.tool import Memorize
from langchain.utilities.arxiv import ArxivAPIWrapper
from langchain.utilities.golden_query import GoldenQueryAPIWrapper
from langchain.utilities.pubmed import PubMedAPIWrapper
@@ -328,10 +327,6 @@ def _get_eleven_labs_text2speech(**kwargs: Any) -> BaseTool:
    return ElevenLabsText2SpeechTool(**kwargs)


def _get_memorize(llm: BaseLanguageModel, **kwargs: Any) -> BaseTool:
    return Memorize(llm=llm)


def _get_google_cloud_texttospeech(**kwargs: Any) -> BaseTool:
    return GoogleCloudTextToSpeechTool(**kwargs)

@@ -343,7 +338,6 @@ _EXTRA_LLM_TOOLS: Dict[
    "news-api": (_get_news_api, ["news_api_key"]),
    "tmdb-api": (_get_tmdb_api, ["tmdb_bearer_token"]),
    "podcast-api": (_get_podcast_api, ["listen_api_key"]),
    "memorize": (_get_memorize, []),
}
_EXTRA_OPTIONAL_TOOLS: Dict[str, Tuple[Callable[[KwArg(Any)], BaseTool], List[str]]] = {
    "wolfram-alpha": (_get_wolfram_alpha, ["wolfram_alpha_appid"]),
@@ -2,7 +2,7 @@
from typing import Any, Dict, List

from langchain.agents.format_scratchpad.openai_functions import (
    format_to_openai_function_messages,
    format_to_openai_functions,
)
from langchain.memory.chat_memory import BaseChatMemory
from langchain.schema.language_model import BaseLanguageModel
@@ -52,7 +52,7 @@ class AgentTokenBufferMemory(BaseChatMemory):
        """Save context from this conversation to buffer. Pruned."""
        input_str, output_str = self._get_input_output(inputs, outputs)
        self.chat_memory.add_user_message(input_str)
        steps = format_to_openai_function_messages(outputs[self.intermediate_steps_key])
        steps = format_to_openai_functions(outputs[self.intermediate_steps_key])
        for msg in steps:
            self.chat_memory.add_message(msg)
        self.chat_memory.add_ai_message(output_str)
@@ -3,7 +3,7 @@ from typing import Any, List, Optional, Sequence, Tuple, Union

from langchain.agents import BaseSingleActionAgent
from langchain.agents.format_scratchpad.openai_functions import (
    format_to_openai_function_messages,
    format_to_openai_functions,
)
from langchain.agents.output_parsers.openai_functions import (
    OpenAIFunctionsAgentOutputParser,
@@ -93,7 +93,7 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent):
        Returns:
            Action specifying what tool to use.
        """
        agent_scratchpad = format_to_openai_function_messages(intermediate_steps)
        agent_scratchpad = format_to_openai_functions(intermediate_steps)
        selected_inputs = {
            k: kwargs[k] for k in self.prompt.input_variables if k != "agent_scratchpad"
        }
@@ -132,7 +132,7 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent):
        Returns:
            Action specifying what tool to use.
        """
        agent_scratchpad = format_to_openai_function_messages(intermediate_steps)
        agent_scratchpad = format_to_openai_functions(intermediate_steps)
        selected_inputs = {
            k: kwargs[k] for k in self.prompt.input_variables if k != "agent_scratchpad"
        }
@@ -5,7 +5,7 @@ from typing import Any, List, Optional, Sequence, Tuple, Union

from langchain.agents import BaseMultiActionAgent
from langchain.agents.format_scratchpad.openai_functions import (
    format_to_openai_function_messages,
    format_to_openai_functions,
)
from langchain.callbacks.base import BaseCallbackManager
from langchain.callbacks.manager import Callbacks
@@ -87,9 +87,7 @@ def _parse_ai_message(message: BaseMessage) -> Union[List[AgentAction], AgentFin
        final_tools.append(_tool)
    return final_tools

    return AgentFinish(
        return_values={"output": message.content}, log=str(message.content)
    )
    return AgentFinish(return_values={"output": message.content}, log=message.content)


class OpenAIMultiFunctionsAgent(BaseMultiActionAgent):
@@ -208,7 +206,7 @@ class OpenAIMultiFunctionsAgent(BaseMultiActionAgent):
        Returns:
            Action specifying what tool to use.
        """
        agent_scratchpad = format_to_openai_function_messages(intermediate_steps)
        agent_scratchpad = format_to_openai_functions(intermediate_steps)
        selected_inputs = {
            k: kwargs[k] for k in self.prompt.input_variables if k != "agent_scratchpad"
        }
@@ -237,7 +235,7 @@ class OpenAIMultiFunctionsAgent(BaseMultiActionAgent):
        Returns:
            Action specifying what tool to use.
        """
        agent_scratchpad = format_to_openai_function_messages(intermediate_steps)
        agent_scratchpad = format_to_openai_functions(intermediate_steps)
        selected_inputs = {
            k: kwargs[k] for k in self.prompt.input_variables if k != "agent_scratchpad"
        }
@@ -72,7 +72,7 @@ class OpenAIFunctionsAgentOutputParser(AgentOutputParser):
        )

        return AgentFinish(
            return_values={"output": message.content}, log=str(message.content)
            return_values={"output": message.content}, log=message.content
        )

    def parse_result(
@@ -1,106 +0,0 @@
import asyncio
import json
from json import JSONDecodeError
from typing import List, Union

from langchain.agents.agent import MultiActionAgentOutputParser
from langchain.schema import (
    AgentAction,
    AgentFinish,
    OutputParserException,
)
from langchain.schema.agent import AgentActionMessageLog
from langchain.schema.messages import (
    AIMessage,
    BaseMessage,
)
from langchain.schema.output import ChatGeneration, Generation


class OpenAIToolAgentAction(AgentActionMessageLog):
    tool_call_id: str
    """Tool call that this message is responding to."""


def parse_ai_message_to_openai_tool_action(
    message: BaseMessage
) -> Union[List[AgentAction], AgentFinish]:
    """Parse an AI message potentially containing tool_calls."""
    if not isinstance(message, AIMessage):
        raise TypeError(f"Expected an AI message, got {type(message)}")

    if not message.additional_kwargs.get("tool_calls"):
        return AgentFinish(
            return_values={"output": message.content}, log=str(message.content)
        )

    actions: List = []
    for tool_call in message.additional_kwargs["tool_calls"]:
        function = tool_call["function"]
        function_name = function["name"]
        try:
            _tool_input = json.loads(function["arguments"])
        except JSONDecodeError:
            raise OutputParserException(
                f"Could not parse tool input: {function} because "
                f"the `arguments` is not valid JSON."
            )

        # HACK HACK HACK:
        # The code that encodes tool input into Open AI uses a special variable
        # name called `__arg1` to handle old style tools that do not expose a
        # schema and expect a single string argument as an input.
        # We unpack the argument here if it exists.
        # Open AI does not support passing in a JSON array as an argument.
        if "__arg1" in _tool_input:
            tool_input = _tool_input["__arg1"]
        else:
            tool_input = _tool_input

        content_msg = f"responded: {message.content}\n" if message.content else "\n"
        log = f"\nInvoking: `{function_name}` with `{tool_input}`\n{content_msg}\n"
        actions.append(
            OpenAIToolAgentAction(
                tool=function_name,
                tool_input=tool_input,
                log=log,
                message_log=[message],
                tool_call_id=tool_call["id"],
            )
        )
    return actions


class OpenAIToolsAgentOutputParser(MultiActionAgentOutputParser):
    """Parses a message into agent actions/finish.

    Is meant to be used with OpenAI models, as it relies on the specific
    tool_calls parameter from OpenAI to convey what tools to use.

    If a tool_calls parameter is passed, then that is used to get
    the tool names and tool inputs.

    If one is not passed, then the AIMessage is assumed to be the final output.
    """

    @property
    def _type(self) -> str:
        return "openai-tools-agent-output-parser"

    def parse_result(
        self, result: List[Generation], *, partial: bool = False
    ) -> Union[List[AgentAction], AgentFinish]:
        if not isinstance(result[0], ChatGeneration):
            raise ValueError("This output parser only works on ChatGeneration output")
        message = result[0].message
        return parse_ai_message_to_openai_tool_action(message)

    async def aparse_result(
        self, result: List[Generation], *, partial: bool = False
    ) -> Union[List[AgentAction], AgentFinish]:
        return await asyncio.get_running_loop().run_in_executor(
            None, self.parse_result, result
        )

    def parse(self, text: str) -> Union[List[AgentAction], AgentFinish]:
        raise ValueError("Can only parse messages")
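A usage sketch for the parser above (assumes the side of the compare on which the module exists; the tool call payload is illustrative):

from langchain.agents.output_parsers.openai_tools import OpenAIToolsAgentOutputParser
from langchain.schema.messages import AIMessage
from langchain.schema.output import ChatGeneration

message = AIMessage(
    content="",
    additional_kwargs={
        "tool_calls": [
            {
                "id": "call_abc123",
                "type": "function",
                "function": {"name": "multiply", "arguments": '{"a": 6, "b": 7}'},
            }
        ]
    },
)
parser = OpenAIToolsAgentOutputParser()
actions = parser.parse_result([ChatGeneration(message=message)])
print(actions[0].tool, actions[0].tool_input)  # multiply {'a': 6, 'b': 7}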
@@ -18,7 +18,7 @@ logger = logging.getLogger(__name__)
class StructuredChatOutputParser(AgentOutputParser):
    """Output parser for the structured chat agent."""

    pattern = re.compile(r"```(?:json\s+)?(\W.*?)```", re.DOTALL)
    pattern = re.compile(r"```(?:json)?\n(.*?)```", re.DOTALL)

    def get_format_instructions(self) -> str:
        return FORMAT_INSTRUCTIONS
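The two patterns differ in what may follow the opening fence; a quick demonstration (pattern_a is the first-listed, pattern_b the second, and the sample text is made up):

import re

pattern_a = re.compile(r"```(?:json\s+)?(\W.*?)```", re.DOTALL)  # any whitespace after "json"
pattern_b = re.compile(r"```(?:json)?\n(.*?)```", re.DOTALL)     # requires a newline after the fence

text = '```json {"action": "Final Answer", "action_input": "42"}```'
print(pattern_a.search(text).group(1))  # {"action": "Final Answer", "action_input": "42"}
print(pattern_b.search(text))           # None -- no newline right after the fence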
@@ -1,5 +1,5 @@
import time
from typing import Any, Dict, List, Optional, cast
from typing import Any, Dict, List, Optional

from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import AgentAction, AgentFinish, LLMResult
@@ -232,9 +232,7 @@ class InfinoCallbackHandler(BaseCallbackHandler):
        self.chat_openai_model_name = model_name
        prompt_tokens = 0
        for message_list in messages:
            message_string = " ".join(
                cast(str, msg.content) for msg in message_list
            )
            message_string = " ".join(msg.content for msg in message_list)
            num_tokens = get_num_tokens(
                message_string,
                openai_model_name=self.chat_openai_model_name,
@@ -251,9 +249,7 @@ class InfinoCallbackHandler(BaseCallbackHandler):
        )

        # Send the prompt to infino
        prompt = " ".join(
            cast(str, msg.content) for sublist in messages for msg in sublist
        )
        prompt = " ".join(msg.content for sublist in messages for msg in sublist)
        self._send_to_infino("prompt", prompt, is_ts=False)

        # Set the error flag to indicate no error (this will get overridden
@@ -25,7 +25,6 @@ from typing import (
)
from uuid import UUID

from langsmith import utils as ls_utils
from langsmith.run_helpers import get_run_tree_context
from tenacity import RetryCallState

@@ -125,11 +124,9 @@ def tracing_enabled(
    """
    cb = LangChainTracerV1()
    session = cast(TracerSessionV1, cb.load_session(session_name))
    try:
        tracing_callback_var.set(cb)
        yield session
    finally:
        tracing_callback_var.set(None)
    tracing_callback_var.set(cb)
    yield session
    tracing_callback_var.set(None)


@contextmanager
@@ -194,11 +191,9 @@ def tracing_v2_enabled(
        tags=tags,
        client=client,
    )
    try:
        tracing_v2_callback_var.set(cb)
        yield cb
    finally:
        tracing_v2_callback_var.set(None)
    tracing_v2_callback_var.set(cb)
    yield cb
    tracing_v2_callback_var.set(None)


@contextmanager
@@ -219,33 +214,6 @@ def collect_runs() -> Generator[run_collector.RunCollectorCallbackHandler, None,
    run_collector_var.set(None)


def _get_trace_callbacks(
    project_name: Optional[str] = None,
    example_id: Optional[Union[str, UUID]] = None,
    callback_manager: Optional[Union[CallbackManager, AsyncCallbackManager]] = None,
) -> Callbacks:
    if _tracing_v2_is_enabled():
        project_name_ = project_name or _get_tracer_project()
        tracer = tracing_v2_callback_var.get() or LangChainTracer(
            project_name=project_name_,
            example_id=example_id,
        )
        if callback_manager is None:
            cb = cast(Callbacks, [tracer])
        else:
            if not any(
                isinstance(handler, LangChainTracer)
                for handler in callback_manager.handlers
            ):
                callback_manager.add_handler(tracer, True)
            # If it already has a LangChainTracer, we don't need to add another one.
            # this would likely mess up the trace hierarchy.
            cb = callback_manager
    else:
        cb = None
    return cb


@contextmanager
def trace_as_chain_group(
    group_name: str,
@@ -273,8 +241,6 @@ def trace_as_chain_group(
    tags (List[str], optional): The inheritable tags to apply to all runs.
        Defaults to None.

    Note: must have LANGCHAIN_TRACING_V2 env var set to true to see the trace in LangSmith.

    Returns:
        CallbackManagerForChainGroup: The callback manager for the chain group.

@@ -287,8 +253,16 @@ def trace_as_chain_group(
            res = llm.predict(llm_input, callbacks=manager)
            manager.on_chain_end({"output": res})
    """  # noqa: E501
    cb = _get_trace_callbacks(
        project_name, example_id, callback_manager=callback_manager
    cb = cast(
        Callbacks,
        [
            LangChainTracer(
                project_name=project_name,
                example_id=example_id,
            )
        ]
        if callback_manager is None
        else callback_manager,
    )
    cm = CallbackManager.configure(
        inheritable_callbacks=cb,
@@ -347,8 +321,6 @@ async def atrace_as_chain_group(
    Returns:
        AsyncCallbackManager: The async callback manager for the chain group.

    Note: must have LANGCHAIN_TRACING_V2 env var set to true to see the trace in LangSmith.

    Example:
        .. code-block:: python

@@ -358,8 +330,16 @@ async def atrace_as_chain_group(
            res = await llm.apredict(llm_input, callbacks=manager)
            await manager.on_chain_end({"output": res})
    """  # noqa: E501
    cb = _get_trace_callbacks(
        project_name, example_id, callback_manager=callback_manager
    cb = cast(
        Callbacks,
        [
            LangChainTracer(
                project_name=project_name,
                example_id=example_id,
            )
        ]
        if callback_manager is None
        else callback_manager,
    )
    cm = AsyncCallbackManager.configure(inheritable_callbacks=cb, inheritable_tags=tags)

@@ -1915,34 +1895,6 @@ def env_var_is_set(env_var: str) -> bool:
    )


def _tracing_v2_is_enabled() -> bool:
    return (
        env_var_is_set("LANGCHAIN_TRACING_V2")
        or tracing_v2_callback_var.get() is not None
        or get_run_tree_context() is not None
    )


def _get_tracer_project() -> str:
    run_tree = get_run_tree_context()
    return getattr(
        run_tree,
        "session_name",
        getattr(
            # Note, if people are trying to nest @traceable functions and the
            # tracing_v2_enabled context manager, this will likely mess up the
            # tree structure.
            tracing_v2_callback_var.get(),
            "project",
            # Have to set this to a string even though it always will return
            # a string because `get_tracer_project` technically can return
            # None, but only when a specific argument is supplied.
            # Therefore, this just tricks the mypy type checker
            str(ls_utils.get_tracer_project()),
        ),
    )


def _configure(
    callback_manager_cls: Type[T],
    inheritable_callbacks: Callbacks = None,
@@ -2021,8 +1973,18 @@ def _configure(
    )

    tracer_v2 = tracing_v2_callback_var.get()
    tracing_v2_enabled_ = _tracing_v2_is_enabled()
    tracer_project = _get_tracer_project()
    tracing_v2_enabled_ = (
        env_var_is_set("LANGCHAIN_TRACING_V2")
        or tracer_v2 is not None
        or run_tree is not None
    )
    tracer_project = getattr(
        run_tree,
        "session_name",
        os.environ.get(
            "LANGCHAIN_PROJECT", os.environ.get("LANGCHAIN_SESSION", "default")
        ),
    )
    run_collector_ = run_collector_var.get()
    debug = _get_debug()
    if (
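For context, the tracing context managers touched above are used roughly like this (a LangSmith API key via LANGCHAIN_API_KEY is assumed for anything to actually be recorded):

from langchain.callbacks.manager import tracing_v2_enabled

with tracing_v2_enabled(project_name="my-project") as cb:
    ...  # run chains or LLMs here; their runs are traced to "my-project"
# The try/finally variant in the diff guarantees the tracing context var is
# reset even if the body raises; the plain form can leak the tracer on error.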
@@ -9,7 +9,7 @@ from typing import Any, Dict, List, Optional, Sequence, Tuple, Union, cast
from uuid import UUID

import langsmith
from langsmith.evaluation.evaluator import EvaluationResult, EvaluationResults
from langsmith.evaluation.evaluator import EvaluationResult

from langchain.callbacks import manager
from langchain.callbacks.tracers import langchain as langchain_tracer
@@ -153,7 +153,7 @@ class EvaluatorCallbackHandler(BaseTracer):

    def _select_eval_results(
        self,
        results: Union[EvaluationResult, EvaluationResults],
        results: Union[EvaluationResult, dict],
    ) -> List[EvaluationResult]:
        if isinstance(results, EvaluationResult):
            results_ = [results]
@@ -168,7 +168,7 @@ class EvaluatorCallbackHandler(BaseTracer):

    def _log_evaluation_feedback(
        self,
        evaluator_response: Union[EvaluationResult, EvaluationResults],
        evaluator_response: Union[EvaluationResult, dict],
        run: Run,
        source_run_id: Optional[UUID] = None,
    ) -> List[EvaluationResult]:
@@ -2,6 +2,7 @@
from __future__ import annotations

import logging
import os
import weakref
from concurrent.futures import Future, ThreadPoolExecutor, wait
from datetime import datetime
@@ -9,7 +10,7 @@ from typing import Any, Callable, Dict, List, Optional, Union
from uuid import UUID

from langsmith import Client
from langsmith import utils as ls_utils
from langsmith.utils import LangSmithError
from tenacity import (
    Retrying,
    retry_if_exception_type,
@@ -90,7 +91,9 @@ class LangChainTracer(BaseTracer):
        self.example_id = (
            UUID(example_id) if isinstance(example_id, str) else example_id
        )
        self.project_name = project_name or ls_utils.get_tracer_project()
        self.project_name = project_name or os.getenv(
            "LANGCHAIN_PROJECT", os.getenv("LANGCHAIN_SESSION", "default")
        )
        self.client = client or get_client()
        self._futures: weakref.WeakSet[Future] = weakref.WeakSet()
        self.tags = tags or []
@@ -149,7 +152,7 @@ class LangChainTracer(BaseTracer):
        for attempt in Retrying(
            stop=stop_after_attempt(5),
            wait=wait_exponential_jitter(),
            retry=retry_if_exception_type(ls_utils.LangSmithError),
            retry=retry_if_exception_type(LangSmithError),
        ):
            with attempt:
                return self.client.get_run_url(
@@ -1,3 +0,0 @@
from langchain.chains.openai_tools.extraction import create_extraction_chain_pydantic

__all__ = ["create_extraction_chain_pydantic"]
@@ -1,30 +0,0 @@
from typing import List, Type, Union

from langchain.output_parsers import PydanticToolsParser
from langchain.prompts import ChatPromptTemplate
from langchain.pydantic_v1 import BaseModel
from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.runnable import Runnable
from langchain.utils.openai_functions import convert_pydantic_to_openai_function

_EXTRACTION_TEMPLATE = """Extract and save the relevant entities mentioned \
in the following passage together with their properties.

If a property is not present and is not required in the function parameters, do not include it in the output."""  # noqa: E501


def create_extraction_chain_pydantic(
    pydantic_schemas: Union[List[Type[BaseModel]], Type[BaseModel]],
    llm: BaseLanguageModel,
    system_message: str = _EXTRACTION_TEMPLATE,
) -> Runnable:
    if not isinstance(pydantic_schemas, list):
        pydantic_schemas = [pydantic_schemas]
    prompt = ChatPromptTemplate.from_messages(
        [("system", system_message), ("user", "{input}")]
    )
    functions = [convert_pydantic_to_openai_function(p) for p in pydantic_schemas]
    tools = [{"type": "function", "function": d} for d in functions]
    model = llm.bind(tools=tools)
    chain = prompt | model | PydanticToolsParser(tools=pydantic_schemas)
    return chain
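A usage sketch for the extraction chain above (the schema, model name, sample text, and expected output are all illustrative; a tools-capable OpenAI model is assumed):

from langchain.chains.openai_tools import create_extraction_chain_pydantic
from langchain.chat_models import ChatOpenAI
from langchain.pydantic_v1 import BaseModel

class Company(BaseModel):
    name: str
    founded: int

llm = ChatOpenAI(model="gpt-3.5-turbo-1106")  # supports parallel tool calls
chain = create_extraction_chain_pydantic(Company, llm)
chain.invoke({"input": "LangChain was started in 2022; SpaceX was founded in 2002."})
# Illustrative output:
# [Company(name='LangChain', founded=2022), Company(name='SpaceX', founded=2002)]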
@@ -21,11 +21,6 @@ def merge_chat_runs_in_session(
    """
    messages: List[BaseMessage] = []
    for message in chat_session["messages"]:
        if not isinstance(message.content, str):
            raise ValueError(
                "Chat Loaders only support messages with content type string, "
                f"got {message.content}"
            )
        if not messages:
            messages.append(deepcopy(message))
        elif (
@@ -34,11 +29,6 @@ def merge_chat_runs_in_session(
            and messages[-1].additional_kwargs["sender"]
            == message.additional_kwargs.get("sender")
        ):
            if not isinstance(messages[-1].content, str):
                raise ValueError(
                    "Chat Loaders only support messages with content type string, "
                    f"got {messages[-1].content}"
                )
            messages[-1].content = (
                messages[-1].content + delimiter + message.content
            ).strip()
@@ -1,4 +1,4 @@
from typing import Any, AsyncIterator, Dict, Iterator, List, Optional, cast
from typing import Any, AsyncIterator, Dict, Iterator, List, Optional

from langchain.callbacks.manager import (
    AsyncCallbackManagerForLLMRun,
@@ -27,15 +27,14 @@ def _convert_one_message_to_text(
    human_prompt: str,
    ai_prompt: str,
) -> str:
    content = cast(str, message.content)
    if isinstance(message, ChatMessage):
        message_text = f"\n\n{message.role.capitalize()}: {content}"
        message_text = f"\n\n{message.role.capitalize()}: {message.content}"
    elif isinstance(message, HumanMessage):
        message_text = f"{human_prompt} {content}"
        message_text = f"{human_prompt} {message.content}"
    elif isinstance(message, AIMessage):
        message_text = f"{ai_prompt} {content}"
        message_text = f"{ai_prompt} {message.content}"
    elif isinstance(message, SystemMessage):
        message_text = content
        message_text = message.content
    else:
        raise ValueError(f"Got unknown type {message}")
    return message_text
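Illustrative only: a self-contained sketch mirroring what _convert_one_message_to_text does; the prompt constants are assumptions matching the Anthropic SDK convention, not values taken from this diff:

from langchain.schema.messages import AIMessage, HumanMessage

HUMAN_PROMPT = "\n\nHuman:"       # assumed value
AI_PROMPT = "\n\nAssistant:"      # assumed value

def convert(message):
    # Format one chat message into Anthropic's text-completion style.
    if isinstance(message, HumanMessage):
        return f"{HUMAN_PROMPT} {message.content}"
    if isinstance(message, AIMessage):
        return f"{AI_PROMPT} {message.content}"
    raise ValueError(f"Got unknown type {message}")

print(convert(HumanMessage(content="What is 2 + 2?")))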
@@ -30,8 +30,6 @@ DEFAULT_MODEL = "meta-llama/Llama-2-7b-chat-hf"
class ChatAnyscale(ChatOpenAI):
    """`Anyscale` Chat large language models.

    See https://www.anyscale.com/ for information about Anyscale.

    To use, you should have the ``openai`` python package installed, and the
    environment variable ``ANYSCALE_API_KEY`` set with your API key.
    Alternatively, you can use the anyscale_api_key keyword argument.
@@ -55,7 +53,7 @@ class ChatAnyscale(ChatOpenAI):
    def lc_secrets(self) -> Dict[str, str]:
        return {"anyscale_api_key": "ANYSCALE_API_KEY"}

    anyscale_api_key: SecretStr
    anyscale_api_key: Optional[SecretStr] = None
    """AnyScale Endpoints API keys."""
    model_name: str = Field(default=DEFAULT_MODEL, alias="model")
    """Model name to use."""
@@ -100,12 +98,7 @@ class ChatAnyscale(ChatOpenAI):
    @root_validator(pre=True)
    def validate_environment_override(cls, values: dict) -> dict:
        """Validate that api key and python package exists in environment."""
        values["openai_api_key"] = get_from_dict_or_env(
            values,
            "anyscale_api_key",
            "ANYSCALE_API_KEY",
        )
        values["anyscale_api_key"] = convert_to_secret_str(
        values["openai_api_key"] = convert_to_secret_str(
            get_from_dict_or_env(
                values,
                "anyscale_api_key",
@@ -2,15 +2,12 @@
from __future__ import annotations

import logging
import os
import warnings
from typing import Any, Dict, Union
from typing import Any, Dict, Mapping

from langchain.chat_models.openai import ChatOpenAI
from langchain.pydantic_v1 import BaseModel, Field, root_validator
from langchain.pydantic_v1 import root_validator
from langchain.schema import ChatResult
from langchain.utils import get_from_dict_or_env
from langchain.utils.openai import is_openai_v1

logger = logging.getLogger(__name__)

@@ -24,9 +21,9 @@ class AzureChatOpenAI(ChatOpenAI):

    In addition, you should have the ``openai`` python package installed, and the
    following environment variables set or passed in constructor in lower case:
    - ``AZURE_OPENAI_API_KEY``
    - ``AZURE_OPENAI_API_ENDPOINT``
    - ``AZURE_OPENAI_AD_TOKEN``
    - ``OPENAI_API_TYPE`` (default: ``azure``)
    - ``OPENAI_API_KEY``
    - ``OPENAI_API_BASE``
    - ``OPENAI_API_VERSION``
    - ``OPENAI_PROXY``

@@ -36,7 +33,7 @@ class AzureChatOpenAI(ChatOpenAI):
    .. code-block:: python

        AzureChatOpenAI(
            azure_deployment="35-turbo-dev",
            deployment_name="35-turbo-dev",
            openai_api_version="2023-05-15",
        )

@@ -54,84 +51,48 @@ class AzureChatOpenAI(ChatOpenAI):
    in, even if not explicitly saved on this class.
    """

    azure_endpoint: Union[str, None] = None
    """Your Azure endpoint, including the resource.

    Automatically inferred from env var `AZURE_OPENAI_ENDPOINT` if not provided.

    Example: `https://example-resource.azure.openai.com/`
    """
    deployment_name: Union[str, None] = Field(default=None, alias="azure_deployment")
    """A model deployment.

    If given sets the base client URL to include `/deployments/{azure_deployment}`.
    Note: this means you won't be able to use non-deployment endpoints.
    """
    openai_api_version: str = Field(default="", alias="api_version")
    """Automatically inferred from env var `OPENAI_API_VERSION` if not provided."""
    openai_api_key: Union[str, None] = Field(default=None, alias="api_key")
    """Automatically inferred from env var `AZURE_OPENAI_API_KEY` if not provided."""
    azure_ad_token: Union[str, None] = None
    """Your Azure Active Directory token.

    Automatically inferred from env var `AZURE_OPENAI_AD_TOKEN` if not provided.

    For more:
    https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id.
    """  # noqa: E501
    azure_ad_token_provider: Union[str, None] = None
    """A function that returns an Azure Active Directory token.

    Will be invoked on every request.
    """
    deployment_name: str = ""
    model_version: str = ""
    """Legacy, for openai<1.0.0 support."""
    openai_api_type: str = ""
    """Legacy, for openai<1.0.0 support."""
    validate_base_url: bool = True
    openai_api_base: str = ""
    openai_api_version: str = ""
    openai_api_key: str = ""
    openai_organization: str = ""
    openai_proxy: str = ""

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        if values["n"] < 1:
            raise ValueError("n must be at least 1.")
        if values["n"] > 1 and values["streaming"]:
            raise ValueError("n must be 1 when streaming.")

        # Check OPENAI_KEY for backwards compatibility.
        # TODO: Remove OPENAI_API_KEY support to avoid possible conflict when using
        # other forms of azure credentials.
        values["openai_api_key"] = (
            values["openai_api_key"]
            or os.getenv("AZURE_OPENAI_API_KEY")
            or os.getenv("OPENAI_API_KEY")
        values["openai_api_key"] = get_from_dict_or_env(
            values,
            "openai_api_key",
            "OPENAI_API_KEY",
        )
        values["openai_api_base"] = values["openai_api_base"] or os.getenv(
            "OPENAI_API_BASE"
        values["openai_api_base"] = get_from_dict_or_env(
            values,
            "openai_api_base",
            "OPENAI_API_BASE",
        )
        values["openai_api_version"] = values["openai_api_version"] or os.getenv(
            "OPENAI_API_VERSION"
        values["openai_api_version"] = get_from_dict_or_env(
            values,
            "openai_api_version",
            "OPENAI_API_VERSION",
        )
        # Check OPENAI_ORGANIZATION for backwards compatibility.
        values["openai_organization"] = (
            values["openai_organization"]
            or os.getenv("OPENAI_ORG_ID")
            or os.getenv("OPENAI_ORGANIZATION")
        )
        values["azure_endpoint"] = values["azure_endpoint"] or os.getenv(
            "AZURE_OPENAI_ENDPOINT"
        )
        values["azure_ad_token"] = values["azure_ad_token"] or os.getenv(
            "AZURE_OPENAI_AD_TOKEN"
        )

        values["openai_api_type"] = get_from_dict_or_env(
            values, "openai_api_type", "OPENAI_API_TYPE", default="azure"
        )
        values["openai_proxy"] = get_from_dict_or_env(
            values, "openai_proxy", "OPENAI_PROXY", default=""
        values["openai_organization"] = get_from_dict_or_env(
            values,
            "openai_organization",
            "OPENAI_ORGANIZATION",
            default="",
        )
        values["openai_proxy"] = get_from_dict_or_env(
            values,
            "openai_proxy",
            "OPENAI_PROXY",
            default="",
        )

        try:
            import openai

@@ -140,75 +101,27 @@ class AzureChatOpenAI(ChatOpenAI):
                "Could not import openai python package. "
                "Please install it with `pip install openai`."
            )
        if is_openai_v1():
            # For backwards compatibility. Before openai v1, no distinction was made
            # between azure_endpoint and base_url (openai_api_base).
            openai_api_base = values["openai_api_base"]
            if openai_api_base and values["validate_base_url"]:
                if "/openai" not in openai_api_base:
                    values["openai_api_base"] = (
                        values["openai_api_base"].rstrip("/") + "/openai"
                    )
                    warnings.warn(
                        "As of openai>=1.0.0, Azure endpoints should be specified via "
                        f"the `azure_endpoint` param not `openai_api_base` "
                        f"(or alias `base_url`). Updating `openai_api_base` from "
                        f"{openai_api_base} to {values['openai_api_base']}."
                    )
                if values["deployment_name"]:
                    warnings.warn(
                        "As of openai>=1.0.0, if `deployment_name` (or alias "
                        "`azure_deployment`) is specified then "
                        "`openai_api_base` (or alias `base_url`) should not be. "
                        "Instead use `deployment_name` (or alias `azure_deployment`) "
                        "and `azure_endpoint`."
                    )
                    if values["deployment_name"] not in values["openai_api_base"]:
                        warnings.warn(
                            "As of openai>=1.0.0, if `openai_api_base` "
                            "(or alias `base_url`) is specified it is expected to be "
                            "of the form "
                            "https://example-resource.azure.openai.com/openai/deployments/example-deployment. "  # noqa: E501
                            f"Updating {openai_api_base} to "
                            f"{values['openai_api_base']}."
                        )
                        values["openai_api_base"] += (
                            "/deployments/" + values["deployment_name"]
                        )
                        values["deployment_name"] = None
            client_params = {
                "api_version": values["openai_api_version"],
                "azure_endpoint": values["azure_endpoint"],
                "azure_deployment": values["deployment_name"],
                "api_key": values["openai_api_key"],
                "azure_ad_token": values["azure_ad_token"],
                "azure_ad_token_provider": values["azure_ad_token_provider"],
                "organization": values["openai_organization"],
                "base_url": values["openai_api_base"],
                "timeout": values["request_timeout"],
                "max_retries": values["max_retries"],
                "default_headers": values["default_headers"],
                "default_query": values["default_query"],
                "http_client": values["http_client"],
            }
            values["client"] = openai.AzureOpenAI(**client_params).chat.completions
            values["async_client"] = openai.AsyncAzureOpenAI(
                **client_params
            ).chat.completions
        else:
            try:
                values["client"] = openai.ChatCompletion
            except AttributeError:
                raise ValueError(
                    "`openai` has no `ChatCompletion` attribute, this is likely "
                    "due to an old version of the openai package. Try upgrading it "
                    "with `pip install --upgrade openai`."
                )
        if values["n"] < 1:
            raise ValueError("n must be at least 1.")
        if values["n"] > 1 and values["streaming"]:
            raise ValueError("n must be 1 when streaming.")
        return values

    @property
    def _default_params(self) -> Dict[str, Any]:
        """Get the default parameters for calling OpenAI API."""
        if is_openai_v1():
            return super()._default_params
        else:
            return {
                **super()._default_params,
                "engine": self.deployment_name,
            }
        return {
            **super()._default_params,
            "engine": self.deployment_name,
        }

    @property
    def _identifying_params(self) -> Dict[str, Any]:
@@ -218,14 +131,11 @@ class AzureChatOpenAI(ChatOpenAI):
    @property
    def _client_params(self) -> Dict[str, Any]:
        """Get the config params used for the openai client."""
        if is_openai_v1():
            return super()._client_params
        else:
            return {
                **super()._client_params,
                "api_type": self.openai_api_type,
                "api_version": self.openai_api_version,
            }
        return {
            **super()._client_params,
            "api_type": self.openai_api_type,
            "api_version": self.openai_api_version,
        }

    @property
    def _llm_type(self) -> str:
@@ -238,9 +148,7 @@ class AzureChatOpenAI(ChatOpenAI):
            "openai_api_version": self.openai_api_version,
        }

    def _create_chat_result(self, response: Union[dict, BaseModel]) -> ChatResult:
        if not isinstance(response, dict):
            response = response.dict()
    def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
        for res in response["choices"]:
            if res.get("finish_reason", None) == "content_filter":
                raise ValueError(
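A minimal instantiation sketch using the openai>=1 style parameters introduced above; the endpoint and deployment names are placeholders:

from langchain.chat_models import AzureChatOpenAI

llm = AzureChatOpenAI(
    azure_endpoint="https://example-resource.azure.openai.com/",  # placeholder
    azure_deployment="35-turbo-dev",                              # alias of deployment_name
    openai_api_version="2023-05-15",
)
# The API key is picked up from AZURE_OPENAI_API_KEY (or OPENAI_API_KEY) if not passed.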
@@ -1,5 +1,5 @@
import json
from typing import Any, Dict, List, Optional, cast
from typing import Any, Dict, List, Optional

from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.chat_models.base import SimpleChatModel
@@ -23,21 +23,26 @@ class LlamaContentFormatter(ContentFormatterBase):
    @staticmethod
    def _convert_message_to_dict(message: BaseMessage) -> Dict:
        """Converts message to a dict according to role"""
        content = cast(str, message.content)
        if isinstance(message, HumanMessage):
            return {
                "role": "user",
                "content": ContentFormatterBase.escape_special_characters(content),
                "content": ContentFormatterBase.escape_special_characters(
                    message.content
                ),
            }
        elif isinstance(message, AIMessage):
            return {
                "role": "assistant",
                "content": ContentFormatterBase.escape_special_characters(content),
                "content": ContentFormatterBase.escape_special_characters(
                    message.content
                ),
            }
        elif isinstance(message, SystemMessage):
            return {
                "role": "system",
                "content": ContentFormatterBase.escape_special_characters(content),
                "content": ContentFormatterBase.escape_special_characters(
                    message.content
                ),
            }
        elif (
            isinstance(message, ChatMessage)
@@ -45,7 +50,9 @@ class LlamaContentFormatter(ContentFormatterBase):
        ):
            return {
                "role": message.role,
                "content": ContentFormatterBase.escape_special_characters(content),
                "content": ContentFormatterBase.escape_special_characters(
                    message.content
                ),
            }
        else:
            supported = ",".join(
@@ -1,7 +1,15 @@
from __future__ import annotations

import logging
from typing import Any, AsyncIterator, Dict, Iterator, List, Mapping, Optional, cast
from typing import (
    Any,
    AsyncIterator,
    Dict,
    Iterator,
    List,
    Mapping,
    Optional,
)

from langchain.callbacks.manager import (
    AsyncCallbackManagerForLLMRun,
@@ -203,7 +211,7 @@ class QianfanChatEndpoint(BaseChatModel):
    for i in [i for i, m in enumerate(messages) if isinstance(m, SystemMessage)]:
        if "system" not in messages_dict:
            messages_dict["system"] = ""
        messages_dict["system"] += cast(str, messages[i].content) + "\n"
        messages_dict["system"] += messages[i].content + "\n"

    return {
        **messages_dict,
@@ -634,10 +634,7 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         else:
             _stop = list(stop)
         result = self([HumanMessage(content=text)], stop=_stop, **kwargs)
-        if isinstance(result.content, str):
-            return result.content
-        else:
-            raise ValueError("Cannot use predict when output is not a string.")
+        return result.content

     def predict_messages(
         self,
@@ -662,10 +659,7 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         result = await self._call_async(
             [HumanMessage(content=text)], stop=_stop, **kwargs
         )
-        if isinstance(result.content, str):
-            return result.content
-        else:
-            raise ValueError("Cannot use predict when output is not a string.")
+        return result.content

     async def apredict_messages(
         self,

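The `isinstance(result.content, str)` guard being removed from `predict`/`apredict` exists for the same reason: once `content` may be a list of content blocks, a method typed to return `str` has to fail loudly instead of silently returning a list. A minimal sketch of the guard:

```python
from langchain.schema.messages import BaseMessage


def content_as_str(result: BaseMessage) -> str:
    # predict() is typed to return str; list-of-blocks content (e.g. from
    # multimodal responses) cannot be passed through unnoticed
    if isinstance(result.content, str):
        return result.content
    raise ValueError("Cannot use predict when output is not a string.")
```
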
@@ -166,16 +166,6 @@ class ChatCohere(BaseChatModel, BaseCohere):
             if run_manager:
                 await run_manager.on_llm_new_token(delta)

-    def _get_generation_info(self, response: Any) -> Dict[str, Any]:
-        """Get the generation info from cohere API response."""
-        return {
-            "documents": response.documents,
-            "citations": response.citations,
-            "search_results": response.search_results,
-            "search_queries": response.search_queries,
-            "token_count": response.token_count,
-        }
-
     def _generate(
         self,
         messages: List[BaseMessage],
@@ -195,7 +185,7 @@ class ChatCohere(BaseChatModel, BaseCohere):
         message = AIMessage(content=response.text)
         generation_info = None
         if hasattr(response, "documents"):
-            generation_info = self._get_generation_info(response)
+            generation_info = {"documents": response.documents}
         return ChatResult(
             generations=[
                 ChatGeneration(message=message, generation_info=generation_info)
@@ -221,7 +211,7 @@ class ChatCohere(BaseChatModel, BaseCohere):
         message = AIMessage(content=response.text)
         generation_info = None
         if hasattr(response, "documents"):
-            generation_info = self._get_generation_info(response)
+            generation_info = {"documents": response.documents}
         return ChatResult(
             generations=[
                 ChatGeneration(message=message, generation_info=generation_info)

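The deleted `_get_generation_info` helper collects the retrieval metadata that only RAG-style Cohere responses carry, which is why both call sites keep the `hasattr(response, "documents")` guard. A sketch of the same logic written against an untyped response object, field list mirroring the deleted helper:

```python
from typing import Any, Dict, Optional


def generation_info(response: Any) -> Optional[Dict[str, Any]]:
    # only RAG-style responses expose these attributes, hence the guard
    if not hasattr(response, "documents"):
        return None
    return {
        "documents": response.documents,
        "citations": response.citations,
        "search_results": response.search_results,
        "search_queries": response.search_queries,
        "token_count": response.token_count,
    }
```
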
@@ -2,7 +2,7 @@
 from __future__ import annotations

 import logging
-from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, cast
+from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional

 from tenacity import (
     before_sleep_log,
@@ -114,7 +114,7 @@ def _messages_to_prompt_dict(
         if isinstance(input_message, SystemMessage):
             if index != 0:
                 raise ChatGooglePalmError("System message must be first input message.")
-            context = cast(str, input_message.content)
+            context = input_message.content
         elif isinstance(input_message, HumanMessage) and input_message.example:
             if messages:
                 raise ChatGooglePalmError(

@@ -58,10 +58,7 @@ def _collect_yaml_input(
         if message is None:
             return HumanMessage(content="")
         if stop:
-            if isinstance(message.content, str):
-                message.content = enforce_stop_tokens(message.content, stop)
-            else:
-                raise ValueError("Cannot use when output is not a string.")
+            message.content = enforce_stop_tokens(message.content, stop)
         return message
     except yaml.YAMLError:
         raise ValueError("Invalid YAML string entered.")

@@ -21,8 +21,8 @@ from langchain.adapters.openai import convert_dict_to_message, convert_message_t
 from langchain.callbacks.manager import (
     CallbackManagerForLLMRun,
 )
-from langchain.chat_models.base import BaseChatModel, _generate_from_stream
-from langchain.chat_models.openai import _convert_delta_to_message_chunk
+from langchain.chat_models.base import _generate_from_stream
+from langchain.chat_models.openai import ChatOpenAI, _convert_delta_to_message_chunk
 from langchain.pydantic_v1 import Field, root_validator
 from langchain.schema import ChatGeneration, ChatResult
 from langchain.schema.messages import AIMessageChunk, BaseMessage
@@ -35,7 +35,7 @@ DEFAULT_MODEL = "meta-llama/Llama-2-13b-chat-hf"
 logger = logging.getLogger(__name__)


-class ChatKonko(BaseChatModel):
+class ChatKonko(ChatOpenAI):
     """`ChatKonko` Chat large language models API.

     To use, you should have the ``konko`` python package installed, and the

@@ -1,6 +1,6 @@
 """Wrapper around Minimax chat models."""
 import logging
-from typing import Any, Dict, List, Optional, cast
+from typing import Any, Dict, List, Optional

 from langchain.callbacks.manager import (
     AsyncCallbackManagerForLLMRun,
@@ -27,11 +27,10 @@ def _parse_chat_history(history: List[BaseMessage]) -> List:
     """Parse a sequence of messages into history."""
     chat_history = []
     for message in history:
-        content = cast(str, message.content)
         if isinstance(message, HumanMessage):
-            chat_history.append(_parse_message("USER", content))
+            chat_history.append(_parse_message("USER", message.content))
         if isinstance(message, AIMessage):
-            chat_history.append(_parse_message("BOT", content))
+            chat_history.append(_parse_message("BOT", message.content))
     return chat_history

@@ -2,7 +2,6 @@
 from __future__ import annotations

 import logging
-import os
 import sys
 from typing import (
     TYPE_CHECKING,
@@ -42,18 +41,12 @@ from langchain.schema.messages import (
     FunctionMessageChunk,
     HumanMessageChunk,
     SystemMessageChunk,
-    ToolMessageChunk,
 )
 from langchain.schema.output import ChatGenerationChunk
 from langchain.schema.runnable import Runnable
-from langchain.utils import (
-    get_from_dict_or_env,
-    get_pydantic_field_names,
-)
-from langchain.utils.openai import is_openai_v1
+from langchain.utils import get_from_dict_or_env, get_pydantic_field_names

 if TYPE_CHECKING:
-    import httpx
     import tiktoken

@@ -98,9 +91,6 @@ async def acompletion_with_retry(
     **kwargs: Any,
 ) -> Any:
     """Use tenacity to retry the async completion call."""
-    if is_openai_v1():
-        return await llm.async_client.create(**kwargs)
-
     retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)

     @retry_decorator
@@ -116,14 +106,10 @@ def _convert_delta_to_message_chunk(
 ) -> BaseMessageChunk:
     role = _dict.get("role")
     content = _dict.get("content") or ""
-    additional_kwargs: Dict = {}
     if _dict.get("function_call"):
-        function_call = dict(_dict["function_call"])
-        if "name" in function_call and function_call["name"] is None:
-            function_call["name"] = ""
-        additional_kwargs["function_call"] = function_call
-    if _dict.get("tool_calls"):
-        additional_kwargs["tool_calls"] = _dict["tool_calls"]
+        additional_kwargs = {"function_call": dict(_dict["function_call"])}
+    else:
+        additional_kwargs = {}

     if role == "user" or default_class == HumanMessageChunk:
         return HumanMessageChunk(content=content)
@@ -133,8 +119,6 @@ def _convert_delta_to_message_chunk(
         return SystemMessageChunk(content=content)
     elif role == "function" or default_class == FunctionMessageChunk:
         return FunctionMessageChunk(content=content, name=_dict["name"])
-    elif role == "tool" or default_class == ToolMessageChunk:
-        return ToolMessageChunk(content=content, tool_call_id=_dict["tool_call_id"])
     elif role or default_class == ChatMessageChunk:
         return ChatMessageChunk(content=content, role=role)
     else:

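The deleted branches above handle openai>=1.0 streaming deltas, which can carry partial `tool_calls` fragments and `function_call` pieces whose `name` arrives as `null` mid-stream. A self-contained sketch of that handling (function name illustrative):

```python
from typing import Any, Dict, Mapping

from langchain.schema.messages import AIMessageChunk


def delta_to_ai_chunk(delta: Mapping[str, Any]) -> AIMessageChunk:
    additional_kwargs: Dict[str, Any] = {}
    if delta.get("function_call"):
        function_call = dict(delta["function_call"])
        # mid-stream fragments may report name=null; normalize to "" so
        # downstream chunk concatenation does not trip over None
        if "name" in function_call and function_call["name"] is None:
            function_call["name"] = ""
        additional_kwargs["function_call"] = function_call
    if delta.get("tool_calls"):
        additional_kwargs["tool_calls"] = delta["tool_calls"]
    return AIMessageChunk(
        content=delta.get("content") or "", additional_kwargs=additional_kwargs
    )
```
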
@@ -165,13 +149,13 @@ class ChatOpenAI(BaseChatModel):
     def lc_attributes(self) -> Dict[str, Any]:
         attributes: Dict[str, Any] = {}

-        if self.openai_organization:
+        if self.openai_organization != "":
             attributes["openai_organization"] = self.openai_organization

-        if self.openai_api_base:
+        if self.openai_api_base != "":
             attributes["openai_api_base"] = self.openai_api_base

-        if self.openai_proxy:
+        if self.openai_proxy != "":
             attributes["openai_proxy"] = self.openai_proxy

         return attributes
@@ -182,30 +166,22 @@ class ChatOpenAI(BaseChatModel):
         return True

     client: Any = None  #: :meta private:
-    async_client: Any = None  #: :meta private:
     model_name: str = Field(default="gpt-3.5-turbo", alias="model")
     """Model name to use."""
     temperature: float = 0.7
     """What sampling temperature to use."""
     model_kwargs: Dict[str, Any] = Field(default_factory=dict)
     """Holds any model parameters valid for `create` call not explicitly specified."""
-    # When updating this to use a SecretStr
-    # Check for classes that derive from this class (as some of them
-    # may assume openai_api_key is a str)
-    openai_api_key: Optional[str] = Field(default=None, alias="api_key")
-    """Automatically inferred from env var `OPENAI_API_KEY` if not provided."""
-    openai_api_base: Optional[str] = Field(default=None, alias="base_url")
-    """Base URL path for API requests, leave blank if not using a proxy or service
-    emulator."""
-    openai_organization: Optional[str] = Field(default=None, alias="organization")
-    """Automatically inferred from env var `OPENAI_ORG_ID` if not provided."""
+    openai_api_key: Optional[str] = None
+    """Base URL path for API requests,
+    leave blank if not using a proxy or service emulator."""
+    openai_api_base: Optional[str] = None
+    openai_organization: Optional[str] = None
     # to support explicit proxy for OpenAI
     openai_proxy: Optional[str] = None
-    request_timeout: Union[float, Tuple[float, float], httpx.Timeout, None] = Field(
-        default=None, alias="timeout"
-    )
+    request_timeout: Optional[Union[float, Tuple[float, float]]] = None
     """Timeout for requests to OpenAI completion API. Default is 600 seconds."""
-    max_retries: int = 2
+    max_retries: int = 6
     """Maximum number of retries to make when generating."""
     streaming: bool = False
     """Whether to stream the results or not."""
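The `Field(alias=...)` declarations being deleted are what let v0.0.333 callers use the openai-v1 parameter names alongside the legacy `openai_*` ones. A usage sketch, assuming the v0.0.333 side of this diff (the placeholder key is illustrative):

```python
from langchain.chat_models import ChatOpenAI

# `timeout=` and `api_key=` rely on the Field(alias=...) declarations this
# hunk deletes; `model=` stays valid either way because model_name keeps
# its alias on both sides of the diff
llm = ChatOpenAI(model="gpt-3.5-turbo", timeout=60.0, api_key="sk-...")
```
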
@@ -223,11 +199,6 @@ class ChatOpenAI(BaseChatModel):
     when using one of the many model providers that expose an OpenAI-like
     API but with different models. In those cases, in order to avoid erroring
     when tiktoken is called, you can specify a model name to use here."""
-    default_headers: Union[Mapping[str, str], None] = None
-    default_query: Union[Mapping[str, object], None] = None
-    # Configure a custom httpx client. See the
-    # [httpx documentation](https://www.python-httpx.org/api/#client) for more details.
-    http_client: Union[httpx.Client, None] = None

     class Config:
         """Configuration for this pydantic object."""
@@ -263,22 +234,20 @@ class ChatOpenAI(BaseChatModel):
     @root_validator()
     def validate_environment(cls, values: Dict) -> Dict:
         """Validate that api key and python package exists in environment."""
-        if values["n"] < 1:
-            raise ValueError("n must be at least 1.")
-        if values["n"] > 1 and values["streaming"]:
-            raise ValueError("n must be 1 when streaming.")
-
         values["openai_api_key"] = get_from_dict_or_env(
             values, "openai_api_key", "OPENAI_API_KEY"
         )
-        # Check OPENAI_ORGANIZATION for backwards compatibility.
-        values["openai_organization"] = (
-            values["openai_organization"]
-            or os.getenv("OPENAI_ORG_ID")
-            or os.getenv("OPENAI_ORGANIZATION")
+        values["openai_organization"] = get_from_dict_or_env(
+            values,
+            "openai_organization",
+            "OPENAI_ORGANIZATION",
+            default="",
         )
-        values["openai_api_base"] = values["openai_api_base"] or os.getenv(
-            "OPENAI_API_BASE"
+        values["openai_api_base"] = get_from_dict_or_env(
+            values,
+            "openai_api_base",
+            "OPENAI_API_BASE",
+            default="",
         )
         values["openai_proxy"] = get_from_dict_or_env(
             values,
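The deleted organization lookup prefers an explicitly passed value, then the newer `OPENAI_ORG_ID` variable, then the legacy `OPENAI_ORGANIZATION`; the restored code collapses this to a single `get_from_dict_or_env` call with a `""` default. A sketch of the deleted fallback chain:

```python
import os
from typing import Optional


def resolve_organization(explicit: Optional[str]) -> Optional[str]:
    # explicit value wins, then OPENAI_ORG_ID, then legacy OPENAI_ORGANIZATION
    return explicit or os.getenv("OPENAI_ORG_ID") or os.getenv("OPENAI_ORGANIZATION")
```
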
@@ -290,51 +259,41 @@ class ChatOpenAI(BaseChatModel):
             import openai

         except ImportError:
-            raise ImportError(
+            raise ValueError(
                 "Could not import openai python package. "
                 "Please install it with `pip install openai`."
             )
-
-        if is_openai_v1():
-            client_params = {
-                "api_key": values["openai_api_key"],
-                "organization": values["openai_organization"],
-                "base_url": values["openai_api_base"],
-                "timeout": values["request_timeout"],
-                "max_retries": values["max_retries"],
-                "default_headers": values["default_headers"],
-                "default_query": values["default_query"],
-                "http_client": values["http_client"],
-            }
-            values["client"] = openai.OpenAI(**client_params).chat.completions
-            values["async_client"] = openai.AsyncOpenAI(
-                **client_params
-            ).chat.completions
-        else:
+        try:
             values["client"] = openai.ChatCompletion
+        except AttributeError:
+            raise ValueError(
+                "`openai` has no `ChatCompletion` attribute, this is likely "
+                "due to an old version of the openai package. Try upgrading it "
+                "with `pip install --upgrade openai`."
+            )
+        if values["n"] < 1:
+            raise ValueError("n must be at least 1.")
+        if values["n"] > 1 and values["streaming"]:
+            raise ValueError("n must be 1 when streaming.")
         return values

     @property
     def _default_params(self) -> Dict[str, Any]:
         """Get the default parameters for calling OpenAI API."""
-        params = {
+        return {
             "model": self.model_name,
+            "request_timeout": self.request_timeout,
+            "max_tokens": self.max_tokens,
             "stream": self.streaming,
             "n": self.n,
             "temperature": self.temperature,
             **self.model_kwargs,
         }
-        if self.max_tokens is not None:
-            params["max_tokens"] = self.max_tokens
-        return params

     def completion_with_retry(
         self, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any
     ) -> Any:
         """Use tenacity to retry the completion call."""
-        if is_openai_v1():
-            return self.client.create(**kwargs)
-
         retry_decorator = _create_retry_decorator(self, run_manager=run_manager)

         @retry_decorator
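Everything inside the deleted `is_openai_v1()` branch builds the v1 client objects once and stores them on the model, while the restored path keeps using the module-global `openai.ChatCompletion`. The helper itself is not shown in this diff; a plausible sketch of such a version gate (this is an assumption, not necessarily how `langchain.utils.openai.is_openai_v1` is actually written):

```python
import openai
from packaging.version import parse


def is_openai_v1() -> bool:
    # assumption: gate on the installed SDK's major version; openai>=1.0
    # replaced module-level resources like ChatCompletion with client objects
    return parse(openai.__version__).major >= 1
```
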
@@ -345,7 +304,6 @@ class ChatOpenAI(BaseChatModel):

     def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
         overall_token_usage: dict = {}
-        system_fingerprint = None
         for output in llm_outputs:
             if output is None:
                 # Happens in streaming
@@ -356,12 +314,7 @@ class ChatOpenAI(BaseChatModel):
                     overall_token_usage[k] += v
                 else:
                     overall_token_usage[k] = v
-            if system_fingerprint is None:
-                system_fingerprint = output.get("system_fingerprint")
-        combined = {"token_usage": overall_token_usage, "model_name": self.model_name}
-        if system_fingerprint:
-            combined["system_fingerprint"] = system_fingerprint
-        return combined
+        return {"token_usage": overall_token_usage, "model_name": self.model_name}

     def _stream(
         self,
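The two `_combine_llm_outputs` hunks drop `system_fingerprint` tracking (a reproducibility field the 1106-era API introduced) and keep only the key-by-key token-usage sum. A self-contained sketch of that merge:

```python
from typing import List, Optional


def combine_token_usage(llm_outputs: List[Optional[dict]]) -> dict:
    overall: dict = {}
    for output in llm_outputs:
        if output is None:
            continue  # happens in streaming, where no usage dict is reported
        for k, v in output.get("token_usage", {}).items():
            overall[k] = overall.get(k, 0) + v
    return {"token_usage": overall}
```
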
@@ -377,8 +330,6 @@ class ChatOpenAI(BaseChatModel):
         for chunk in self.completion_with_retry(
             messages=message_dicts, run_manager=run_manager, **params
         ):
-            if not isinstance(chunk, dict):
-                chunk = chunk.dict()
             if len(chunk["choices"]) == 0:
                 continue
             choice = chunk["choices"][0]
@@ -427,10 +378,8 @@ class ChatOpenAI(BaseChatModel):
         message_dicts = [convert_message_to_dict(m) for m in messages]
         return message_dicts, params

-    def _create_chat_result(self, response: Union[dict, BaseModel]) -> ChatResult:
+    def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
         generations = []
-        if not isinstance(response, dict):
-            response = response.dict()
         for res in response["choices"]:
             message = convert_dict_to_message(res["message"])
             gen = ChatGeneration(
@@ -439,11 +388,7 @@ class ChatOpenAI(BaseChatModel):
         )
         generations.append(gen)
         token_usage = response.get("usage", {})
-        llm_output = {
-            "token_usage": token_usage,
-            "model_name": self.model_name,
-            "system_fingerprint": response.get("system_fingerprint", ""),
-        }
+        llm_output = {"token_usage": token_usage, "model_name": self.model_name}
         return ChatResult(generations=generations, llm_output=llm_output)

     async def _astream(
@@ -460,8 +405,6 @@ class ChatOpenAI(BaseChatModel):
         async for chunk in await acompletion_with_retry(
             self, messages=message_dicts, run_manager=run_manager, **params
         ):
-            if not isinstance(chunk, dict):
-                chunk = chunk.dict()
             if len(chunk["choices"]) == 0:
                 continue
             choice = chunk["choices"][0]
@@ -509,16 +452,11 @@ class ChatOpenAI(BaseChatModel):
     def _client_params(self) -> Dict[str, Any]:
         """Get the parameters used for the openai client."""
         openai_creds: Dict[str, Any] = {
+            "api_key": self.openai_api_key,
+            "api_base": self.openai_api_base,
+            "organization": self.openai_organization,
             "model": self.model_name,
         }
-        if not is_openai_v1():
-            openai_creds.update(
-                {
-                    "api_key": self.openai_api_key,
-                    "api_base": self.openai_api_base,
-                    "organization": self.openai_organization,
-                }
-            )
         if self.openai_proxy:
             import openai

@@ -632,6 +570,7 @@ class ChatOpenAI(BaseChatModel):
         from langchain.chains.openai_functions.base import convert_to_openai_function

         formatted_functions = [convert_to_openai_function(fn) for fn in functions]
+        function_call_ = None
         if function_call is not None:
             if len(formatted_functions) != 1:
                 raise ValueError(

@@ -2,7 +2,7 @@ import asyncio
 import json
 import logging
 from functools import partial
-from typing import Any, AsyncIterator, Dict, List, Optional, cast
+from typing import Any, AsyncIterator, Dict, List, Optional

 import requests

@@ -133,24 +133,23 @@ class PaiEasChatEndpoint(BaseChatModel):

         for message in messages:
             """Converts message to a dict according to role"""
-            content = cast(str, message.content)
             if isinstance(message, HumanMessage):
-                user_content = user_content + [content]
+                user_content = user_content + [message.content]
             elif isinstance(message, AIMessage):
-                assistant_content = assistant_content + [content]
+                assistant_content = assistant_content + [message.content]
             elif isinstance(message, SystemMessage):
-                prompt["system_prompt"] = content
+                prompt["system_prompt"] = message.content
             elif isinstance(message, ChatMessage) and message.role in [
                 "user",
                 "assistant",
                 "system",
             ]:
                 if message.role == "system":
-                    prompt["system_prompt"] = content
+                    prompt["system_prompt"] = message.content
                 elif message.role == "user":
-                    user_content = user_content + [content]
+                    user_content = user_content + [message.content]
                 elif message.role == "assistant":
-                    assistant_content = assistant_content + [content]
+                    assistant_content = assistant_content + [message.content]
                 else:
                     supported = ",".join([role for role in ["user", "assistant", "system"]])
                     raise ValueError(
@@ -295,7 +294,7 @@ class PaiEasChatEndpoint(BaseChatModel):
                 # yield text, if any
                 if text:
                     if run_manager:
-                        await run_manager.on_llm_new_token(cast(str, content.content))
+                        await run_manager.on_llm_new_token(content.content)
                     yield ChatGenerationChunk(message=content)

                 # break if stop sequence found

@@ -3,7 +3,7 @@ from __future__ import annotations

 import logging
 from dataclasses import dataclass, field
-from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Union, cast
+from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Union

 from langchain.callbacks.manager import (
     AsyncCallbackManagerForLLMRun,
@@ -57,9 +57,8 @@ def _parse_chat_history(history: List[BaseMessage]) -> _ChatHistory:

     vertex_messages, context = [], None
     for i, message in enumerate(history):
-        content = cast(str, message.content)
         if i == 0 and isinstance(message, SystemMessage):
-            context = content
+            context = message.content
         elif isinstance(message, AIMessage):
             vertex_message = ChatMessage(content=message.content, author="bot")
             vertex_messages.append(vertex_message)

@@ -1,6 +1,6 @@
 """Wrapper around YandexGPT chat models."""
 import logging
-from typing import Any, Dict, List, Optional, Tuple, cast
+from typing import Any, Dict, List, Optional, Tuple

 from langchain.callbacks.manager import (
     AsyncCallbackManagerForLLMRun,
@@ -34,13 +34,12 @@ def _parse_chat_history(history: List[BaseMessage]) -> Tuple[List[Dict[str, str]
     chat_history = []
     instruction = ""
     for message in history:
-        content = cast(str, message.content)
         if isinstance(message, HumanMessage):
-            chat_history.append(_parse_message("user", content))
+            chat_history.append(_parse_message("user", message.content))
         if isinstance(message, AIMessage):
-            chat_history.append(_parse_message("assistant", content))
+            chat_history.append(_parse_message("assistant", message.content))
         if isinstance(message, SystemMessage):
-            instruction = content
+            instruction = message.content
     return chat_history, instruction

@@ -43,7 +43,7 @@ class BaiduBOSDirectoryLoader(BaseLoader):
             if response.is_truncated or contents_len < int(str(response.max_keys)):
                 break
             marker = response.next_marker
-        from langchain.document_loaders.baiducloud_bos_file import BaiduBOSFileLoader
+        from baidu_bos_file import BaiduBOSFileLoader

         for content in contents:
             if str(content.key).endswith("/"):

@@ -476,10 +476,8 @@ class AmazonTextractPDFParser(BaseBlobParser):
                 self.textract_features = []
         except ImportError:
             raise ImportError(
-                "Could not import amazon-textract-caller or "
-                "amazon-textract-textractor python package. Please install it "
-                "with `pip install amazon-textract-caller` & "
-                "`pip install amazon-textract-textractor`."
+                "Could not import amazon-textract-caller python package. "
+                "Please install it with `pip install amazon-textract-caller`."
             )

         if not client:

@@ -19,7 +19,6 @@ from langchain.embeddings.aleph_alpha import (
     AlephAlphaSymmetricSemanticEmbedding,
 )
 from langchain.embeddings.awa import AwaEmbeddings
-from langchain.embeddings.azure_openai import AzureOpenAIEmbeddings
 from langchain.embeddings.baidu_qianfan_endpoint import QianfanEmbeddingsEndpoint
 from langchain.embeddings.bedrock import BedrockEmbeddings
 from langchain.embeddings.cache import CacheBackedEmbeddings
@@ -54,7 +53,6 @@ from langchain.embeddings.mosaicml import MosaicMLInstructorEmbeddings
 from langchain.embeddings.nlpcloud import NLPCloudEmbeddings
 from langchain.embeddings.octoai_embeddings import OctoAIEmbeddings
 from langchain.embeddings.ollama import OllamaEmbeddings
-from langchain.embeddings.open_clip import OpenCLIPEmbeddings
 from langchain.embeddings.openai import OpenAIEmbeddings
 from langchain.embeddings.sagemaker_endpoint import SagemakerEndpointEmbeddings
 from langchain.embeddings.self_hosted import SelfHostedEmbeddings
@@ -73,7 +71,6 @@ logger = logging.getLogger(__name__)

 __all__ = [
     "OpenAIEmbeddings",
-    "AzureOpenAIEmbeddings",
     "CacheBackedEmbeddings",
     "ClarifaiEmbeddings",
     "CohereEmbeddings",
@@ -120,7 +117,6 @@ __all__ = [
     "QianfanEmbeddingsEndpoint",
     "JohnSnowLabsEmbeddings",
     "VoyageEmbeddings",
-    "OpenCLIPEmbeddings",
 ]

Some files were not shown because too many files have changed in this diff.