Mirror of https://github.com/hwchase17/langchain.git (synced 2026-02-06 01:00:22 +00:00)

Compare commits: v0.0.285...harrison/s (26 commits)

| SHA1 |
|---|
| 3f00570f50 |
| 8b5662473f |
| 65e1606daa |
| d09ef9eb52 |
| ee3f950a67 |
| e0d45e6a09 |
| 90504fc499 |
| 40d9191955 |
| 6ad6bb46c4 |
| 675d57df50 |
| ddd07001f3 |
| b3a8fc7cb1 |
| 62fa2bc518 |
| e93240f023 |
| 7203c97e8f |
| 4258c23867 |
| 3e5a143625 |
| c902a1545b |
| 8c0f391815 |
| 5d8a689d5e |
| 0a86a70fe7 |
| 9095dc69ac |
| c6b27b3692 |
| 5a4ce9ef2b |
| 1b0eebe1e3 |
| 2423f7f3b4 |
@@ -12,7 +12,7 @@ Output parsers are classes that help structure language model responses. There a

And then one optional one:

- "Parse with prompt": A method which takes in a string (assumed to be the response from a language model) and a prompt (assumed to the prompt that generated such a response) and parses it into some structure. The prompt is largely provided in the event the OutputParser wants to retry or fix the output in some way, and needs information from the prompt to do so.
- "Parse with prompt": A method which takes in a string (assumed to be the response from a language model) and a prompt (assumed to be the prompt that generated such a response) and parses it into some structure. The prompt is largely provided in the event the OutputParser wants to retry or fix the output in some way, and needs information from the prompt to do so.

## Get started
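To make the three methods concrete, here is a minimal sketch of a custom output parser. It assumes the `BaseOutputParser` interface from `langchain.schema`; the class name and the comma-separated format are invented for illustration, and `parse_with_prompt` simply ignores the prompt rather than using it to retry.

```python
from typing import List

from langchain.schema import BaseOutputParser


class CommaListSketchParser(BaseOutputParser):
    """Hypothetical parser that turns an LLM response into a list of strings."""

    def get_format_instructions(self) -> str:
        # "Get format instructions": tell the model how to format its output.
        return "Return your answer as a comma-separated list, e.g. `foo, bar, baz`."

    def parse(self, text: str) -> List[str]:
        # "Parse": convert the raw model response into a structured object.
        return [item.strip() for item in text.split(",")]

    def parse_with_prompt(self, completion: str, prompt) -> List[str]:
        # "Parse with prompt": the prompt is available in case the parser wants
        # to retry or fix malformed output; this sketch just ignores it.
        return self.parse(completion)
```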
docs/extras/expression_language/cookbook/agent.ipynb (new file, 203 lines)
@@ -0,0 +1,203 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e89f490d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Agents\n",
|
||||
"\n",
|
||||
"You can pass a Runnable into an agent."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "af4381de",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents import XMLAgent, tool, AgentExecutor\n",
|
||||
"from langchain.chat_models import ChatAnthropic"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "24cc8134",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model = ChatAnthropic(model=\"claude-2\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "67c0b0e4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"@tool\n",
|
||||
"def search(query: str) -> str:\n",
|
||||
" \"\"\"Search things about current events.\"\"\"\n",
|
||||
" return \"32 degrees\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "7203b101",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tool_list = [search]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "b68e756d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Get prompt to use\n",
|
||||
"prompt = XMLAgent.get_default_prompt()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "61ab3e9a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Logic for going from intermediate steps to a string to pass into model\n",
|
||||
"# This is pretty tied to the prompt\n",
|
||||
"def convert_intermediate_steps(intermediate_steps):\n",
|
||||
" log = \"\"\n",
|
||||
" for action, observation in intermediate_steps:\n",
|
||||
" log += (\n",
|
||||
" f\"<tool>{action.tool}</tool><tool_input>{action.tool_input}\"\n",
|
||||
" f\"</tool_input><observation>{observation}</observation>\"\n",
|
||||
" )\n",
|
||||
" return log\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Logic for converting tools to string to go in prompt\n",
|
||||
"def convert_tools(tools):\n",
|
||||
" return \"\\n\".join([f\"{tool.name}: {tool.description}\" for tool in tools])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "260f5988",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Building an agent from a runnable usually involves a few things:\n",
|
||||
"\n",
|
||||
"1. Data processing for the intermediate steps. These need to represented in a way that the language model can recognize them. This should be pretty tightly coupled to the instructions in the prompt\n",
|
||||
"\n",
|
||||
"2. The prompt itself\n",
|
||||
"\n",
|
||||
"3. The model, complete with stop tokens if needed\n",
|
||||
"\n",
|
||||
"4. The output parser - should be in sync with how the prompt specifies things to be formatted."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "e92f1d6f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent = (\n",
|
||||
" {\n",
|
||||
" \"question\": lambda x: x[\"question\"],\n",
|
||||
" \"intermediate_steps\": lambda x: convert_intermediate_steps(x[\"intermediate_steps\"])\n",
|
||||
" }\n",
|
||||
" | prompt.partial(tools=convert_tools(tool_list))\n",
|
||||
" | model.bind(stop=[\"</tool_input>\", \"</final_answer>\"])\n",
|
||||
" | XMLAgent.get_default_output_parser()\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "6ce6ec7a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent_executor = AgentExecutor(agent=agent, tools=tool_list, verbose=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "fb5cb2e3",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m <tool>search</tool>\n",
|
||||
"<tool_input>weather in new york\u001b[0m\u001b[36;1m\u001b[1;3m32 degrees\u001b[0m\u001b[32;1m\u001b[1;3m\n",
|
||||
"\n",
|
||||
"<final_answer>The weather in New York is 32 degrees\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'question': 'whats the weather in New york?',\n",
|
||||
" 'output': 'The weather in New York is 32 degrees'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent_executor.invoke({\"question\": \"whats the weather in New york?\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "bce86dd8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
docs/extras/integrations/callbacks/confident.ipynb (new file, 310 lines)
@@ -0,0 +1,310 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Confident\n",
|
||||
"\n",
|
||||
">[DeepEval](https://confident-ai.com) package for unit testing LLMs.\n",
|
||||
"> Using Confident, everyone can build robust language models through faster iterations\n",
|
||||
"> using both unit testing and integration testing. We provide support for each step in the iteration\n",
|
||||
"> from synthetic data creation to testing.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"In this guide we will demonstrate how to test and measure LLMs in performance. We show how you can use our callback to measure performance and how you can define your own metric and log them into our dashboard.\n",
|
||||
"\n",
|
||||
"DeepEval also offers:\n",
|
||||
"- How to generate synthetic data\n",
|
||||
"- How to measure performance\n",
|
||||
"- A dashboard to monitor and review results over time"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"source": [
|
||||
"## Installation and Setup"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install deepeval --upgrade"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Getting API Credentials\n",
|
||||
"\n",
|
||||
"To get the DeepEval API credentials, follow the next steps:\n",
|
||||
"\n",
|
||||
"1. Go to https://app.confident-ai.com\n",
|
||||
"2. Click on \"Organization\"\n",
|
||||
"3. Copy the API Key.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"When you log in, you will also be asked to set the `implementation` name. The implementation name is required to describe the type of implementation. (Think of what you want to call your project. We recommend making it descriptive.)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!deepeval login"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Setup DeepEval\n",
|
||||
"\n",
|
||||
"You can, by default, use the `DeepEvalCallbackHandler` to set up the metrics you want to track. However, this has limited support for metrics at the moment (more to be added soon). It currently supports:\n",
|
||||
"- [Answer Relevancy](https://docs.confident-ai.com/docs/measuring_llm_performance/answer_relevancy)\n",
|
||||
"- [Bias](https://docs.confident-ai.com/docs/measuring_llm_performance/debias)\n",
|
||||
"- [Toxicness](https://docs.confident-ai.com/docs/measuring_llm_performance/non_toxic)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from deepeval.metrics.answer_relevancy import AnswerRelevancy\n",
|
||||
"\n",
|
||||
"# Here we want to make sure the answer is minimally relevant\n",
|
||||
"answer_relevancy_metric = AnswerRelevancy(minimum_score=0.5)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Get Started"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"To use the `DeepEvalCallbackHandler`, we need the `implementation_name`. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"from langchain.callbacks.confident_callback import DeepEvalCallbackHandler\n",
|
||||
"\n",
|
||||
"deepeval_callback = DeepEvalCallbackHandler(\n",
|
||||
" implementation_name=\"langchainQuickstart\",\n",
|
||||
" metrics=[answer_relevancy_metric]\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Scenario 1: Feeding into LLM\n",
|
||||
"\n",
|
||||
"You can then feed it into your LLM with OpenAI."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"LLMResult(generations=[[Generation(text='\\n\\nQ: What did the fish say when he hit the wall? \\nA: Dam.', generation_info={'finish_reason': 'stop', 'logprobs': None})], [Generation(text='\\n\\nThe Moon \\n\\nThe moon is high in the midnight sky,\\nSparkling like a star above.\\nThe night so peaceful, so serene,\\nFilling up the air with love.\\n\\nEver changing and renewing,\\nA never-ending light of grace.\\nThe moon remains a constant view,\\nA reminder of life’s gentle pace.\\n\\nThrough time and space it guides us on,\\nA never-fading beacon of hope.\\nThe moon shines down on us all,\\nAs it continues to rise and elope.', generation_info={'finish_reason': 'stop', 'logprobs': None})], [Generation(text='\\n\\nQ. What did one magnet say to the other magnet?\\nA. \"I find you very attractive!\"', generation_info={'finish_reason': 'stop', 'logprobs': None})], [Generation(text=\"\\n\\nThe world is charged with the grandeur of God.\\nIt will flame out, like shining from shook foil;\\nIt gathers to a greatness, like the ooze of oil\\nCrushed. Why do men then now not reck his rod?\\n\\nGenerations have trod, have trod, have trod;\\nAnd all is seared with trade; bleared, smeared with toil;\\nAnd wears man's smudge and shares man's smell: the soil\\nIs bare now, nor can foot feel, being shod.\\n\\nAnd for all this, nature is never spent;\\nThere lives the dearest freshness deep down things;\\nAnd though the last lights off the black West went\\nOh, morning, at the brown brink eastward, springs —\\n\\nBecause the Holy Ghost over the bent\\nWorld broods with warm breast and with ah! bright wings.\\n\\n~Gerard Manley Hopkins\", generation_info={'finish_reason': 'stop', 'logprobs': None})], [Generation(text='\\n\\nQ: What did one ocean say to the other ocean?\\nA: Nothing, they just waved.', generation_info={'finish_reason': 'stop', 'logprobs': None})], [Generation(text=\"\\n\\nA poem for you\\n\\nOn a field of green\\n\\nThe sky so blue\\n\\nA gentle breeze, the sun above\\n\\nA beautiful world, for us to love\\n\\nLife is a journey, full of surprise\\n\\nFull of joy and full of surprise\\n\\nBe brave and take small steps\\n\\nThe future will be revealed with depth\\n\\nIn the morning, when dawn arrives\\n\\nA fresh start, no reason to hide\\n\\nSomewhere down the road, there's a heart that beats\\n\\nBelieve in yourself, you'll always succeed.\", generation_info={'finish_reason': 'stop', 'logprobs': None})]], llm_output={'token_usage': {'completion_tokens': 504, 'total_tokens': 528, 'prompt_tokens': 24}, 'model_name': 'text-davinci-003'})"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"llm = OpenAI(\n",
|
||||
" temperature=0,\n",
|
||||
" callbacks=[deepeval_callback],\n",
|
||||
" verbose=True,\n",
|
||||
" openai_api_key=\"<YOUR_API_KEY>\",\n",
|
||||
")\n",
|
||||
"output = llm.generate(\n",
|
||||
" [\n",
|
||||
" \"What is the best evaluation tool out there? (no bias at all)\",\n",
|
||||
" ]\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can then check the metric if it was successful by calling the `is_successful()` method."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"answer_relevancy_metric.is_successful()\n",
|
||||
"# returns True/False"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Once you have ran that, you should be able to see our dashboard below. \n",
|
||||
"\n",
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Scenario 2: Tracking an LLM in a chain without callbacks\n",
|
||||
"\n",
|
||||
"To track an LLM in a chain without callbacks, you can plug into it at the end.\n",
|
||||
"\n",
|
||||
"We can start by defining a simple chain as shown below."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import requests\n",
|
||||
"from langchain.chains import RetrievalQA\n",
|
||||
"from langchain.document_loaders import TextLoader\n",
|
||||
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain.text_splitter import CharacterTextSplitter\n",
|
||||
"from langchain.vectorstores import Chroma\n",
|
||||
"\n",
|
||||
"text_file_url = \"https://raw.githubusercontent.com/hwchase17/chat-your-data/master/state_of_the_union.txt\"\n",
|
||||
"\n",
|
||||
"openai_api_key = \"sk-XXX\"\n",
|
||||
"\n",
|
||||
"with open(\"state_of_the_union.txt\", \"w\") as f:\n",
|
||||
" response = requests.get(text_file_url)\n",
|
||||
" f.write(response.text)\n",
|
||||
"\n",
|
||||
"loader = TextLoader(\"state_of_the_union.txt\")\n",
|
||||
"documents = loader.load()\n",
|
||||
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
|
||||
"texts = text_splitter.split_documents(documents)\n",
|
||||
"\n",
|
||||
"embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key)\n",
|
||||
"docsearch = Chroma.from_documents(texts, embeddings)\n",
|
||||
"\n",
|
||||
"qa = RetrievalQA.from_chain_type(\n",
|
||||
" llm=OpenAI(openai_api_key=openai_api_key), chain_type=\"stuff\",\n",
|
||||
" retriever=docsearch.as_retriever()\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Providing a new question-answering pipeline\n",
|
||||
"query = \"Who is the president?\"\n",
|
||||
"result = qa.run(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"After defining a chain, you can then manually check for answer similarity."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"answer_relevancy_metric.measure(result, query)\n",
|
||||
"answer_relevancy_metric.is_successful()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### What's next?\n",
|
||||
"\n",
|
||||
"You can create your own custom metrics [here](https://docs.confident-ai.com/docs/quickstart/custom-metrics). \n",
|
||||
"\n",
|
||||
"DeepEval also offers other features such as being able to [automatically create unit tests](https://docs.confident-ai.com/docs/quickstart/synthetic-data-creation), [tests for hallucination](https://docs.confident-ai.com/docs/measuring_llm_performance/factual_consistency).\n",
|
||||
"\n",
|
||||
"If you are interested, check out our Github repository here [https://github.com/confident-ai/deepeval](https://github.com/confident-ai/deepeval). We welcome any PRs and discussions on how to improve LLM performance."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "a53ebf4a859167383b364e7e7521d0add3c2dbbdecce4edf676e8c4634ff3fbb"
|
||||
}
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
@@ -1,19 +1,23 @@

# LLMonitor

[LLMonitor](https://llmonitor.com) is an open-source observability platform that provides cost tracking, user tracking and powerful agent tracing.
[LLMonitor](https://llmonitor.com?utm_source=langchain&utm_medium=py&utm_campaign=docs) is an open-source observability platform that provides cost and usage analytics, user tracking, tracing and evaluation tools.

<video controls width='100%' >
<source src='https://llmonitor.com/videos/demo-annotated.mp4'/>
</video>

## Setup

Create an account on [llmonitor.com](https://llmonitor.com), create an `App`, and then copy the associated `tracking id`.

Create an account on [llmonitor.com](https://llmonitor.com?utm_source=langchain&utm_medium=py&utm_campaign=docs), then copy your new app's `tracking id`.

Once you have it, set it as an environment variable by running:

```bash
export LLMONITOR_APP_ID="..."
```

If you'd prefer not to set an environment variable, you can pass the key directly when initializing the callback handler:

```python
from langchain.callbacks import LLMonitorCallbackHandler

@@ -21,12 +25,13 @@ handler = LLMonitorCallbackHandler(app_id="...")
```

## Usage with LLM/Chat models

```python
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.callbacks import LLMonitorCallbackHandler

handler = LLMonitorCallbackHandler(app_id="...")
handler = LLMonitorCallbackHandler()

llm = OpenAI(
    callbacks=[handler],
@@ -38,26 +43,63 @@ chat = ChatOpenAI(
)
```

## Usage with chains and agents

Make sure to pass the callback handler to the `run` method so that all related chains and llm calls are correctly tracked.

It is also recommended to pass `agent_name` in the metadata to be able to distinguish between agents in the dashboard.

Example:

```python
from langchain.chat_models import ChatOpenAI
from langchain.schema import SystemMessage, HumanMessage
from langchain.agents import OpenAIFunctionsAgent, AgentExecutor, tool
from langchain.callbacks import LLMonitorCallbackHandler

llm = ChatOpenAI(temperature=0)

handler = LLMonitorCallbackHandler()

@tool
def get_word_length(word: str) -> int:
    """Returns the length of a word."""
    return len(word)

tools = [get_word_length]

prompt = OpenAIFunctionsAgent.create_prompt(
    system_message=SystemMessage(
        content="You are very powerful assistant, but bad at calculating lengths of words."
    )
)

agent = OpenAIFunctionsAgent(llm=llm, tools=tools, prompt=prompt, verbose=True)
agent_executor = AgentExecutor(
    agent=agent, tools=tools, verbose=True, metadata={"agent_name": "WordCount"}  # <- recommended, assign a custom name
)
agent_executor.run("how many letters in the word educa?", callbacks=[handler])
```

Another example:

## Usage with agents
```python
from langchain.agents import load_tools, initialize_agent, AgentType
from langchain.llms import OpenAI
from langchain.callbacks import LLMonitorCallbackHandler

handler = LLMonitorCallbackHandler(app_id="...")
handler = LLMonitorCallbackHandler()

llm = OpenAI(temperature=0)
tools = load_tools(["serpapi", "llm-math"], llm=llm)
agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION)
agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, metadata={ "agent_name": "GirlfriendAgeFinder" })  # <- recommended, assign a custom name

agent.run(
    "Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?",
    callbacks=[handler],
    metadata={
        "agentName": "Leo DiCaprio's girlfriend",  # you can assign a custom agent in the metadata
    },
)
```

## Support

For any question or issue with integration you can reach out to the LLMonitor team on [Discord](http://discord.com/invite/8PafSG58kK) or via [email](mailto:vince@llmonitor.com).

docs/extras/integrations/chat/konko.ipynb (new file, 164 lines)
@@ -0,0 +1,164 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Konko\n",
|
||||
"\n",
|
||||
">[Konko](https://www.konko.ai/) API is a fully managed Web API designed to help application developers:\n",
|
||||
"\n",
|
||||
"Konko API is a fully managed API designed to help application developers:\n",
|
||||
"\n",
|
||||
"1. Select the right LLM(s) for their application\n",
|
||||
"2. Prototype with various open-source and proprietary LLMs\n",
|
||||
"3. Move to production in-line with their security, privacy, throughput, latency SLAs without infrastructure set-up or administration using Konko AI's SOC 2 compliant infrastructure\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"This example goes over how to use LangChain to interact with `Konko` [models](https://docs.konko.ai/docs/overview)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"To run this notebook, you'll need Konko API key. You can request it by messaging support@konko.ai."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chat_models import ChatKonko\n",
|
||||
"from langchain.prompts.chat import (\n",
|
||||
" ChatPromptTemplate,\n",
|
||||
" SystemMessagePromptTemplate,\n",
|
||||
" AIMessagePromptTemplate,\n",
|
||||
" HumanMessagePromptTemplate,\n",
|
||||
")\n",
|
||||
"from langchain.schema import AIMessage, HumanMessage, SystemMessage"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 2. Set API Keys\n",
|
||||
"\n",
|
||||
"<br />\n",
|
||||
"\n",
|
||||
"### Option 1: Set Environment Variables\n",
|
||||
"\n",
|
||||
"1. You can set environment variables for \n",
|
||||
" 1. KONKO_API_KEY (Required)\n",
|
||||
" 2. OPENAI_API_KEY (Optional)\n",
|
||||
"2. In your current shell session, use the export command:\n",
|
||||
"\n",
|
||||
"```shell\n",
|
||||
"export KONKO_API_KEY={your_KONKO_API_KEY_here}\n",
|
||||
"export OPENAI_API_KEY={your_OPENAI_API_KEY_here} #Optional\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Alternatively, you can add the above lines directly to your shell startup script (such as .bashrc or .bash_profile for Bash shell and .zshrc for Zsh shell) to have them set automatically every time a new shell session starts.\n",
|
||||
"\n",
|
||||
"### Option 2: Set API Keys Programmatically\n",
|
||||
"\n",
|
||||
"If you prefer to set your API keys directly within your Python script or Jupyter notebook, you can use the following commands:\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"konko.set_api_key('your_KONKO_API_KEY_here') \n",
|
||||
"konko.set_openai_api_key('your_OPENAI_API_KEY_here') # Optional\n",
|
||||
"```\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Calling a model\n",
|
||||
"\n",
|
||||
"Find a model on the [Konko overview page](https://docs.konko.ai/docs/overview)\n",
|
||||
"\n",
|
||||
"For example, for this [LLama 2 model](https://docs.konko.ai/docs/meta-llama-2-13b-chat). The model id would be: `\"meta-llama/Llama-2-13b-chat-hf\"`\n",
|
||||
"\n",
|
||||
"Another way to find the list of models running on the Konko instance is through this [endpoint](https://docs.konko.ai/reference/listmodels).\n",
|
||||
"\n",
|
||||
"From here, we can initialize our model:\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chat = ChatKonko(max_tokens=400, model = 'meta-llama/Llama-2-13b-chat-hf')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\" Sure, I'd be happy to explain the Big Bang Theory briefly!\\n\\nThe Big Bang Theory is the leading explanation for the origin and evolution of the universe, based on a vast amount of observational evidence from many fields of science. In essence, the theory posits that the universe began as an infinitely hot and dense point, known as a singularity, around 13.8 billion years ago. This singularity expanded rapidly, and as it did, it cooled and formed subatomic particles, which eventually coalesced into the first atoms, and later into the stars and galaxies we see today.\\n\\nThe theory gets its name from the idea that the universe began in a state of incredibly high energy and temperature, and has been expanding and cooling ever since. This expansion is thought to have been driven by a mysterious force known as dark energy, which is thought to be responsible for the accelerating expansion of the universe.\\n\\nOne of the key predictions of the Big Bang Theory is that the universe should be homogeneous and isotropic on large scales, meaning that it should look the same in all directions and have the same properties everywhere. This prediction has been confirmed by a wealth of observational evidence, including the cosmic microwave background radiation, which is thought to be a remnant of the early universe.\\n\\nOverall, the Big Bang Theory is a well-established and widely accepted explanation for the origins of the universe, and it has been supported by a vast amount of observational evidence from many fields of science.\", additional_kwargs={}, example=False)"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"messages = [\n",
|
||||
" SystemMessage(\n",
|
||||
" content=\"You are a helpful assistant.\"\n",
|
||||
" ),\n",
|
||||
" HumanMessage(\n",
|
||||
" content=\"Explain Big Bang Theory briefly\"\n",
|
||||
" ),\n",
|
||||
"]\n",
|
||||
"chat(messages)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "a0a0263b650d907a3bfe41c0f8d6a63a071b884df3cfdc1579f00cdc1aed6b03"
|
||||
}
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
docs/extras/integrations/llms/ctranslate2.ipynb (new file, 240 lines)
@@ -0,0 +1,240 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# CTranslate2"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**CTranslate2** is a C++ and Python library for efficient inference with Transformer models.\n",
|
||||
"\n",
|
||||
"The project implements a custom runtime that applies many performance optimization techniques such as weights quantization, layers fusion, batch reordering, etc., to accelerate and reduce the memory usage of Transformer models on CPU and GPU.\n",
|
||||
"\n",
|
||||
"Full list of features and supported models is included in the [project's repository](https://opennmt.net/CTranslate2/guides/transformers.html). To start, please check out the official [quickstart guide](https://opennmt.net/CTranslate2/quickstart.html).\n",
|
||||
"\n",
|
||||
"To use, you should have `ctranslate2` python package installed."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#!pip install ctranslate2"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"To use a Hugging Face model with CTranslate2, it has to be first converted to CTranslate2 format using the `ct2-transformers-converter` command. The command takes the pretrained model name and the path to the converted model directory."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Loading checkpoint shards: 100%|██████████████████| 2/2 [00:01<00:00, 1.81it/s]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# converstion can take several minutes\n",
|
||||
"!ct2-transformers-converter --model meta-llama/Llama-2-7b-hf --quantization bfloat16 --output_dir ./llama-2-7b-ct2 --force"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.llms import CTranslate2\n",
|
||||
"\n",
|
||||
"llm = CTranslate2(\n",
|
||||
" # output_dir from above:\n",
|
||||
" model_path=\"./llama-2-7b-ct2\",\n",
|
||||
" tokenizer_name=\"meta-llama/Llama-2-7b-hf\",\n",
|
||||
" device=\"cuda\",\n",
|
||||
" # device_index can be either single int or list or ints,\n",
|
||||
" # indicating the ids of GPUs to use for inference:\n",
|
||||
" device_index=[0,1], \n",
|
||||
" compute_type=\"bfloat16\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Single call"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 31,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"He presented me with plausible evidence for the existence of unicorns: 1) they are mentioned in ancient texts; and, more importantly to him (and not so much as a matter that would convince most people), he had seen one.\n",
|
||||
"I was skeptical but I didn't want my friend upset by his belief being dismissed outright without any consideration or argument on its behalf whatsoever - which is why we were having this conversation at all! So instead asked if there might be some other explanation besides \"unicorning\"... maybe it could have been an ostrich? Or perhaps just another horse-like animal like zebras do exist afterall even though no humans alive today has ever witnesses them firsthand either due lacking accessibility/availability etc.. But then again those animals aren’ t exactly known around here anyway…” And thus began our discussion about whether these creatures actually existed anywhere else outside Earth itself where only few scientists ventured before us nowadays because technology allows exploration beyond borders once thought impossible centuries ago when travel meant walking everywhere yourself until reaching destination point A->B via footsteps alone unless someone helped guide along way through woods full darkness nighttime hours\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(\n",
|
||||
" llm(\n",
|
||||
" \"He presented me with plausible evidence for the existence of unicorns: \",\n",
|
||||
" max_length=256,\n",
|
||||
" sampling_topk=50,\n",
|
||||
" sampling_temperature=0.2,\n",
|
||||
" repetition_penalty=2,\n",
|
||||
" cache_static_prompt=False,\n",
|
||||
" )\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Multiple calls:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 34,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"generations=[[Generation(text='The list of top romantic songs:\\n1. “I Will Always Love You” by Whitney Houston\\n2. “Can’t Help Falling in Love” by Elvis Presley\\n3. “Unchained Melody” by The Righteous Brothers\\n4. “I Will Always Love You” by Dolly Parton\\n5. “I Will Always Love You” by Whitney Houston\\n6. “I Will Always Love You” by Dolly Parton\\n7. “I Will Always Love You” by The Beatles\\n8. “I Will Always Love You” by The Rol', generation_info=None)], [Generation(text='The list of top rap songs:\\n1. “God’s Plan” by Drake\\n2. “Rockstar” by Post Malone\\n3. “Bad and Boujee” by Migos\\n4. “Humble” by Kendrick Lamar\\n5. “Bodak Yellow” by Cardi B\\n6. “I’m the One” by DJ Khaled\\n7. “Motorsport” by Migos\\n8. “No Limit” by G-Eazy\\n9. “Bounce Back” by Big Sean\\n10. “', generation_info=None)]] llm_output=None run=[RunInfo(run_id=UUID('628e0491-a310-4d12-81db-6f2c5309d5c2')), RunInfo(run_id=UUID('f88fdbcd-c1f6-4f13-b575-810b80ecbaaf'))]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(\n",
|
||||
" llm.generate(\n",
|
||||
" [\"The list of top romantic songs:\\n1.\", \"The list of top rap songs:\\n1.\"],\n",
|
||||
" max_length=128\n",
|
||||
" )\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Integrate the model in an LLMChain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 46,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Who was the US president in the year the first Pokemon game was released?\n",
|
||||
"\n",
|
||||
"Let's think step by step. 1996 was the year the first Pokemon game was released.\n",
|
||||
"\n",
|
||||
"\\begin{blockquote}\n",
|
||||
"\n",
|
||||
"\\begin{itemize}\n",
|
||||
" \\item 1996 was the year Bill Clinton was president.\n",
|
||||
" \\item 1996 was the year the first Pokemon game was released.\n",
|
||||
" \\item 1996 was the year the first Pokemon game was released.\n",
|
||||
"\n",
|
||||
"\\end{itemize}\n",
|
||||
"\\end{blockquote}\n",
|
||||
"\n",
|
||||
"I'm not sure if this is a valid question, but I'm sure it's a fun one.\n",
|
||||
"\n",
|
||||
"Comment: I'm not sure if this is a valid question, but I'm sure it's a fun one.\n",
|
||||
"\n",
|
||||
"Comment: @JoeZ. I'm not sure if this is a valid question, but I'm sure it's a fun one.\n",
|
||||
"\n",
|
||||
"Comment: @JoeZ. I'm not sure if this is a valid question, but I'm sure it's a fun one.\n",
|
||||
"\n",
|
||||
"Comment: @JoeZ. I'm not sure if this is a valid question, but I'm sure it's a fun one.\n",
|
||||
"\n",
|
||||
"Comment: @JoeZ. I'm not sure if this is a valid question, but I'm sure it's a fun one.\n",
|
||||
"\n",
|
||||
"Comment: @JoeZ. I'm not sure if this is a valid question, but I'm sure it's a fun one.\n",
|
||||
"\n",
|
||||
"Comment: @JoeZ. I'm not sure if this is a valid question, but I'm sure it's a fun one.\n",
|
||||
"\n",
|
||||
"Comment: @JoeZ. I'm not sure if this is a valid question, but I'm sure it's a fun one.\n",
|
||||
"\n",
|
||||
"Comment: @JoeZ. I'm not sure if this is a valid question, but I'm sure it's a fun one.\n",
|
||||
"\n",
|
||||
"Comment: @JoeZ. I'm not sure if this is a valid question, but I'm sure it's a fun one.\n",
|
||||
"\n",
|
||||
"Comment: @JoeZ. I'm not sure if this is a valid question, but I'm sure it's a fun one.\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain import PromptTemplate, LLMChain\n",
|
||||
"\n",
|
||||
"template = \"\"\"{question}\n",
|
||||
"\n",
|
||||
"Let's think step by step. \"\"\"\n",
|
||||
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
|
||||
"\n",
|
||||
"llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
|
||||
"\n",
|
||||
"question = \"Who was the US president in the year the first Pokemon game was released?\"\n",
|
||||
"\n",
|
||||
"print(llm_chain.run(question))"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.10.12 ('langchain_venv': venv)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.12"
|
||||
},
|
||||
"orig_nbformat": 4,
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "d1d3a3c58a58885896c5459933a599607cdbb9917d7e1ad7516c8786c51f2dd2"
|
||||
}
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
docs/extras/integrations/providers/confident.mdx (new file, 22 lines)
@@ -0,0 +1,22 @@
# Confident AI

![Confident - Unit Testing for LLMs]()

>[DeepEval](https://confident-ai.com) is a package for unit testing LLMs.
> Using Confident, everyone can build robust language models through faster iterations
> using both unit testing and integration testing. We provide support for each step in the iteration
> from synthetic data creation to testing.

## Installation and Setup

First, you'll need to install the `DeepEval` Python package as follows:

```bash
pip install deepeval
```

Afterwards, you can get started in as little as a few lines of code.

```python
from langchain.callbacks import DeepEvalCallback
```
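For instance, building on the `DeepEvalCallbackHandler` notebook above, a minimal sketch of attaching the callback to an LLM might look like the following; the metric threshold, implementation name, and model choice are placeholders rather than required values.

```python
from deepeval.metrics.answer_relevancy import AnswerRelevancy
from langchain.callbacks.confident_callback import DeepEvalCallbackHandler
from langchain.llms import OpenAI

# Track a single metric, as in the notebook above (0.5 is an arbitrary threshold)
answer_relevancy = AnswerRelevancy(minimum_score=0.5)

handler = DeepEvalCallbackHandler(
    implementation_name="langchainQuickstart",  # placeholder project name
    metrics=[answer_relevancy],
)

llm = OpenAI(temperature=0, callbacks=[handler])
llm("What is the capital of France?")

# Check whether the tracked metric passed
print(answer_relevancy.is_successful())
```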
docs/extras/integrations/providers/konko.mdx (new file, 80 lines)
@@ -0,0 +1,80 @@
# Konko
This page covers how to run models on Konko within LangChain.

Konko API is a fully managed API designed to help application developers:

1. Select the right LLM(s) for their application
2. Prototype with various open-source and proprietary LLMs
3. Move to production in-line with their security, privacy, throughput, latency SLAs without infrastructure set-up or administration using Konko AI's SOC 2 compliant infrastructure

## Installation and Setup

### First you'll need an API key
You can request it by messaging [support@konko.ai](mailto:support@konko.ai).

### Install Konko AI's Python SDK

#### 1. Enable a Python3.8+ environment

#### 2. Set API Keys

##### Option 1: Set Environment Variables

1. You can set environment variables for
   1. KONKO_API_KEY (Required)
   2. OPENAI_API_KEY (Optional)

2. In your current shell session, use the export command:

```shell
export KONKO_API_KEY={your_KONKO_API_KEY_here}
export OPENAI_API_KEY={your_OPENAI_API_KEY_here} # Optional
```

Alternatively, you can add the above lines directly to your shell startup script (such as .bashrc or .bash_profile for Bash shell and .zshrc for Zsh shell) to have them set automatically every time a new shell session starts.

##### Option 2: Set API Keys Programmatically

If you prefer to set your API keys directly within your Python script or Jupyter notebook, you can use the following commands:

```python
konko.set_api_key('your_KONKO_API_KEY_here')
konko.set_openai_api_key('your_OPENAI_API_KEY_here')  # Optional
```

#### 3. Install the SDK

```shell
pip install konko
```

#### 4. Verify Installation & Authentication

```python
# Confirm konko has installed successfully
import konko
# Confirm API keys from Konko and OpenAI are set properly
konko.Model.list()
```

## Calling a model

Find a model on the [Konko Introduction page](https://docs.konko.ai/docs#available-models).

For example, for this [Llama 2 model](https://docs.konko.ai/docs/meta-llama-2-13b-chat), the model id would be: `"meta-llama/Llama-2-13b-chat-hf"`.

Another way to find the list of models running on the Konko instance is through this [endpoint](https://docs.konko.ai/reference/listmodels).

From here, we can initialize our model:

```python
chat_instance = ChatKonko(max_tokens=10, model='meta-llama/Llama-2-13b-chat-hf')
```

And run it:

```python
msg = HumanMessage(content="Hi")
chat_response = chat_instance([msg])
```

@@ -24,42 +24,11 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 60,
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Requirement already satisfied: pgvector in /Users/joyeed/langchain/langchain/.venv/lib/python3.9/site-packages (0.1.8)\n",
|
||||
"Requirement already satisfied: numpy in /Users/joyeed/langchain/langchain/.venv/lib/python3.9/site-packages (from pgvector) (1.24.3)\n",
|
||||
"Requirement already satisfied: openai in /Users/joyeed/langchain/langchain/.venv/lib/python3.9/site-packages (0.27.7)\n",
|
||||
"Requirement already satisfied: requests>=2.20 in /Users/joyeed/langchain/langchain/.venv/lib/python3.9/site-packages (from openai) (2.28.2)\n",
|
||||
"Requirement already satisfied: tqdm in /Users/joyeed/langchain/langchain/.venv/lib/python3.9/site-packages (from openai) (4.65.0)\n",
|
||||
"Requirement already satisfied: aiohttp in /Users/joyeed/langchain/langchain/.venv/lib/python3.9/site-packages (from openai) (3.8.4)\n",
|
||||
"Requirement already satisfied: charset-normalizer<4,>=2 in /Users/joyeed/langchain/langchain/.venv/lib/python3.9/site-packages (from requests>=2.20->openai) (3.1.0)\n",
|
||||
"Requirement already satisfied: idna<4,>=2.5 in /Users/joyeed/langchain/langchain/.venv/lib/python3.9/site-packages (from requests>=2.20->openai) (3.4)\n",
|
||||
"Requirement already satisfied: urllib3<1.27,>=1.21.1 in /Users/joyeed/langchain/langchain/.venv/lib/python3.9/site-packages (from requests>=2.20->openai) (1.26.15)\n",
|
||||
"Requirement already satisfied: certifi>=2017.4.17 in /Users/joyeed/langchain/langchain/.venv/lib/python3.9/site-packages (from requests>=2.20->openai) (2023.5.7)\n",
|
||||
"Requirement already satisfied: attrs>=17.3.0 in /Users/joyeed/langchain/langchain/.venv/lib/python3.9/site-packages (from aiohttp->openai) (23.1.0)\n",
|
||||
"Requirement already satisfied: multidict<7.0,>=4.5 in /Users/joyeed/langchain/langchain/.venv/lib/python3.9/site-packages (from aiohttp->openai) (6.0.4)\n",
|
||||
"Requirement already satisfied: async-timeout<5.0,>=4.0.0a3 in /Users/joyeed/langchain/langchain/.venv/lib/python3.9/site-packages (from aiohttp->openai) (4.0.2)\n",
|
||||
"Requirement already satisfied: yarl<2.0,>=1.0 in /Users/joyeed/langchain/langchain/.venv/lib/python3.9/site-packages (from aiohttp->openai) (1.9.2)\n",
|
||||
"Requirement already satisfied: frozenlist>=1.1.1 in /Users/joyeed/langchain/langchain/.venv/lib/python3.9/site-packages (from aiohttp->openai) (1.3.3)\n",
|
||||
"Requirement already satisfied: aiosignal>=1.1.2 in /Users/joyeed/langchain/langchain/.venv/lib/python3.9/site-packages (from aiohttp->openai) (1.3.1)\n",
|
||||
"Requirement already satisfied: psycopg2-binary in /Users/joyeed/langchain/langchain/.venv/lib/python3.9/site-packages (2.9.6)\n",
|
||||
"Requirement already satisfied: tiktoken in /Users/joyeed/langchain/langchain/.venv/lib/python3.9/site-packages (0.4.0)\n",
|
||||
"Requirement already satisfied: regex>=2022.1.18 in /Users/joyeed/langchain/langchain/.venv/lib/python3.9/site-packages (from tiktoken) (2023.5.5)\n",
|
||||
"Requirement already satisfied: requests>=2.26.0 in /Users/joyeed/langchain/langchain/.venv/lib/python3.9/site-packages (from tiktoken) (2.28.2)\n",
|
||||
"Requirement already satisfied: charset-normalizer<4,>=2 in /Users/joyeed/langchain/langchain/.venv/lib/python3.9/site-packages (from requests>=2.26.0->tiktoken) (3.1.0)\n",
|
||||
"Requirement already satisfied: idna<4,>=2.5 in /Users/joyeed/langchain/langchain/.venv/lib/python3.9/site-packages (from requests>=2.26.0->tiktoken) (3.4)\n",
|
||||
"Requirement already satisfied: urllib3<1.27,>=1.21.1 in /Users/joyeed/langchain/langchain/.venv/lib/python3.9/site-packages (from requests>=2.26.0->tiktoken) (1.26.15)\n",
|
||||
"Requirement already satisfied: certifi>=2017.4.17 in /Users/joyeed/langchain/langchain/.venv/lib/python3.9/site-packages (from requests>=2.26.0->tiktoken) (2023.5.7)\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Pip install necessary package\n",
|
||||
"!pip install pgvector\n",
|
||||
@@ -77,17 +46,14 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"OpenAI API Key:········\n"
|
||||
]
|
||||
"execution_count": 2,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-09-09T08:02:16.802456Z",
|
||||
"start_time": "2023-09-09T08:02:07.065604Z"
|
||||
}
|
||||
],
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import getpass\n",
|
||||
@@ -97,18 +63,20 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 61,
|
||||
"execution_count": 3,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
"tags": [],
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-09-09T08:02:19.742896Z",
|
||||
"start_time": "2023-09-09T08:02:19.732527Z"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"False"
|
||||
]
|
||||
"text/plain": "False"
|
||||
},
|
||||
"execution_count": 61,
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -123,9 +91,13 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": 4,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
"tags": [],
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-09-09T08:02:23.144824Z",
|
||||
"start_time": "2023-09-09T08:02:22.047801Z"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -138,8 +110,13 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"execution_count": 5,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-09-09T08:02:25.452472Z",
|
||||
"start_time": "2023-09-09T08:02:25.441563Z"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = TextLoader(\"../../../state_of_the_union.txt\")\n",
|
||||
@@ -152,8 +129,13 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"execution_count": 6,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-09-09T08:02:28.174088Z",
|
||||
"start_time": "2023-09-09T08:02:28.162698Z"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# PGVector needs the connection string to the database.\n",
|
||||
@@ -174,15 +156,22 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Similarity Search with Euclidean Distance (Default)"
|
||||
]
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"execution_count": 7,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-09-09T08:04:16.696625Z",
|
||||
"start_time": "2023-09-09T08:02:31.817790Z"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# The PGVector Module will try to create a table with the name of the collection.\n",
|
||||
@@ -200,8 +189,13 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"metadata": {},
|
||||
"execution_count": 8,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-09-09T08:05:11.104135Z",
|
||||
"start_time": "2023-09-09T08:05:10.548998Z"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
@@ -210,15 +204,20 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"metadata": {},
|
||||
"execution_count": 9,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-09-09T08:05:13.532334Z",
|
||||
"start_time": "2023-09-09T08:05:13.523191Z"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"Score: 0.18460171628856903\n",
|
||||
"Score: 0.18456886638850434\n",
|
||||
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
|
||||
"\n",
|
||||
"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n",
|
||||
@@ -228,27 +227,7 @@
|
||||
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"Score: 0.18460171628856903\n",
|
||||
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
|
||||
"\n",
|
||||
"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n",
|
||||
"\n",
|
||||
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
|
||||
"\n",
|
||||
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"Score: 0.18470284560586236\n",
|
||||
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
|
||||
"\n",
|
||||
"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n",
|
||||
"\n",
|
||||
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
|
||||
"\n",
|
||||
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"Score: 0.21730864082247825\n",
|
||||
"Score: 0.21742627672631343\n",
|
||||
"A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. \n",
|
||||
"\n",
|
||||
"And if we are to advance liberty and justice, we need to secure the Border and fix the immigration system. \n",
|
||||
@@ -260,6 +239,38 @@
|
||||
"We’re putting in place dedicated immigration judges so families fleeing persecution and violence can have their cases heard faster. \n",
|
||||
"\n",
|
||||
"We’re securing commitments and supporting partners in South and Central America to host more refugees and secure their own borders.\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"Score: 0.22641793174529334\n",
|
||||
"And for our LGBTQ+ Americans, let’s finally get the bipartisan Equality Act to my desk. The onslaught of state laws targeting transgender Americans and their families is wrong. \n",
|
||||
"\n",
|
||||
"As I said last year, especially to our younger transgender Americans, I will always have your back as your President, so you can be yourself and reach your God-given potential. \n",
|
||||
"\n",
|
||||
"While it often appears that we never agree, that isn’t true. I signed 80 bipartisan bills into law last year. From preventing government shutdowns to protecting Asian-Americans from still-too-common hate crimes to reforming military justice. \n",
|
||||
"\n",
|
||||
"And soon, we’ll strengthen the Violence Against Women Act that I first wrote three decades ago. It is important for us to show the nation that we can come together and do big things. \n",
|
||||
"\n",
|
||||
"So tonight I’m offering a Unity Agenda for the Nation. Four big things we can do together. \n",
|
||||
"\n",
|
||||
"First, beat the opioid epidemic.\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"Score: 0.22670040608054465\n",
|
||||
"Tonight, I’m announcing a crackdown on these companies overcharging American businesses and consumers. \n",
|
||||
"\n",
|
||||
"And as Wall Street firms take over more nursing homes, quality in those homes has gone down and costs have gone up. \n",
|
||||
"\n",
|
||||
"That ends on my watch. \n",
|
||||
"\n",
|
||||
"Medicare is going to set higher standards for nursing homes and make sure your loved ones get the care they deserve and expect. \n",
|
||||
"\n",
|
||||
"We’ll also cut costs and keep the economy going strong by giving workers a fair shot, provide more training and apprenticeships, hire them based on their skills not degrees. \n",
|
||||
"\n",
|
||||
"Let’s pass the Paycheck Fairness Act and paid leave. \n",
|
||||
"\n",
|
||||
"Raise the minimum wage to $15 an hour and extend the Child Tax Credit, so no one has to raise a family in poverty. \n",
|
||||
"\n",
|
||||
"Let’s increase Pell Grants and increase our historic support of HBCUs, and invest in what Jill—our First Lady who teaches full-time—calls America’s best-kept secret: community colleges.\n",
|
||||
"--------------------------------------------------------------------------------\n"
|
||||
]
|
||||
}
|
||||
@@ -272,6 +283,131 @@
|
||||
" print(\"-\" * 80)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"## Maximal Marginal Relevance Search (MMR)\n",
|
||||
"Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents."
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
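As a concrete illustration of the MMR idea described in the cell above, the sketch below assumes the same `db` vector store and `query` used earlier in this notebook; the `k`, `fetch_k`, and `lambda_mult` parameter names follow the common LangChain vector-store signature and should be treated as assumptions here.

```python
# Minimal sketch (assumes `db` and `query` from earlier cells).
# `fetch_k` candidates are retrieved by similarity first, then `k` of them are
# re-selected for diversity; a lower `lambda_mult` favors more diverse results.
mmr_docs = db.max_marginal_relevance_search(query, k=4, fetch_k=20, lambda_mult=0.5)
for doc in mmr_docs:
    print(doc.page_content[:100])
```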
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"docs_with_score = db.max_marginal_relevance_search_with_score(query)"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-09-09T08:05:23.276819Z",
|
||||
"start_time": "2023-09-09T08:05:21.972256Z"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"Score: 0.18453882564037527\n",
|
||||
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
|
||||
"\n",
|
||||
"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n",
|
||||
"\n",
|
||||
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
|
||||
"\n",
|
||||
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"Score: 0.23523731441720075\n",
|
||||
"We can’t change how divided we’ve been. But we can change how we move forward—on COVID-19 and other issues we must face together. \n",
|
||||
"\n",
|
||||
"I recently visited the New York City Police Department days after the funerals of Officer Wilbert Mora and his partner, Officer Jason Rivera. \n",
|
||||
"\n",
|
||||
"They were responding to a 9-1-1 call when a man shot and killed them with a stolen gun. \n",
|
||||
"\n",
|
||||
"Officer Mora was 27 years old. \n",
|
||||
"\n",
|
||||
"Officer Rivera was 22. \n",
|
||||
"\n",
|
||||
"Both Dominican Americans who’d grown up on the same streets they later chose to patrol as police officers. \n",
|
||||
"\n",
|
||||
"I spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves. \n",
|
||||
"\n",
|
||||
"I’ve worked on these issues a long time. \n",
|
||||
"\n",
|
||||
"I know what works: Investing in crime preventionand community police officers who’ll walk the beat, who’ll know the neighborhood, and who can restore trust and safety.\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"Score: 0.2448441215698569\n",
|
||||
"One was stationed at bases and breathing in toxic smoke from “burn pits” that incinerated wastes of war—medical and hazard material, jet fuel, and more. \n",
|
||||
"\n",
|
||||
"When they came home, many of the world’s fittest and best trained warriors were never the same. \n",
|
||||
"\n",
|
||||
"Headaches. Numbness. Dizziness. \n",
|
||||
"\n",
|
||||
"A cancer that would put them in a flag-draped coffin. \n",
|
||||
"\n",
|
||||
"I know. \n",
|
||||
"\n",
|
||||
"One of those soldiers was my son Major Beau Biden. \n",
|
||||
"\n",
|
||||
"We don’t know for sure if a burn pit was the cause of his brain cancer, or the diseases of so many of our troops. \n",
|
||||
"\n",
|
||||
"But I’m committed to finding out everything we can. \n",
|
||||
"\n",
|
||||
"Committed to military families like Danielle Robinson from Ohio. \n",
|
||||
"\n",
|
||||
"The widow of Sergeant First Class Heath Robinson. \n",
|
||||
"\n",
|
||||
"He was born a soldier. Army National Guard. Combat medic in Kosovo and Iraq. \n",
|
||||
"\n",
|
||||
"Stationed near Baghdad, just yards from burn pits the size of football fields. \n",
|
||||
"\n",
|
||||
"Heath’s widow Danielle is here with us tonight. They loved going to Ohio State football games. He loved building Legos with their daughter.\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"Score: 0.2513994424701056\n",
|
||||
"And I’m taking robust action to make sure the pain of our sanctions is targeted at Russia’s economy. And I will use every tool at our disposal to protect American businesses and consumers. \n",
|
||||
"\n",
|
||||
"Tonight, I can announce that the United States has worked with 30 other countries to release 60 Million barrels of oil from reserves around the world. \n",
|
||||
"\n",
|
||||
"America will lead that effort, releasing 30 Million barrels from our own Strategic Petroleum Reserve. And we stand ready to do more if necessary, unified with our allies. \n",
|
||||
"\n",
|
||||
"These steps will help blunt gas prices here at home. And I know the news about what’s happening can seem alarming. \n",
|
||||
"\n",
|
||||
"But I want you to know that we are going to be okay. \n",
|
||||
"\n",
|
||||
"When the history of this era is written Putin’s war on Ukraine will have left Russia weaker and the rest of the world stronger. \n",
|
||||
"\n",
|
||||
"While it shouldn’t have taken something so terrible for people around the world to see what’s at stake now everyone sees it clearly.\n",
|
||||
"--------------------------------------------------------------------------------\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"for doc, score in docs_with_score:\n",
|
||||
" print(\"-\" * 80)\n",
|
||||
" print(\"Score: \", score)\n",
|
||||
" print(doc.page_content)\n",
|
||||
" print(\"-\" * 80)"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-09-09T08:05:27.478580Z",
|
||||
"start_time": "2023-09-09T08:05:27.470138Z"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
|
||||
@@ -10,9 +10,9 @@
|
||||
"\n",
|
||||
"## What is Redis?\n",
|
||||
"\n",
|
||||
"Most developers from a web services background are probably familiar with Redis. At it's core, Redis is an open-source key-value store that can be used as a cache, message broker, and database. Developers choice Redis because it is fast, has a large ecosystem of client libraries, and has been deployed by major enterprises for years.\n",
|
||||
"Most developers from a web services background are probably familiar with Redis. At it's core, Redis is an open-source key-value store that can be used as a cache, message broker, and database. Developers choose Redis because it is fast, has a large ecosystem of client libraries, and has been deployed by major enterprises for years.\n",
|
||||
"\n",
|
||||
"In addition to the traditional uses of Redis. Redis also provides capabilities built directly into Redis. These capabilities include the Search and Query capability that allows users to create secondary index structures within Redis. This allows Redis to be a Vector Database, at the speed of a cache. \n",
|
||||
"On top of these traditional use cases, Redis provides additional capabilities like the Search and Query capability that allows users to create secondary index structures within Redis. This allows Redis to be a Vector Database, at the speed of a cache. \n",
|
||||
"\n",
|
||||
"\n",
|
||||
"## Redis as a Vector Database\n",
|
||||
@@ -123,7 +123,7 @@
|
||||
"source": [
|
||||
"## Install Redis Python Client\n",
|
||||
"\n",
|
||||
"Redis-py is the officially supported client by Redis. Recently released is the RedisVL client which is purpose built for the Vector Database use cases. Both can be installed with pip."
|
||||
"Redis-py is the officially supported client by Redis. Recently released is the RedisVL client which is purpose-built for the Vector Database use cases. Both can be installed with pip."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -153,9 +153,17 @@
|
||||
"import os\n",
|
||||
"import getpass\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.embeddings import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"embeddings = OpenAIEmbeddings()"
|
||||
]
|
||||
},
|
||||
@@ -215,6 +223,12 @@
|
||||
"source": [
|
||||
"## Initializing Redis\n",
|
||||
"\n",
|
||||
"To locally deploy Redis, run:\n",
|
||||
"```console\n",
|
||||
"docker run -d -p 6379:6379 -p 8001:8001 redis/redis-stack:latest\n",
|
||||
"```\n",
|
||||
"If things are running correctly you should see a nice Redis UI at http://localhost:8001. See the [Deployment Options](#deployment-options) section above for other ways to deploy.\n",
|
||||
"\n",
|
||||
"The Redis VectorStore instance can be initialized in a number of ways. There are multiple class methods that can be used to initialize a Redis VectorStore instance.\n",
|
||||
"\n",
|
||||
"- ``Redis.__init__`` - Initialize directly\n",
|
||||
@@ -223,7 +237,7 @@
|
||||
"- ``Redis.from_texts_return_keys`` - Initialize from a list of texts (optionally with metadata) and return the keys\n",
|
||||
"- ``Redis.from_existing_index`` - Initialize from an existing Redis index\n",
|
||||
"\n",
|
||||
"Below we will use the ``Redis.from_documents`` method."
|
||||
"Below we will use the ``Redis.from_texts`` method."
|
||||
]
|
||||
},
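As a complement to the constructor list above, here is a minimal sketch of ``Redis.from_texts_return_keys``, which behaves like ``from_texts`` but also hands back the generated document keys; it assumes the same `texts`, `metadata`, and `embeddings` variables and the local Redis URL used elsewhere in this notebook, and the exact signature should be treated as an assumption.

```python
# Minimal sketch (assumes texts, metadata, embeddings and a local Redis from earlier cells).
rds, keys = Redis.from_texts_return_keys(
    texts,
    embeddings,
    metadatas=metadata,
    redis_url="redis://localhost:6379",
    index_name="users",
)
print(keys[:3])  # Redis keys of the newly inserted documents
```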
|
||||
{
|
||||
@@ -234,28 +248,12 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.vectorstores.redis import Redis"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If you're not interested in the keys of your entries you can also create your redis instance from the documents."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.docstore.document import Document\n",
|
||||
"from langchain.vectorstores.redis import Redis\n",
|
||||
"\n",
|
||||
"documents = [Document(page_content=t, metadata=m) for t, m in zip(texts, metadata)]\n",
|
||||
"rds = Redis.from_documents(\n",
|
||||
" documents,\n",
|
||||
"rds = Redis.from_texts(\n",
|
||||
" texts,\n",
|
||||
" embeddings,\n",
|
||||
" metadatas=metadats,\n",
|
||||
" redis_url=\"redis://localhost:6379\",\n",
|
||||
" index_name=\"users\"\n",
|
||||
")"
|
||||
@@ -413,7 +411,8 @@
|
||||
"- ``similarity_search``: Find the most similar vectors to a given vector.\n",
|
||||
"- ``similarity_search_with_score``: Find the most similar vectors to a given vector and return the vector distance\n",
|
||||
"- ``similarity_search_limit_score``: Find the most similar vectors to a given vector and limit the number of results to the ``score_threshold``\n",
|
||||
"- ``similarity_search_with_relevance_scores``: Find the most similar vectors to a given vector and return the vector similarities"
|
||||
"- ``similarity_search_with_relevance_scores``: Find the most similar vectors to a given vector and return the vector similarities\n",
|
||||
"- ``max_marginal_relevance_search``: Find the most similar vectors to a given vector while also optimizing for diversity"
|
||||
]
|
||||
},
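To make the method list above concrete, here is a short hedged sketch of how these calls might look against the `rds` store created earlier; the ``score_threshold`` keyword is taken from the description above and is an assumption, not a verified signature.

```python
# Minimal sketch (assumes the `rds` store from earlier cells).
docs_scores = rds.similarity_search_with_score("foo", k=3)                        # (Document, distance) pairs
close_docs = rds.similarity_search_limit_score("foo", k=5, score_threshold=0.2)   # distance-limited results
diverse_docs = rds.max_marginal_relevance_search("foo", k=3)                      # diversity-aware results
```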
|
||||
{
|
||||
@@ -453,7 +452,7 @@
|
||||
"results = rds.similarity_search(\"foo\", k=3)\n",
|
||||
"meta = results[1].metadata\n",
|
||||
"print(\"Key of the document in Redis: \", meta.pop(\"id\"))\n",
|
||||
"print(\"Metadata of the document: \", meta)\n"
|
||||
"print(\"Metadata of the document: \", meta)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -596,6 +595,26 @@
|
||||
"print(results[0].metadata)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# use maximal marginal relevance search to diversify results\n",
|
||||
"results = rds.max_marginal_relevance_search(\"foo\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# the lambda_mult parameter controls the diversity of the results, the lower the more diverse\n",
|
||||
"results = rds.max_marginal_relevance_search(\"foo\", lambda_mult=0.1)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -1208,7 +1227,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.8.13"
|
||||
"version": "3.11.3"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
413
docs/extras/integrations/vectorstores/vearch.ipynb
Normal file
@@ -0,0 +1,413 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"/export/anaconda3/envs/langchainGLM6B/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
|
||||
" from .autonotebook import tqdm as notebook_tqdm\n",
|
||||
"INFO 2023-08-28 18:26:07,485-1d: \n",
|
||||
"loading model config\n",
|
||||
"llm device: cuda\n",
|
||||
"embedding device: cuda\n",
|
||||
"dir: /data/zhx/zhx/langchain-ChatGLM_new\n",
|
||||
"flagging username: e2fc35b8e87c4de18d692e951a5f7c46\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"True\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Loading checkpoint shards: 100%|██████████| 7/7 [00:06<00:00, 1.01it/s]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"\n",
|
||||
"import os, sys, torch\n",
|
||||
"from transformers import AutoTokenizer, AutoModelForCausalLM, AutoModel\n",
|
||||
"from langchain import HuggingFacePipeline, ConversationChain\n",
|
||||
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
|
||||
"from langchain.vectorstores.vearch import VearchDb\n",
|
||||
"from langchain.document_loaders import TextLoader\n",
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"from langchain.chains import RetrievalQA\n",
|
||||
"from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n",
|
||||
"\n",
|
||||
"# your local model path\n",
|
||||
"model_path =\"/data/zhx/zhx/langchain-ChatGLM_new/chatglm2-6b\" \n",
|
||||
"\n",
|
||||
"tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)\n",
|
||||
"model = AutoModel.from_pretrained(model_path, trust_remote_code=True).half().cuda(0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Human: 你好!\n",
|
||||
"ChatGLM:你好👋!我是人工智能助手 ChatGLM2-6B,很高兴见到你,欢迎问我任何问题。\n",
|
||||
"\n",
|
||||
"Human: 你知道凌波微步吗,你知道都有谁学会了吗?\n",
|
||||
"ChatGLM:凌波微步是一种步伐,最早出自于《倚天屠龙记》。在小说中,灭绝师太曾因与练习凌波微步的杨过的恩怨纠葛,而留下了一部经书,内容是记载凌波微步的起源和作用。后来,凌波微步便成为杨过和小龙女的感情象征。在现实生活中,凌波微步是一句口号,是清华大学学生社团“模型社”的社训。\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"query = \"你好!\"\n",
|
||||
"response, history = model.chat(tokenizer, query, history=[])\n",
|
||||
"print(f\"Human: {query}\\nChatGLM:{response}\\n\")\n",
|
||||
"query = \"你知道凌波微步吗,你知道都有谁学会了吗?\"\n",
|
||||
"response, history = model.chat(tokenizer, query, history=history)\n",
|
||||
"print(f\"Human: {query}\\nChatGLM:{response}\\n\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO 2023-08-28 18:27:36,037-1d: Load pretrained SentenceTransformer: /data/zhx/zhx/langchain-ChatGLM_new/text2vec/text2vec-large-chinese\n",
|
||||
"WARNING 2023-08-28 18:27:36,038-1d: No sentence-transformers model found with name /data/zhx/zhx/langchain-ChatGLM_new/text2vec/text2vec-large-chinese. Creating a new one with MEAN pooling.\n",
|
||||
"INFO 2023-08-28 18:27:38,936-1d: Use pytorch device: cuda\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Add your local knowledge files\n",
|
||||
"file_path = \"/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/天龙八部/lingboweibu.txt\"#Your local file path\"\n",
|
||||
"loader = TextLoader(file_path,encoding=\"utf-8\")\n",
|
||||
"documents = loader.load()\n",
|
||||
"\n",
|
||||
"# split text into sentences and embedding the sentences\n",
|
||||
"text_splitter = RecursiveCharacterTextSplitter(\n",
|
||||
" chunk_size=500, chunk_overlap=100)\n",
|
||||
"texts = text_splitter.split_documents(documents)\n",
|
||||
"\n",
|
||||
"#your model path\n",
|
||||
"embedding_path = '/data/zhx/zhx/langchain-ChatGLM_new/text2vec/text2vec-large-chinese'\n",
|
||||
"embeddings = HuggingFaceEmbeddings(model_name=embedding_path)\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Batches: 100%|██████████| 1/1 [00:00<00:00, 4.56it/s]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"['7aae36236f784105a0004d8ff3c7c3ad', '7e495d4e5962497db2080e84d52e75ed', '9a640124fc324a8abb0eaa31acb638b7']\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"#first add your document into vearch vectorstore\n",
|
||||
"vearch_db = VearchDb.from_documents(texts,embeddings,table_name=\"your_table_name\",metadata_path=\"/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/your_table_name\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Batches: 100%|██████████| 1/1 [00:00<00:00, 22.49it/s]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"####################第1段相关文档####################\n",
|
||||
"\n",
|
||||
"午饭过后,段誉又练“凌波微步”,走一步,吸一口气,走第二步时将气呼出,六十四卦走完,四肢全无麻痹之感,料想呼吸顺畅,便无害处。第二次再走时连走两步吸一口气,再走两步始行呼出。这“凌波微步”是以动功修习内功,脚步踏遍六十四卦一个周天,内息自然而然地也转了一个周天。因此他每走一遍,内力便有一分进益。\n",
|
||||
"\n",
|
||||
"这般练了几天,“凌波微步”已走得颇为纯熟,不须再数呼吸,纵然疾行,气息也已无所窒滞。心意既畅,跨步时渐渐想到《洛神赋》中那些与“凌波微步”有关的句子:“仿佛兮若轻云之蔽月,飘飘兮若流风之回雪”,“竦轻躯以鹤立,若将飞而未翔”,“体迅飞凫,飘忽若神”,“动无常则,若危若安。进止难期,若往若还”。\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"百度简介\n",
|
||||
"\n",
|
||||
"凌波微步是「逍遥派」独门轻功身法,精妙异常。\n",
|
||||
"\n",
|
||||
"凌波微步乃是一门极上乘的轻功,所以列于卷轴之末,以易经八八六十四卦为基础,使用者按特定顺序踏着卦象方位行进,从第一步到最后一步正好行走一个大圈。此步法精妙异常,原是要待人练成「北冥神功」,吸人内力,自身内力已【颇为深厚】之后再练。\n",
|
||||
"\n",
|
||||
"####################第2段相关文档####################\n",
|
||||
"\n",
|
||||
"《天龙八部》第五回 微步縠纹生\n",
|
||||
"\n",
|
||||
"卷轴中此外诸种经脉修习之法甚多,皆是取人内力的法门,段誉虽自语宽解,总觉习之有违本性,单是贪多务得,便非好事,当下暂不理会。\n",
|
||||
"\n",
|
||||
"卷到卷轴末端,又见到了“凌波微步”那四字,登时便想起《洛神赋》中那些句子来:“凌波微步,罗袜生尘……转眄流精,光润玉颜。含辞未吐,气若幽兰。华容婀娜,令我忘餐。”曹子建那些千古名句,在脑海中缓缓流过:“秾纤得衷,修短合度,肩若削成,腰如约素。延颈秀项,皓质呈露。芳泽无加,铅华弗御。云髻峨峨,修眉连娟。丹唇外朗,皓齿内鲜。明眸善睐,靥辅承权。瑰姿艳逸,仪静体闲。柔情绰态,媚于语言……”这些句子用在木婉清身上,“这话倒也有理”;但如用之于神仙姊姊,只怕更为适合。想到神仙姊姊的姿容体态,“皎若太阳升朝霞,灼若芙蓉出绿波”,但觉依她吩咐行事,实为人生至乐,心想:“我先来练这‘凌波微步’,此乃逃命之妙法,非害人之手段也,练之有百利而无一害。”\n",
|
||||
"\n",
|
||||
"####################第3段相关文档####################\n",
|
||||
"\n",
|
||||
"《天龙八部》第二回 玉壁月华明\n",
|
||||
"\n",
|
||||
"再展帛卷,长卷上源源皆是裸女画像,或立或卧,或现前胸,或见后背。人像的面容都是一般,但或喜或愁,或含情凝眸,或轻嗔薄怒,神情各异。一共有三十六幅图像,每幅像上均有颜色细线,注明穴道部位及练功法诀。\n",
|
||||
"\n",
|
||||
"帛卷尽处题着“凌波微步”四字,其后绘的是无数足印,注明“妇妹”、“无妄”等等字样,尽是《易经》中的方位。段誉前几日还正全心全意地钻研《易经》,一见到这些名称,登时精神大振,便似遇到故交良友一般。只见足印密密麻麻,不知有几千百个,自一个足印至另一个足印均有绿线贯串,线上绘有箭头,最后写着一行字道:“步法神妙,保身避敌,待积内力,再取敌命。”\n",
|
||||
"\n",
|
||||
"段誉心道:“神仙姊姊所遗的步法,必定精妙之极,遇到强敌时脱身逃走,那就很好,‘再取敌命’也就不必了。”\n",
|
||||
"卷好帛卷,对之作了两个揖,珍而重之地揣入怀中,转身对那玉像道:“神仙姊姊,你吩咐我朝午晚三次练功,段誉不敢有违。今后我对人加倍客气,别人不会来打我,我自然也不会去吸他内力。你这套‘凌波微步’我更要用心练熟,眼见不对,立刻溜之大吉,就吸不到他内力了。”至于“杀尽我逍遥派弟子”一节,却想也不敢去想。\n",
|
||||
"\n",
|
||||
"********ChatGLM:凌波微步是一种轻功身法,属于逍遥派独门轻功。它以《易经》中的六十四卦为基础,按照特定顺序踏着卦象方位行进,从第一步到最后一步正好行走一个大圈。凌波微步精妙异常,可以让人内力相助,自身内力颇为深厚之后再练。《天龙八部》第五回中有描述。\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"\n",
|
||||
"res=vearch_db.similarity_search(query, 3)\n",
|
||||
"query = \"你知道凌波微步吗,你知道都有谁会凌波微步?\"\n",
|
||||
"for idx,tmp in enumerate(res): \n",
|
||||
" print(f\"{'#'*20}第{idx+1}段相关文档{'#'*20}\\n\\n{tmp.page_content}\\n\")\n",
|
||||
"\n",
|
||||
"# combine your local knowleadge and query \n",
|
||||
"context = \"\".join([tmp.page_content for tmp in res])\n",
|
||||
"new_query = f\"基于以下信息,尽可能准确的来回答用户的问题。背景信息:\\n {context} \\n 回答用户这个问题:{query}\\n\\n\"\n",
|
||||
"response, history = model.chat(tokenizer, new_query, history=[])\n",
|
||||
"print(f\"********ChatGLM:{response}\\n\")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Human: 你知道vearch是什么吗?\n",
|
||||
"ChatGLM:是的,我知道 Vearch。Vearch 是一种矩阵分解 technique,用于将矩阵分解为若干个不可约矩阵的乘积。它是由 Linus Torvalds 开发的,旨在提高 Linux 内核中矩阵操作的性能。\n",
|
||||
"\n",
|
||||
"Vearch 可以通过使用特殊的操作来对矩阵进行操作,从而避免了使用昂贵的矩阵操作库。它也被广泛用于其他操作系统中,如 FreeBSD 和 Solaris。\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Batches: 100%|██████████| 1/1 [00:00<00:00, 31.59it/s]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"['04bc84fff5074b7b8990441e92e6df07', 'e221906153bb4e03bc7095dadea144de', '126034ba51934093920d8732860f340b']\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"['04bc84fff5074b7b8990441e92e6df07',\n",
|
||||
" 'e221906153bb4e03bc7095dadea144de',\n",
|
||||
" '126034ba51934093920d8732860f340b']"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"query = \"你知道vearch是什么吗?\"\n",
|
||||
"response, history = model.chat(tokenizer, query, history=history)\n",
|
||||
"print(f\"Human: {query}\\nChatGLM:{response}\\n\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"vearch_info = [\"Vearch 是一款存储大语言模型数据的向量数据库,用于存储和快速搜索模型embedding后的向量,可用于基于个人知识库的大模型应用\",\n",
|
||||
" \"Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库\",\n",
|
||||
" \"vearch 是基于C语言,go语言开发的,并提供python接口,可以直接通过pip安装\"]\n",
|
||||
"vearch_source=[{'source': '/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/tlbb/three_body.txt'},{'source': '/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/tlbb/three_body.txt'},{'source': '/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/tlbb/three_body.txt'}]\n",
|
||||
"vearch_db.add_texts(vearch_info,vearch_source)\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Batches: 100%|██████████| 1/1 [00:00<00:00, 25.57it/s]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"####################第1段相关文档####################\n",
|
||||
"\n",
|
||||
"Vearch 是一款存储大语言模型数据的向量数据库,用于存储和快速搜索模型embedding后的向量,可用于基于个人知识库的大模型应用\n",
|
||||
"\n",
|
||||
"####################第2段相关文档####################\n",
|
||||
"\n",
|
||||
"Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库\n",
|
||||
"\n",
|
||||
"####################第3段相关文档####################\n",
|
||||
"\n",
|
||||
"vearch 是基于C语言,go语言开发的,并提供python接口,可以直接通过pip安装\n",
|
||||
"\n",
|
||||
"***************ChatGLM:是的,Varch是一个向量数据库,旨在存储和快速搜索模型embedding后的向量。它支持OpenAI、Llama和ChatGLM等模型,并可以直接通过pip安装。Varch是一个基于C语言和Go语言开发的项目,并提供了Python接口。\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"query3 = \"你知道vearch是什么吗?\"\n",
|
||||
"res1 = vearch_db.similarity_search(query3, 3)\n",
|
||||
"for idx,tmp in enumerate(res1): \n",
|
||||
" print(f\"{'#'*20}第{idx+1}段相关文档{'#'*20}\\n\\n{tmp.page_content}\\n\")\n",
|
||||
"\n",
|
||||
"context1 = \"\".join([tmp.page_content for tmp in res1])\n",
|
||||
"new_query1 = f\"基于以下信息,尽可能准确的来回答用户的问题。背景信息:\\n {context1} \\n 回答用户这个问题:{query3}\\n\\n\"\n",
|
||||
"response, history = model.chat(tokenizer, new_query1, history=[])\n",
|
||||
"\n",
|
||||
"print(f\"***************ChatGLM:{response}\\n\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"delete docid True\n",
|
||||
"Human: 你知道vearch是什么吗?\n",
|
||||
"ChatGLM:Vearch是一种高分子化合物,也称为聚合物、高分子材料或合成材料。它是由重复单元组成的大型聚合物,通常由一些重复单元组成,这些单元在聚合过程中结合在一起形成一个连续的高分子链。\n",
|
||||
"\n",
|
||||
"Vearch具有许多独特的性质,例如高强度、高刚性、耐磨、耐腐蚀、耐高温等。它们通常用于制造各种应用,例如塑料制品、橡胶、纤维、建筑材料等。\n",
|
||||
"\n",
|
||||
"after delete docid to query again: {}\n",
|
||||
"get existed docid {'7aae36236f784105a0004d8ff3c7c3ad': Document(page_content='《天龙八部》第二回 玉壁月华明\\n\\n再展帛卷,长卷上源源皆是裸女画像,或立或卧,或现前胸,或见后背。人像的面容都是一般,但或喜或愁,或含情凝眸,或轻嗔薄怒,神情各异。一共有三十六幅图像,每幅像上均有颜色细线,注明穴道部位及练功法诀。\\n\\n帛卷尽处题着“凌波微步”四字,其后绘的是无数足印,注明“妇妹”、“无妄”等等字样,尽是《易经》中的方位。段誉前几日还正全心全意地钻研《易经》,一见到这些名称,登时精神大振,便似遇到故交良友一般。只见足印密密麻麻,不知有几千百个,自一个足印至另一个足印均有绿线贯串,线上绘有箭头,最后写着一行字道:“步法神妙,保身避敌,待积内力,再取敌命。”\\n\\n段誉心道:“神仙姊姊所遗的步法,必定精妙之极,遇到强敌时脱身逃走,那就很好,‘再取敌命’也就不必了。”\\n卷好帛卷,对之作了两个揖,珍而重之地揣入怀中,转身对那玉像道:“神仙姊姊,你吩咐我朝午晚三次练功,段誉不敢有违。今后我对人加倍客气,别人不会来打我,我自然也不会去吸他内力。你这套‘凌波微步’我更要用心练熟,眼见不对,立刻溜之大吉,就吸不到他内力了。”至于“杀尽我逍遥派弟子”一节,却想也不敢去想。', metadata={'source': '/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/天龙八部/lingboweibu.txt'}), '7e495d4e5962497db2080e84d52e75ed': Document(page_content='《天龙八部》第五回 微步縠纹生\\n\\n卷轴中此外诸种经脉修习之法甚多,皆是取人内力的法门,段誉虽自语宽解,总觉习之有违本性,单是贪多务得,便非好事,当下暂不理会。\\n\\n卷到卷轴末端,又见到了“凌波微步”那四字,登时便想起《洛神赋》中那些句子来:“凌波微步,罗袜生尘……转眄流精,光润玉颜。含辞未吐,气若幽兰。华容婀娜,令我忘餐。”曹子建那些千古名句,在脑海中缓缓流过:“秾纤得衷,修短合度,肩若削成,腰如约素。延颈秀项,皓质呈露。芳泽无加,铅华弗御。云髻峨峨,修眉连娟。丹唇外朗,皓齿内鲜。明眸善睐,靥辅承权。瑰姿艳逸,仪静体闲。柔情绰态,媚于语言……”这些句子用在木婉清身上,“这话倒也有理”;但如用之于神仙姊姊,只怕更为适合。想到神仙姊姊的姿容体态,“皎若太阳升朝霞,灼若芙蓉出绿波”,但觉依她吩咐行事,实为人生至乐,心想:“我先来练这‘凌波微步’,此乃逃命之妙法,非害人之手段也,练之有百利而无一害。”', metadata={'source': '/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/天龙八部/lingboweibu.txt'})}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"##delete and get function need to maintian docids \n",
|
||||
"##your docid\n",
|
||||
"res_d=vearch_db.delete(['04bc84fff5074b7b8990441e92e6df07', 'e221906153bb4e03bc7095dadea144de', '126034ba51934093920d8732860f340b'])\n",
|
||||
"print(\"delete docid\",res_d)\n",
|
||||
"query = \"你知道vearch是什么吗?\"\n",
|
||||
"response, history = model.chat(tokenizer, query, history=[])\n",
|
||||
"print(f\"Human: {query}\\nChatGLM:{response}\\n\")\n",
|
||||
"get_id_doc=vearch_db.get(['04bc84fff5074b7b8990441e92e6df07'])\n",
|
||||
"print(\"after delete docid to query again:\",get_id_doc)\n",
|
||||
"get_delet_doc=vearch_db.get(['7aae36236f784105a0004d8ff3c7c3ad', '7e495d4e5962497db2080e84d52e75ed'])\n",
|
||||
"print(\"get existed docid\",get_delet_doc)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.10.12 ('langchainGLM6B')",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.12"
|
||||
},
|
||||
"orig_nbformat": 4,
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "1fd24e7ef183310e43cbf656d21568350c6a30580b6df7fe3b34654b3770f74d"
|
||||
}
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -141,7 +141,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.10.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -0,0 +1,472 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "13afcae7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Redis self-querying \n",
|
||||
"\n",
|
||||
">[Redis](https://redis.com) is an open-source key-value store that can be used as a cache, message broker, database, vector database and more.\n",
|
||||
"\n",
|
||||
"In the notebook we'll demo the `SelfQueryRetriever` wrapped around a Redis vector store. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "68e75fb9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Creating a Redis vector store\n",
|
||||
"First we'll want to create a Redis vector store and seed it with some data. We've created a small demo set of documents that contain summaries of movies.\n",
|
||||
"\n",
|
||||
"**Note:** The self-query retriever requires you to have `lark` installed (`pip install lark`) along with integration-specific requirements."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "63a8af5b",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# !pip install redis redisvl openai tiktoken lark"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "83811610-7df3-4ede-b268-68a6a83ba9e2",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We want to use `OpenAIEmbeddings` so we have to get the OpenAI API Key."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "dd01b61b-7d32-4a55-85d6-b2d2d4f18840",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import getpass\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "cb4a5787",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.schema import Document\n",
|
||||
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
|
||||
"from langchain.vectorstores import Redis\n",
|
||||
"\n",
|
||||
"embeddings = OpenAIEmbeddings()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "bcbe04d9",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"docs = [\n",
|
||||
" Document(\n",
|
||||
" page_content=\"A bunch of scientists bring back dinosaurs and mayhem breaks loose\",\n",
|
||||
" metadata={\"year\": 1993, \"rating\": 7.7, \"director\": \"Steven Spielberg\", \"genre\": \"science fiction\"},\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" page_content=\"Leo DiCaprio gets lost in a dream within a dream within a dream within a ...\",\n",
|
||||
" metadata={\"year\": 2010, \"director\": \"Christopher Nolan\", \"genre\": \"science fiction\", \"rating\": 8.2},\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" page_content=\"A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea\",\n",
|
||||
" metadata={\"year\": 2006, \"director\": \"Satoshi Kon\", \"genre\": \"science fiction\", \"rating\": 8.6},\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" page_content=\"A bunch of normal-sized women are supremely wholesome and some men pine after them\",\n",
|
||||
" metadata={\"year\": 2019, \"director\": \"Greta Gerwig\", \"genre\": \"drama\", \"rating\": 8.3},\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" page_content=\"Toys come alive and have a blast doing so\",\n",
|
||||
" metadata={\"year\": 1995, \"director\": \"John Lasseter\", \"genre\": \"animated\", \"rating\": 9.1,},\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" page_content=\"Three men walk into the Zone, three men walk out of the Zone\",\n",
|
||||
" metadata={\n",
|
||||
" \"year\": 1979,\n",
|
||||
" \"rating\": 9.9,\n",
|
||||
" \"director\": \"Andrei Tarkovsky\",\n",
|
||||
" \"genre\": \"science fiction\",\n",
|
||||
" },\n",
|
||||
" ),\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "393aff3b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"`index_schema` does not match generated metadata schema.\n",
|
||||
"If you meant to manually override the schema, please ignore this message.\n",
|
||||
"index_schema: {'tag': [{'name': 'genre'}], 'text': [{'name': 'director'}], 'numeric': [{'name': 'year'}, {'name': 'rating'}]}\n",
|
||||
"generated_schema: {'text': [{'name': 'director'}, {'name': 'genre'}], 'numeric': [{'name': 'year'}, {'name': 'rating'}], 'tag': []}\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"index_schema = {\n",
|
||||
" \"tag\": [{\"name\": \"genre\"}],\n",
|
||||
" \"text\": [{\"name\": \"director\"}],\n",
|
||||
" \"numeric\": [{\"name\": \"year\"}, {\"name\": \"rating\"}],\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"vectorstore = Redis.from_documents(\n",
|
||||
" docs, \n",
|
||||
" embeddings, \n",
|
||||
" redis_url=\"redis://localhost:6379\",\n",
|
||||
" index_name=\"movie_reviews\",\n",
|
||||
" index_schema=index_schema,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5ecaab6d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Creating our self-querying retriever\n",
|
||||
"Now we can instantiate our retriever. To do this we'll need to provide some information upfront about the metadata fields that our documents support and a short description of the document contents."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "86e34dbf",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain.retrievers.self_query.base import SelfQueryRetriever\n",
|
||||
"from langchain.chains.query_constructor.base import AttributeInfo\n",
|
||||
"\n",
|
||||
"metadata_field_info = [\n",
|
||||
" AttributeInfo(\n",
|
||||
" name=\"genre\",\n",
|
||||
" description=\"The genre of the movie\",\n",
|
||||
" type=\"string or list[string]\",\n",
|
||||
" ),\n",
|
||||
" AttributeInfo(\n",
|
||||
" name=\"year\",\n",
|
||||
" description=\"The year the movie was released\",\n",
|
||||
" type=\"integer\",\n",
|
||||
" ),\n",
|
||||
" AttributeInfo(\n",
|
||||
" name=\"director\",\n",
|
||||
" description=\"The name of the movie director\",\n",
|
||||
" type=\"string\",\n",
|
||||
" ),\n",
|
||||
" AttributeInfo(\n",
|
||||
" name=\"rating\", description=\"A 1-10 rating for the movie\", type=\"float\"\n",
|
||||
" ),\n",
|
||||
"]\n",
|
||||
"document_content_description = \"Brief summary of a movie\"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "ea1126cb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"retriever = SelfQueryRetriever.from_llm(\n",
|
||||
" llm, \n",
|
||||
" vectorstore, \n",
|
||||
" document_content_description, \n",
|
||||
" metadata_field_info, \n",
|
||||
" verbose=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ea9df8d4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Testing it out\n",
|
||||
"And now we can try actually using our retriever!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "38a126e9",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"/Users/bagatur/langchain/libs/langchain/langchain/chains/llm.py:278: UserWarning: The predict_and_parse method is deprecated, instead pass an output parser directly to LLMChain.\n",
|
||||
" warnings.warn(\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"query='dinosaur' filter=None limit=None\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='A bunch of scientists bring back dinosaurs and mayhem breaks loose', metadata={'id': 'doc:movie_reviews:7b5481d753bc4135851b66fa61def7fb', 'director': 'Steven Spielberg', 'genre': 'science fiction', 'year': '1993', 'rating': '7.7'}),\n",
|
||||
" Document(page_content='Toys come alive and have a blast doing so', metadata={'id': 'doc:movie_reviews:9e4e84daa0374941a6aa4274e9bbb607', 'director': 'John Lasseter', 'genre': 'animated', 'year': '1995', 'rating': '9.1'}),\n",
|
||||
" Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'id': 'doc:movie_reviews:2cc66f38bfbd438eb3a045d90a1a4088', 'director': 'Andrei Tarkovsky', 'genre': 'science fiction', 'year': '1979', 'rating': '9.9'}),\n",
|
||||
" Document(page_content='A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea', metadata={'id': 'doc:movie_reviews:edf567b1d5334e02b2a4c692d853c80c', 'director': 'Satoshi Kon', 'genre': 'science fiction', 'year': '2006', 'rating': '8.6'})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# This example only specifies a relevant query\n",
|
||||
"retriever.get_relevant_documents(\"What are some movies about dinosaurs\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "fc3f1e6e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"query=' ' filter=Comparison(comparator=<Comparator.GT: 'gt'>, attribute='rating', value=8.4) limit=None\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='Toys come alive and have a blast doing so', metadata={'id': 'doc:movie_reviews:9e4e84daa0374941a6aa4274e9bbb607', 'director': 'John Lasseter', 'genre': 'animated', 'year': '1995', 'rating': '9.1'}),\n",
|
||||
" Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'id': 'doc:movie_reviews:2cc66f38bfbd438eb3a045d90a1a4088', 'director': 'Andrei Tarkovsky', 'genre': 'science fiction', 'year': '1979', 'rating': '9.9'}),\n",
|
||||
" Document(page_content='A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea', metadata={'id': 'doc:movie_reviews:edf567b1d5334e02b2a4c692d853c80c', 'director': 'Satoshi Kon', 'genre': 'science fiction', 'year': '2006', 'rating': '8.6'})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# This example only specifies a filter\n",
|
||||
"retriever.get_relevant_documents(\"I want to watch a movie rated higher than 8.4\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "b19d4da0",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"query='women' filter=Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='director', value='Greta Gerwig') limit=None\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='A bunch of normal-sized women are supremely wholesome and some men pine after them', metadata={'id': 'doc:movie_reviews:bb899807b93c442083fd45e75a4779d5', 'director': 'Greta Gerwig', 'genre': 'drama', 'year': '2019', 'rating': '8.3'})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# This example specifies a query and a filter\n",
|
||||
"retriever.get_relevant_documents(\"Has Greta Gerwig directed any movies about women\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "f900e40e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"query=' ' filter=Operation(operator=<Operator.AND: 'and'>, arguments=[Comparison(comparator=<Comparator.GTE: 'gte'>, attribute='rating', value=8.5), Comparison(comparator=<Comparator.CONTAIN: 'contain'>, attribute='genre', value='science fiction')]) limit=None\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'id': 'doc:movie_reviews:2cc66f38bfbd438eb3a045d90a1a4088', 'director': 'Andrei Tarkovsky', 'genre': 'science fiction', 'year': '1979', 'rating': '9.9'}),\n",
|
||||
" Document(page_content='A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea', metadata={'id': 'doc:movie_reviews:edf567b1d5334e02b2a4c692d853c80c', 'director': 'Satoshi Kon', 'genre': 'science fiction', 'year': '2006', 'rating': '8.6'})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# This example specifies a composite filter\n",
|
||||
"retriever.get_relevant_documents(\n",
|
||||
" \"What's a highly rated (above 8.5) science fiction film?\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "12a51522",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"query='toys' filter=Operation(operator=<Operator.AND: 'and'>, arguments=[Comparison(comparator=<Comparator.GT: 'gt'>, attribute='year', value=1990), Comparison(comparator=<Comparator.LT: 'lt'>, attribute='year', value=2005), Comparison(comparator=<Comparator.CONTAIN: 'contain'>, attribute='genre', value='animated')]) limit=None\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='Toys come alive and have a blast doing so', metadata={'id': 'doc:movie_reviews:9e4e84daa0374941a6aa4274e9bbb607', 'director': 'John Lasseter', 'genre': 'animated', 'year': '1995', 'rating': '9.1'})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# This example specifies a query and composite filter\n",
|
||||
"retriever.get_relevant_documents(\n",
|
||||
" \"What's a movie after 1990 but before 2005 that's all about toys, and preferably is animated\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "39bd1de1-b9fe-4a98-89da-58d8a7a6ae51",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Filter k\n",
|
||||
"\n",
|
||||
"We can also use the self query retriever to specify `k`: the number of documents to fetch.\n",
|
||||
"\n",
|
||||
"We can do this by passing `enable_limit=True` to the constructor."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "bff36b88-b506-4877-9c63-e5a1a8d78e64",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"retriever = SelfQueryRetriever.from_llm(\n",
|
||||
" llm,\n",
|
||||
" vectorstore,\n",
|
||||
" document_content_description,\n",
|
||||
" metadata_field_info,\n",
|
||||
" enable_limit=True,\n",
|
||||
" verbose=True,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "2758d229-4f97-499c-819f-888acaf8ee10",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"query='dinosaur' filter=None limit=2\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='A bunch of scientists bring back dinosaurs and mayhem breaks loose', metadata={'id': 'doc:movie_reviews:7b5481d753bc4135851b66fa61def7fb', 'director': 'Steven Spielberg', 'genre': 'science fiction', 'year': '1993', 'rating': '7.7'}),\n",
|
||||
" Document(page_content='Toys come alive and have a blast doing so', metadata={'id': 'doc:movie_reviews:9e4e84daa0374941a6aa4274e9bbb607', 'director': 'John Lasseter', 'genre': 'animated', 'year': '1995', 'rating': '9.1'})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# This example only specifies a relevant query\n",
|
||||
"retriever.get_relevant_documents(\"what are two movies about dinosaurs\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "poetry-venv",
|
||||
"language": "python",
|
||||
"name": "poetry-venv"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
15
docs/integrations/vearch.md
Normal file
@@ -0,0 +1,15 @@
|
||||
# Vearch
|
||||
|
||||
Vearch is a scalable distributed system for efficient similarity search of deep learning vectors.
|
||||
|
||||
# Installation and Setup
|
||||
|
||||
The Vearch Python SDK enables you to use Vearch locally. It can be installed easily with `pip install vearch`.
|
||||
|
||||
# Vectorstore
|
||||
|
||||
Vearch can also be used as a vector store. Most details are in [this notebook](docs/modules/indexes/vectorstores/examples/vearch.ipynb).
|
||||
|
||||
```python
|
||||
from langchain.vectorstores import Vearch
|
||||
```
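For a sense of the end-to-end flow, here is a minimal sketch mirroring the usage shown in the Vearch notebook; ``table_name`` and ``metadata_path`` are placeholders, and ``texts``/``embeddings`` are assumed to be previously prepared documents and an embeddings object.

```python
# Minimal sketch mirroring the notebook usage; table_name and paths are placeholders.
from langchain.vectorstores.vearch import VearchDb

vearch_db = VearchDb.from_documents(
    texts,                      # split Documents
    embeddings,                 # any LangChain embeddings implementation
    table_name="your_table_name",
    metadata_path="/path/to/your_table_name",
)
docs = vearch_db.similarity_search("your question", 3)
```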
|
||||
@@ -7,7 +7,16 @@ import logging
|
||||
import time
|
||||
from abc import abstractmethod
|
||||
from pathlib import Path
|
||||
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
|
||||
from typing import (
|
||||
Any,
|
||||
Callable,
|
||||
Dict,
|
||||
List,
|
||||
Optional,
|
||||
Sequence,
|
||||
Tuple,
|
||||
Union,
|
||||
)
|
||||
|
||||
import yaml
|
||||
|
||||
@@ -36,6 +45,7 @@ from langchain.schema import (
|
||||
)
|
||||
from langchain.schema.language_model import BaseLanguageModel
|
||||
from langchain.schema.messages import BaseMessage
|
||||
from langchain.schema.runnable import Runnable
|
||||
from langchain.tools.base import BaseTool
|
||||
from langchain.utilities.asyncio import asyncio_timeout
|
||||
from langchain.utils.input import get_color_mapping
|
||||
@@ -307,6 +317,71 @@ class AgentOutputParser(BaseOutputParser):
|
||||
"""Parse text into agent action/finish."""
|
||||
|
||||
|
||||
class RunnableAgent(BaseSingleActionAgent):
|
||||
"""Agent powered by runnables."""
|
||||
|
||||
runnable: Runnable[dict, Union[AgentAction, AgentFinish]]
|
||||
"""Runnable to call to get agent action."""
|
||||
specified_input_keys: List[str] = []
|
||||
"""Input keys."""
|
||||
|
||||
class Config:
|
||||
"""Configuration for this pydantic object."""
|
||||
|
||||
arbitrary_types_allowed = True
|
||||
|
||||
@property
|
||||
def input_keys(self) -> List[str]:
|
||||
"""Return the input keys.
|
||||
|
||||
Returns:
|
||||
List of input keys.
|
||||
"""
|
||||
return self.specified_input_keys
|
||||
|
||||
def plan(
|
||||
self,
|
||||
intermediate_steps: List[Tuple[AgentAction, str]],
|
||||
callbacks: Callbacks = None,
|
||||
**kwargs: Any,
|
||||
) -> Union[AgentAction, AgentFinish]:
|
||||
"""Given input, decided what to do.
|
||||
|
||||
Args:
|
||||
intermediate_steps: Steps the LLM has taken to date,
|
||||
along with the observations.
|
||||
callbacks: Callbacks to run.
|
||||
**kwargs: User inputs.
|
||||
|
||||
Returns:
|
||||
Action specifying what tool to use.
|
||||
"""
|
||||
inputs = {**kwargs, **{"intermediate_steps": intermediate_steps}}
|
||||
output = self.runnable.invoke(inputs, config={"callbacks": callbacks})
|
||||
return output
|
||||
|
||||
async def aplan(
|
||||
self,
|
||||
intermediate_steps: List[Tuple[AgentAction, str]],
|
||||
callbacks: Callbacks = None,
|
||||
**kwargs: Any,
|
||||
) -> Union[AgentAction, AgentFinish]:
|
||||
"""Given input, decided what to do.
|
||||
|
||||
Args:
|
||||
intermediate_steps: Steps the LLM has taken to date,
|
||||
along with observations
|
||||
callbacks: Callbacks to run.
|
||||
**kwargs: User inputs.
|
||||
|
||||
Returns:
|
||||
Action specifying what tool to use.
|
||||
"""
|
||||
inputs = {**kwargs, **{"intermediate_steps": intermediate_steps}}
|
||||
output = await self.runnable.ainvoke(inputs, config={"callbacks": callbacks})
|
||||
return output
|
||||
|
||||
|
||||
class LLMSingleActionAgent(BaseSingleActionAgent):
|
||||
"""Base class for single action agents."""
|
||||
|
||||
@@ -725,6 +800,14 @@ s
|
||||
)
|
||||
return values
|
||||
|
||||
@root_validator(pre=True)
|
||||
def validate_runnable_agent(cls, values: Dict) -> Dict:
|
||||
"""Convert runnable to agent if passed in."""
|
||||
agent = values["agent"]
|
||||
if isinstance(agent, Runnable):
|
||||
values["agent"] = RunnableAgent(runnable=agent)
|
||||
return values
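# Because of this validator, an LCEL Runnable that maps inputs to an AgentAction or
# AgentFinish can be passed directly as ``agent`` when constructing the executor,
# e.g. ``AgentExecutor(agent=agent_runnable, tools=tool_list)``; it is wrapped in a
# RunnableAgent automatically (illustrative sketch, names are placeholders).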
|
||||
|
||||
def save(self, file_path: Union[Path, str]) -> None:
|
||||
"""Raise error - saving not supported for Agent Executors."""
|
||||
raise ValueError(
|
||||
|
||||
188
libs/langchain/langchain/callbacks/confident_callback.py
Normal file
@@ -0,0 +1,188 @@
|
||||
# flake8: noqa
|
||||
import os
|
||||
import warnings
|
||||
from typing import Any, Dict, List, Optional, Union
|
||||
|
||||
from langchain.callbacks.base import BaseCallbackHandler
|
||||
from langchain.schema import AgentAction, AgentFinish, LLMResult
|
||||
|
||||
|
||||
class DeepEvalCallbackHandler(BaseCallbackHandler):
|
||||
"""Callback Handler that logs into deepeval.
|
||||
|
||||
Args:
|
||||
implementation_name: name of the `implementation` in deepeval
|
||||
metrics: A list of metrics
|
||||
|
||||
Raises:
|
||||
ImportError: if the `deepeval` package is not installed.
|
||||
|
||||
Examples:
|
||||
>>> from langchain.llms import OpenAI
|
||||
>>> from langchain.callbacks import DeepEvalCallbackHandler
|
||||
>>> from deepeval.metrics import AnswerRelevancy
|
||||
>>> metric = AnswerRelevancy(minimum_score=0.3)
|
||||
>>> deepeval_callback = DeepEvalCallbackHandler(
|
||||
... implementation_name="exampleImplementation",
|
||||
... metrics=[metric],
|
||||
... )
|
||||
>>> llm = OpenAI(
|
||||
... temperature=0,
|
||||
... callbacks=[deepeval_callback],
|
||||
... verbose=True,
|
||||
... openai_api_key="API_KEY_HERE",
|
||||
... )
|
||||
>>> llm.generate([
|
||||
... "What is the best evaluation tool out there? (no bias at all)",
|
||||
... ])
|
||||
"Deepeval, no doubt about it."
|
||||
"""
|
||||
|
||||
REPO_URL: str = "https://github.com/confident-ai/deepeval"
|
||||
ISSUES_URL: str = f"{REPO_URL}/issues"
|
||||
BLOG_URL: str = "https://docs.confident-ai.com" # noqa: E501
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
metrics: List[Any],
|
||||
implementation_name: Optional[str] = None,
|
||||
) -> None:
|
||||
"""Initializes the `deepevalCallbackHandler`.
|
||||
|
||||
Args:
|
||||
implementation_name: Name of the implementation you want.
|
||||
metrics: What metrics do you want to track?
|
||||
|
||||
Raises:
|
||||
ImportError: if the `deepeval` package is not installed.
|
||||
ConnectionError: if the connection to deepeval fails.
|
||||
"""
|
||||
|
||||
super().__init__()
|
||||
|
||||
# Import deepeval (not via `import_deepeval` to keep hints in IDEs)
|
||||
try:
|
||||
import deepeval # ignore: F401,I001
|
||||
except ImportError:
|
||||
raise ImportError(
|
||||
"""To use the deepeval callback manager you need to have the
|
||||
`deepeval` Python package installed. Please install it with
|
||||
`pip install deepeval`"""
|
||||
)
|
||||
|
||||
if os.path.exists(".deepeval"):
|
||||
warnings.warn(
|
||||
"""You are currently not logging anything to the dashboard, we
|
||||
recommend using `deepeval login`."""
|
||||
)
|
||||
|
||||
# Set the deepeval variables
|
||||
self.implementation_name = implementation_name
|
||||
self.metrics = metrics
|
||||
|
||||
warnings.warn(
|
||||
(
|
||||
"The `DeepEvalCallbackHandler` is currently in beta and is subject to"
|
||||
" change based on updates to `langchain`. Please report any issues to"
|
||||
f" {self.ISSUES_URL} as an `integration` issue."
|
||||
),
|
||||
)
|
||||
|
||||
def on_llm_start(
|
||||
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
|
||||
) -> None:
|
||||
"""Store the prompts"""
|
||||
self.prompts = prompts
|
||||
|
||||
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
|
||||
"""Do nothing when a new token is generated."""
|
||||
pass
|
||||
|
||||
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
|
||||
"""Log records to deepeval when an LLM ends."""
|
||||
from deepeval.metrics.answer_relevancy import AnswerRelevancy
|
||||
from deepeval.metrics.bias_classifier import UnBiasedMetric
|
||||
from deepeval.metrics.metric import Metric
|
||||
from deepeval.metrics.toxic_classifier import NonToxicMetric
|
||||
|
||||
for metric in self.metrics:
|
||||
for i, generation in enumerate(response.generations):
|
||||
# Here, we only measure the first generation's output
|
||||
output = generation[0].text
|
||||
query = self.prompts[i]
|
||||
if isinstance(metric, AnswerRelevancy):
|
||||
result = metric.measure(
|
||||
output=output,
|
||||
query=query,
|
||||
)
|
||||
print(f"Answer Relevancy: {result}")
|
||||
elif isinstance(metric, UnBiasedMetric):
|
||||
score = metric.measure(output)
|
||||
print(f"Bias Score: {score}")
|
||||
elif isinstance(metric, NonToxicMetric):
|
||||
score = metric.measure(output)
|
||||
print(f"Toxic Score: {score}")
|
||||
else:
|
||||
raise ValueError(
|
||||
f"""Metric {metric.__name__} is not supported by deepeval
|
||||
callbacks."""
|
||||
)
|
||||
|
||||
def on_llm_error(
|
||||
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
|
||||
) -> None:
|
||||
"""Do nothing when LLM outputs an error."""
|
||||
pass
|
||||
|
||||
def on_chain_start(
|
||||
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
|
||||
) -> None:
|
||||
"""Do nothing when chain starts"""
|
||||
pass
|
||||
|
||||
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
|
||||
"""Do nothing when chain ends."""
|
||||
pass
|
||||
|
||||
def on_chain_error(
|
||||
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
|
||||
) -> None:
|
||||
"""Do nothing when LLM chain outputs an error."""
|
||||
pass
|
||||
|
||||
def on_tool_start(
|
||||
self,
|
||||
serialized: Dict[str, Any],
|
||||
input_str: str,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Do nothing when tool starts."""
|
||||
pass
|
||||
|
||||
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
|
||||
"""Do nothing when agent takes a specific action."""
|
||||
pass
|
||||
|
||||
def on_tool_end(
|
||||
self,
|
||||
output: str,
|
||||
observation_prefix: Optional[str] = None,
|
||||
llm_prefix: Optional[str] = None,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Do nothing when tool ends."""
|
||||
pass
|
||||
|
||||
def on_tool_error(
|
||||
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
|
||||
) -> None:
|
||||
"""Do nothing when tool outputs an error."""
|
||||
pass
|
||||
|
||||
def on_text(self, text: str, **kwargs: Any) -> None:
|
||||
"""Do nothing"""
|
||||
pass
|
||||
|
||||
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
|
||||
"""Do nothing"""
|
||||
pass
|
||||
@@ -14,6 +14,70 @@ from langchain.schema.output import LLMResult
|
||||
DEFAULT_API_URL = "https://app.llmonitor.com"
|
||||
|
||||
|
||||
def _serialize(obj: Any) -> Union[Dict[str, Any], List[Any], Any]:
|
||||
if hasattr(obj, "to_json"):
|
||||
return obj.to_json()
|
||||
|
||||
if isinstance(obj, dict):
|
||||
return {key: _serialize(value) for key, value in obj.items()}
|
||||
|
||||
if isinstance(obj, list):
|
||||
return [_serialize(element) for element in obj]
|
||||
|
||||
return obj
|
||||
|
||||
|
||||
def _parse_input(raw_input: Any) -> Any:
|
||||
if not raw_input:
|
||||
return None
|
||||
|
||||
if not isinstance(raw_input, dict):
|
||||
return _serialize(raw_input)
|
||||
|
||||
input_value = raw_input.get("input")
|
||||
inputs_value = raw_input.get("inputs")
|
||||
question_value = raw_input.get("question")
|
||||
query_value = raw_input.get("query")
|
||||
|
||||
if input_value:
|
||||
return input_value
|
||||
if inputs_value:
|
||||
return inputs_value
|
||||
if question_value:
|
||||
return question_value
|
||||
if query_value:
|
||||
return query_value
|
||||
|
||||
return _serialize(raw_input)
|
||||
|
||||
|
||||
def _parse_output(raw_output: dict) -> Any:
|
||||
if not raw_output:
|
||||
return None
|
||||
|
||||
if not isinstance(raw_output, dict):
|
||||
return _serialize(raw_output)
|
||||
|
||||
text_value = raw_output.get("text")
|
||||
output_value = raw_output.get("output")
|
||||
output_text_value = raw_output.get("output_text")
|
||||
answer_value = raw_output.get("answer")
|
||||
result_value = raw_output.get("result")
|
||||
|
||||
if text_value:
|
||||
return text_value
|
||||
if answer_value:
|
||||
return answer_value
|
||||
if output_value:
|
||||
return output_value
|
||||
if output_text_value:
|
||||
return output_text_value
|
||||
if result_value:
|
||||
return result_value
|
||||
|
||||
return _serialize(raw_output)
|
||||
|
||||
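To make the reduction these helpers perform concrete, a few illustrative calls (not part of the diff) showing how heterogeneous chain inputs and outputs collapse to a single loggable value:

_parse_input({"input": "What is 2 + 2?", "chat_history": []})   # -> "What is 2 + 2?"
_parse_input("a raw string prompt")                             # -> "a raw string prompt"
_parse_output({"output": "4", "sources": []})                   # -> "4"
_parse_output({"unrecognised_key": 1})                          # -> falls back to the serialized dict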
|
||||
def _parse_lc_role(
|
||||
role: str,
|
||||
) -> Union[Literal["user", "ai", "system", "function"], None]:
|
||||
@@ -29,8 +93,27 @@ def _parse_lc_role(
|
||||
return None
|
||||
|
||||
|
||||
def _serialize_lc_message(message: BaseMessage) -> Dict[str, Any]:
|
||||
return {"text": message.content, "role": _parse_lc_role(message.type)}
|
||||
def _get_user_id(metadata: Any) -> Any:
|
||||
metadata = metadata or {}
|
||||
user_id = metadata.get("user_id")
|
||||
if user_id is None:
|
||||
user_id = metadata.get("userId")
|
||||
return user_id
|
||||
|
||||
|
||||
def _parse_lc_message(message: BaseMessage) -> Dict[str, Any]:
|
||||
parsed = {"text": message.content, "role": _parse_lc_role(message.type)}
|
||||
|
||||
function_call = (message.additional_kwargs or {}).get("function_call")
|
||||
|
||||
if function_call is not None:
|
||||
parsed["functionCall"] = function_call
|
||||
|
||||
return parsed
|
||||
|
||||
|
||||
def _parse_lc_messages(messages: Union[List[BaseMessage], Any]) -> List[Dict[str, Any]]:
|
||||
return [_parse_lc_message(message) for message in messages]
|
||||
|
||||
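A quick sketch of the two new helpers (values are illustrative; the role string comes from `_parse_lc_role` above):

from langchain.schema.messages import AIMessage

_get_user_id({"user_id": "u-123"})   # -> "u-123"
_get_user_id({"userId": "u-123"})    # -> "u-123" (camelCase fallback)
_get_user_id(None)                   # -> None

msg = AIMessage(content="", additional_kwargs={"function_call": {"name": "search", "arguments": "{}"}})
_parse_lc_message(msg)               # -> {"text": "", "role": "ai", "functionCall": {...}}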
|
||||
class LLMonitorCallbackHandler(BaseCallbackHandler):
|
||||
@@ -62,14 +145,20 @@ class LLMonitorCallbackHandler(BaseCallbackHandler):
|
||||
|
||||
__api_url: str
|
||||
__app_id: str
|
||||
__verbose: bool
|
||||
|
||||
def __init__(
|
||||
self, app_id: Union[str, None] = None, api_url: Union[str, None] = None
|
||||
self,
|
||||
app_id: Union[str, None] = None,
|
||||
api_url: Union[str, None] = None,
|
||||
verbose: bool = False,
|
||||
) -> None:
|
||||
super().__init__()
|
||||
|
||||
self.__api_url = api_url or os.getenv("LLMONITOR_API_URL") or DEFAULT_API_URL
|
||||
|
||||
self.__verbose = verbose or bool(os.getenv("LLMONITOR_VERBOSE"))
|
||||
|
||||
_app_id = app_id or os.getenv("LLMONITOR_APP_ID")
|
||||
if _app_id is None:
|
||||
raise ValueError(
|
||||
@@ -89,7 +178,12 @@ class LLMonitorCallbackHandler(BaseCallbackHandler):
|
||||
|
||||
def __send_event(self, event: Dict[str, Any]) -> None:
|
||||
headers = {"Content-Type": "application/json"}
|
||||
|
||||
event = {**event, "app": self.__app_id, "timestamp": str(datetime.utcnow())}
|
||||
|
||||
if self.__verbose:
|
||||
print("llmonitor_callback", event)
|
||||
|
||||
data = {"events": event}
|
||||
requests.post(headers=headers, url=f"{self.__api_url}/api/report", json=data)
|
||||
|
||||
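Combined with the new `verbose` flag on `__init__`, the handler can now echo every event locally before it is posted. A minimal sketch, assuming the handler is exported from `langchain.callbacks` and using a placeholder app id:

from langchain.callbacks import LLMonitorCallbackHandler
from langchain.llms import OpenAI

handler = LLMonitorCallbackHandler(app_id="YOUR_APP_ID", verbose=True)  # prints each event dict
llm = OpenAI(callbacks=[handler])
llm("Tell me a joke")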
@@ -110,7 +204,7 @@ class LLMonitorCallbackHandler(BaseCallbackHandler):
|
||||
"userId": (metadata or {}).get("userId"),
|
||||
"runId": str(run_id),
|
||||
"parentRunId": str(parent_run_id) if parent_run_id else None,
|
||||
"input": prompts[0],
|
||||
"input": _parse_input(prompts),
|
||||
"name": kwargs.get("invocation_params", {}).get("model_name"),
|
||||
"tags": tags,
|
||||
"metadata": metadata,
|
||||
@@ -128,13 +222,15 @@ class LLMonitorCallbackHandler(BaseCallbackHandler):
|
||||
metadata: Union[Dict[str, Any], None] = None,
|
||||
**kwargs: Any,
|
||||
) -> Any:
|
||||
user_id = _get_user_id(metadata)
|
||||
|
||||
event = {
|
||||
"event": "start",
|
||||
"type": "llm",
|
||||
"userId": (metadata or {}).get("userId"),
|
||||
"userId": user_id,
|
||||
"runId": str(run_id),
|
||||
"parentRunId": str(parent_run_id) if parent_run_id else None,
|
||||
"input": [_serialize_lc_message(message[0]) for message in messages],
|
||||
"input": _parse_lc_messages(messages[0]),
|
||||
"name": kwargs.get("invocation_params", {}).get("model_name"),
|
||||
"tags": tags,
|
||||
"metadata": metadata,
|
||||
@@ -151,36 +247,26 @@ class LLMonitorCallbackHandler(BaseCallbackHandler):
|
||||
) -> None:
|
||||
token_usage = (response.llm_output or {}).get("token_usage", {})
|
||||
|
||||
parsed_output = _parse_lc_messages(
|
||||
map(
|
||||
lambda o: o.message if hasattr(o, "message") else None,
|
||||
response.generations[0],
|
||||
)
|
||||
)
|
||||
|
||||
event = {
|
||||
"event": "end",
|
||||
"type": "llm",
|
||||
"runId": str(run_id),
|
||||
"parent_run_id": str(parent_run_id) if parent_run_id else None,
|
||||
"output": {"text": response.generations[0][0].text, "role": "ai"},
|
||||
"output": parsed_output,
|
||||
"tokensUsage": {
|
||||
"prompt": token_usage.get("prompt_tokens", 0),
|
||||
"completion": token_usage.get("completion_tokens", 0),
|
||||
"prompt": token_usage.get("prompt_tokens"),
|
||||
"completion": token_usage.get("completion_tokens"),
|
||||
},
|
||||
}
|
||||
self.__send_event(event)
|
||||
|
||||
def on_llm_error(
|
||||
self,
|
||||
error: Union[Exception, KeyboardInterrupt],
|
||||
*,
|
||||
run_id: UUID,
|
||||
parent_run_id: Union[UUID, None] = None,
|
||||
**kwargs: Any,
|
||||
) -> Any:
|
||||
event = {
|
||||
"event": "error",
|
||||
"type": "llm",
|
||||
"runId": str(run_id),
|
||||
"parent_run_id": str(parent_run_id) if parent_run_id else None,
|
||||
"error": {"message": str(error), "stack": traceback.format_exc()},
|
||||
}
|
||||
self.__send_event(event)
|
||||
|
||||
def on_tool_start(
|
||||
self,
|
||||
serialized: Dict[str, Any],
|
||||
@@ -192,10 +278,11 @@ class LLMonitorCallbackHandler(BaseCallbackHandler):
|
||||
metadata: Union[Dict[str, Any], None] = None,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
user_id = _get_user_id(metadata)
|
||||
event = {
|
||||
"event": "start",
|
||||
"type": "tool",
|
||||
"userId": (metadata or {}).get("userId"),
|
||||
"userId": user_id,
|
||||
"runId": str(run_id),
|
||||
"parentRunId": str(parent_run_id) if parent_run_id else None,
|
||||
"name": serialized.get("name"),
|
||||
@@ -236,25 +323,34 @@ class LLMonitorCallbackHandler(BaseCallbackHandler):
|
||||
) -> Any:
|
||||
name = serialized.get("id", [None, None, None, None])[3]
|
||||
type = "chain"
|
||||
metadata = metadata or {}
|
||||
|
||||
agentName = metadata.get("agent_name")
|
||||
if agentName is None:
|
||||
agentName = metadata.get("agentName")
|
||||
|
||||
agentName = (metadata or {}).get("agentName")
|
||||
if agentName is not None:
|
||||
type = "agent"
|
||||
name = agentName
|
||||
if name == "AgentExecutor" or name == "PlanAndExecute":
|
||||
type = "agent"
|
||||
|
||||
if parent_run_id is not None:
|
||||
type = "chain"
|
||||
|
||||
user_id = _get_user_id(metadata)
|
||||
|
||||
event = {
|
||||
"event": "start",
|
||||
"type": type,
|
||||
"userId": (metadata or {}).get("userId"),
|
||||
"userId": user_id,
|
||||
"runId": str(run_id),
|
||||
"parentRunId": str(parent_run_id) if parent_run_id else None,
|
||||
"input": inputs.get("input", inputs),
|
||||
"input": _parse_input(inputs),
|
||||
"tags": tags,
|
||||
"metadata": metadata,
|
||||
"name": serialized.get("id", [None, None, None, None])[3],
|
||||
"name": name,
|
||||
}
|
||||
|
||||
self.__send_event(event)
|
||||
|
||||
def on_chain_end(
|
||||
@@ -269,7 +365,42 @@ class LLMonitorCallbackHandler(BaseCallbackHandler):
|
||||
"event": "end",
|
||||
"type": "chain",
|
||||
"runId": str(run_id),
|
||||
"output": outputs.get("output", outputs),
|
||||
"output": _parse_output(outputs),
|
||||
}
|
||||
self.__send_event(event)
|
||||
|
||||
def on_agent_action(
|
||||
self,
|
||||
action: AgentAction,
|
||||
*,
|
||||
run_id: UUID,
|
||||
parent_run_id: Union[UUID, None] = None,
|
||||
**kwargs: Any,
|
||||
) -> Any:
|
||||
event = {
|
||||
"event": "start",
|
||||
"type": "tool",
|
||||
"runId": str(run_id),
|
||||
"parentRunId": str(parent_run_id) if parent_run_id else None,
|
||||
"name": action.tool,
|
||||
"input": _parse_input(action.tool_input),
|
||||
}
|
||||
self.__send_event(event)
|
||||
|
||||
def on_agent_finish(
|
||||
self,
|
||||
finish: AgentFinish,
|
||||
*,
|
||||
run_id: UUID,
|
||||
parent_run_id: Union[UUID, None] = None,
|
||||
**kwargs: Any,
|
||||
) -> Any:
|
||||
event = {
|
||||
"event": "end",
|
||||
"type": "agent",
|
||||
"runId": str(run_id),
|
||||
"parentRunId": str(parent_run_id) if parent_run_id else None,
|
||||
"output": _parse_output(finish.return_values),
|
||||
}
|
||||
self.__send_event(event)
|
||||
|
||||
@@ -290,38 +421,37 @@ class LLMonitorCallbackHandler(BaseCallbackHandler):
|
||||
}
|
||||
self.__send_event(event)
|
||||
|
||||
def on_agent_action(
|
||||
def on_tool_error(
|
||||
self,
|
||||
action: AgentAction,
|
||||
error: Union[Exception, KeyboardInterrupt],
|
||||
*,
|
||||
run_id: UUID,
|
||||
parent_run_id: Union[UUID, None] = None,
|
||||
**kwargs: Any,
|
||||
) -> Any:
|
||||
event = {
|
||||
"event": "start",
|
||||
"event": "error",
|
||||
"type": "tool",
|
||||
"runId": str(run_id),
|
||||
"parentRunId": str(parent_run_id) if parent_run_id else None,
|
||||
"name": action.tool,
|
||||
"input": action.tool_input,
|
||||
"parent_run_id": str(parent_run_id) if parent_run_id else None,
|
||||
"error": {"message": str(error), "stack": traceback.format_exc()},
|
||||
}
|
||||
self.__send_event(event)
|
||||
|
||||
def on_agent_finish(
|
||||
def on_llm_error(
|
||||
self,
|
||||
finish: AgentFinish,
|
||||
error: Union[Exception, KeyboardInterrupt],
|
||||
*,
|
||||
run_id: UUID,
|
||||
parent_run_id: Union[UUID, None] = None,
|
||||
**kwargs: Any,
|
||||
) -> Any:
|
||||
event = {
|
||||
"event": "end",
|
||||
"type": "agent",
|
||||
"event": "error",
|
||||
"type": "llm",
|
||||
"runId": str(run_id),
|
||||
"parentRunId": str(parent_run_id) if parent_run_id else None,
|
||||
"output": finish.return_values,
|
||||
"parent_run_id": str(parent_run_id) if parent_run_id else None,
|
||||
"error": {"message": str(error), "stack": traceback.format_exc()},
|
||||
}
|
||||
self.__send_event(event)
|
||||
|
||||
|
||||
@@ -20,6 +20,7 @@ from langchain.chains.llm_checker.base import LLMCheckerChain
|
||||
from langchain.chains.llm_math.base import LLMMathChain
|
||||
from langchain.chains.llm_requests import LLMRequestsChain
|
||||
from langchain.chains.qa_with_sources.base import QAWithSourcesChain
|
||||
from langchain.chains.qa_with_sources.retrieval import RetrievalQAWithSourcesChain
|
||||
from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain
|
||||
from langchain.chains.retrieval_qa.base import RetrievalQA, VectorDBQA
|
||||
from langchain.llms.loading import load_llm, load_llm_from_config
|
||||
@@ -424,6 +425,30 @@ def _load_retrieval_qa(config: dict, **kwargs: Any) -> RetrievalQA:
|
||||
)
|
||||
|
||||
|
||||
def _load_retrieval_qa_with_sources_chain(
|
||||
config: dict, **kwargs: Any
|
||||
) -> RetrievalQAWithSourcesChain:
|
||||
if "retriever" in kwargs:
|
||||
retriever = kwargs.pop("retriever")
|
||||
else:
|
||||
raise ValueError("`retriever` must be present.")
|
||||
if "combine_documents_chain" in config:
|
||||
combine_documents_chain_config = config.pop("combine_documents_chain")
|
||||
combine_documents_chain = load_chain_from_config(combine_documents_chain_config)
|
||||
elif "combine_documents_chain_path" in config:
|
||||
combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
|
||||
else:
|
||||
raise ValueError(
|
||||
"One of `combine_documents_chain` or "
|
||||
"`combine_documents_chain_path` must be present."
|
||||
)
|
||||
return RetrievalQAWithSourcesChain(
|
||||
combine_documents_chain=combine_documents_chain,
|
||||
retriever=retriever,
|
||||
**config,
|
||||
)
|
||||
|
||||
|
||||
def _load_vector_db_qa(config: dict, **kwargs: Any) -> VectorDBQA:
|
||||
if "vectorstore" in kwargs:
|
||||
vectorstore = kwargs.pop("vectorstore")
|
||||
@@ -537,6 +562,7 @@ type_to_loader_dict = {
|
||||
"vector_db_qa_with_sources_chain": _load_vector_db_qa_with_sources_chain,
|
||||
"vector_db_qa": _load_vector_db_qa,
|
||||
"retrieval_qa": _load_retrieval_qa,
|
||||
"retrieval_qa_with_sources_chain": _load_retrieval_qa_with_sources_chain,
|
||||
"graph_cypher_chain": _load_graph_cypher_chain,
|
||||
}
|
||||
|
||||
|
||||
@@ -60,3 +60,8 @@ class RetrievalQAWithSourcesChain(BaseQAWithSourcesChain):
|
||||
question, callbacks=run_manager.get_child()
|
||||
)
|
||||
return self._reduce_tokens_below_limit(docs)
|
||||
|
||||
@property
|
||||
def _chain_type(self) -> str:
|
||||
"""Return the chain type."""
|
||||
return "retrieval_qa_with_sources_chain"
|
||||
|
||||
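With `_chain_type` defined and the loader registered in `type_to_loader_dict`, a `RetrievalQAWithSourcesChain` can now round-trip through `save` and `load_chain`. A sketch, assuming an existing chain and retriever (the file name is a placeholder):

from langchain.chains.loading import load_chain

qa_chain.save("qa_with_sources.yaml")
# Retrievers are not serialized, so the retriever must be supplied again at load time.
restored = load_chain("qa_with_sources.yaml", retriever=retriever)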
@@ -4,13 +4,13 @@ from pathlib import Path
|
||||
from typing import TYPE_CHECKING, Iterator, List, Optional, Union
|
||||
|
||||
from langchain import schema
|
||||
from langchain.chat_loaders import base as chat_loaders
|
||||
from langchain.chat_loaders.base import BaseChatLoader, ChatSession
|
||||
|
||||
if TYPE_CHECKING:
|
||||
import sqlite3
|
||||
|
||||
|
||||
class IMessageChatLoader(chat_loaders.BaseChatLoader):
|
||||
class IMessageChatLoader(BaseChatLoader):
|
||||
"""Load chat sessions from the `iMessage` chat.db SQLite file.
|
||||
|
||||
It only works on macOS when you have iMessage enabled and have the chat.db file.
|
||||
@@ -18,8 +18,8 @@ class IMessageChatLoader(chat_loaders.BaseChatLoader):
|
||||
The chat.db file is likely located at ~/Library/Messages/chat.db. However, your
|
||||
terminal may not have permission to access this file. To resolve this, you can
|
||||
copy the file to a different location, change the permissions of the file, or
|
||||
grant full disk access for your terminal emulator in System Settings > Security
|
||||
and Privacy > Full Disk Access.
|
||||
grant full disk access for your terminal emulator
|
||||
in System Settings > Security and Privacy > Full Disk Access.
|
||||
"""
|
||||
|
||||
def __init__(self, path: Optional[Union[str, Path]] = None):
|
||||
@@ -46,7 +46,7 @@ class IMessageChatLoader(chat_loaders.BaseChatLoader):
|
||||
|
||||
def _load_single_chat_session(
|
||||
self, cursor: "sqlite3.Cursor", chat_id: int
|
||||
) -> chat_loaders.ChatSession:
|
||||
) -> ChatSession:
|
||||
"""
|
||||
Load a single chat session from the iMessage chat.db.
|
||||
|
||||
@@ -83,9 +83,9 @@ class IMessageChatLoader(chat_loaders.BaseChatLoader):
|
||||
)
|
||||
)
|
||||
|
||||
return chat_loaders.ChatSession(messages=results)
|
||||
return ChatSession(messages=results)
|
||||
|
||||
def lazy_load(self) -> Iterator[chat_loaders.ChatSession]:
|
||||
def lazy_load(self) -> Iterator[ChatSession]:
|
||||
"""
|
||||
Lazy load the chat sessions from the iMessage chat.db
|
||||
and yield them in the required format.
|
||||
|
||||
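A usage sketch for the refactored loader (the path is the default location mentioned in the docstring; copy the file somewhere readable first if your terminal lacks Full Disk Access), assuming the module path `langchain.chat_loaders.imessage`:

from pathlib import Path
from langchain.chat_loaders.imessage import IMessageChatLoader

loader = IMessageChatLoader(path=Path.home() / "Library" / "Messages" / "chat.db")
for session in loader.lazy_load():
    print(len(session["messages"]), "messages in this conversation")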
@@ -6,12 +6,12 @@ from pathlib import Path
|
||||
from typing import Dict, Iterator, List, Union
|
||||
|
||||
from langchain import schema
|
||||
from langchain.chat_loaders import base as chat_loaders
|
||||
from langchain.chat_loaders.base import BaseChatLoader, ChatSession
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class SlackChatLoader(chat_loaders.BaseChatLoader):
|
||||
class SlackChatLoader(BaseChatLoader):
|
||||
"""Load `Slack` conversations from a dump zip file."""
|
||||
|
||||
def __init__(
|
||||
@@ -27,9 +27,7 @@ class SlackChatLoader(chat_loaders.BaseChatLoader):
|
||||
if not self.zip_path.exists():
|
||||
raise FileNotFoundError(f"File {self.zip_path} not found")
|
||||
|
||||
def _load_single_chat_session(
|
||||
self, messages: List[Dict]
|
||||
) -> chat_loaders.ChatSession:
|
||||
def _load_single_chat_session(self, messages: List[Dict]) -> ChatSession:
|
||||
results: List[Union[schema.AIMessage, schema.HumanMessage]] = []
|
||||
previous_sender = None
|
||||
for message in messages:
|
||||
@@ -62,7 +60,7 @@ class SlackChatLoader(chat_loaders.BaseChatLoader):
|
||||
)
|
||||
)
|
||||
previous_sender = sender
|
||||
return chat_loaders.ChatSession(messages=results)
|
||||
return ChatSession(messages=results)
|
||||
|
||||
def _read_json(self, zip_file: zipfile.ZipFile, file_path: str) -> List[dict]:
|
||||
"""Read JSON data from a zip subfile."""
|
||||
@@ -72,7 +70,7 @@ class SlackChatLoader(chat_loaders.BaseChatLoader):
|
||||
raise ValueError(f"Expected list of dictionaries, got {type(data)}")
|
||||
return data
|
||||
|
||||
def lazy_load(self) -> Iterator[chat_loaders.ChatSession]:
|
||||
def lazy_load(self) -> Iterator[ChatSession]:
|
||||
"""
|
||||
Lazy load the chat sessions from the Slack dump file and yield them
|
||||
in the required format.
|
||||
|
||||
@@ -7,12 +7,12 @@ from pathlib import Path
|
||||
from typing import Iterator, List, Union
|
||||
|
||||
from langchain import schema
|
||||
from langchain.chat_loaders import base as chat_loaders
|
||||
from langchain.chat_loaders.base import BaseChatLoader, ChatSession
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class TelegramChatLoader(chat_loaders.BaseChatLoader):
|
||||
class TelegramChatLoader(BaseChatLoader):
|
||||
"""Load `telegram` conversations to LangChain chat messages.
|
||||
|
||||
To export, use the Telegram Desktop app from
|
||||
@@ -35,16 +35,14 @@ class TelegramChatLoader(chat_loaders.BaseChatLoader):
|
||||
"""
|
||||
self.path = path if isinstance(path, str) else str(path)
|
||||
|
||||
def _load_single_chat_session_html(
|
||||
self, file_path: str
|
||||
) -> chat_loaders.ChatSession:
|
||||
def _load_single_chat_session_html(self, file_path: str) -> ChatSession:
|
||||
"""Load a single chat session from an HTML file.
|
||||
|
||||
Args:
|
||||
file_path (str): Path to the HTML file.
|
||||
|
||||
Returns:
|
||||
chat_loaders.ChatSession: The loaded chat session.
|
||||
ChatSession: The loaded chat session.
|
||||
"""
|
||||
try:
|
||||
from bs4 import BeautifulSoup
|
||||
@@ -81,18 +79,16 @@ class TelegramChatLoader(chat_loaders.BaseChatLoader):
|
||||
)
|
||||
previous_sender = from_name
|
||||
|
||||
return chat_loaders.ChatSession(messages=results)
|
||||
return ChatSession(messages=results)
|
||||
|
||||
def _load_single_chat_session_json(
|
||||
self, file_path: str
|
||||
) -> chat_loaders.ChatSession:
|
||||
def _load_single_chat_session_json(self, file_path: str) -> ChatSession:
|
||||
"""Load a single chat session from a JSON file.
|
||||
|
||||
Args:
|
||||
file_path (str): Path to the JSON file.
|
||||
|
||||
Returns:
|
||||
chat_loaders.ChatSession: The loaded chat session.
|
||||
ChatSession: The loaded chat session.
|
||||
"""
|
||||
with open(file_path, "r", encoding="utf-8") as file:
|
||||
data = json.load(file)
|
||||
@@ -114,7 +110,7 @@ class TelegramChatLoader(chat_loaders.BaseChatLoader):
|
||||
)
|
||||
)
|
||||
|
||||
return chat_loaders.ChatSession(messages=results)
|
||||
return ChatSession(messages=results)
|
||||
|
||||
def _iterate_files(self, path: str) -> Iterator[str]:
|
||||
"""Iterate over files in a directory or zip file.
|
||||
@@ -139,12 +135,12 @@ class TelegramChatLoader(chat_loaders.BaseChatLoader):
|
||||
with tempfile.TemporaryDirectory() as temp_dir:
|
||||
yield zip_file.extract(file, path=temp_dir)
|
||||
|
||||
def lazy_load(self) -> Iterator[chat_loaders.ChatSession]:
|
||||
def lazy_load(self) -> Iterator[ChatSession]:
|
||||
"""Lazy load the messages from the chat file and yield them
|
||||
in as chat sessions.
|
||||
|
||||
Yields:
|
||||
chat_loaders.ChatSession: The loaded chat session.
|
||||
ChatSession: The loaded chat session.
|
||||
"""
|
||||
for file_path in self._iterate_files(self.path):
|
||||
if file_path.endswith(".html"):
|
||||
|
||||
@@ -5,13 +5,13 @@ import zipfile
|
||||
from typing import Iterator, List, Union
|
||||
|
||||
from langchain import schema
|
||||
from langchain.chat_loaders import base as chat_loaders
|
||||
from langchain.chat_loaders.base import BaseChatLoader, ChatSession
|
||||
from langchain.schema import messages
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class WhatsAppChatLoader(chat_loaders.BaseChatLoader):
|
||||
class WhatsAppChatLoader(BaseChatLoader):
|
||||
"""Load `WhatsApp` conversations from a dump zip file or directory."""
|
||||
|
||||
def __init__(self, path: str):
|
||||
@@ -42,7 +42,7 @@ class WhatsAppChatLoader(chat_loaders.BaseChatLoader):
|
||||
flags=re.IGNORECASE,
|
||||
)
|
||||
|
||||
def _load_single_chat_session(self, file_path: str) -> chat_loaders.ChatSession:
|
||||
def _load_single_chat_session(self, file_path: str) -> ChatSession:
|
||||
"""Load a single chat session from a file.
|
||||
|
||||
Args:
|
||||
@@ -84,7 +84,7 @@ class WhatsAppChatLoader(chat_loaders.BaseChatLoader):
|
||||
)
|
||||
else:
|
||||
logger.debug(f"Could not parse line: {line}")
|
||||
return chat_loaders.ChatSession(messages=results)
|
||||
return ChatSession(messages=results)
|
||||
|
||||
def _iterate_files(self, path: str) -> Iterator[str]:
|
||||
"""Iterate over the files in a directory or zip file.
|
||||
@@ -108,7 +108,7 @@ class WhatsAppChatLoader(chat_loaders.BaseChatLoader):
|
||||
if file.endswith(".txt"):
|
||||
yield zip_file.extract(file)
|
||||
|
||||
def lazy_load(self) -> Iterator[chat_loaders.ChatSession]:
|
||||
def lazy_load(self) -> Iterator[ChatSession]:
|
||||
"""Lazy load the messages from the chat file and yield
|
||||
them as chat sessions.
|
||||
|
||||
|
||||
@@ -20,12 +20,12 @@ an interface where "chat messages" are the inputs and outputs.
|
||||
from langchain.chat_models.anthropic import ChatAnthropic
|
||||
from langchain.chat_models.anyscale import ChatAnyscale
|
||||
from langchain.chat_models.azure_openai import AzureChatOpenAI
|
||||
from langchain.chat_models.bedrock import BedrockChat
|
||||
from langchain.chat_models.ernie import ErnieBotChat
|
||||
from langchain.chat_models.fake import FakeListChatModel
|
||||
from langchain.chat_models.google_palm import ChatGooglePalm
|
||||
from langchain.chat_models.human import HumanInputChatModel
|
||||
from langchain.chat_models.jinachat import JinaChat
|
||||
from langchain.chat_models.konko import ChatKonko
|
||||
from langchain.chat_models.litellm import ChatLiteLLM
|
||||
from langchain.chat_models.mlflow_ai_gateway import ChatMLflowAIGateway
|
||||
from langchain.chat_models.ollama import ChatOllama
|
||||
@@ -36,7 +36,6 @@ from langchain.chat_models.vertexai import ChatVertexAI
|
||||
__all__ = [
|
||||
"ChatOpenAI",
|
||||
"AzureChatOpenAI",
|
||||
"BedrockChat",
|
||||
"FakeListChatModel",
|
||||
"PromptLayerChatOpenAI",
|
||||
"ChatAnthropic",
|
||||
@@ -49,4 +48,5 @@ __all__ = [
|
||||
"ChatAnyscale",
|
||||
"ChatLiteLLM",
|
||||
"ErnieBotChat",
|
||||
"ChatKonko",
|
||||
]
|
||||
|
||||
libs/langchain/langchain/chat_models/konko.py (new file, 292 lines)
@@ -0,0 +1,292 @@
|
||||
"""KonkoAI chat wrapper."""
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import os
|
||||
from typing import (
|
||||
Any,
|
||||
Dict,
|
||||
Iterator,
|
||||
List,
|
||||
Mapping,
|
||||
Optional,
|
||||
Set,
|
||||
Tuple,
|
||||
Union,
|
||||
)
|
||||
|
||||
import requests
|
||||
|
||||
from langchain.adapters.openai import convert_dict_to_message, convert_message_to_dict
|
||||
from langchain.callbacks.manager import (
|
||||
CallbackManagerForLLMRun,
|
||||
)
|
||||
from langchain.chat_models.openai import ChatOpenAI, _convert_delta_to_message_chunk
|
||||
from langchain.pydantic_v1 import Field, root_validator
|
||||
from langchain.schema import ChatGeneration, ChatResult
|
||||
from langchain.schema.messages import AIMessageChunk, BaseMessage
|
||||
from langchain.schema.output import ChatGenerationChunk
|
||||
from langchain.utils import get_from_dict_or_env
|
||||
|
||||
DEFAULT_API_BASE = "https://api.konko.ai/v1"
|
||||
DEFAULT_MODEL = "meta-llama/Llama-2-13b-chat-hf"
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ChatKonko(ChatOpenAI):
|
||||
"""`ChatKonko` Chat large language models API.
|
||||
|
||||
To use, you should have the ``konko`` python package installed, and the
|
||||
environment variable ``KONKO_API_KEY`` and ``OPENAI_API_KEY`` set with your API key.
|
||||
|
||||
Any parameters that are valid to be passed to the konko.create call can be passed
|
||||
in, even if not explicitly saved on this class.
|
||||
|
||||
Example:
|
||||
.. code-block:: python
|
||||
|
||||
from langchain.chat_models import ChatKonko
|
||||
llm = ChatKonko(model="meta-llama/Llama-2-13b-chat-hf")
|
||||
"""
|
||||
|
||||
@property
|
||||
def lc_secrets(self) -> Dict[str, str]:
|
||||
return {"konko_api_key": "KONKO_API_KEY", "openai_api_key": "OPENAI_API_KEY"}
|
||||
|
||||
@property
|
||||
def lc_serializable(self) -> bool:
|
||||
return True
|
||||
|
||||
client: Any = None #: :meta private:
|
||||
model: str = Field(default=DEFAULT_MODEL, alias="model")
|
||||
"""Model name to use."""
|
||||
temperature: float = 0.7
|
||||
"""What sampling temperature to use."""
|
||||
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
|
||||
"""Holds any model parameters valid for `create` call not explicitly specified."""
|
||||
openai_api_key: Optional[str] = None
|
||||
konko_api_key: Optional[str] = None
|
||||
request_timeout: Optional[Union[float, Tuple[float, float]]] = None
|
||||
"""Timeout for requests to Konko completion API."""
|
||||
max_retries: int = 6
|
||||
"""Maximum number of retries to make when generating."""
|
||||
streaming: bool = False
|
||||
"""Whether to stream the results or not."""
|
||||
n: int = 1
|
||||
"""Number of chat completions to generate for each prompt."""
|
||||
max_tokens: int = 20
|
||||
"""Maximum number of tokens to generate."""
|
||||
|
||||
@root_validator()
|
||||
def validate_environment(cls, values: Dict) -> Dict:
|
||||
"""Validate that api key and python package exists in environment."""
|
||||
values["konko_api_key"] = get_from_dict_or_env(
|
||||
values, "konko_api_key", "KONKO_API_KEY"
|
||||
)
|
||||
try:
|
||||
import konko
|
||||
|
||||
except ImportError:
|
||||
raise ValueError(
|
||||
"Could not import konko python package. "
|
||||
"Please install it with `pip install konko`."
|
||||
)
|
||||
try:
|
||||
values["client"] = konko.ChatCompletion
|
||||
except AttributeError:
|
||||
raise ValueError(
|
||||
"`konko` has no `ChatCompletion` attribute, this is likely "
|
||||
"due to an old version of the konko package. Try upgrading it "
|
||||
"with `pip install --upgrade konko`."
|
||||
)
|
||||
if values["n"] < 1:
|
||||
raise ValueError("n must be at least 1.")
|
||||
if values["n"] > 1 and values["streaming"]:
|
||||
raise ValueError("n must be 1 when streaming.")
|
||||
return values
|
||||
|
||||
@property
|
||||
def _default_params(self) -> Dict[str, Any]:
|
||||
"""Get the default parameters for calling Konko API."""
|
||||
return {
|
||||
"model": self.model,
|
||||
"request_timeout": self.request_timeout,
|
||||
"max_tokens": self.max_tokens,
|
||||
"stream": self.streaming,
|
||||
"n": self.n,
|
||||
"temperature": self.temperature,
|
||||
**self.model_kwargs,
|
||||
}
|
||||
|
||||
@staticmethod
|
||||
def get_available_models(
|
||||
konko_api_key: Optional[str] = None,
|
||||
openai_api_key: Optional[str] = None,
|
||||
konko_api_base: str = DEFAULT_API_BASE,
|
||||
) -> Set[str]:
|
||||
"""Get available models from Konko API."""
|
||||
|
||||
# Try to retrieve the OpenAI API key if it's not passed as an argument
|
||||
if not openai_api_key:
|
||||
try:
|
||||
openai_api_key = os.environ["OPENAI_API_KEY"]
|
||||
except KeyError:
|
||||
pass # It's okay if it's not set, we just won't use it
|
||||
|
||||
# Try to retrieve the Konko API key if it's not passed as an argument
|
||||
if not konko_api_key:
|
||||
try:
|
||||
konko_api_key = os.environ["KONKO_API_KEY"]
|
||||
except KeyError:
|
||||
raise ValueError(
|
||||
"Konko API key must be passed as keyword argument or "
|
||||
"set in environment variable KONKO_API_KEY."
|
||||
)
|
||||
|
||||
models_url = f"{konko_api_base}/models"
|
||||
|
||||
headers = {
|
||||
"Authorization": f"Bearer {konko_api_key}",
|
||||
}
|
||||
|
||||
if openai_api_key:
|
||||
headers["X-OpenAI-Api-Key"] = openai_api_key
|
||||
|
||||
models_response = requests.get(models_url, headers=headers)
|
||||
|
||||
if models_response.status_code != 200:
|
||||
raise ValueError(
|
||||
f"Error getting models from {models_url}: "
|
||||
f"{models_response.status_code}"
|
||||
)
|
||||
|
||||
return {model["id"] for model in models_response.json()["data"]}
|
||||
|
||||
def completion_with_retry(
|
||||
self, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any
|
||||
) -> Any:
|
||||
def _completion_with_retry(**kwargs: Any) -> Any:
|
||||
return self.client.create(**kwargs)
|
||||
|
||||
return _completion_with_retry(**kwargs)
|
||||
|
||||
def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
|
||||
overall_token_usage: dict = {}
|
||||
for output in llm_outputs:
|
||||
if output is None:
|
||||
# Happens in streaming
|
||||
continue
|
||||
token_usage = output["token_usage"]
|
||||
for k, v in token_usage.items():
|
||||
if k in overall_token_usage:
|
||||
overall_token_usage[k] += v
|
||||
else:
|
||||
overall_token_usage[k] = v
|
||||
return {"token_usage": overall_token_usage, "model_name": self.model}
|
||||
|
||||
def _stream(
|
||||
self,
|
||||
messages: List[BaseMessage],
|
||||
stop: Optional[List[str]] = None,
|
||||
run_manager: Optional[CallbackManagerForLLMRun] = None,
|
||||
**kwargs: Any,
|
||||
) -> Iterator[ChatGenerationChunk]:
|
||||
message_dicts, params = self._create_message_dicts(messages, stop)
|
||||
params = {**params, **kwargs, "stream": True}
|
||||
|
||||
default_chunk_class = AIMessageChunk
|
||||
for chunk in self.completion_with_retry(
|
||||
messages=message_dicts, run_manager=run_manager, **params
|
||||
):
|
||||
if len(chunk["choices"]) == 0:
|
||||
continue
|
||||
choice = chunk["choices"][0]
|
||||
chunk = _convert_delta_to_message_chunk(
|
||||
choice["delta"], default_chunk_class
|
||||
)
|
||||
finish_reason = choice.get("finish_reason")
|
||||
generation_info = (
|
||||
dict(finish_reason=finish_reason) if finish_reason is not None else None
|
||||
)
|
||||
default_chunk_class = chunk.__class__
|
||||
yield ChatGenerationChunk(message=chunk, generation_info=generation_info)
|
||||
if run_manager:
|
||||
run_manager.on_llm_new_token(chunk.content, chunk=chunk)
|
||||
|
||||
def _generate(
|
||||
self,
|
||||
messages: List[BaseMessage],
|
||||
stop: Optional[List[str]] = None,
|
||||
run_manager: Optional[CallbackManagerForLLMRun] = None,
|
||||
stream: Optional[bool] = None,
|
||||
**kwargs: Any,
|
||||
) -> ChatResult:
|
||||
if stream if stream is not None else self.streaming:
|
||||
generation: Optional[ChatGenerationChunk] = None
|
||||
for chunk in self._stream(
|
||||
messages=messages, stop=stop, run_manager=run_manager, **kwargs
|
||||
):
|
||||
if generation is None:
|
||||
generation = chunk
|
||||
else:
|
||||
generation += chunk
|
||||
assert generation is not None
|
||||
return ChatResult(generations=[generation])
|
||||
|
||||
message_dicts, params = self._create_message_dicts(messages, stop)
|
||||
params = {**params, **kwargs}
|
||||
response = self.completion_with_retry(
|
||||
messages=message_dicts, run_manager=run_manager, **params
|
||||
)
|
||||
return self._create_chat_result(response)
|
||||
|
||||
def _create_message_dicts(
|
||||
self, messages: List[BaseMessage], stop: Optional[List[str]]
|
||||
) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
|
||||
params = self._client_params
|
||||
if stop is not None:
|
||||
if "stop" in params:
|
||||
raise ValueError("`stop` found in both the input and default params.")
|
||||
params["stop"] = stop
|
||||
message_dicts = [convert_message_to_dict(m) for m in messages]
|
||||
return message_dicts, params
|
||||
|
||||
def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
|
||||
generations = []
|
||||
for res in response["choices"]:
|
||||
message = convert_dict_to_message(res["message"])
|
||||
gen = ChatGeneration(
|
||||
message=message,
|
||||
generation_info=dict(finish_reason=res.get("finish_reason")),
|
||||
)
|
||||
generations.append(gen)
|
||||
token_usage = response.get("usage", {})
|
||||
llm_output = {"token_usage": token_usage, "model_name": self.model}
|
||||
return ChatResult(generations=generations, llm_output=llm_output)
|
||||
|
||||
@property
|
||||
def _identifying_params(self) -> Dict[str, Any]:
|
||||
"""Get the identifying parameters."""
|
||||
return {**{"model_name": self.model}, **self._default_params}
|
||||
|
||||
@property
|
||||
def _client_params(self) -> Dict[str, Any]:
|
||||
"""Get the parameters used for the konko client."""
|
||||
return {**self._default_params}
|
||||
|
||||
def _get_invocation_params(
|
||||
self, stop: Optional[List[str]] = None, **kwargs: Any
|
||||
) -> Dict[str, Any]:
|
||||
"""Get the parameters used to invoke the model."""
|
||||
return {
|
||||
"model": self.model,
|
||||
**super()._get_invocation_params(stop=stop),
|
||||
**self._default_params,
|
||||
**kwargs,
|
||||
}
|
||||
|
||||
@property
|
||||
def _llm_type(self) -> str:
|
||||
"""Return type of chat model."""
|
||||
return "konko-chat"
|
||||
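Beyond the docstring example, `get_available_models` can be used to discover which model ids the Konko endpoint accepts before constructing the chat model. A sketch (keys are read from the `KONKO_API_KEY` / `OPENAI_API_KEY` environment variables here):

from langchain.chat_models import ChatKonko

available = ChatKonko.get_available_models()
print(sorted(available))
chat = ChatKonko(model="meta-llama/Llama-2-13b-chat-hf", max_tokens=128)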
@@ -1,5 +1,7 @@
|
||||
import asyncio
|
||||
import logging
|
||||
import threading
|
||||
from functools import partial
|
||||
from typing import Dict, List, Optional
|
||||
|
||||
import requests
|
||||
@@ -14,6 +16,7 @@ logger = logging.getLogger(__name__)
|
||||
class ErnieEmbeddings(BaseModel, Embeddings):
|
||||
"""`Ernie Embeddings V1` embedding models."""
|
||||
|
||||
ernie_api_base: Optional[str] = None
|
||||
ernie_client_id: Optional[str] = None
|
||||
ernie_client_secret: Optional[str] = None
|
||||
access_token: Optional[str] = None
|
||||
@@ -26,6 +29,9 @@ class ErnieEmbeddings(BaseModel, Embeddings):
|
||||
|
||||
@root_validator()
|
||||
def validate_environment(cls, values: Dict) -> Dict:
|
||||
values["ernie_api_base"] = get_from_dict_or_env(
|
||||
values, "ernie_api_base", "ERNIE_API_BASE", "https://aip.baidubce.com"
|
||||
)
|
||||
values["ernie_client_id"] = get_from_dict_or_env(
|
||||
values,
|
||||
"ernie_client_id",
|
||||
@@ -40,7 +46,7 @@ class ErnieEmbeddings(BaseModel, Embeddings):
|
||||
|
||||
def _embedding(self, json: object) -> dict:
|
||||
base_url = (
|
||||
"https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/embeddings"
|
||||
f"{self.ernie_api_base}/rpc/2.0/ai_custom/v1/wenxinworkshop/embeddings"
|
||||
)
|
||||
resp = requests.post(
|
||||
f"{base_url}/embedding-v1",
|
||||
@@ -71,6 +77,15 @@ class ErnieEmbeddings(BaseModel, Embeddings):
|
||||
self.access_token = str(resp.json().get("access_token"))
|
||||
|
||||
def embed_documents(self, texts: List[str]) -> List[List[float]]:
|
||||
"""Embed search docs.
|
||||
|
||||
Args:
|
||||
texts: The list of texts to embed
|
||||
|
||||
Returns:
|
||||
List[List[float]]: List of embeddings, one for each text.
|
||||
"""
|
||||
|
||||
if not self.access_token:
|
||||
self._refresh_access_token_with_lock()
|
||||
text_in_chunks = [
|
||||
@@ -90,6 +105,15 @@ class ErnieEmbeddings(BaseModel, Embeddings):
|
||||
return lst
|
||||
|
||||
def embed_query(self, text: str) -> List[float]:
|
||||
"""Embed query text.
|
||||
|
||||
Args:
|
||||
text: The text to embed.
|
||||
|
||||
Returns:
|
||||
List[float]: Embeddings for the text.
|
||||
"""
|
||||
|
||||
if not self.access_token:
|
||||
self._refresh_access_token_with_lock()
|
||||
resp = self._embedding({"input": [text]})
|
||||
@@ -100,3 +124,31 @@ class ErnieEmbeddings(BaseModel, Embeddings):
|
||||
else:
|
||||
raise ValueError(f"Error from Ernie: {resp}")
|
||||
return resp["data"][0]["embedding"]
|
||||
|
||||
async def aembed_query(self, text: str) -> List[float]:
|
||||
"""Asynchronous Embed query text.
|
||||
|
||||
Args:
|
||||
text: The text to embed.
|
||||
|
||||
Returns:
|
||||
List[float]: Embeddings for the text.
|
||||
"""
|
||||
|
||||
return await asyncio.get_running_loop().run_in_executor(
|
||||
None, partial(self.embed_query, text)
|
||||
)
|
||||
|
||||
async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
|
||||
"""Asynchronous Embed search docs.
|
||||
|
||||
Args:
|
||||
texts: The list of texts to embed
|
||||
|
||||
Returns:
|
||||
List[List[float]]: List of embeddings, one for each text.
|
||||
"""
|
||||
|
||||
result = await asyncio.gather(*[self.aembed_query(text) for text in texts])
|
||||
|
||||
return list(result)
|
||||
|
||||
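The async variants simply hand the work to the synchronous implementations via the running event loop's executor, so they can be awaited alongside other coroutines. A sketch (client credentials are placeholders):

import asyncio
from langchain.embeddings import ErnieEmbeddings

async def main() -> None:
    emb = ErnieEmbeddings(ernie_client_id="CLIENT_ID", ernie_client_secret="CLIENT_SECRET")
    doc_vectors = await emb.aembed_documents(["hello", "world"])
    query_vector = await emb.aembed_query("hello")
    print(len(doc_vectors), len(query_vector))

asyncio.run(main())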
@@ -37,6 +37,7 @@ from langchain.llms.chatglm import ChatGLM
|
||||
from langchain.llms.clarifai import Clarifai
|
||||
from langchain.llms.cohere import Cohere
|
||||
from langchain.llms.ctransformers import CTransformers
|
||||
from langchain.llms.ctranslate2 import CTranslate2
|
||||
from langchain.llms.databricks import Databricks
|
||||
from langchain.llms.deepinfra import DeepInfra
|
||||
from langchain.llms.deepsparse import DeepSparse
|
||||
@@ -100,6 +101,7 @@ __all__ = [
|
||||
"Beam",
|
||||
"Bedrock",
|
||||
"CTransformers",
|
||||
"CTranslate2",
|
||||
"CerebriumAI",
|
||||
"ChatGLM",
|
||||
"Clarifai",
|
||||
@@ -178,6 +180,7 @@ type_to_cls_dict: Dict[str, Type[BaseLLM]] = {
|
||||
"clarifai": Clarifai,
|
||||
"cohere": Cohere,
|
||||
"ctransformers": CTransformers,
|
||||
"ctranslate2": CTranslate2,
|
||||
"databricks": Databricks,
|
||||
"deepinfra": DeepInfra,
|
||||
"deepsparse": DeepSparse,
|
||||
|
||||
libs/langchain/langchain/llms/ctranslate2.py (new file, 128 lines)
@@ -0,0 +1,128 @@
|
||||
from typing import Any, Dict, List, Optional, Union
|
||||
|
||||
from langchain.callbacks.manager import CallbackManagerForLLMRun
|
||||
from langchain.llms.base import BaseLLM
|
||||
from langchain.pydantic_v1 import Field, root_validator
|
||||
from langchain.schema.output import Generation, LLMResult
|
||||
|
||||
|
||||
class CTranslate2(BaseLLM):
|
||||
"""CTranslate2 language model."""
|
||||
|
||||
model_path: str = ""
|
||||
"""Path to the CTranslate2 model directory."""
|
||||
|
||||
tokenizer_name: str = ""
|
||||
"""Name of the original Hugging Face model needed to load the proper tokenizer."""
|
||||
|
||||
device: str = "cpu"
|
||||
"""Device to use (possible values are: cpu, cuda, auto)."""
|
||||
|
||||
device_index: Union[int, List[int]] = 0
|
||||
"""Device IDs where to place this generator on."""
|
||||
|
||||
compute_type: Union[str, Dict[str, str]] = "default"
|
||||
"""
|
||||
Model computation type or a dictionary mapping a device name to the computation type
|
||||
(possible values are: default, auto, int8, int8_float32, int8_float16,
|
||||
int8_bfloat16, int16, float16, bfloat16, float32).
|
||||
"""
|
||||
|
||||
max_length: int = 512
|
||||
"""Maximum generation length."""
|
||||
|
||||
sampling_topk: int = 1
|
||||
"""Randomly sample predictions from the top K candidates."""
|
||||
|
||||
sampling_topp: float = 1
|
||||
"""Keep the most probable tokens whose cumulative probability exceeds this value."""
|
||||
|
||||
sampling_temperature: float = 1
|
||||
"""Sampling temperature to generate more random samples."""
|
||||
|
||||
client: Any #: :meta private:
|
||||
|
||||
tokenizer: Any #: :meta private:
|
||||
|
||||
ctranslate2_kwargs: Dict[str, Any] = Field(default_factory=dict)
|
||||
"""
|
||||
Holds any model parameters valid for `ctranslate2.Generator` call not
|
||||
explicitly specified.
|
||||
"""
|
||||
|
||||
@root_validator()
|
||||
def validate_environment(cls, values: Dict) -> Dict:
|
||||
"""Validate that python package exists in environment."""
|
||||
|
||||
try:
|
||||
import ctranslate2
|
||||
except ImportError:
|
||||
raise ImportError(
|
||||
"Could not import ctranslate2 python package. "
|
||||
"Please install it with `pip install ctranslate2`."
|
||||
)
|
||||
|
||||
try:
|
||||
import transformers
|
||||
except ImportError:
|
||||
raise ImportError(
|
||||
"Could not import transformers python package. "
|
||||
"Please install it with `pip install transformers`."
|
||||
)
|
||||
|
||||
values["client"] = ctranslate2.Generator(
|
||||
model_path=values["model_path"],
|
||||
device=values["device"],
|
||||
device_index=values["device_index"],
|
||||
compute_type=values["compute_type"],
|
||||
**values["ctranslate2_kwargs"],
|
||||
)
|
||||
|
||||
values["tokenizer"] = transformers.AutoTokenizer.from_pretrained(
|
||||
values["tokenizer_name"]
|
||||
)
|
||||
|
||||
return values
|
||||
|
||||
@property
|
||||
def _default_params(self) -> Dict[str, Any]:
|
||||
"""Get the default parameters."""
|
||||
return {
|
||||
"max_length": self.max_length,
|
||||
"sampling_topk": self.sampling_topk,
|
||||
"sampling_topp": self.sampling_topp,
|
||||
"sampling_temperature": self.sampling_temperature,
|
||||
}
|
||||
|
||||
def _generate(
|
||||
self,
|
||||
prompts: List[str],
|
||||
stop: Optional[List[str]] = None,
|
||||
run_manager: Optional[CallbackManagerForLLMRun] = None,
|
||||
**kwargs: Any,
|
||||
) -> LLMResult:
|
||||
# build sampling parameters
|
||||
params = {**self._default_params, **kwargs}
|
||||
|
||||
# call the model
|
||||
encoded_prompts = self.tokenizer(prompts)["input_ids"]
|
||||
tokenized_prompts = [
|
||||
self.tokenizer.convert_ids_to_tokens(encoded_prompt)
|
||||
for encoded_prompt in encoded_prompts
|
||||
]
|
||||
|
||||
results = self.client.generate_batch(tokenized_prompts, **params)
|
||||
|
||||
sequences = [result.sequences_ids[0] for result in results]
|
||||
decoded_sequences = [self.tokenizer.decode(seq) for seq in sequences]
|
||||
|
||||
generations = []
|
||||
for text in decoded_sequences:
|
||||
generations.append([Generation(text=text)])
|
||||
|
||||
return LLMResult(generations=generations)
|
||||
|
||||
@property
|
||||
def _llm_type(self) -> str:
|
||||
"""Return type of llm."""
|
||||
return "ctranslate2"
|
||||
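A usage sketch for the new wrapper, assuming a model that has already been converted with CTranslate2's `ct2-transformers-converter` tool (paths and model names are placeholders):

from langchain.llms import CTranslate2

llm = CTranslate2(
    model_path="./llama-2-7b-ct2",              # directory produced by the converter
    tokenizer_name="meta-llama/Llama-2-7b-hf",  # original HF model, used only for its tokenizer
    device="cpu",
    max_length=256,
    sampling_temperature=0.7,
)
print(llm("Explain what CTranslate2 does in one sentence."))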
@@ -169,7 +169,7 @@ class VertexAI(_VertexAICommon, LLM):
|
||||
tuned_model_name = values.get("tuned_model_name")
|
||||
model_name = values["model_name"]
|
||||
try:
|
||||
if tuned_model_name or not is_codey_model(model_name):
|
||||
if not is_codey_model(model_name):
|
||||
from vertexai.preview.language_models import TextGenerationModel
|
||||
|
||||
if tuned_model_name:
|
||||
@@ -181,7 +181,12 @@ class VertexAI(_VertexAICommon, LLM):
|
||||
else:
|
||||
from vertexai.preview.language_models import CodeGenerationModel
|
||||
|
||||
values["client"] = CodeGenerationModel.from_pretrained(model_name)
|
||||
if tuned_model_name:
|
||||
values["client"] = CodeGenerationModel.get_tuned_model(
|
||||
tuned_model_name
|
||||
)
|
||||
else:
|
||||
values["client"] = CodeGenerationModel.from_pretrained(model_name)
|
||||
except ImportError:
|
||||
raise_vertex_import_error()
|
||||
return values
|
||||
|
||||
@@ -229,7 +229,7 @@ class ChatMessagePromptTemplate(BaseStringMessagePromptTemplate):
|
||||
|
||||
|
||||
class HumanMessagePromptTemplate(BaseStringMessagePromptTemplate):
|
||||
"""Human message prompt template. This is a message that is sent to the user."""
|
||||
"""Human message prompt template. This is a message sent from the user."""
|
||||
|
||||
def format(self, **kwargs: Any) -> BaseMessage:
|
||||
"""Format the prompt template.
|
||||
@@ -245,7 +245,7 @@ class HumanMessagePromptTemplate(BaseStringMessagePromptTemplate):
|
||||
|
||||
|
||||
class AIMessagePromptTemplate(BaseStringMessagePromptTemplate):
|
||||
"""AI message prompt template. This is a message that is not sent to the user."""
|
||||
"""AI message prompt template. This is a message sent from the AI."""
|
||||
|
||||
def format(self, **kwargs: Any) -> BaseMessage:
|
||||
"""Format the prompt template.
|
||||
|
||||
@@ -2,8 +2,8 @@
|
||||
|
||||
from typing import Any, Dict, List, Optional, Type, cast
|
||||
|
||||
from langchain import LLMChain
|
||||
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
|
||||
from langchain.chains import LLMChain
|
||||
from langchain.chains.query_constructor.base import load_query_constructor_chain
|
||||
from langchain.chains.query_constructor.ir import StructuredQuery, Visitor
|
||||
from langchain.chains.query_constructor.schema import AttributeInfo
|
||||
@@ -16,6 +16,7 @@ from langchain.retrievers.self_query.milvus import MilvusTranslator
|
||||
from langchain.retrievers.self_query.myscale import MyScaleTranslator
|
||||
from langchain.retrievers.self_query.pinecone import PineconeTranslator
|
||||
from langchain.retrievers.self_query.qdrant import QdrantTranslator
|
||||
from langchain.retrievers.self_query.redis import RedisTranslator
|
||||
from langchain.retrievers.self_query.supabase import SupabaseVectorTranslator
|
||||
from langchain.retrievers.self_query.vectara import VectaraTranslator
|
||||
from langchain.retrievers.self_query.weaviate import WeaviateTranslator
|
||||
@@ -30,6 +31,7 @@ from langchain.vectorstores import (
|
||||
MyScale,
|
||||
Pinecone,
|
||||
Qdrant,
|
||||
Redis,
|
||||
SupabaseVectorStore,
|
||||
Vectara,
|
||||
VectorStore,
|
||||
@@ -39,7 +41,6 @@ from langchain.vectorstores import (
|
||||
|
||||
def _get_builtin_translator(vectorstore: VectorStore) -> Visitor:
|
||||
"""Get the translator class corresponding to the vector store class."""
|
||||
vectorstore_cls = vectorstore.__class__
|
||||
BUILTIN_TRANSLATORS: Dict[Type[VectorStore], Type[Visitor]] = {
|
||||
Pinecone: PineconeTranslator,
|
||||
Chroma: ChromaTranslator,
|
||||
@@ -53,16 +54,19 @@ def _get_builtin_translator(vectorstore: VectorStore) -> Visitor:
|
||||
Milvus: MilvusTranslator,
|
||||
SupabaseVectorStore: SupabaseVectorTranslator,
|
||||
}
|
||||
if vectorstore_cls not in BUILTIN_TRANSLATORS:
|
||||
raise ValueError(
|
||||
f"Self query retriever with Vector Store type {vectorstore_cls}"
|
||||
f" not supported."
|
||||
)
|
||||
if isinstance(vectorstore, Qdrant):
|
||||
return QdrantTranslator(metadata_key=vectorstore.metadata_payload_key)
|
||||
elif isinstance(vectorstore, MyScale):
|
||||
return MyScaleTranslator(metadata_key=vectorstore.metadata_column)
|
||||
return BUILTIN_TRANSLATORS[vectorstore_cls]()
|
||||
elif isinstance(vectorstore, Redis):
|
||||
return RedisTranslator.from_vectorstore(vectorstore)
|
||||
elif vectorstore.__class__ in BUILTIN_TRANSLATORS:
|
||||
return BUILTIN_TRANSLATORS[vectorstore.__class__]()
|
||||
else:
|
||||
raise ValueError(
|
||||
f"Self query retriever with Vector Store type {vectorstore.__class__}"
|
||||
f" not supported."
|
||||
)
|
||||
|
||||
|
||||
class SelfQueryRetriever(BaseRetriever, BaseModel):
|
||||
@@ -80,8 +84,9 @@ class SelfQueryRetriever(BaseRetriever, BaseModel):
|
||||
structured_query_translator: Visitor
|
||||
"""Translator for turning internal query language into vectorstore search params."""
|
||||
verbose: bool = False
|
||||
"""Use original query instead of the revised new query from LLM"""
|
||||
|
||||
use_original_query: bool = False
|
||||
"""Use original query instead of the revised new query from LLM"""
|
||||
|
||||
class Config:
|
||||
"""Configuration for this pydantic object."""
|
||||
|
||||
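With `RedisTranslator` wired into `_get_builtin_translator`, a self-query retriever can now sit directly on a Redis vector store. A sketch, assuming an existing `Redis` vectorstore, an LLM and `AttributeInfo` descriptions of the filterable fields:

from langchain.retrievers.self_query.base import SelfQueryRetriever

retriever = SelfQueryRetriever.from_llm(
    llm=llm,
    vectorstore=redis_vectorstore,
    document_contents="Brief summaries of movies",
    metadata_field_info=metadata_field_info,
)
docs = retriever.get_relevant_documents("a sci-fi film rated above 8.5")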
libs/langchain/langchain/retrievers/self_query/redis.py (new file, 102 lines)
@@ -0,0 +1,102 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, Tuple
|
||||
|
||||
from langchain.chains.query_constructor.ir import (
|
||||
Comparator,
|
||||
Comparison,
|
||||
Operation,
|
||||
Operator,
|
||||
StructuredQuery,
|
||||
Visitor,
|
||||
)
|
||||
from langchain.vectorstores.redis import Redis
|
||||
from langchain.vectorstores.redis.filters import (
|
||||
RedisFilterExpression,
|
||||
RedisFilterField,
|
||||
RedisFilterOperator,
|
||||
RedisNum,
|
||||
RedisTag,
|
||||
RedisText,
|
||||
)
|
||||
from langchain.vectorstores.redis.schema import RedisModel
|
||||
|
||||
_COMPARATOR_TO_BUILTIN_METHOD = {
|
||||
Comparator.EQ: "__eq__",
|
||||
Comparator.NE: "__ne__",
|
||||
Comparator.LT: "__lt__",
|
||||
Comparator.GT: "__gt__",
|
||||
Comparator.LTE: "__le__",
|
||||
Comparator.GTE: "__ge__",
|
||||
Comparator.CONTAIN: "__eq__",
|
||||
Comparator.LIKE: "__mod__",
|
||||
}
|
||||
|
||||
|
||||
class RedisTranslator(Visitor):
|
||||
"""Translate"""
|
||||
|
||||
allowed_comparators = (
|
||||
Comparator.EQ,
|
||||
Comparator.NE,
|
||||
Comparator.LT,
|
||||
Comparator.LTE,
|
||||
Comparator.GT,
|
||||
Comparator.GTE,
|
||||
Comparator.CONTAIN,
|
||||
Comparator.LIKE,
|
||||
)
|
||||
"""Subset of allowed logical comparators."""
|
||||
allowed_operators = (Operator.AND, Operator.OR)
|
||||
"""Subset of allowed logical operators."""
|
||||
|
||||
def __init__(self, schema: RedisModel) -> None:
|
||||
self._schema = schema
|
||||
|
||||
def _attribute_to_filter_field(self, attribute: str) -> RedisFilterField:
|
||||
if attribute in [tf.name for tf in self._schema.text]:
|
||||
return RedisText(attribute)
|
||||
elif attribute in [tf.name for tf in self._schema.tag or []]:
|
||||
return RedisTag(attribute)
|
||||
elif attribute in [tf.name for tf in self._schema.numeric or []]:
|
||||
return RedisNum(attribute)
|
||||
else:
|
||||
raise ValueError(
|
||||
f"Invalid attribute {attribute} not in vector store schema. Schema is:"
|
||||
f"\n{self._schema.as_dict()}"
|
||||
)
|
||||
|
||||
def visit_comparison(self, comparison: Comparison) -> RedisFilterExpression:
|
||||
filter_field = self._attribute_to_filter_field(comparison.attribute)
|
||||
comparison_method = _COMPARATOR_TO_BUILTIN_METHOD[comparison.comparator]
|
||||
return getattr(filter_field, comparison_method)(comparison.value)
|
||||
|
||||
def visit_operation(self, operation: Operation) -> Any:
|
||||
left = operation.arguments[0].accept(self)
|
||||
if len(operation.arguments) > 2:
|
||||
right = self.visit_operation(
|
||||
Operation(
|
||||
operator=operation.operator, arguments=operation.arguments[1:]
|
||||
)
|
||||
)
|
||||
else:
|
||||
right = operation.arguments[1].accept(self)
|
||||
redis_operator = (
|
||||
RedisFilterOperator.OR
|
||||
if operation.operator == Operator.OR
|
||||
else RedisFilterOperator.AND
|
||||
)
|
||||
return RedisFilterExpression(operator=redis_operator, left=left, right=right)
|
||||
|
||||
def visit_structured_query(
|
||||
self, structured_query: StructuredQuery
|
||||
) -> Tuple[str, dict]:
|
||||
if structured_query.filter is None:
|
||||
kwargs = {}
|
||||
else:
|
||||
kwargs = {"filter": structured_query.filter.accept(self)}
|
||||
return structured_query.query, kwargs
|
||||
|
||||
@classmethod
|
||||
def from_vectorstore(cls, vectorstore: Redis) -> RedisTranslator:
|
||||
return cls(vectorstore._schema)
|
||||
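To illustrate the visitor, a single comparison translates into a Redis filter expression roughly as follows (the attribute must exist in the store's schema; otherwise `_attribute_to_filter_field` raises):

from langchain.chains.query_constructor.ir import Comparator, Comparison

translator = RedisTranslator.from_vectorstore(redis_vectorstore)
genre_filter = translator.visit_comparison(
    Comparison(comparator=Comparator.EQ, attribute="genre", value="science fiction")
)
# genre_filter can then be passed as the `filter` search kwarg on the Redis vectorstore.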
@@ -254,7 +254,7 @@ class Runnable(Generic[Input, Output], ABC):
    def with_retry(
        self,
        *,
        retry_if_exception_type: Tuple[Type[BaseException]] = (Exception,),
        retry_if_exception_type: Tuple[Type[BaseException], ...] = (Exception,),
        wait_exponential_jitter: bool = True,
        stop_after_attempt: int = 3,
    ) -> Runnable[Input, Output]:
@@ -280,7 +280,7 @@ class Runnable(Generic[Input, Output], ABC):
        self,
        fallbacks: Sequence[Runnable[Input, Output]],
        *,
        exceptions_to_handle: Tuple[Type[BaseException]] = (Exception,),
        exceptions_to_handle: Tuple[Type[BaseException], ...] = (Exception,),
    ) -> RunnableWithFallbacks[Input, Output]:
        return RunnableWithFallbacks(
            runnable=self,
@@ -653,7 +653,7 @@ class RunnableWithFallbacks(Serializable, Runnable[Input, Output]):

    runnable: Runnable[Input, Output]
    fallbacks: Sequence[Runnable[Input, Output]]
    exceptions_to_handle: Tuple[Type[BaseException]] = (Exception,)
    exceptions_to_handle: Tuple[Type[BaseException], ...] = (Exception,)

    class Config:
        arbitrary_types_allowed = True

@@ -24,7 +24,7 @@ U = TypeVar("U")
class RunnableRetry(RunnableBinding[Input, Output]):
    """Retry a Runnable if it fails."""

    retry_exception_types: Tuple[Type[BaseException]] = (Exception,)
    retry_exception_types: Tuple[Type[BaseException], ...] = (Exception,)

    wait_exponential_jitter: bool = True
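As a hedged illustration of the surface these annotation fixes touch (again, not part of the changeset), a variadic tuple of exception types now matches the corrected `Tuple[Type[BaseException], ...]` hint:

```python
from langchain.schema.runnable import RunnableLambda

def flaky(x: int) -> int:
    # Toy function used only for illustration.
    if x < 0:
        raise ValueError("negative input")
    return x * 2

# Any number of exception types can be passed to with_retry.
safe = RunnableLambda(flaky).with_retry(
    retry_if_exception_type=(ValueError, TimeoutError),
    stop_after_attempt=2,
)
```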
@@ -627,6 +627,7 @@ class Language(str, Enum):
    LATEX = "latex"
    HTML = "html"
    SOL = "sol"
    CSHARP = "csharp"


class RecursiveCharacterTextSplitter(TextSplitter):
@@ -1002,6 +1003,43 @@ class RecursiveCharacterTextSplitter(TextSplitter):
                "<title",
                "",
            ]
        elif language == Language.CSHARP:
            return [
                "\ninterface ",
                "\nenum ",
                "\nimplements ",
                "\ndelegate ",
                "\nevent ",
                # Split along class definitions
                "\nclass ",
                "\nabstract ",
                # Split along method definitions
                "\npublic ",
                "\nprotected ",
                "\nprivate ",
                "\nstatic ",
                "\nreturn ",
                # Split along control flow statements
                "\nif ",
                "\ncontinue ",
                "\nfor ",
                "\nforeach ",
                "\nwhile ",
                "\nswitch ",
                "\nbreak ",
                "\ncase ",
                "\nelse ",
                # Split by exceptions
                "\ntry ",
                "\nthrow ",
                "\nfinally ",
                "\ncatch ",
                # Split by the normal type of lines
                "\n\n",
                "\n",
                " ",
                "",
            ]
        elif language == Language.SOL:
            return [
                # Split along compiler information definitions
@@ -1032,6 +1070,7 @@ class RecursiveCharacterTextSplitter(TextSplitter):
                " ",
                "",
            ]

        else:
            raise ValueError(
                f"Language {language} is not supported! "
@@ -1042,7 +1081,9 @@ class RecursiveCharacterTextSplitter(TextSplitter):
class NLTKTextSplitter(TextSplitter):
    """Splitting text using NLTK package."""

    def __init__(self, separator: str = "\n\n", **kwargs: Any) -> None:
    def __init__(
        self, separator: str = "\n\n", language: str = "english", **kwargs: Any
    ) -> None:
        """Initialize the NLTK splitter."""
        super().__init__(**kwargs)
        try:
@@ -1054,11 +1095,12 @@ class NLTKTextSplitter(TextSplitter):
                "NLTK is not installed, please install it with `pip install nltk`."
            )
        self._separator = separator
        self._language = language

    def split_text(self, text: str) -> List[str]:
        """Split incoming text and return chunks."""
        # First we naively split the large input into a bunch of smaller ones.
        splits = self._tokenizer(text)
        splits = self._tokenizer(text, language=self._language)
        return self._merge_splits(splits, self._separator)
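A small usage sketch for the new `language` argument on `NLTKTextSplitter` (assumes the NLTK `punkt` data is already downloaded; the German sentences are only an example):

```python
from langchain.text_splitter import NLTKTextSplitter

splitter = NLTKTextSplitter(language="german", chunk_size=200, chunk_overlap=0)
chunks = splitter.split_text("Das ist der erste Satz. Hier kommt der zweite Satz.")
```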
@@ -17,6 +17,10 @@ def _array_to_buffer(array: List[float], dtype: Any = np.float32) -> bytes:
    return np.array(array).astype(dtype).tobytes()


def _buffer_to_array(buffer: bytes, dtype: Any = np.float32) -> List[float]:
    return np.frombuffer(buffer, dtype=dtype).tolist()


class TokenEscaper:
    """
    Escape punctuation within an input string.
@@ -142,6 +142,7 @@ class Chroma(VectorStore):
        query_embeddings: Optional[List[List[float]]] = None,
        n_results: int = 4,
        where: Optional[Dict[str, str]] = None,
        where_document: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Query the chroma collection."""
@@ -157,6 +158,7 @@ class Chroma(VectorStore):
            query_embeddings=query_embeddings,
            n_results=n_results,
            where=where,
            where_document=where_document,
            **kwargs,
        )

@@ -264,6 +266,7 @@ class Chroma(VectorStore):
        embedding: List[float],
        k: int = DEFAULT_K,
        filter: Optional[Dict[str, str]] = None,
        where_document: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs most similar to embedding vector.
@@ -275,7 +278,10 @@ class Chroma(VectorStore):
            List of Documents most similar to the query vector.
        """
        results = self.__query_collection(
            query_embeddings=embedding, n_results=k, where=filter
            query_embeddings=embedding,
            n_results=k,
            where=filter,
            where_document=where_document,
        )
        return _results_to_docs(results)

@@ -284,6 +290,7 @@ class Chroma(VectorStore):
        embedding: List[float],
        k: int = DEFAULT_K,
        filter: Optional[Dict[str, str]] = None,
        where_document: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """
@@ -300,7 +307,10 @@ class Chroma(VectorStore):
            Lower score represents more similarity.
        """
        results = self.__query_collection(
            query_embeddings=embedding, n_results=k, where=filter
            query_embeddings=embedding,
            n_results=k,
            where=filter,
            where_document=where_document,
        )
        return _results_to_docs_and_scores(results)

@@ -309,6 +319,7 @@ class Chroma(VectorStore):
        query: str,
        k: int = DEFAULT_K,
        filter: Optional[Dict[str, str]] = None,
        where_document: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """Run similarity search with Chroma with distance.
@@ -325,12 +336,18 @@ class Chroma(VectorStore):
        """
        if self._embedding_function is None:
            results = self.__query_collection(
                query_texts=[query], n_results=k, where=filter
                query_texts=[query],
                n_results=k,
                where=filter,
                where_document=where_document,
            )
        else:
            query_embedding = self._embedding_function.embed_query(query)
            results = self.__query_collection(
                query_embeddings=[query_embedding], n_results=k, where=filter
                query_embeddings=[query_embedding],
                n_results=k,
                where=filter,
                where_document=where_document,
            )

        return _results_to_docs_and_scores(results)
@@ -374,6 +391,7 @@ class Chroma(VectorStore):
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        filter: Optional[Dict[str, str]] = None,
        where_document: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.
@@ -398,6 +416,7 @@ class Chroma(VectorStore):
            query_embeddings=embedding,
            n_results=fetch_k,
            where=filter,
            where_document=where_document,
            include=["metadatas", "documents", "distances", "embeddings"],
        )
        mmr_selected = maximal_marginal_relevance(
@@ -419,6 +438,7 @@ class Chroma(VectorStore):
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        filter: Optional[Dict[str, str]] = None,
        where_document: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.
@@ -445,7 +465,12 @@ class Chroma(VectorStore):

        embedding = self._embedding_function.embed_query(query)
        docs = self.max_marginal_relevance_search_by_vector(
            embedding, k, fetch_k, lambda_mult=lambda_mult, filter=filter
            embedding,
            k,
            fetch_k,
            lambda_mult=lambda_mult,
            filter=filter,
            where_document=where_document,
        )
        return docs

@@ -472,7 +497,7 @@ class Chroma(VectorStore):
            offset: The offset to start returning results from.
                    Useful for paging results with limit. Optional.
            where_document: A WhereDocument type dict used to filter by the documents.
                            E.g. `{$contains: {"text": "hello"}}`. Optional.
                            E.g. `{$contains: "hello"}`. Optional.
            include: A list of what to include in the results.
                     Can contain `"embeddings"`, `"metadatas"`, `"documents"`.
                     Ids are always included.
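For context, a hedged sketch of the newly threaded-through `where_document` filter (assumes an existing `Chroma` instance named `db`; the field names are illustrative):

```python
# `where_document` is passed straight through to the underlying Chroma collection query.
docs = db.similarity_search(
    "greeting",
    k=4,
    filter={"source": "notes"},              # metadata filter ("where")
    where_document={"$contains": "hello"},   # full-text filter on document contents
)
```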
@@ -1,9 +1,11 @@
from __future__ import annotations

import asyncio
import contextlib
import enum
import logging
import uuid
from functools import partial
from typing import (
    TYPE_CHECKING,
    Any,
@@ -17,6 +19,7 @@ from typing import (
    Type,
)

import numpy as np
import sqlalchemy
from sqlalchemy import delete
from sqlalchemy.dialects.postgresql import UUID
@@ -26,6 +29,7 @@ from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
from langchain.vectorstores.base import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance

if TYPE_CHECKING:
    from langchain.vectorstores._pgvector_data_models import CollectionStore
@@ -54,6 +58,11 @@ class BaseModel(Base):
    uuid = sqlalchemy.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)


def _results_to_docs(docs_and_scores: Any) -> List[Document]:
    """Return docs from docs and scores."""
    return [doc for doc, _ in docs_and_scores]


class PGVector(VectorStore):
    """`Postgres`/`PGVector` vector store.

@@ -339,7 +348,7 @@ class PGVector(VectorStore):
            filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.

        Returns:
            List of Documents most similar to the query and score for each
            List of Documents most similar to the query and score for each.
        """
        embedding = self.embedding_function.embed_query(query)
        docs = self.similarity_search_with_score_by_vector(
@@ -367,6 +376,31 @@ class PGVector(VectorStore):
        k: int = 4,
        filter: Optional[dict] = None,
    ) -> List[Tuple[Document, float]]:
        results = self.__query_collection(embedding=embedding, k=k, filter=filter)

        return self._results_to_docs_and_scores(results)

    def _results_to_docs_and_scores(self, results: Any) -> List[Tuple[Document, float]]:
        """Return docs and scores from results."""
        docs = [
            (
                Document(
                    page_content=result.EmbeddingStore.document,
                    metadata=result.EmbeddingStore.cmetadata,
                ),
                result.distance if self.embedding_function is not None else None,
            )
            for result in results
        ]
        return docs

    def __query_collection(
        self,
        embedding: List[float],
        k: int = 4,
        filter: Optional[Dict[str, str]] = None,
    ) -> List[Any]:
        """Query the collection."""
        with Session(self._conn) as session:
            collection = self.get_collection(session)
            if not collection:
@@ -410,18 +444,7 @@ class PGVector(VectorStore):
                .limit(k)
                .all()
            )

        docs = [
            (
                Document(
                    page_content=result.EmbeddingStore.document,
                    metadata=result.EmbeddingStore.cmetadata,
                ),
                result.distance if self.embedding_function is not None else None,
            )
            for result in results
        ]
        return docs
        return results

    def similarity_search_by_vector(
        self,
@@ -443,7 +466,7 @@ class PGVector(VectorStore):
        docs_and_scores = self.similarity_search_with_score_by_vector(
            embedding=embedding, k=k, filter=filter
        )
        return [doc for doc, _ in docs_and_scores]
        return _results_to_docs(docs_and_scores)

    @classmethod
    def from_texts(
@@ -640,3 +663,190 @@ class PGVector(VectorStore):
                f" for distance_strategy of {self._distance_strategy}."
                "Consider providing relevance_score_fn to PGVector constructor."
            )

    def max_marginal_relevance_search_with_score_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        filter: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """Return docs selected using the maximal marginal relevance with score
            to embedding vector.

        Maximal marginal relevance optimizes for similarity to query AND diversity
            among selected documents.

        Args:
            embedding: Embedding to look up documents similar to.
            k (int): Number of Documents to return. Defaults to 4.
            fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
                Defaults to 20.
            lambda_mult (float): Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.
            filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.

        Returns:
            List[Tuple[Document, float]]: List of Documents selected by maximal marginal
                relevance to the query and score for each.
        """
        results = self.__query_collection(embedding=embedding, k=fetch_k, filter=filter)

        embedding_list = [result.EmbeddingStore.embedding for result in results]

        mmr_selected = maximal_marginal_relevance(
            np.array(embedding, dtype=np.float32),
            embedding_list,
            k=k,
            lambda_mult=lambda_mult,
        )

        candidates = self._results_to_docs_and_scores(results)

        return [r for i, r in enumerate(candidates) if i in mmr_selected]

    def max_marginal_relevance_search(
        self,
        query: str,
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        filter: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND diversity
            among selected documents.

        Args:
            query (str): Text to look up documents similar to.
            k (int): Number of Documents to return. Defaults to 4.
            fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
                Defaults to 20.
            lambda_mult (float): Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.
            filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.

        Returns:
            List[Document]: List of Documents selected by maximal marginal relevance.
        """
        embedding = self.embedding_function.embed_query(query)
        return self.max_marginal_relevance_search_by_vector(
            embedding,
            k=k,
            fetch_k=fetch_k,
            lambda_mult=lambda_mult,
            **kwargs,
        )

    def max_marginal_relevance_search_with_score(
        self,
        query: str,
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        filter: Optional[dict] = None,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """Return docs selected using the maximal marginal relevance with score.

        Maximal marginal relevance optimizes for similarity to query AND diversity
            among selected documents.

        Args:
            query (str): Text to look up documents similar to.
            k (int): Number of Documents to return. Defaults to 4.
            fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
                Defaults to 20.
            lambda_mult (float): Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.
            filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.

        Returns:
            List[Tuple[Document, float]]: List of Documents selected by maximal marginal
                relevance to the query and score for each.
        """
        embedding = self.embedding_function.embed_query(query)
        docs = self.max_marginal_relevance_search_with_score_by_vector(
            embedding=embedding,
            k=k,
            fetch_k=fetch_k,
            lambda_mult=lambda_mult,
            filter=filter,
            **kwargs,
        )
        return docs

    def max_marginal_relevance_search_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        filter: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance
            to embedding vector.

        Maximal marginal relevance optimizes for similarity to query AND diversity
            among selected documents.

        Args:
            embedding (str): Text to look up documents similar to.
            k (int): Number of Documents to return. Defaults to 4.
            fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
                Defaults to 20.
            lambda_mult (float): Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.
            filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.

        Returns:
            List[Document]: List of Documents selected by maximal marginal relevance.
        """
        docs_and_scores = self.max_marginal_relevance_search_with_score_by_vector(
            embedding,
            k=k,
            fetch_k=fetch_k,
            lambda_mult=lambda_mult,
            filter=filter,
            **kwargs,
        )

        return _results_to_docs(docs_and_scores)

    async def amax_marginal_relevance_search_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        filter: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance."""

        # This is a temporary workaround to make the similarity search
        # asynchronous. The proper solution is to make the similarity search
        # asynchronous in the vector store implementations.
        func = partial(
            self.max_marginal_relevance_search_by_vector,
            embedding,
            k=k,
            fetch_k=fetch_k,
            lambda_mult=lambda_mult,
            filter=filter,
            **kwargs,
        )
        return await asyncio.get_event_loop().run_in_executor(None, func)
@@ -1,4 +1,4 @@
from .base import Redis
from .base import Redis, RedisVectorStoreRetriever
from .filters import (
    RedisFilter,
    RedisNum,
@@ -6,4 +6,11 @@ from .filters import (
    RedisText,
)

__all__ = ["Redis", "RedisFilter", "RedisTag", "RedisText", "RedisNum"]
__all__ = [
    "Redis",
    "RedisFilter",
    "RedisTag",
    "RedisText",
    "RedisNum",
    "RedisVectorStoreRetriever",
]
@@ -17,8 +17,10 @@ from typing import (
    Tuple,
    Type,
    Union,
    cast,
)

import numpy as np
import yaml

from langchain._api import deprecated
@@ -30,6 +32,7 @@ from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.utilities.redis import (
    _array_to_buffer,
    _buffer_to_array,
    check_redis_module_exist,
    get_client,
)
@@ -39,6 +42,7 @@ from langchain.vectorstores.redis.constants import (
    REDIS_REQUIRED_MODULES,
    REDIS_TAG_SEPARATOR,
)
from langchain.vectorstores.utils import maximal_marginal_relevance

logger = logging.getLogger(__name__)

@@ -370,6 +374,11 @@ class Redis(VectorStore):
        if "generate" in kwargs:
            kwargs.pop("generate")

        # see if the user specified keys
        keys = None
        if "keys" in kwargs:
            keys = kwargs.pop("keys")

        # Name of the search index if not given
        if not index_name:
            index_name = uuid.uuid4().hex
@@ -418,7 +427,7 @@ class Redis(VectorStore):
            instance._create_index(dim=len(embeddings[0]))

        # Add data to Redis
        keys = instance.add_texts(texts, metadatas, embeddings)
        keys = instance.add_texts(texts, metadatas, embeddings, keys=keys)
        return instance, keys

    @classmethod
@@ -803,8 +812,10 @@ class Redis(VectorStore):
                + "score_threshold will be removed in a future release.",
            )

        query_embedding = self._embeddings.embed_query(query)

        redis_query, params_dict = self._prepare_query(
            query,
            query_embedding,
            k=k,
            filter=filter,
            with_metadata=return_metadata,
@@ -858,13 +869,48 @@ class Redis(VectorStore):
                Defaults to None.
            return_metadata (bool, optional): Whether to return metadata.
                Defaults to True.
            distance_threshold (Optional[float], optional): Distance threshold
                for vector distance from query vector. Defaults to None.
            distance_threshold (Optional[float], optional): Maximum vector distance
                between selected documents and the query vector. Defaults to None.

        Returns:
            List[Document]: A list of documents that are most similar to the query
                text.
        """
        query_embedding = self._embeddings.embed_query(query)
        return self.similarity_search_by_vector(
            query_embedding,
            k=k,
            filter=filter,
            return_metadata=return_metadata,
            distance_threshold=distance_threshold,
            **kwargs,
        )

    def similarity_search_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        filter: Optional[RedisFilterExpression] = None,
        return_metadata: bool = True,
        distance_threshold: Optional[float] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Run similarity search between a query vector and the indexed vectors.

        Args:
            embedding (List[float]): The query vector for which to find similar
                documents.
            k (int): The number of documents to return. Default is 4.
            filter (RedisFilterExpression, optional): Optional metadata filter.
                Defaults to None.
            return_metadata (bool, optional): Whether to return metadata.
                Defaults to True.
            distance_threshold (Optional[float], optional): Maximum vector distance
                between selected documents and the query vector. Defaults to None.

        Returns:
            List[Document]: A list of documents that are most similar to the query
                text.
        """
        try:
            import redis
@@ -884,7 +930,7 @@ class Redis(VectorStore):
            )

        redis_query, params_dict = self._prepare_query(
            query,
            embedding,
            k=k,
            filter=filter,
            distance_threshold=distance_threshold,
@@ -920,6 +966,74 @@ class Redis(VectorStore):
            )
        return docs

    def max_marginal_relevance_search(
        self,
        query: str,
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        filter: Optional[RedisFilterExpression] = None,
        return_metadata: bool = True,
        distance_threshold: Optional[float] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND diversity
            among selected documents.

        Args:
            query (str): Text to look up documents similar to.
            k (int): Number of Documents to return. Defaults to 4.
            fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
            lambda_mult (float): Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.
            filter (RedisFilterExpression, optional): Optional metadata filter.
                Defaults to None.
            return_metadata (bool, optional): Whether to return metadata.
                Defaults to True.
            distance_threshold (Optional[float], optional): Maximum vector distance
                between selected documents and the query vector. Defaults to None.

        Returns:
            List[Document]: A list of Documents selected by maximal marginal relevance.
        """
        # Embed the query
        query_embedding = self._embeddings.embed_query(query)

        # Fetch the initial documents
        prefetch_docs = self.similarity_search_by_vector(
            query_embedding,
            k=fetch_k,
            filter=filter,
            return_metadata=return_metadata,
            distance_threshold=distance_threshold,
            **kwargs,
        )
        prefetch_ids = [doc.metadata["id"] for doc in prefetch_docs]

        # Get the embeddings for the fetched documents
        prefetch_embeddings = [
            _buffer_to_array(
                cast(
                    bytes,
                    self.client.hget(prefetch_id, self._schema.content_vector_key),
                ),
                dtype=self._schema.vector_dtype,
            )
            for prefetch_id in prefetch_ids
        ]

        # Select documents using maximal marginal relevance
        selected_indices = maximal_marginal_relevance(
            np.array(query_embedding), prefetch_embeddings, lambda_mult=lambda_mult, k=k
        )
        selected_docs = [prefetch_docs[i] for i in selected_indices]

        return selected_docs

    def _collect_metadata(self, result: "Document") -> Dict[str, Any]:
        """Collect metadata from Redis.

@@ -952,19 +1066,16 @@ class Redis(VectorStore):

    def _prepare_query(
        self,
        query: str,
        query_embedding: List[float],
        k: int = 4,
        filter: Optional[RedisFilterExpression] = None,
        distance_threshold: Optional[float] = None,
        with_metadata: bool = True,
        with_distance: bool = False,
    ) -> Tuple["Query", Dict[str, Any]]:
        # Creates embedding vector from user query
        embedding = self._embeddings.embed_query(query)

        # Creates Redis query
        params_dict: Dict[str, Union[str, bytes, float]] = {
            "vector": _array_to_buffer(embedding, self._schema.vector_dtype),
            "vector": _array_to_buffer(query_embedding, self._schema.vector_dtype),
        }

        # prepare return fields including score
@@ -1,5 +1,6 @@
from enum import Enum
from functools import wraps
from numbers import Number
from typing import Any, Callable, Dict, List, Optional, Union

from langchain.utilities.redis import TokenEscaper
@@ -56,14 +57,15 @@ class RedisFilterField:
        if operator not in self.OPERATORS:
            raise ValueError(
                f"Operator {operator} not supported by {self.__class__.__name__}. "
                + f"Supported operators are {self.OPERATORS.values()}"
                + f"Supported operators are {self.OPERATORS.values()}."
            )

        if not isinstance(val, val_type):
            raise TypeError(
                f"Right side argument passed to operator {self.OPERATORS[operator]} "
                f"with left side "
                f"argument {self.__class__.__name__} must be of type {val_type}"
                f"argument {self.__class__.__name__} must be of type {val_type}, "
                f"received value {val}"
            )
        self._value = val
        self._operator = operator
@@ -181,12 +183,12 @@ class RedisNum(RedisFilterField):
        RedisFilterOperator.GE: ">=",
    }
    OPERATOR_MAP: Dict[RedisFilterOperator, str] = {
        RedisFilterOperator.EQ: "@%s:[%i %i]",
        RedisFilterOperator.NE: "(-@%s:[%i %i])",
        RedisFilterOperator.GT: "@%s:[(%i +inf]",
        RedisFilterOperator.LT: "@%s:[-inf (%i]",
        RedisFilterOperator.GE: "@%s:[%i +inf]",
        RedisFilterOperator.LE: "@%s:[-inf %i]",
        RedisFilterOperator.EQ: "@%s:[%f %f]",
        RedisFilterOperator.NE: "(-@%s:[%f %f])",
        RedisFilterOperator.GT: "@%s:[(%f +inf]",
        RedisFilterOperator.LT: "@%s:[-inf (%f]",
        RedisFilterOperator.GE: "@%s:[%f +inf]",
        RedisFilterOperator.LE: "@%s:[-inf %f]",
    }

    def __str__(self) -> str:
@@ -210,83 +212,83 @@ class RedisNum(RedisFilterField):
        return self.OPERATOR_MAP[self._operator] % (self._field, self._value)

    @check_operator_misuse
    def __eq__(self, other: int) -> "RedisFilterExpression":
    def __eq__(self, other: Union[int, float]) -> "RedisFilterExpression":
        """Create a Numeric equality filter expression

        Args:
            other (int): The value to filter on.
            other (Number): The value to filter on.

        Example:
            >>> from langchain.vectorstores.redis import RedisNum
            >>> filter = RedisNum("zipcode") == 90210
        """
        self._set_value(other, int, RedisFilterOperator.EQ)
        self._set_value(other, Number, RedisFilterOperator.EQ)
        return RedisFilterExpression(str(self))

    @check_operator_misuse
    def __ne__(self, other: int) -> "RedisFilterExpression":
    def __ne__(self, other: Union[int, float]) -> "RedisFilterExpression":
        """Create a Numeric inequality filter expression

        Args:
            other (int): The value to filter on.
            other (Number): The value to filter on.

        Example:
            >>> from langchain.vectorstores.redis import RedisNum
            >>> filter = RedisNum("zipcode") != 90210
        """
        self._set_value(other, int, RedisFilterOperator.NE)
        self._set_value(other, Number, RedisFilterOperator.NE)
        return RedisFilterExpression(str(self))

    def __gt__(self, other: int) -> "RedisFilterExpression":
    def __gt__(self, other: Union[int, float]) -> "RedisFilterExpression":
        """Create a RedisNumeric greater than filter expression

        Args:
            other (int): The value to filter on.
            other (Number): The value to filter on.

        Example:
            >>> from langchain.vectorstores.redis import RedisNum
            >>> filter = RedisNum("age") > 18
        """
        self._set_value(other, int, RedisFilterOperator.GT)
        self._set_value(other, Number, RedisFilterOperator.GT)
        return RedisFilterExpression(str(self))

    def __lt__(self, other: int) -> "RedisFilterExpression":
    def __lt__(self, other: Union[int, float]) -> "RedisFilterExpression":
        """Create a Numeric less than filter expression

        Args:
            other (int): The value to filter on.
            other (Number): The value to filter on.

        Example:
            >>> from langchain.vectorstores.redis import RedisNum
            >>> filter = RedisNum("age") < 18
        """
        self._set_value(other, int, RedisFilterOperator.LT)
        self._set_value(other, Number, RedisFilterOperator.LT)
        return RedisFilterExpression(str(self))

    def __ge__(self, other: int) -> "RedisFilterExpression":
    def __ge__(self, other: Union[int, float]) -> "RedisFilterExpression":
        """Create a Numeric greater than or equal to filter expression

        Args:
            other (int): The value to filter on.
            other (Number): The value to filter on.

        Example:
            >>> from langchain.vectorstores.redis import RedisNum
            >>> filter = RedisNum("age") >= 18
        """
        self._set_value(other, int, RedisFilterOperator.GE)
        self._set_value(other, Number, RedisFilterOperator.GE)
        return RedisFilterExpression(str(self))

    def __le__(self, other: int) -> "RedisFilterExpression":
    def __le__(self, other: Union[int, float]) -> "RedisFilterExpression":
        """Create a Numeric less than or equal to filter expression

        Args:
            other (int): The value to filter on.
            other (Number): The value to filter on.

        Example:
            >>> from langchain.vectorstores.redis import RedisNum
            >>> filter = RedisNum("age") <= 18
        """
        self._set_value(other, int, RedisFilterOperator.LE)
        self._set_value(other, Number, RedisFilterOperator.LE)
        return RedisFilterExpression(str(self))
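With the switch from `%i` to `%f` formatting and `Number`-based type checks, float comparisons become legal on numeric fields; a short sketch (the field name is only an example):

```python
from langchain.vectorstores.redis import RedisNum

# Floats are now accepted by the numeric comparators.
price_filter = RedisNum("price") < 19.99
print(str(price_filter))  # a %f-rendered range query such as "@price:[-inf (19.990000]"
```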
@@ -1,3 +1,5 @@
from __future__ import annotations

import os
from enum import Enum
from pathlib import Path
@@ -5,19 +7,19 @@ from typing import Any, Dict, List, Optional, Union

import numpy as np
import yaml

# ignore type error here as it's a redis-py type problem
from redis.commands.search.field import (  # type: ignore
    NumericField,
    TagField,
    TextField,
    VectorField,
)
from typing_extensions import Literal
from typing_extensions import TYPE_CHECKING, Literal

from langchain.pydantic_v1 import BaseModel, Field, validator
from langchain.vectorstores.redis.constants import REDIS_VECTOR_DTYPE_MAP

if TYPE_CHECKING:
    from redis.commands.search.field import (  # type: ignore
        NumericField,
        TagField,
        TextField,
        VectorField,
    )


class RedisDistanceMetric(str, Enum):
    l2 = "L2"
@@ -38,6 +40,8 @@ class TextFieldSchema(RedisField):
    sortable: Optional[bool] = False

    def as_field(self) -> TextField:
        from redis.commands.search.field import TextField  # type: ignore

        return TextField(
            self.name,
            weight=self.weight,
@@ -55,6 +59,8 @@ class TagFieldSchema(RedisField):
    sortable: Optional[bool] = False

    def as_field(self) -> TagField:
        from redis.commands.search.field import TagField  # type: ignore

        return TagField(
            self.name,
            separator=self.separator,
@@ -69,6 +75,8 @@ class NumericFieldSchema(RedisField):
    sortable: Optional[bool] = False

    def as_field(self) -> NumericField:
        from redis.commands.search.field import NumericField  # type: ignore

        return NumericField(self.name, sortable=self.sortable, no_index=self.no_index)


@@ -97,6 +105,8 @@ class FlatVectorField(RedisVectorField):
    block_size: int = Field(default=1000)

    def as_field(self) -> VectorField:
        from redis.commands.search.field import VectorField  # type: ignore

        return VectorField(
            self.name,
            self.algorithm,
@@ -118,6 +128,8 @@ class HNSWVectorField(RedisVectorField):
    epsilon: float = Field(default=0.8)

    def as_field(self) -> VectorField:
        from redis.commands.search.field import VectorField  # type: ignore

        return VectorField(
            self.name,
            self.algorithm,
401	libs/langchain/langchain/vectorstores/vearch.py	Normal file
@@ -0,0 +1,401 @@
from __future__ import annotations

import os
import time
import uuid
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple, Type

import numpy as np

from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.base import VectorStore

if TYPE_CHECKING:
    import vearch
DEFAULT_TOPN = 4


class VearchDb(VectorStore):
    _DEFAULT_TABLE_NAME = "langchain_vearch"

    def __init__(
        self,
        embedding_function: Embeddings,
        table_name: str = _DEFAULT_TABLE_NAME,
        metadata_path: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """Initialize vearch vector store"""
        try:
            import vearch
        except ImportError:
            raise ValueError(
                "Could not import vearch python package. "
                "Please install it with `pip install vearch`."
            )

        if metadata_path is None:
            metadata_path = os.getcwd().replace("\\", "/")
        if not os.path.isdir(metadata_path):
            os.makedirs(metadata_path)
        log_path = os.path.join(metadata_path, "log")
        if not os.path.isdir(log_path):
            os.makedirs(log_path)
        self.vearch_engine = vearch.Engine(metadata_path, log_path)

        if not table_name:
            table_name = self._DEFAULT_TABLE_NAME
            table_name += "_"
            table_name += str(uuid.uuid4()).split("-")[-1]
        self.using_table_name = table_name
        self.using_metapath = metadata_path
        self.embedding_func = embedding_function

    @property
    def embeddings(self) -> Optional[Embeddings]:
        return self.embedding_func

    @classmethod
    def from_documents(
        cls: Type[VearchDb],
        documents: List[Document],
        embedding: Embeddings,
        table_name: str = "langchain_vearch",
        metadata_path: Optional[str] = None,
        **kwargs: Any,
    ) -> VearchDb:
        """Return Vearch VectorStore"""

        texts = [d.page_content for d in documents]
        metadatas = [d.metadata for d in documents]

        return cls.from_texts(
            texts=texts,
            embedding=embedding,
            metadatas=metadatas,
            table_name=table_name,
            metadata_path=metadata_path,
            **kwargs,
        )

    @classmethod
    def from_texts(
        cls: Type[VearchDb],
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        table_name: str = _DEFAULT_TABLE_NAME,
        metadata_path: Optional[str] = None,
        **kwargs: Any,
    ) -> VearchDb:
        """Return Vearch VectorStore"""

        vearch_db = cls(
            embedding_function=embedding,
            table_name=table_name,
            metadata_path=metadata_path,
        )
        vearch_db.add_texts(texts=texts, metadatas=metadatas)
        return vearch_db

    def _create_table(
        self,
        dim: int = 1024,
        filed_list: List[dict] = [
            {"filed": "text", "type": "str"},
            {"filed": "metadata", "type": "str"},
        ],
    ) -> int:
        """
        Create VectorStore Table
        Args:
            dim:dimension of vector
            fileds_list: the filed you want to store
        Return:
            code,0 for success,1 for failed
        """
        type_dict = {"int": vearch.dataType.INT, "str": vearch.dataType.STRING}
        engine_info = {
            "index_size": 10000,
            "retrieval_type": "IVFPQ",
            "retrieval_param": {"ncentroids": 2048, "nsubvector": 32},
        }
        fields = [
            vearch.GammaFieldInfo(fi["filed"], type_dict[fi["type"]])
            for fi in filed_list
        ]
        vector_field = vearch.GammaVectorInfo(
            name="text_embedding",
            type=vearch.dataType.VECTOR,
            is_index=True,
            dimension=dim,
            model_id="",
            store_type="MemoryOnly",
            store_param={"cache_size": 10000},
            has_source=False,
        )
        response_code = self.vearch_engine.create_table(
            engine_info,
            name=self.using_table_name,
            fields=fields,
            vector_field=vector_field,
        )
        return response_code

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """
        Returns:
            List of ids from adding the texts into the vectorstore.
        """
        embeddings = None
        if self.embedding_func is not None:
            embeddings = self.embedding_func.embed_documents(list(texts))
        table_path = os.path.join(
            self.using_metapath, self.using_table_name + ".schema"
        )
        if not os.path.exists(table_path):
            if embeddings is None:
                raise ValueError("embeddings is None")
            dim = len(embeddings[0])
            response_code = self._create_table(dim)
            if response_code:
                raise ValueError("create table failed!!!")
        if embeddings is not None and metadatas is not None:
            doc_items = []
            for text, metadata, embed in zip(texts, metadatas, embeddings):
                profiles: dict[str, Any] = {}
                profiles["text"] = text
                profiles["metadata"] = metadata["source"]
                profiles["text_embedding"] = embed
                doc_items.append(profiles)

            docid = self.vearch_engine.add(doc_items)
            t_time = 0
            while len(docid) != len(embeddings):
                time.sleep(0.5)
                if t_time > 6:
                    break
                t_time += 1
            self.vearch_engine.dump()
        return docid

    def _load(self) -> None:
        """
        load vearch engine
        """
        self.vearch_engine.load()

    @classmethod
    def load_local(
        cls,
        embedding: Embeddings,
        table_name: str = _DEFAULT_TABLE_NAME,
        metadata_path: Optional[str] = None,
        **kwargs: Any,
    ) -> VearchDb:
        """Load the local specified table.
        Returns:
            Success or failure of loading the local specified table
        """
        if not metadata_path:
            raise ValueError("No metadata path!!!")
        if not table_name:
            raise ValueError("No table name!!!")
        table_path = os.path.join(metadata_path, table_name + ".schema")
        if not os.path.exists(table_path):
            raise ValueError("vearch vectorbase table not exist!!!")
        vearch_db = cls(
            embedding_function=embedding,
            table_name=table_name,
            metadata_path=metadata_path,
        )
        vearch_db._load()
        return vearch_db

    def similarity_search(
        self,
        query: str,
        k: int = DEFAULT_TOPN,
        **kwargs: Any,
    ) -> List[Document]:
        """
        Return docs most similar to query.

        """
        if self.vearch_engine is None:
            raise ValueError("Vearch engine is None!!!")
        if self.embedding_func is None:
            raise ValueError("embedding_func is None!!!")
        embeddings = self.embedding_func.embed_query(query)
        docs = self.similarity_search_by_vector(embeddings, k)
        return docs

    def similarity_search_by_vector(
        self,
        embedding: List[float],
        k: int = DEFAULT_TOPN,
        **kwargs: Any,
    ) -> List[Document]:
        """The most k similar documents and scores of the specified query.

        Args:
            embeddings: embedding vector of the query.
            k: The k most similar documents to the text query.
            min_score: the score of similar documents to the text query
        Returns:
            The k most similar documents to the specified text query.
            0 is dissimilar, 1 is the most similar.
        """
        query_data = {
            "vector": [
                {
                    "field": "text_embedding",
                    "feature": np.array(embedding),
                }
            ],
            "fields": [],
            "is_brute_search": 1,
            "retrieval_param": {"metric_type": "InnerProduct", "nprobe": 20},
            "topn": k,
        }
        query_result = self.vearch_engine.search(query_data)
        docs = []
        for item in query_result[0]["result_items"]:
            content = ""
            meta_data = {}
            for item_key in item:
                if item_key == "text":
                    content = item[item_key]
                    continue
                if item_key == "metadata":
                    meta_data["source"] = item[item_key]
                    continue
            docs.append(Document(page_content=content, metadata=meta_data))
        return docs

    def similarity_search_with_score(
        self,
        query: str,
        k: int = DEFAULT_TOPN,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """The most k similar documents and scores of the specified query.

        Args:
            embeddings: embedding vector of the query.
            k: The k most similar documents to the text query.
            min_score: the score of similar documents to the text query
        Returns:
            The k most similar documents to the specified text query.
            0 is dissimilar, 1 is the most similar.
        """
        if self.embedding_func is None:
            raise ValueError("embedding_func is None!!!")
        embeddings = self.embedding_func.embed_query(query)
        query_data = {
            "vector": [
                {
                    "field": "text_embedding",
                    "feature": np.array(embeddings),
                }
            ],
            "fields": [],
            "is_brute_search": 1,
            "retrieval_param": {"metric_type": "InnerProduct", "nprobe": 20},
            "topn": k,
        }
        query_result = self.vearch_engine.search(query_data)
        results: List[Tuple[Document, float]] = []
        for item in query_result[0]["result_items"]:
            content = ""
            meta_data = {}
            for item_key in item:
                if item_key == "text":
                    content = item[item_key]
                    continue
                if item_key == "metadata":
                    meta_data["source"] = item[item_key]
                    continue
                if item_key == "score":
                    score = item[item_key]
                    continue
            tmp_res = (Document(page_content=content, metadata=meta_data), score)
            results.append(tmp_res)
        return results

    def _similarity_search_with_relevance_scores(
        self,
        query: str,
        k: int = 4,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        return self.similarity_search_with_score(query, k, **kwargs)

    def delete(
        self,
        ids: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> Optional[bool]:
        """Delete the documents which have the specified ids.

        Args:
            ids: The ids of the embedding vectors.
            **kwargs: Other keyword arguments that subclasses might use.
        Returns:
            Optional[bool]: True if deletion is successful.
            False otherwise, None if not implemented.
        """
        if self.vearch_engine is None:
            raise ValueError("Verach Engine is None!!!")
        ret: Optional[bool] = None
        tmp_res = []
        if ids is None or ids.__len__() == 0:
            return ret
        for _id in ids:
            ret = self.vearch_engine.del_doc(_id)
            tmp_res.append(ret)
        ret = all(i == 0 for i in tmp_res)
        return ret

    def get(
        self,
        ids: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> Dict[str, Document]:
        """Return docs according ids.

        Args:
            ids: The ids of the embedding vectors.
        Returns:
            Documents which satisfy the input conditions.
        """

        if self.vearch_engine is None:
            raise ValueError("vearch engine is None!!!")
        results: Dict[str, Document] = {}
        if ids is None or ids.__len__() == 0:
            return results
        for id in ids:
            docs_detail = self.vearch_engine.get_doc_by_id(id)
            if docs_detail == {}:
                continue

            content = ""
            meta_info = {}
            for field in docs_detail:
                if field == "text":
                    content = docs_detail[field]
                    continue
                elif field == "metadata":
                    meta_info["source"] = docs_detail[field]
                    continue
            results[docs_detail["_id"]] = Document(
                page_content=content, metadata=meta_info
            )
        return results
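A hedged end-to-end sketch of the new `VearchDb` store (requires a local `vearch` install; the embedding class is an assumption, any `Embeddings` implementation should work, and the document must carry a `source` metadata key since `add_texts` stores it):

```python
from langchain.docstore.document import Document
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.vectorstores.vearch import VearchDb

docs = [Document(page_content="hello world", metadata={"source": "demo.txt"})]
store = VearchDb.from_documents(docs, HuggingFaceEmbeddings(), table_name="demo_table")
hits = store.similarity_search("hello", k=1)
```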
@@ -1,6 +1,6 @@
[tool.poetry]
name = "langchain"
version = "0.0.285"
version = "0.0.286"
description = "Building applications with LLMs through composability"
authors = []
license = "MIT"
@@ -0,0 +1,28 @@
"""Test RetrievalQA functionality."""
from langchain.chains import RetrievalQAWithSourcesChain
from langchain.chains.loading import load_chain
from langchain.document_loaders import DirectoryLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS


def test_retrieval_qa_with_sources_chain_saving_loading(tmp_path: str) -> None:
    """Test saving and loading."""
    loader = DirectoryLoader("docs/extras/modules/", glob="*.txt")
    documents = loader.load()
    text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
    texts = text_splitter.split_documents(documents)
    embeddings = OpenAIEmbeddings()
    docsearch = FAISS.from_documents(texts, embeddings)
    qa = RetrievalQAWithSourcesChain.from_llm(
        llm=OpenAI(), retriever=docsearch.as_retriever()
    )
    qa("What did the president say about Ketanji Brown Jackson?")

    file_path = tmp_path + "/RetrievalQAWithSourcesChain.yaml"
    qa.save(file_path=file_path)
    qa_loaded = load_chain(file_path, retriever=docsearch.as_retriever())

    assert qa_loaded == qa
178	libs/langchain/tests/integration_tests/chat_models/test_konko.py	Normal file
@@ -0,0 +1,178 @@
|
||||
"""Evaluate ChatKonko Interface."""
|
||||
from typing import Any
|
||||
|
||||
import pytest
|
||||
|
||||
from langchain.callbacks.manager import CallbackManager
|
||||
from langchain.chat_models.konko import ChatKonko
|
||||
from langchain.schema import (
|
||||
ChatGeneration,
|
||||
ChatResult,
|
||||
LLMResult,
|
||||
)
|
||||
from langchain.schema.messages import BaseMessage, HumanMessage, SystemMessage
|
||||
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
|
||||
|
||||
|
||||
def test_konko_chat_test() -> None:
|
||||
"""Evaluate basic ChatKonko functionality."""
|
||||
chat_instance = ChatKonko(max_tokens=10)
|
||||
msg = HumanMessage(content="Hi")
|
||||
chat_response = chat_instance([msg])
|
||||
assert isinstance(chat_response, BaseMessage)
|
||||
assert isinstance(chat_response.content, str)
|
||||
|
||||
|
||||
def test_konko_chat_test_openai() -> None:
|
||||
"""Evaluate basic ChatKonko functionality."""
|
||||
chat_instance = ChatKonko(max_tokens=10, model="gpt-3.5-turbo")
|
||||
msg = HumanMessage(content="Hi")
|
||||
chat_response = chat_instance([msg])
|
||||
assert isinstance(chat_response, BaseMessage)
|
||||
assert isinstance(chat_response.content, str)
|
||||
|
||||
|
||||
def test_konko_model_test() -> None:
|
||||
"""Check how ChatKonko manages model_name."""
|
||||
chat_instance = ChatKonko(model="alpha")
|
||||
assert chat_instance.model == "alpha"
|
||||
chat_instance = ChatKonko(model="beta")
|
||||
assert chat_instance.model == "beta"
|
||||
|
||||
|
||||
def test_konko_available_model_test() -> None:
|
||||
"""Check how ChatKonko manages model_name."""
|
||||
chat_instance = ChatKonko(max_tokens=10, n=2)
|
||||
res = chat_instance.get_available_models()
|
||||
assert isinstance(res, set)
|
||||
|
||||
|
||||
def test_konko_system_msg_test() -> None:
|
||||
"""Evaluate ChatKonko's handling of system messages."""
|
||||
chat_instance = ChatKonko(max_tokens=10)
|
||||
sys_msg = SystemMessage(content="Initiate user chat.")
|
||||
user_msg = HumanMessage(content="Hi there")
|
||||
chat_response = chat_instance([sys_msg, user_msg])
|
||||
assert isinstance(chat_response, BaseMessage)
|
||||
assert isinstance(chat_response.content, str)
|
||||
|
||||
|
||||
def test_konko_generation_test() -> None:
|
||||
"""Check ChatKonko's generation ability."""
|
||||
chat_instance = ChatKonko(max_tokens=10, n=2)
|
||||
msg = HumanMessage(content="Hi")
|
||||
gen_response = chat_instance.generate([[msg], [msg]])
|
||||
assert isinstance(gen_response, LLMResult)
|
||||
assert len(gen_response.generations) == 2
|
||||
for gen_list in gen_response.generations:
|
||||
assert len(gen_list) == 2
|
||||
for gen in gen_list:
|
||||
assert isinstance(gen, ChatGeneration)
|
||||
assert isinstance(gen.text, str)
|
||||
assert gen.text == gen.message.content
|
||||
|
||||
|
||||
def test_konko_multiple_outputs_test() -> None:
|
||||
"""Test multiple completions with ChatKonko."""
|
||||
chat_instance = ChatKonko(max_tokens=10, n=5)
|
||||
msg = HumanMessage(content="Hi")
|
||||
gen_response = chat_instance._generate([msg])
|
||||
assert isinstance(gen_response, ChatResult)
|
||||
assert len(gen_response.generations) == 5
|
||||
for gen in gen_response.generations:
|
||||
assert isinstance(gen.message, BaseMessage)
|
||||
assert isinstance(gen.message.content, str)
|
||||
|
||||
|
||||
def test_konko_streaming_callback_test() -> None:
|
||||
"""Evaluate streaming's token callback functionality."""
|
||||
callback_instance = FakeCallbackHandler()
|
||||
callback_mgr = CallbackManager([callback_instance])
|
||||
chat_instance = ChatKonko(
|
||||
max_tokens=10,
|
||||
streaming=True,
|
||||
temperature=0,
|
||||
callback_manager=callback_mgr,
|
||||
verbose=True,
|
||||
)
|
||||
msg = HumanMessage(content="Hi")
|
||||
chat_response = chat_instance([msg])
|
||||
assert callback_instance.llm_streams > 0
|
||||
assert isinstance(chat_response, BaseMessage)
|
||||
|
||||
|
||||
def test_konko_streaming_info_test() -> None:
|
||||
"""Ensure generation details are retained during streaming."""
|
||||
|
||||
class TestCallback(FakeCallbackHandler):
|
||||
data_store: dict = {}
|
||||
|
||||
def on_llm_end(self, *args: Any, **kwargs: Any) -> Any:
|
||||
self.data_store["generation"] = args[0]
|
||||
|
||||
callback_instance = TestCallback()
|
||||
callback_mgr = CallbackManager([callback_instance])
|
||||
chat_instance = ChatKonko(
|
||||
max_tokens=2,
|
||||
temperature=0,
|
||||
callback_manager=callback_mgr,
|
||||
)
|
||||
list(chat_instance.stream("hey"))
|
||||
gen_data = callback_instance.data_store["generation"]
|
||||
assert gen_data.generations[0][0].text == " Hey"
|
||||
|
||||
|
||||
def test_konko_llm_model_name_test() -> None:
|
||||
"""Check if llm_output has model info."""
|
||||
chat_instance = ChatKonko(max_tokens=10)
|
||||
msg = HumanMessage(content="Hi")
|
||||
llm_data = chat_instance.generate([[msg]])
|
||||
assert llm_data.llm_output is not None
|
||||
assert llm_data.llm_output["model_name"] == chat_instance.model
|
||||
|
||||
|
||||
def test_konko_streaming_model_name_test() -> None:
|
||||
"""Check model info during streaming."""
|
||||
chat_instance = ChatKonko(max_tokens=10, streaming=True)
|
||||
msg = HumanMessage(content="Hi")
|
||||
llm_data = chat_instance.generate([[msg]])
|
||||
assert llm_data.llm_output is not None
|
||||
assert llm_data.llm_output["model_name"] == chat_instance.model
|
||||
|
||||
|
||||
def test_konko_streaming_param_validation_test() -> None:
|
||||
"""Ensure correct token callback during streaming."""
|
||||
with pytest.raises(ValueError):
|
||||
ChatKonko(
|
||||
max_tokens=10,
|
||||
streaming=True,
|
||||
temperature=0,
|
||||
n=5,
|
||||
)
|
||||
|
||||
|
||||
def test_konko_additional_args_test() -> None:
    """Evaluate extra arguments for ChatKonko."""
    chat_instance = ChatKonko(extra=3, max_tokens=10)
    assert chat_instance.max_tokens == 10
    assert chat_instance.model_kwargs == {"extra": 3}

    chat_instance = ChatKonko(extra=3, model_kwargs={"addition": 2})
    assert chat_instance.model_kwargs == {"extra": 3, "addition": 2}

    with pytest.raises(ValueError):
        ChatKonko(extra=3, model_kwargs={"extra": 2})

    with pytest.raises(ValueError):
        ChatKonko(model_kwargs={"temperature": 0.2})

    with pytest.raises(ValueError):
        ChatKonko(model_kwargs={"model": "text-davinci-003"})

def test_konko_token_streaming_test() -> None:
    """Check token streaming for ChatKonko."""
    chat_instance = ChatKonko(max_tokens=10)

    for token in chat_instance.stream("Just a test"):
        assert isinstance(token.content, str)

@@ -0,0 +1,26 @@
"""Test Confident."""


def test_confident_deepeval() -> None:
    """Test a valid call using the DeepEval callback handler."""
    from deepeval.metrics.answer_relevancy import AnswerRelevancy

    from langchain.callbacks.confident_callback import DeepEvalCallbackHandler
    from langchain.llms import OpenAI

    answer_relevancy = AnswerRelevancy(minimum_score=0.3)
    deepeval_callback = DeepEvalCallbackHandler(
        implementation_name="exampleImplementation", metrics=[answer_relevancy]
    )
    llm = OpenAI(
        temperature=0,
        callbacks=[deepeval_callback],
        verbose=True,
        openai_api_key="<YOUR_API_KEY>",
    )
    llm.generate(
        [
            "What is the best evaluation tool out there? (no bias at all)",
        ]
    )
    assert answer_relevancy.is_successful(), "Answer not relevant"

@@ -279,3 +279,31 @@ def test_pgvector_retriever_search_threshold_custom_normalization_fn() -> None:
    )
    output = retriever.get_relevant_documents("foo")
    assert output == []


def test_pgvector_max_marginal_relevance_search() -> None:
    """Test max marginal relevance search."""
    texts = ["foo", "bar", "baz"]
    docsearch = PGVector.from_texts(
        texts=texts,
        collection_name="test_collection",
        embedding=FakeEmbeddingsWithAdaDimension(),
        connection_string=CONNECTION_STRING,
        pre_delete_collection=True,
    )
    output = docsearch.max_marginal_relevance_search("foo", k=1, fetch_k=3)
    assert output == [Document(page_content="foo")]


def test_pgvector_max_marginal_relevance_search_with_score() -> None:
    """Test max marginal relevance search with relevance scores."""
    texts = ["foo", "bar", "baz"]
    docsearch = PGVector.from_texts(
        texts=texts,
        collection_name="test_collection",
        embedding=FakeEmbeddingsWithAdaDimension(),
        connection_string=CONNECTION_STRING,
        pre_delete_collection=True,
    )
    output = docsearch.max_marginal_relevance_search_with_score("foo", k=1, fetch_k=3)
    assert output == [(Document(page_content="foo"), 0.0)]

@@ -136,6 +136,32 @@ def test_redis_from_documents(texts: List[str]) -> None:
    assert drop(docsearch.index_name)


def test_custom_keys(texts: List[str]) -> None:
    keys_in = ["test_key_1", "test_key_2", "test_key_3"]
    docsearch, keys_out = Redis.from_texts_return_keys(
        texts, FakeEmbeddings(), redis_url=TEST_REDIS_URL, keys=keys_in
    )
    assert keys_in == keys_out
    assert drop(docsearch.index_name)


def test_custom_keys_from_docs(texts: List[str]) -> None:
    keys_in = ["test_key_1", "test_key_2", "test_key_3"]
    docs = [Document(page_content=t, metadata={"a": "b"}) for t in texts]

    docsearch = Redis.from_documents(
        docs, FakeEmbeddings(), redis_url=TEST_REDIS_URL, keys=keys_in
    )
    client = docsearch.client
    # test keys are correct
    assert client.hget("test_key_1", "content")
    # test metadata is stored
    assert client.hget("test_key_1", "a") == bytes("b", "utf-8")
    # test all keys are stored
    assert client.hget("test_key_2", "content")
    assert drop(docsearch.index_name)


# -- test filters -- #

@@ -187,12 +213,21 @@ def test_redis_filters_1(
        documents, FakeEmbeddings(), redis_url=TEST_REDIS_URL
    )

-   output = docsearch.similarity_search("foo", k=3, filter=filter_expr)
+   sim_output = docsearch.similarity_search("foo", k=3, filter=filter_expr)
+   mmr_output = docsearch.max_marginal_relevance_search(
+       "foo", k=3, fetch_k=5, filter=filter_expr
+   )

-   assert len(output) == expected_length
+   assert len(sim_output) == expected_length
+   assert len(mmr_output) == expected_length

    if expected_nums is not None:
-       for out in output:
+       for out in sim_output:
            assert (
                out.metadata["text"] in expected_nums
                or int(out.metadata["num"]) in expected_nums
            )
+       for out in mmr_output:
+           assert (
+               out.metadata["text"] in expected_nums
+               or int(out.metadata["num"]) in expected_nums
@@ -302,7 +337,6 @@ def test_similarity_search_limit_distance(texts: List[str]) -> None:
def test_similarity_search_with_score_with_limit_distance(texts: List[str]) -> None:
    """Test similarity search with score with limit score."""

    docsearch = Redis.from_texts(
        texts, ConsistentFakeEmbeddings(), redis_url=TEST_REDIS_URL
    )
@@ -317,6 +351,32 @@ def test_similarity_search_with_score_with_limit_distance(texts: List[str]) -> N
    assert drop(docsearch.index_name)


def test_max_marginal_relevance_search(texts: List[str]) -> None:
    """Test max marginal relevance search."""
    docsearch = Redis.from_texts(texts, FakeEmbeddings(), redis_url=TEST_REDIS_URL)

    mmr_output = docsearch.max_marginal_relevance_search(texts[0], k=3, fetch_k=3)
    sim_output = docsearch.similarity_search(texts[0], k=3)
    assert mmr_output == sim_output

    mmr_output = docsearch.max_marginal_relevance_search(texts[0], k=2, fetch_k=3)
    assert len(mmr_output) == 2
    assert mmr_output[0].page_content == texts[0]
    assert mmr_output[1].page_content == texts[1]

    mmr_output = docsearch.max_marginal_relevance_search(
        texts[0], k=2, fetch_k=3, lambda_mult=0.1  # more diversity
    )
    assert len(mmr_output) == 2
    assert mmr_output[0].page_content == texts[0]
    assert mmr_output[1].page_content == texts[2]

    # if fetch_k < k, then the output will be less than k
    mmr_output = docsearch.max_marginal_relevance_search(texts[0], k=3, fetch_k=2)
    assert len(mmr_output) == 2
    assert drop(docsearch.index_name)

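# Aside, not part of this diff: the lambda_mult assertions above follow the usual
# maximal-marginal-relevance selection rule. A minimal illustrative sketch (not
# LangChain's exact implementation) over precomputed similarity scores:
def mmr_select(query_sims: list, doc_sims: list, k: int, lambda_mult: float = 0.5) -> list:
    """Pick k indices balancing relevance to the query against redundancy."""
    selected: list = []
    while len(selected) < min(k, len(query_sims)):
        best_idx, best_score = -1, float("-inf")
        for i in range(len(query_sims)):
            if i in selected:
                continue
            # Penalize similarity to anything already selected (redundancy).
            redundancy = max((doc_sims[i][j] for j in selected), default=0.0)
            score = lambda_mult * query_sims[i] - (1 - lambda_mult) * redundancy
            if score > best_score:
                best_idx, best_score = i, score
        selected.append(best_idx)
    # lambda_mult near 1 favors pure relevance; near 0 favors diversity, which is
    # why lambda_mult=0.1 above surfaces texts[2] instead of texts[1].
    return selected
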
def test_delete(texts: List[str]) -> None:
    """Test deleting a new document"""
    docsearch = Redis.from_texts(texts, FakeEmbeddings(), redis_url=TEST_REDIS_URL)

@@ -0,0 +1,122 @@
from typing import Dict, Tuple

import pytest

from langchain.chains.query_constructor.ir import (
    Comparator,
    Comparison,
    Operation,
    Operator,
    StructuredQuery,
)
from langchain.retrievers.self_query.redis import RedisTranslator
from langchain.vectorstores.redis.filters import (
    RedisFilterExpression,
    RedisNum,
    RedisTag,
    RedisText,
)
from langchain.vectorstores.redis.schema import (
    NumericFieldSchema,
    RedisModel,
    TagFieldSchema,
    TextFieldSchema,
)


@pytest.fixture
def translator() -> RedisTranslator:
    schema = RedisModel(
        text=[TextFieldSchema(name="bar")],
        numeric=[NumericFieldSchema(name="foo")],
        tag=[TagFieldSchema(name="tag")],
    )
    return RedisTranslator(schema)


@pytest.mark.parametrize(
    ("comp", "expected"),
    [
        (
            Comparison(comparator=Comparator.LT, attribute="foo", value=1),
            RedisNum("foo") < 1,
        ),
        (
            Comparison(comparator=Comparator.LIKE, attribute="bar", value="baz*"),
            RedisText("bar") % "baz*",
        ),
        (
            Comparison(
                comparator=Comparator.CONTAIN, attribute="tag", value=["blue", "green"]
            ),
            RedisTag("tag") == ["blue", "green"],
        ),
    ],
)
def test_visit_comparison(
    translator: RedisTranslator, comp: Comparison, expected: RedisFilterExpression
) -> None:
    actual = translator.visit_comparison(comp)
    assert str(expected) == str(actual)

def test_visit_operation(translator: RedisTranslator) -> None:
    op = Operation(
        operator=Operator.AND,
        arguments=[
            Comparison(comparator=Comparator.LT, attribute="foo", value=2),
            Comparison(comparator=Comparator.EQ, attribute="bar", value="baz"),
            Comparison(comparator=Comparator.EQ, attribute="tag", value="high"),
        ],
    )
    expected = (RedisNum("foo") < 2) & (
        (RedisText("bar") == "baz") & (RedisTag("tag") == "high")
    )
    actual = translator.visit_operation(op)
    assert str(expected) == str(actual)


def test_visit_structured_query_no_filter(translator: RedisTranslator) -> None:
    query = "What is the capital of France?"

    structured_query = StructuredQuery(
        query=query,
        filter=None,
    )
    expected: Tuple[str, Dict] = (query, {})
    actual = translator.visit_structured_query(structured_query)
    assert expected == actual


def test_visit_structured_query_comparison(translator: RedisTranslator) -> None:
    query = "What is the capital of France?"
    comp = Comparison(comparator=Comparator.GTE, attribute="foo", value=2)
    structured_query = StructuredQuery(
        query=query,
        filter=comp,
    )
    expected_filter = RedisNum("foo") >= 2
    actual_query, actual_filter = translator.visit_structured_query(structured_query)
    assert actual_query == query
    assert str(actual_filter["filter"]) == str(expected_filter)


def test_visit_structured_query_operation(translator: RedisTranslator) -> None:
    query = "What is the capital of France?"
    op = Operation(
        operator=Operator.OR,
        arguments=[
            Comparison(comparator=Comparator.EQ, attribute="foo", value=2),
            Comparison(comparator=Comparator.CONTAIN, attribute="bar", value="baz"),
        ],
    )
    structured_query = StructuredQuery(
        query=query,
        filter=op,
    )
    expected_filter = (RedisNum("foo") == 2) | (RedisText("bar") == "baz")
    actual_query, actual_filter = translator.visit_structured_query(structured_query)
    assert actual_query == query
    assert str(actual_filter["filter"]) == str(expected_filter)

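# Context, not part of this diff: a rough sketch of where RedisTranslator sits in a
# self-query retriever. The names llm, redis_store, metadata_field_info, and schema
# below are assumed placeholders rather than values from this changeset; the
# translator converts the LLM-produced StructuredQuery into a RedisFilterExpression
# that is passed as the filter of the underlying search call.
#
#     from langchain.retrievers.self_query.base import SelfQueryRetriever
#     from langchain.retrievers.self_query.redis import RedisTranslator
#
#     retriever = SelfQueryRetriever.from_llm(
#         llm=llm,                                  # any chat model / LLM
#         vectorstore=redis_store,                  # an existing Redis vector store
#         document_contents="Descriptions of items",
#         metadata_field_info=metadata_field_info,  # AttributeInfo list for foo / bar / tag
#         structured_query_translator=RedisTranslator(schema),  # schema: the index's RedisModel
#     )
#     docs = retriever.get_relevant_documents("items where foo is at least 2")
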
@@ -1507,7 +1507,7 @@ async def test_async_retrying(mocker: MockerFixture) -> None:
    with pytest.raises(ValueError):
        await runnable.with_retry(
            stop_after_attempt=2,
-           retry_if_exception_type=(ValueError,),
+           retry_if_exception_type=(ValueError, KeyError),
        ).ainvoke(1)

    assert _lambda_mock.call_count == 2  # retried

@@ -498,6 +498,73 @@ public class HelloWorld {
    ]


def test_csharp_code_splitter() -> None:
    splitter = RecursiveCharacterTextSplitter.from_language(
        Language.CSHARP, chunk_size=CHUNK_SIZE, chunk_overlap=0
    )
    code = """
using System;
class Program
{
    static void Main()
    {
        int age = 30; // Change the age value as needed

        // Categorize the age without any console output
        if (age < 18)
        {
            // Age is under 18
        }
        else if (age >= 18 && age < 65)
        {
            // Age is an adult
        }
        else
        {
            // Age is a senior citizen
        }
    }
}
    """

    chunks = splitter.split_text(code)
    assert chunks == [
        "using System;",
        "class Program\n{",
        "static void",
        "Main()",
        "{",
        "int age",
        "= 30; // Change",
        "the age value",
        "as needed",
        "//",
        "Categorize the",
        "age without any",
        "console output",
        "if (age",
        "< 18)",
        "{",
        "//",
        "Age is under 18",
        "}",
        "else if",
        "(age >= 18 &&",
        "age < 65)",
        "{",
        "//",
        "Age is an adult",
        "}",
        "else",
        "{",
        "//",
        "Age is a senior",
        "citizen",
        "}\n }",
        "}",
    ]

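# Aside, not part of this diff: from_language only seeds the splitter with
# language-specific separators before recursing down to whitespace, so the
# construction above is roughly equivalent to the following sketch (assuming the
# get_separators_for_language helper shipped with this release):
#
#     seps = RecursiveCharacterTextSplitter.get_separators_for_language(Language.CSHARP)
#     splitter = RecursiveCharacterTextSplitter(
#         separators=seps, chunk_size=CHUNK_SIZE, chunk_overlap=0
#     )
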
def test_cpp_code_splitter() -> None:
    splitter = RecursiveCharacterTextSplitter.from_language(
        Language.CPP, chunk_size=CHUNK_SIZE, chunk_overlap=0

tests/integration_tests/vectorstores/test_vearch.py (new file, 97 lines)
@@ -0,0 +1,97 @@
from langchain.docstore.document import Document
from langchain.vectorstores.vearch import VearchDb
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings


def test_vearch() -> None:
    """Test end to end: create a Vearch store, add vectors, and search."""
    # Sample texts are in Chinese; they describe Vearch as a vector database for
    # storing and searching LLM embeddings, the models it supports, and how it is
    # built and installed.
    texts = [
        "Vearch 是一款存储大语言模型数据的向量数据库,用于存储和快速搜索模型embedding后的向量,可用于基于个人知识库的大模型应用",
        "Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库",
        "vearch 是基于C语言,go语言开发的,并提供python接口,可以直接通过pip安装",
    ]
    metadatas = [
        {
            "source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt"
        },
        {
            "source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt"
        },
        {
            "source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt"
        },
    ]
    vearch_db = VearchDb.from_texts(
        texts=texts,
        embedding=FakeEmbeddings(),
        metadatas=metadatas,
        table_name="test_vearch",
        metadata_path="./",
    )
    result = vearch_db.similarity_search(
        "Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库", 1
    )
    assert result == [
        Document(
            page_content="Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库",
            metadata={
                "source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt"
            },
        )
    ]

def test_vearch_add_texts() -> None:
    """Test end to end adding of texts."""
    texts = [
        "Vearch 是一款存储大语言模型数据的向量数据库,用于存储和快速搜索模型embedding后的向量,可用于基于个人知识库的大模型应用",
        "Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库",
        "vearch 是基于C语言,go语言开发的,并提供python接口,可以直接通过pip安装",
    ]

    metadatas = [
        {
            "source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt"
        },
        {
            "source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt"
        },
        {
            "source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt"
        },
    ]
    vearch_db = VearchDb.from_texts(
        texts=texts,
        embedding=FakeEmbeddings(),
        metadatas=metadatas,
        table_name="test_vearch",
        metadata_path="./",
    )

    vearch_db.add_texts(
        texts=["Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库"],
        metadatas={
            "source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt"
        },
    )
    result = vearch_db.similarity_search(
        "Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库", 2
    )

    assert result == [
        Document(
            page_content="Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库",
            metadata={
                "source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt"
            },
        ),
        Document(
            page_content="Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库",
            metadata={
                "source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt"
            },
        ),
    ]