Mirror of https://github.com/hwchase17/langchain.git (synced 2026-02-19 13:25:35 +00:00)

Compare commits: rlm/LLaMA2 ... rlm/text-t
67 commits
| Author | SHA1 | Date |
|---|---|---|
| | 487488f540 | |
| | ae374461a6 | |
| | 95a1b598fe | |
| | 7c4f340cc0 | |
| | 3df0f03928 | |
| | f3cc9bba5b | |
| | 1afdb40b48 | |
| | 325fdde8b4 | |
| | 2719e49718 | |
| | 02dce74b97 | |
| | d0ce374731 | |
| | 6fcba975d0 | |
| | dd0374560a | |
| | ee69116761 | |
| | 03bf6ef473 | |
| | acb82cf25e | |
| | 9d9198de0b | |
| | ef8b180d6d | |
| | c4f8fefe74 | |
| | 78d186fb44 | |
| | 85302a9ec1 | |
| | 5dbe456aae | |
| | 39f65fb1c9 | |
| | 82f4c0589c | |
| | d5400f6502 | |
| | e542bf1b6b | |
| | 2e8637da2f | |
| | 89bc73c6c3 | |
| | f5be2d525a | |
| | 5fee61a207 | |
| | b01a443ee5 | |
| | 34ec2da701 | |
| | 56c279015e | |
| | 54a8d70eb5 | |
| | 52b103dd13 | |
| | 8cabb4ee8e | |
| | a4c3a44712 | |
| | 25418b9b4d | |
| | 44d7763580 | |
| | 4188f046ec | |
| | 68599d98c2 | |
| | 0006075b08 | |
| | 8eb40b5fe2 | |
| | 85bac75729 | |
| | 85eaa4ccee | |
| | a46eef64a7 | |
| | d392e030be | |
| | 62efe1ffb9 | |
| | 76d3afaef0 | |
| | 5dd2161c4b | |
| | 720ecacb1c | |
| | 8425f33363 | |
| | 4adabd33ac | |
| | c9f1768cb9 | |
| | 84d250f781 | |
| | 7db6aabf65 | |
| | ed62984cb2 | |
| | f818ec49b8 | |
| | 1da6d92369 | |
| | a6b483dcbc | |
| | 008c7df80d | |
| | 77fc2f7644 | |
| | 2661dc94f3 | |
| | 4b6fdd7bf0 | |
| | 2038c7fd5d | |
| | dfb4baa3f9 | |
| | 12f8e87a0e | |
.github/workflows/_lint.yml (vendored) — 2 changed lines
@@ -34,7 +34,7 @@ jobs:
        - "3.8"
        - "3.11"
      steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
        with:
          # Fetch the last FETCH_DEPTH commits, so the mtime-changing script
          # can accurately set the mtimes of files modified in the last FETCH_DEPTH commits.

@@ -26,7 +26,7 @@ jobs:
        - "3.11"
      name: Pydantic v1/v2 compatibility - Python ${{ matrix.python-version }}
      steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4

      - name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
        uses: "./.github/actions/poetry_setup"
.github/workflows/_release.yml (vendored) — 2 changed lines
@@ -30,7 +30,7 @@ jobs:
      run:
        working-directory: ${{ inputs.working-directory }}
    steps:
-     - uses: actions/checkout@v3
+     - uses: actions/checkout@v4

      - name: Set up Python + Poetry ${{ env.POETRY_VERSION }}
        uses: "./.github/actions/poetry_setup"
.github/workflows/_test.yml (vendored) — 10 changed lines
@@ -26,7 +26,7 @@ jobs:
        - "3.11"
      name: Python ${{ matrix.python-version }}
      steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4

      - name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
        uses: "./.github/actions/poetry_setup"

@@ -44,6 +44,14 @@ jobs:
        shell: bash
        run: make test

+     - name: Install integration dependencies
+       shell: bash
+       run: poetry install --with=test_integration
+
+     - name: Check integration tests compile
+       shell: bash
+       run: poetry run pytest -m compile tests/integration_tests
+
      - name: Ensure the tests did not create any additional files
        shell: bash
        run: |
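The new "Check integration tests compile" step runs `pytest -m compile` over `tests/integration_tests`, i.e. it only selects tests tagged with a `compile` marker, so imports and test collection are exercised without hitting live services. A rough sketch of what such a marker-gated test might look like (the file name, test body, and marker registration below are assumptions for illustration, not taken from this diff):

# tests/integration_tests/test_compile.py (illustrative sketch)
import pytest


@pytest.mark.compile
def test_placeholder() -> None:
    """No-op test selected by `pytest -m compile`.

    Collecting this module forces the integration-test package to import,
    so syntax and import errors surface even without API credentials.
    """
    # The `compile` marker would also need to be registered in the pytest
    # configuration (a `markers` entry) to avoid unknown-marker warnings.
    pass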
.github/workflows/codespell.yml (vendored) — 2 changed lines
@@ -17,7 +17,7 @@ jobs:

    steps:
      - name: Checkout
-       uses: actions/checkout@v3
+       uses: actions/checkout@v4

      - name: Install Dependencies
        run: |
.github/workflows/langchain_ci.yml (vendored) — 2 changed lines
@@ -65,7 +65,7 @@ jobs:
        - "3.11"
      name: Python ${{ matrix.python-version }} extended tests
      steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4

      - name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
        uses: "./.github/actions/poetry_setup"

@@ -62,7 +62,7 @@ jobs:
        - "3.11"
      name: test with unpublished langchain - Python ${{ matrix.python-version }}
      steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4

      - name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
        uses: "./.github/actions/poetry_setup"

@@ -97,7 +97,7 @@ jobs:
        - "3.11"
      name: Python ${{ matrix.python-version }} extended tests
      steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4

      - name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
        uses: "./.github/actions/poetry_setup"
.github/workflows/scheduled_test.yml (vendored) — 2 changed lines
@@ -24,7 +24,7 @@ jobs:
        - "3.11"
      name: Python ${{ matrix.python-version }}
      steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4

      - name: Set up Python ${{ matrix.python-version }}
        uses: "./.github/actions/poetry_setup"
@@ -93,7 +93,7 @@ Memory refers to persisting state between calls of a chain/agent. LangChain prov

**🧐 Evaluation:**

-[BETA] Generative models are notoriously hard to evaluate with traditional metrics. One new way of evaluating them is using language models themselves to do the evaluation. LangChain provides some prompts/chains for assisting in this.
+[BETA] Generative models are notoriously hard to evaluate with traditional metrics. One new way of evaluating them is by using language models themselves to do the evaluation. LangChain provides some prompts/chains for assisting in this.

For more information on these concepts, please see our [full documentation](https://python.langchain.com).
@@ -40,6 +40,7 @@ Notebook | Description
[openai_functions_retrieval_qa....](https://github.com/langchain-ai/langchain/tree/master/cookbook/openai_functions_retrieval_qa.ipynb) | Structure response output in a question answering system by incorporating openai functions into a retrieval pipeline.
[petting_zoo.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/petting_zoo.ipynb) | Create multi-agent simulations with simulated environments using the petting zoo library.
[plan_and_execute_agent.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/plan_and_execute_agent.ipynb) | Create plan-and-execute agents that accomplish objectives by planning tasks with a language model (llm) and executing them with a separate agent.
+[press_releases.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/press_releases.ipynb) | Retrieve and query company press release data powered by [Kay.ai](https://kay.ai).
[program_aided_language_model.i...](https://github.com/langchain-ai/langchain/tree/master/cookbook/program_aided_language_model.ipynb) | Implement program-aided language models as described in the provided research paper.
[sales_agent_with_context.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/sales_agent_with_context.ipynb) | Implement a context-aware ai sales agent, salesgpt, that can have natural sales conversations, interact with other systems, and use a product knowledge base to discuss a company's offerings.
[self_query_hotel_search.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/self_query_hotel_search.ipynb) | Build a hotel room search feature with self-querying retrieval, using a specific hotel recommendation dataset.
File diff suppressed because one or more lines are too long
cookbook/press_releases.ipynb (new file, 152 lines)
@@ -0,0 +1,152 @@

Press Releases Data
=

Press Releases data powered by [Kay.ai](https://kay.ai).

> Press releases are used by companies to announce something noteworthy, including product launches, financial performance reports, partnerships, and other significant news. They are widely used by analysts to track corporate strategy, operational updates and financial performance.

Kay.ai obtains press releases of all US public companies from a variety of sources, which include the company's official press room and partnerships with various data API providers. This data is updated till Sept 30th for free access. If you want to access the real-time feed, reach out to us at hello@kay.ai or [tweet at us](https://twitter.com/vishalrohra_).

Setup
=

First you will need to install the `kay` package. You will also need an API key: you can get one for free at [https://kay.ai](https://kay.ai/). Once you have an API key, you must set it as an environment variable `KAY_API_KEY`.

In this example we're going to use the `KayAiRetriever`. Take a look at the [kay notebook](/docs/integrations/retrievers/kay) for more detailed information on the parameters that it accepts.

Examples
=

In [1]:
# Setup API keys for Kay and OpenAI
from getpass import getpass

KAY_API_KEY = getpass()
OPENAI_API_KEY = getpass()

Out:
 ········
 ········

In [2]:
import os

os.environ["KAY_API_KEY"] = KAY_API_KEY
os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY

In [3]:
from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI
from langchain.retrievers import KayAiRetriever

model = ChatOpenAI(model_name="gpt-3.5-turbo")
retriever = KayAiRetriever.create(dataset_id="company", data_types=["PressRelease"], num_contexts=6)
qa = ConversationalRetrievalChain.from_llm(model, retriever=retriever)

In [5]:
# More sample questions in the Playground on https://kay.ai
questions = [
    "How is the healthcare industry adopting generative AI tools?",
    # "What are some recent challenges faced by the renewable energy sector?",
]
chat_history = []

for question in questions:
    result = qa({"question": question, "chat_history": chat_history})
    chat_history.append((question, result["answer"]))
    print(f"-> **Question**: {question} \n")
    print(f"**Answer**: {result['answer']} \n")

Out:
-> **Question**: How is the healthcare industry adopting generative AI tools?

**Answer**: The healthcare industry is adopting generative AI tools to improve various aspects of patient care and administrative tasks. Companies like HCA Healthcare Inc, Amazon Com Inc, and Mayo Clinic have collaborated with technology providers like Google Cloud, AWS, and Microsoft to implement generative AI solutions.

HCA Healthcare is testing a nurse handoff tool that generates draft reports quickly and accurately, which nurses have shown interest in using. They are also exploring the use of Google's medically-tuned Med-PaLM 2 LLM to support caregivers in asking complex medical questions.

Amazon Web Services (AWS) has introduced AWS HealthScribe, a generative AI-powered service that automatically creates clinical documentation. However, integrating multiple AI systems into a cohesive solution requires significant engineering resources, including access to AI experts, healthcare data, and compute capacity.

Mayo Clinic is among the first healthcare organizations to deploy Microsoft 365 Copilot, a generative AI service that combines large language models with organizational data from Microsoft 365. This tool has the potential to automate tasks like form-filling, relieving administrative burdens on healthcare providers and allowing them to focus more on patient care.

Overall, the healthcare industry is recognizing the potential benefits of generative AI tools in improving efficiency, automating tasks, and enhancing patient care.

(kernel: Python 3 (ipykernel), Python 3.9.18)
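Because `ConversationalRetrievalChain` threads `chat_history` through each call, follow-up questions can build on earlier answers. A short usage sketch, not part of the notebook itself: it reuses the `qa` chain and `chat_history` list defined above, and the follow-up question is purely illustrative.

# Illustrative follow-up turn: the previous Q/A pair is already in chat_history,
# so the chain can resolve references like "those companies".
follow_up = "Which of those companies partnered with Google Cloud?"
result = qa({"question": follow_up, "chat_history": chat_history})
chat_history.append((follow_up, result["answer"]))
print(result["answer"])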
cookbook/rag_fusion.ipynb (new file, 263 lines)
@@ -0,0 +1,263 @@

# RAG Fusion

Re-implemented from [this GitHub repo](https://github.com/Raudaschl/rag-fusion); all credit to the original author.

> RAG-Fusion, a search methodology that aims to bridge the gap between traditional search paradigms and the multifaceted dimensions of human queries. Inspired by the capabilities of Retrieval Augmented Generation (RAG), this project goes a step further by employing multiple query generation and Reciprocal Rank Fusion to re-rank search results.

## Setup

For this example, we will use Pinecone and some fake data.

In [ ]:
import pinecone
from langchain.vectorstores import Pinecone
from langchain.embeddings import OpenAIEmbeddings

pinecone.init(api_key="...", environment="...")

In [ ]:
all_documents = {
    "doc1": "Climate change and economic impact.",
    "doc2": "Public health concerns due to climate change.",
    "doc3": "Climate change: A social perspective.",
    "doc4": "Technological solutions to climate change.",
    "doc5": "Policy changes needed to combat climate change.",
    "doc6": "Climate change and its impact on biodiversity.",
    "doc7": "Climate change: The science and models.",
    "doc8": "Global warming: A subset of climate change.",
    "doc9": "How climate change affects daily weather.",
    "doc10": "The history of climate change activism."
}

In [ ]:
vectorstore = Pinecone.from_texts(list(all_documents.values()), OpenAIEmbeddings(), index_name='rag-fusion')

## Define the Query Generator

We will now define a chain to do the query generation.

In [7]:
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.schema.output_parser import StrOutputParser

In [68]:
from langchain import hub

prompt = hub.pull('langchain-ai/rag-fusion-query-generation')

In [3]:
# prompt = ChatPromptTemplate.from_messages([
#     ("system", "You are a helpful assistant that generates multiple search queries based on a single input query."),
#     ("user", "Generate multiple search queries related to: {original_query}"),
#     ("user", "OUTPUT (4 queries):")
# ])

In [5]:
generate_queries = prompt | ChatOpenAI(temperature=0) | StrOutputParser() | (lambda x: x.split("\n"))

## Define the full chain

We can now put it all together and define the full chain. This chain:

1. Generates a bunch of queries
2. Looks up each query in the retriever
3. Joins all the results together using reciprocal rank fusion

Note that it does NOT do a final generation step (see the sketch at the end of this notebook for one way to add it).

In [50]:
original_query = "impact of climate change"

In [75]:
vectorstore = Pinecone.from_existing_index("rag-fusion", OpenAIEmbeddings())
retriever = vectorstore.as_retriever()

In [76]:
from langchain.load import dumps, loads

def reciprocal_rank_fusion(results: list[list], k=60):
    fused_scores = {}
    for docs in results:
        # Assumes the docs are returned in sorted order of relevance
        for rank, doc in enumerate(docs):
            doc_str = dumps(doc)
            if doc_str not in fused_scores:
                fused_scores[doc_str] = 0
            previous_score = fused_scores[doc_str]
            fused_scores[doc_str] += 1 / (rank + k)

    reranked_results = [(loads(doc), score) for doc, score in sorted(fused_scores.items(), key=lambda x: x[1], reverse=True)]
    return reranked_results

In [77]:
chain = generate_queries | retriever.map() | reciprocal_rank_fusion

In [78]:
chain.invoke({"original_query": original_query})

Out[78]:
[(Document(page_content='Climate change and economic impact.'),
  0.06558258417063283),
 (Document(page_content='Climate change: A social perspective.'),
  0.06400409626216078),
 (Document(page_content='How climate change affects daily weather.'),
  0.04787506400409626),
 (Document(page_content='Climate change and its impact on biodiversity.'),
  0.03306010928961749),
 (Document(page_content='Public health concerns due to climate change.'),
  0.016666666666666666),
 (Document(page_content='Technological solutions to climate change.'),
  0.016666666666666666),
 (Document(page_content='Policy changes needed to combat climate change.'),
  0.01639344262295082)]

(kernel: Python 3 (ipykernel), Python 3.10.1)
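The chain above stops at the fused, re-ranked document list. If you do want a final answer-generation step on top of it, a minimal sketch could look like the following; it is not part of the original notebook, it simply reuses the prompt → model → parser pattern shown elsewhere in this changeset and assumes the `chain`, `ChatOpenAI`, and `StrOutputParser` objects from the cells above.

from langchain.prompts import ChatPromptTemplate

# Hypothetical follow-up: answer the original query from the fused documents.
answer_prompt = ChatPromptTemplate.from_template(
    "Answer the question based only on the following documents:\n\n{context}\n\nQuestion: {question}"
)

def format_fused_docs(scored_docs):
    # scored_docs is the (Document, score) list returned by reciprocal_rank_fusion
    return "\n\n".join(doc.page_content for doc, _score in scored_docs)

full_rag_fusion_chain = (
    {
        "context": chain | format_fused_docs,
        "question": lambda x: x["original_query"],
    }
    | answer_prompt
    | ChatOpenAI(temperature=0)
    | StrOutputParser()
)

# full_rag_fusion_chain.invoke({"original_query": original_query})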
cookbook/retrieval_in_sql.ipynb (new file, 631 lines)
@@ -0,0 +1,631 @@

# Incorporating semantic similarity in tabular databases

In this notebook we will cover how to run semantic search over a specific table column within a single SQL query, combining tabular query with RAG.

### Overall workflow

1. Generating embeddings for a specific column
2. Storing the embeddings in a new column (if the column has low cardinality, it's better to use another table containing the unique values and their embeddings)
3. Querying using standard SQL queries with the [PGVector](https://github.com/pgvector/pgvector) extension, which allows using:
   * L2 distance (`<->`)
   * Cosine distance (`<=>`, or cosine similarity using `1 - <=>`)
   * Inner product (`<#>`)
4. Running a standard SQL query

### Requirements

We will need a PostgreSQL database with the [pgvector](https://github.com/pgvector/pgvector) extension enabled.

For this example, we will use a `Chinook` database using a local PostgreSQL server.

In [1]:
import os
import getpass

os.environ["OPENAI_API_KEY"] = os.environ.get('OPENAI_API_KEY') or getpass.getpass("OpenAI API Key:")

In [ ]:
from langchain.sql_database import SQLDatabase
from langchain.chat_models import ChatOpenAI

CONNECTION_STRING = "postgresql+psycopg2://postgres:test@localhost:5432/vectordb"  # Replace with your own
db = SQLDatabase.from_uri(CONNECTION_STRING)
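The notebook assumes pgvector is already enabled on this database; the `ALTER TABLE` statement that adds the vector column appears below but is commented out. A hedged sketch of the one-time preparation, using the `db.run` helper from the cell above (the `CREATE EXTENSION` statement is standard pgvector setup and is an assumption here, not part of the original notebook):

# One-time database preparation (sketch; run against a database you control).
db.run("CREATE EXTENSION IF NOT EXISTS vector;")
db.run('ALTER TABLE "Track" ADD COLUMN "embeddings" vector;')

# Besides the L2 operator (<->) used below, pgvector also exposes cosine
# distance (<=>) and inner product (<#>), e.g.:
#   SELECT "Name" FROM "Track" ORDER BY "embeddings" <=> '[...]' LIMIT 5;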
### Embedding the song titles

For this example, we will run queries based on the semantic meaning of song titles.

In order to do this, let's start by adding a new column in the table for storing the embeddings:

In [9]:
# db.run('ALTER TABLE "Track" ADD COLUMN "embeddings" vector;')

Let's generate the embedding for each *track title* and store it as a new column in our "Track" table.

In [15]:
from langchain.embeddings import OpenAIEmbeddings

embeddings_model = OpenAIEmbeddings()

tracks = db.run('SELECT "Name" FROM "Track"')
song_titles = [s[0] for s in eval(tracks)]
title_embeddings = embeddings_model.embed_documents(song_titles)
len(title_embeddings)

Now let's insert the embeddings into the new column of our table:

In [34]:
from tqdm import tqdm

for i in tqdm(range(len(title_embeddings))):
    title = song_titles[i].replace("'", "''")
    embedding = title_embeddings[i]
    sql_command = f'UPDATE "Track" SET "embeddings" = ARRAY{embedding} WHERE "Name" =' + f"'{title}'"
    db.run(sql_command)

We can test the semantic search by running the following query:

In [21]:
embeded_title = embeddings_model.embed_query("hope about the future")
query = 'SELECT "Track"."Name" FROM "Track" WHERE "Track"."embeddings" IS NOT NULL ORDER BY "embeddings" <-> ' + f"'{embeded_title}' LIMIT 5"
db.run(query)

Out[21]:
'[("Tomorrow\'s Dream",), (\'Remember Tomorrow\',), (\'Remember Tomorrow\',), (\'The Best Is Yet To Come\',), ("Thinking \'Bout Tomorrow",)]'

We can see that the song titles are conceptually similar to our search term `"hope about the future"`.

### Creating the SQL Chain

Let's start by defining useful functions to get info from the database and run the query:

In [4]:
def get_schema(_):
    return db.get_table_info()

def run_query(query):
    return db.run(query)

Now let's build the **prompt** we will use:

In [21]:
from langchain.prompts import ChatPromptTemplate

template = """You are a Postgres expert. Given an input question, first create a syntactically correct Postgres query to run, then look at the results of the query and return the answer to the input question.
Unless the user specifies in the question a specific number of examples to obtain, query for at most 5 results using the LIMIT clause as per Postgres. You can order the results to return the most informative data in the database.
Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in double quotes (") to denote them as delimited identifiers.
Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.
Pay attention to use date('now') function to get the current date, if the question involves "today".

You can use an extra extension which allows you to run semantic similarity using <-> operator on tables containing columns named "embeddings".
<-> operator can ONLY be used on embeddings columns.
The embeddings value for a given row typically represents the semantic meaning of that row.
The vector represents an embedding representation of the question, given below.
Do NOT fill in the vector values directly, but rather specify a `[search_word]` placeholder, which should contain the word that would be embedded for filtering.
For example, if the user asks for songs about 'the feeling of loneliness' the query could be:
'SELECT "[whatever_table_name]"."SongName" FROM "[whatever_table_name]" ORDER BY "embeddings" <-> '[loneliness]' LIMIT 5'

Use the following format:

Question: <Question here>
SQLQuery: <SQL Query to run>
SQLResult: <Result of the SQLQuery>
Answer: <Final answer here>

Only use the following tables:

{schema}

QUESTION: {question}
SQLQuery:

"""
prompt = ChatPromptTemplate.from_messages([
    ("system", "Given an input question, convert it to a SQL query. No pre-amble."),
    ("human", template)
])

And we can create the chain using **[LangChain Expression Language](https://python.langchain.com/docs/expression_language/)**:

In [22]:
from langchain.chat_models import ChatOpenAI
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnablePassthrough

db = SQLDatabase.from_uri(CONNECTION_STRING)  # We reconnect to db so the new columns are loaded as well.
llm = ChatOpenAI(model_name='gpt-4', temperature=0)

sql_query_chain = (
    RunnablePassthrough.assign(schema=get_schema)
    | prompt
    | llm.bind(stop=["\nSQLResult:"])
    | StrOutputParser()
)

Out (stderr):
/Users/manuelsoria/miniconda3/envs/auto-gpt/lib/python3.8/site-packages/langchain/utilities/sql_database.py:112: SAWarning: Did not recognize type 'vector' of column 'title_embedding'
/Users/manuelsoria/miniconda3/envs/auto-gpt/lib/python3.8/site-packages/langchain/utilities/sql_database.py:112: SAWarning: Did not recognize type 'vector' of column 'embeddings'

This chain simply generates the query. Now we will create the full chain that also handles the execution and the final result for the user:

In [23]:
import re
from langchain.schema.runnable import RunnableLambda

# Inject the embedding for any words within brackets
def replace_brackets(match):
    words_inside_brackets = match.group(1).split(', ')
    embedded_words = [str(embeddings_model.embed_query(word)) for word in words_inside_brackets]
    return "', '".join(embedded_words)

def get_query(query):
    sql_query = re.sub(r'\[([\w\s,]+)\]', replace_brackets, query)
    return sql_query

template = """Based on the table schema below, question, sql query, and sql response, write a natural language response:
{schema}

Question: {question}
SQL Query: {query}
SQL Response: {response}"""

prompt_response = ChatPromptTemplate.from_messages([
    ("system", "Given an input question and SQL response, convert it to a natural language answer. No pre-amble."),
    ("human", template)
])

full_chain = (
    RunnablePassthrough.assign(query=sql_query_chain)
    | RunnablePassthrough.assign(
        schema=get_schema,
        response=RunnableLambda(lambda x: db.run(get_query(x["query"]))),
    )
    | prompt_response
    | llm
)

## Using the Chain

### Example 1: Filtering a column based on semantic meaning

Let's say we want to retrieve songs that express a `deep feeling of dispair`, but filtered by genre:

In [27]:
full_chain.invoke({"question": "Which are the 5 rock songs with titles about deep feeling of dispair?"})

Out[27]:
AIMessage(content="The 5 rock songs with titles about deep feeling of despair are 'Sea Of Sorrow', 'Surrender', 'Indifference', 'Hard Luck Woman', and 'Desire'.")

What is substantially different in implementing this method is that we have combined:
- Semantic search (songs that have titles with some semantic meaning)
- Traditional tabular querying (running JOIN statements to filter tracks based on genre)

This is something we _could_ potentially achieve using metadata filtering, but it's more complex to do so (we would need to use a vector database containing the embeddings, and use metadata filtering based on genre).

However, for other use cases metadata filtering **wouldn't be enough**.

### Example 2: Combining filters

In [29]:
full_chain.invoke({"question": "I want to know the 3 albums which have the most amount of songs in the top 150 saddest songs"})

Out[29]:
AIMessage(content="The three albums which have the most amount of songs in the top 150 saddest songs are 'International Superhits' with 5 songs, 'Ten' with 4 songs, and 'Album Of The Year' with 3 songs.")

So we have the 3 albums with the most songs among the top 150 saddest ones. This **wouldn't** be possible using only standard metadata filtering. Without this _hybrid query_, we would need some postprocessing to get the result.

Another similar example:

In [30]:
full_chain.invoke({"question": "I need the 6 albums with shortest title, as long as they contain songs which are in the 20 saddest song list."})

Out[30]:
AIMessage(content="The 6 albums with the shortest titles that contain songs which are in the 20 saddest song list are 'Ten', 'Core', 'Big Ones', 'One By One', 'Black Album', and 'Miles Ahead'.")

Let's see what the query looks like to double-check:

In [32]:
print(sql_query_chain.invoke({"question": "I need the 6 albums with shortest title, as long as they contain songs which are in the 20 saddest song list."}))

Out:
WITH "SadSongs" AS (
    SELECT "TrackId" FROM "Track"
    ORDER BY "embeddings" <-> '[sad]' LIMIT 20
),
"SadAlbums" AS (
    SELECT DISTINCT "AlbumId" FROM "Track"
    WHERE "TrackId" IN (SELECT "TrackId" FROM "SadSongs")
)
SELECT "Album"."Title" FROM "Album"
WHERE "AlbumId" IN (SELECT "AlbumId" FROM "SadAlbums")
ORDER BY "title_len" ASC
LIMIT 6

### Example 3: Combining two separate semantic searches

One interesting aspect of this approach, which is **substantially different from using standard RAG**, is that we can even **combine** two semantic search filters:
- _Get 5 saddest songs..._
- _**...obtained from albums with "lovely" titles**_

This could generalize to **any kind of combined RAG** (paragraphs discussing _X_ topic from books about _Y_, replies to a tweet about _ABC_ topic that express _XYZ_ feeling).

We will combine semantic search on songs and album titles, so we need to do the same for the `Album` table:
1. Generate the embeddings
2. Add them to the table as a new column (which we need to add to the table)

In [60]:
# db.run('ALTER TABLE "Album" ADD COLUMN "embeddings" vector;')

In [43]:
albums = db.run('SELECT "Title" FROM "Album"')
album_titles = [title[0] for title in eval(albums)]
album_title_embeddings = embeddings_model.embed_documents(album_titles)
for i in tqdm(range(len(album_title_embeddings))):
    album_title = album_titles[i].replace("'", "''")
    album_embedding = album_title_embeddings[i]
    sql_command = f'UPDATE "Album" SET "embeddings" = ARRAY{album_embedding} WHERE "Title" =' + f"'{album_title}'"
    db.run(sql_command)

Out (stderr):
100%|██████████| 347/347 [00:01<00:00, 179.64it/s]

In [45]:
embeded_title = embeddings_model.embed_query("hope about the future")
query = 'SELECT "Album"."Title" FROM "Album" WHERE "Album"."embeddings" IS NOT NULL ORDER BY "embeddings" <-> ' + f"'{embeded_title}' LIMIT 5"
db.run(query)

Out[45]:
"[('Realize',), ('Morning Dance',), ('Into The Light',), ('New Adventures In Hi-Fi',), ('Miles Ahead',)]"

Now we can combine both filters:

In [54]:
db = SQLDatabase.from_uri(CONNECTION_STRING)  # We reconnect to db so the new columns are loaded as well.

In [49]:
full_chain.invoke({"question": "I want to know songs about breakouts obtained from top 5 albums about love"})

Out[49]:
AIMessage(content='The songs about breakouts obtained from the top 5 albums about love are \'Royal Orleans\', "Nobody\'s Fault But Mine", \'Achilles Last Stand\', \'For Your Life\', and \'Hots On For Nowhere\'.')

This is something **different** that **couldn't be achieved** using standard metadata filtering over a vectordb.

(kernel: Python 3 (ipykernel), Python 3.9.16)
351
cookbook/rewrite.ipynb
Normal file
351
cookbook/rewrite.ipynb
Normal file
@@ -0,0 +1,351 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "260629f9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Rewrite-Retrieve-Read\n",
|
||||
"\n",
|
||||
"**Rewrite-Retrieve-Read** is a method proposed in the paper [Query Rewriting for Retrieval-Augmented Large Language Models](https://arxiv.org/pdf/2305.14283.pdf)\n",
|
||||
"\n",
|
||||
"> Because the original query can not be always optimal to retrieve for the LLM, especially in the real world... we first prompt an LLM to rewrite the queries, then conduct retrieval-augmented reading\n",
|
||||
"\n",
|
||||
"We show how you can easily do that with LangChain Expression Language"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "eda93712",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Baseline\n",
|
||||
"\n",
|
||||
"Baseline RAG (**Retrieve-and-read**) can be done like the following:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "1d2edbd2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from operator import itemgetter\n",
|
||||
"\n",
|
||||
"from langchain.prompts import ChatPromptTemplate\n",
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.schema.output_parser import StrOutputParser\n",
|
||||
"from langchain.schema.runnable import RunnablePassthrough, RunnableLambda\n",
|
||||
"from langchain.utilities import DuckDuckGoSearchAPIWrapper"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "86a46aa9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"template = \"\"\"Answer the users question based only on the following context:\n",
|
||||
"\n",
|
||||
"<context>\n",
|
||||
"{context}\n",
|
||||
"</context>\n",
|
||||
"\n",
|
||||
"Question: {question}\n",
|
||||
"\"\"\"\n",
|
||||
"prompt = ChatPromptTemplate.from_template(template)\n",
|
||||
"\n",
|
||||
"model = ChatOpenAI(temperature=0)\n",
|
||||
"\n",
|
||||
"search = DuckDuckGoSearchAPIWrapper()\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def retriever(query):\n",
|
||||
" return search.run(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "8566d48e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = (\n",
|
||||
" {\"context\": retriever, \"question\": RunnablePassthrough()} \n",
|
||||
" | prompt \n",
|
||||
" | model \n",
|
||||
" | StrOutputParser()\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "5c57f9ee",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"simple_query = \"what is langchain?\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "37c5f962",
|
||||
"metadata": {
|
||||
"scrolled": false
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"LangChain is a powerful and versatile Python library that enables developers and researchers to create, experiment with, and analyze language models and agents. It simplifies the development of language-based applications by providing a suite of features for artificial general intelligence. It can be used to build chatbots, perform document analysis and summarization, and streamline interaction with various large language model providers. LangChain's unique proposition is its ability to create logical links between one or more language models, known as Chains. It is an open-source library that offers a generic interface to foundation models and allows prompt management and integration with other components and tools.\""
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.invoke(simple_query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "23bdb9bd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"While this is fine for well formatted queries, it can break down for more complicated queries"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "8df6a814",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"distracted_query = \"man that sam bankman fried trial was crazy! what is langchain?\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "16d7db64",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Based on the given context, there is no information provided about \"langchain.\"'"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.invoke(distracted_query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0b4f8b93",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This is because the retriever does a bad job with these \"distracted\" queries"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "3439d8dc",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Business She\\'s the star witness against Sam Bankman-Fried. Her testimony was explosive Gary Wang, who co-founded both FTX and Alameda Research, said Bankman-Fried directed him to change a... The Verge, following the trial\\'s Oct. 4 kickoff: \"Is Sam Bankman-Fried\\'s Defense Even Trying to Win?\". CBS Moneywatch, from Thursday: \"Sam Bankman-Fried\\'s Lawyer Struggles to Poke ... Sam Bankman-Fried, FTX\\'s founder, responded with a single word: \"Oof.\". Less than a year later, Mr. Bankman-Fried, 31, is on trial in federal court in Manhattan, fighting criminal charges ... July 19, 2023. A U.S. judge on Wednesday overruled objections by Sam Bankman-Fried\\'s lawyers and allowed jurors in the FTX founder\\'s fraud trial to see a profane message he sent to a reporter days ... Sam Bankman-Fried, who was once hailed as a virtuoso in cryptocurrency trading, is on trial over the collapse of FTX, the financial exchange he founded. Bankman-Fried is accused of...'"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"retriever(distracted_query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7eb748ac",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Rewrite-Retrieve-Read Implementation\n",
|
||||
"\n",
|
||||
"The main part is a rewriter to rewrite the search query"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "88ae702e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"template = \"\"\"Provide a better search query for \\\n",
|
||||
"web search engine to answer the given question, end \\\n",
|
||||
"the queries with ’**’. Question: \\\n",
|
||||
"{x} Answer:\"\"\"\n",
|
||||
"rewrite_prompt = ChatPromptTemplate.from_template(template)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "184e1bcb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain import hub\n",
|
||||
"\n",
|
||||
"rewrite_prompt = hub.pull(\"langchain-ai/rewrite\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "a4c23d40",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Provide a better search query for web search engine to answer the given question, end the queries with ’**’. Question {x} Answer:\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(rewrite_prompt.template)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "f55cd010",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Parser to remove the `**`\n",
|
||||
"\n",
|
||||
"def _parse(text):\n",
|
||||
" return text.strip(\"**\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "c9c34bef",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"rewriter = rewrite_prompt | ChatOpenAI(temperature=0) | StrOutputParser() | _parse"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "fb17fb3d",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'What is the definition and purpose of Langchain?'"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"rewriter.invoke({\"x\": distracted_query})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "f83edb09",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"rewrite_retrieve_read_chain = (\n",
|
||||
" {\n",
|
||||
" \"context\": {\"x\": RunnablePassthrough()} | rewriter | retriever,\n",
|
||||
" \"question\": RunnablePassthrough()} \n",
|
||||
" | prompt \n",
|
||||
" | model \n",
|
||||
" | StrOutputParser()\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "43096322",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Based on the given context, LangChain is an open-source framework designed to simplify the creation of applications using large language models (LLMs). It enables LLM models to generate responses based on up-to-date online information and simplifies the organization of large volumes of data for easy access by LLMs. LangChain offers a standard interface for chains, integrations with other tools, and end-to-end chains for common applications. It is a robust library that streamlines interaction with various LLM providers. LangChain\\'s unique proposition is its ability to create logical links between one or more LLMs, known as Chains. It is an AI framework with features that simplify the development of language-based applications and offers a suite of features for artificial general intelligence. However, the context does not provide any information about the \"sam bankman fried trial\" mentioned in the question.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"rewrite_retrieve_read_chain.invoke(distracted_query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "59874b4f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
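Note: the `rewrite_retrieve_read_chain` cell above closes its input-mapping dict on the same line as the `"question"` entry, which is easy to misread. A minimal equivalent sketch with conventional formatting, assuming the `rewriter`, `retriever`, `prompt`, `model`, and `distracted_query` objects defined earlier in the notebook:

```python
# Same chain as in the notebook, reformatted for readability (behavior unchanged):
# the dict is coerced into a parallel runnable whose outputs feed the prompt.
rewrite_retrieve_read_chain = (
    {
        # rewrite the query, then retrieve context for the rewritten query
        "context": {"x": RunnablePassthrough()} | rewriter | retriever,
        # pass the original question through unchanged
        "question": RunnablePassthrough(),
    }
    | prompt
    | model
    | StrOutputParser()
)

rewrite_retrieve_read_chain.invoke(distracted_query)
```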
||||
335
cookbook/stepback-qa.ipynb
Normal file
@@ -0,0 +1,335 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "83ef724e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Step-Back Prompting (Question-Answering)\n",
|
||||
"\n",
|
||||
"One prompting technique called \"Step-Back\" prompting can improve performance on complex questions by first asking a \"step back\" question. This can be combined with regular question-answering applications by then doing retrieval on both the original and step-back question.\n",
|
||||
"\n",
|
||||
"Read the paper [here](https://arxiv.org/abs/2310.06117)\n",
|
||||
"\n",
|
||||
"See an excelent blog post on this by Cobus Greyling [here](https://cobusgreyling.medium.com/a-new-prompt-engineering-technique-has-been-introduced-called-step-back-prompting-b00e8954cacb)\n",
|
||||
"\n",
|
||||
"In this cookbook we will replicate this technique. We modify the prompts used slightly to work better with chat models."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 85,
|
||||
"id": "67b5cdac",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.prompts import ChatPromptTemplate, FewShotChatMessagePromptTemplate\n",
|
||||
"from langchain.schema.output_parser import StrOutputParser\n",
|
||||
"from langchain.schema.runnable import RunnableLambda"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 86,
|
||||
"id": "7e017c44",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Few Shot Examples\n",
|
||||
"examples = [\n",
|
||||
" {\n",
|
||||
" \"input\": \"Could the members of The Police perform lawful arrests?\",\n",
|
||||
" \"output\": \"what can the members of The Police do?\"\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"input\": \"Jan Sindel’s was born in what country?\", \n",
|
||||
" \"output\": \"what is Jan Sindel’s personal history?\"\n",
|
||||
" },\n",
|
||||
"]\n",
|
||||
"# We now transform these to example messages\n",
|
||||
"example_prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" (\"ai\", \"{output}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"few_shot_prompt = FewShotChatMessagePromptTemplate(\n",
|
||||
" example_prompt=example_prompt,\n",
|
||||
" examples=examples,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 87,
|
||||
"id": "206415ee",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"prompt = ChatPromptTemplate.from_messages([\n",
|
||||
" (\"system\", \"\"\"You are an expert at world knowledge. Your task is to step back and paraphrase a question to a more generic step-back question, which is easier to answer. Here are a few examples:\"\"\"),\n",
|
||||
" # Few shot examples\n",
|
||||
" few_shot_prompt,\n",
|
||||
" # New question\n",
|
||||
" (\"user\", \"{question}\"),\n",
|
||||
"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 88,
|
||||
"id": "d643a85c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"question_gen = prompt | ChatOpenAI(temperature=0) | StrOutputParser()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 182,
|
||||
"id": "5ba21b2a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"question = \"was chatgpt around while trump was president?\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 183,
|
||||
"id": "5992c8ca",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'when was ChatGPT developed?'"
|
||||
]
|
||||
},
|
||||
"execution_count": 183,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"question_gen.invoke({\"question\": question})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 190,
|
||||
"id": "32667424",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.utilities import DuckDuckGoSearchAPIWrapper\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"search = DuckDuckGoSearchAPIWrapper(max_results=4)\n",
|
||||
"\n",
|
||||
"def retriever(query):\n",
|
||||
" return search.run(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 191,
|
||||
"id": "ffc28c91",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'This includes content about former President Donald Trump. According to further tests, ChatGPT successfully wrote poems admiring all recent U.S. presidents, but failed when we entered a query for ... On Wednesday, a Twitter user posted screenshots of him asking OpenAI\\'s chatbot, ChatGPT, to write a positive poem about former President Donald Trump, to which the chatbot declined, citing it ... While impressive in many respects, ChatGPT also has some major flaws. ... [President\\'s Name],\" refused to write a poem about ex-President Trump, but wrote one about President Biden ... During the Trump administration, Altman gained new attention as a vocal critic of the president. It was against that backdrop that he was rumored to be considering a run for California governor.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 191,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"retriever(question)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 192,
|
||||
"id": "00c77443",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"Will Douglas Heaven March 3, 2023 Stephanie Arnett/MITTR | Envato When OpenAI launched ChatGPT, with zero fanfare, in late November 2022, the San Francisco-based artificial-intelligence company... ChatGPT, which stands for Chat Generative Pre-trained Transformer, is a large language model -based chatbot developed by OpenAI and launched on November 30, 2022, which enables users to refine and steer a conversation towards a desired length, format, style, level of detail, and language. ChatGPT is an artificial intelligence (AI) chatbot built on top of OpenAI's foundational large language models (LLMs) like GPT-4 and its predecessors. This chatbot has redefined the standards of... June 4, 2023 ⋅ 4 min read 124 SHARES 13K At the end of 2022, OpenAI introduced the world to ChatGPT. Since its launch, ChatGPT hasn't shown significant signs of slowing down in developing new...\""
|
||||
]
|
||||
},
|
||||
"execution_count": 192,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"retriever(question_gen.invoke({\"question\": question}))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 193,
|
||||
"id": "b257bc06",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# response_prompt_template = \"\"\"You are an expert of world knowledge. I am going to ask you a question. Your response should be comprehensive and not contradicted with the following context if they are relevant. Otherwise, ignore them if they are not relevant.\n",
|
||||
"\n",
|
||||
"# {normal_context}\n",
|
||||
"# {step_back_context}\n",
|
||||
"\n",
|
||||
"# Original Question: {question}\n",
|
||||
"# Answer:\"\"\"\n",
|
||||
"# response_prompt = ChatPromptTemplate.from_template(response_prompt_template)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 203,
|
||||
"id": "f48c65b2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain import hub\n",
|
||||
"\n",
|
||||
"response_prompt = hub.pull(\"langchain-ai/stepback-answer\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 204,
|
||||
"id": "97a6d5ab",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = {\n",
|
||||
" # Retrieve context using the normal question\n",
|
||||
" \"normal_context\": RunnableLambda(lambda x: x['question']) | retriever,\n",
|
||||
" # Retrieve context using the step-back question\n",
|
||||
" \"step_back_context\": question_gen | retriever,\n",
|
||||
" # Pass on the question\n",
|
||||
" \"question\": lambda x: x[\"question\"]\n",
|
||||
"} | response_prompt | ChatOpenAI(temperature=0) | StrOutputParser()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 205,
|
||||
"id": "ce554cb0",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"No, ChatGPT was not around while Donald Trump was president. ChatGPT was launched on November 30, 2022, which is after Donald Trump's presidency. The context provided mentions that during the Trump administration, Altman, the CEO of OpenAI, gained attention as a vocal critic of the president. This suggests that ChatGPT was not developed or available during that time.\""
|
||||
]
|
||||
},
|
||||
"execution_count": 205,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.invoke({\"question\": question})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a9fb8dd2",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Baseline"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 206,
|
||||
"id": "00db8a15",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"response_prompt_template = \"\"\"You are an expert of world knowledge. I am going to ask you a question. Your response should be comprehensive and not contradicted with the following context if they are relevant. Otherwise, ignore them if they are not relevant.\n",
|
||||
"\n",
|
||||
"{normal_context}\n",
|
||||
"\n",
|
||||
"Original Question: {question}\n",
|
||||
"Answer:\"\"\"\n",
|
||||
"response_prompt = ChatPromptTemplate.from_template(response_prompt_template)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 207,
|
||||
"id": "06335ebb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = {\n",
|
||||
" # Retrieve context using the normal question (only the first 3 results)\n",
|
||||
" \"normal_context\": RunnableLambda(lambda x: x['question']) | retriever,\n",
|
||||
" # Pass on the question\n",
|
||||
" \"question\": lambda x: x[\"question\"]\n",
|
||||
"} | response_prompt | ChatOpenAI(temperature=0) | StrOutputParser()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 208,
|
||||
"id": "15e0e741",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"Yes, ChatGPT was around while Donald Trump was president. However, it is important to note that the specific context you provided mentions that ChatGPT refused to write a positive poem about former President Donald Trump. This suggests that while ChatGPT was available during Trump's presidency, it may have had limitations or biases in its responses regarding him.\""
|
||||
]
|
||||
},
|
||||
"execution_count": 208,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.invoke({\"question\": question})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "e7b9e5d6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
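For debugging, a minimal sketch (not part of the notebook) that surfaces the step-back question and both retrieval contexts the combined chain feeds into `response_prompt`, assuming the `question_gen`, `retriever`, and `question` objects defined in the cells above:

```python
# Illustrative only: run the pieces of the step-back chain separately to
# inspect what the answer prompt will see.
step_back_question = question_gen.invoke({"question": question})
print("Step-back question:", step_back_question)
print("Normal context (truncated):", retriever(question)[:300])
print("Step-back context (truncated):", retriever(step_back_question)[:300])
```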
||||
@@ -13,6 +13,7 @@ cp -r . ../_dist
|
||||
cd ../_dist
|
||||
poetry run python scripts/model_feat_table.py
|
||||
poetry run nbdoc_build --srcdir docs
|
||||
cp ../cookbook/README.md src/pages/cookbook.mdx
|
||||
poetry run python scripts/generate_api_reference_links.py
|
||||
yarn install
|
||||
yarn start
|
||||
|
||||
@@ -107,7 +107,7 @@
|
||||
"# Now let's try with fallbacks to Anthropic\n",
|
||||
"with patch('openai.ChatCompletion.create', side_effect=RateLimitError()):\n",
|
||||
" try:\n",
|
||||
" print(llm.invoke(\"Why did the the chicken cross the road?\"))\n",
|
||||
" print(llm.invoke(\"Why did the chicken cross the road?\"))\n",
|
||||
" except:\n",
|
||||
" print(\"Hit error\")"
|
||||
]
|
||||
|
||||
119
docs/docs/expression_language/how_to/generators.ipynb
Normal file
@@ -0,0 +1,119 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Custom generator functions\n",
|
||||
"\n",
|
||||
"You can use generator functions (ie. functions that use the `yield` keyword, and behave like iterators) in a LCEL pipeline.\n",
|
||||
"\n",
|
||||
"The signature of these generators should be `Iterator[Input] -> Iterator[Output]`. Or for async generators: `AsyncIterator[Input] -> AsyncIterator[Output]`.\n",
|
||||
"\n",
|
||||
"These are useful for:\n",
|
||||
"- implementing a custom output parser\n",
|
||||
"- modifying the output of a previous step, while preserving streaming capabilities\n",
|
||||
"\n",
|
||||
"Let's implement a custom output parser for comma-separated lists."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"lion, tiger, wolf, gorilla, panda\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from typing import Iterator, List\n",
|
||||
"\n",
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.prompts.chat import ChatPromptTemplate\n",
|
||||
"from langchain.schema.output_parser import StrOutputParser\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_template(\n",
|
||||
" \"Write a comma-separated list of 5 animals similar to: {animal}\"\n",
|
||||
")\n",
|
||||
"model = ChatOpenAI(temperature=0.0)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"str_chain = prompt | model | StrOutputParser()\n",
|
||||
"\n",
|
||||
"print(str_chain.invoke({\"animal\": \"bear\"}))\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# This is a custom parser that splits an iterator of llm tokens\n",
|
||||
"# into a list of strings separated by commas\n",
|
||||
"def split_into_list(input: Iterator[str]) -> Iterator[List[str]]:\n",
|
||||
" # hold partial input until we get a comma\n",
|
||||
" buffer = \"\"\n",
|
||||
" for chunk in input:\n",
|
||||
" # add current chunk to buffer\n",
|
||||
" buffer += chunk\n",
|
||||
" # while there are commas in the buffer\n",
|
||||
" while \",\" in buffer:\n",
|
||||
" # split buffer on comma\n",
|
||||
" comma_index = buffer.index(\",\")\n",
|
||||
" # yield everything before the comma\n",
|
||||
" yield [buffer[:comma_index].strip()]\n",
|
||||
" # save the rest for the next iteration\n",
|
||||
" buffer = buffer[comma_index + 1 :]\n",
|
||||
" # yield the last chunk\n",
|
||||
" yield [buffer.strip()]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"['lion', 'tiger', 'wolf', 'gorilla', 'panda']\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"list_chain = str_chain | split_into_list\n",
|
||||
"\n",
|
||||
"print(list_chain.invoke({\"animal\": \"bear\"}))\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
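The notebook above notes that async generators (`AsyncIterator[Input] -> AsyncIterator[Output]`) are also supported but only demonstrates the sync parser. A minimal async sketch, assuming LCEL coerces async generator functions the same way it coerces `split_into_list`, and reusing `str_chain` from the notebook:

```python
from typing import AsyncIterator, List


# Async counterpart of split_into_list: the same comma-buffering logic,
# but consuming an async stream of tokens.
async def asplit_into_list(input: AsyncIterator[str]) -> AsyncIterator[List[str]]:
    buffer = ""
    async for chunk in input:
        buffer += chunk
        while "," in buffer:
            comma_index = buffer.index(",")
            yield [buffer[:comma_index].strip()]
            buffer = buffer[comma_index + 1 :]
    yield [buffer.strip()]


alist_chain = str_chain | asplit_into_list

# In a notebook cell (which allows top-level await):
# await alist_chain.ainvoke({"animal": "bear"})
```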
||||
File diff suppressed because it is too large
Load Diff
@@ -109,7 +109,7 @@
|
||||
"# Now let's try with fallbacks to Anthropic\n",
|
||||
"with patch('openai.ChatCompletion.create', side_effect=RateLimitError()):\n",
|
||||
" try:\n",
|
||||
" print(llm.invoke(\"Why did the the chicken cross the road?\"))\n",
|
||||
" print(llm.invoke(\"Why did the chicken cross the road?\"))\n",
|
||||
" except:\n",
|
||||
" print(\"Hit error\")"
|
||||
]
|
||||
|
||||
@@ -148,7 +148,7 @@
|
||||
"\n",
|
||||
"Inference speed is a challenge when running models locally (see above).\n",
|
||||
"\n",
|
||||
"To minimize latency, it is desiable to run models locally on GPU, which ships with many consumer laptops [e.g., Apple devices](https://www.apple.com/newsroom/2022/06/apple-unveils-m2-with-breakthrough-performance-and-capabilities/).\n",
|
||||
"To minimize latency, it is desirable to run models locally on GPU, which ships with many consumer laptops [e.g., Apple devices](https://www.apple.com/newsroom/2022/06/apple-unveils-m2-with-breakthrough-performance-and-capabilities/).\n",
|
||||
"\n",
|
||||
"And even with GPU, the available GPU memory bandwidth (as noted above) is important.\n",
|
||||
"\n",
|
||||
@@ -254,7 +254,7 @@
|
||||
"\n",
|
||||
"`f16_kv`: whether the model should use half-precision for the key/value cache\n",
|
||||
"* Value: True\n",
|
||||
"* Meaning: The model will use half-precision, which can be more memory efficient; Metal only support True."
|
||||
"* Meaning: The model will use half-precision, which can be more memory efficient; Metal only supports True."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -291,7 +291,7 @@
|
||||
"id": "f56f5168",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The console log will show the the below to indicate Metal was enabled properly from steps above:\n",
|
||||
"The console log will show the below to indicate Metal was enabled properly from steps above:\n",
|
||||
"```\n",
|
||||
"ggml_metal_init: allocating\n",
|
||||
"ggml_metal_init: using MPS\n",
|
||||
|
||||
@@ -229,7 +229,7 @@
|
||||
"- fasttext (recommended)\n",
|
||||
"- langdetect\n",
|
||||
"\n",
|
||||
"From our exprience *fasttext* performs a bit better, but you should verify it on your use case."
|
||||
"From our experience *fasttext* performs a bit better, but you should verify it on your use case."
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -21,7 +21,7 @@
|
||||
"\n",
|
||||
"In this notebook, we will look at building a basic system for question answering, based on private data. Before feeding the LLM with this data, we need to protect it so that it doesn't go to an external API (e.g. OpenAI, Anthropic). Then, after receiving the model output, we would like the data to be restored to its original form. Below you can observe an example flow of this QA system:\n",
|
||||
"\n",
|
||||
"<img src=\"/img/qa_privacy_protection.png\" width=\"800\"/>\n",
|
||||
"<img src=\"/img/qa_privacy_protection.png\" width=\"900\"/>\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"In the following notebook, we will not go into the details of how the anonymizer works. If you are interested, please visit [this part of the documentation](https://python.langchain.com/docs/guides/privacy/presidio_data_anonymization/).\n",
|
||||
@@ -839,6 +839,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"documents = [Document(page_content=document_content)]\n",
|
||||
"\n",
|
||||
"text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)\n",
|
||||
"chunks = text_splitter.split_documents(documents)\n",
|
||||
"\n",
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -7,7 +7,7 @@
|
||||
"source": [
|
||||
"# GCP Vertex AI \n",
|
||||
"\n",
|
||||
"Note: This is seperate from the Google PaLM integration. Google has chosen to offer an enterprise version of PaLM through GCP, and this supports the models made available through there. \n",
|
||||
"Note: This is separate from the Google PaLM integration. Google has chosen to offer an enterprise version of PaLM through GCP, and this supports the models made available through there. \n",
|
||||
"\n",
|
||||
"By default, Google Cloud [does not use](https://cloud.google.com/vertex-ai/docs/generative-ai/data-governance#foundation_model_development) Customer Data to train its foundation models as part of Google Cloud`s AI/ML Privacy Commitment. More details about how Google processes data can also be found in [Google's Customer Data Processing Addendum (CDPA)](https://cloud.google.com/terms/data-processing-addendum).\n",
|
||||
"\n",
|
||||
|
||||
160
docs/docs/integrations/chat/hunyuan.ipynb
Normal file
@@ -0,0 +1,160 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Tencent Hunyuan\n",
|
||||
"\n",
|
||||
"Hunyuan chat model API by Tencent. For more information, see [https://cloud.tencent.com/document/product/1729](https://cloud.tencent.com/document/product/1729)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-10-19T10:20:38.718834Z",
|
||||
"start_time": "2023-10-19T10:20:38.264050Z"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chat_models import ChatHunyuan\n",
|
||||
"from langchain.schema import HumanMessage"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-10-19T10:19:53.529876Z",
|
||||
"start_time": "2023-10-19T10:19:53.526210Z"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chat = ChatHunyuan(\n",
|
||||
" hunyuan_app_id='YOUR_APP_ID',\n",
|
||||
" hunyuan_secret_id='YOUR_SECRET_ID',\n",
|
||||
" hunyuan_secret_key='YOUR_SECRET_KEY',\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-10-19T10:19:56.054289Z",
|
||||
"start_time": "2023-10-19T10:19:53.531078Z"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": "AIMessage(content=\"J'aime programmer.\")"
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chat([\n",
|
||||
" HumanMessage(content='You are a helpful assistant that translates English to French.Translate this sentence from English to French. I love programming.')\n",
|
||||
"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"## For ChatHunyuan with Streaming"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chat = ChatHunyuan(\n",
|
||||
" hunyuan_app_id='YOUR_APP_ID',\n",
|
||||
" hunyuan_secret_id='YOUR_SECRET_ID',\n",
|
||||
" hunyuan_secret_key='YOUR_SECRET_KEY',\n",
|
||||
" streaming=True,\n",
|
||||
")"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-10-19T10:20:41.507720Z",
|
||||
"start_time": "2023-10-19T10:20:41.496456Z"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": "AIMessageChunk(content=\"J'aime programmer.\")"
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chat([\n",
|
||||
" HumanMessage(content='You are a helpful assistant that translates English to French.Translate this sentence from English to French. I love programming.')\n",
|
||||
"])"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-10-19T10:20:46.275673Z",
|
||||
"start_time": "2023-10-19T10:20:44.241097Z"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"source": [],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"ExecuteTime": {
|
||||
"start_time": "2023-10-19T10:19:56.233477Z"
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
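The cells above bake the system instruction into a single `HumanMessage`. A minimal sketch (illustrative, not from the notebook) of the same translation request expressed as an LCEL chain with a proper system message, reusing the `chat` object configured above:

```python
from langchain.prompts import ChatPromptTemplate
from langchain.schema.output_parser import StrOutputParser

# Illustrative only: reuse the ChatHunyuan instance configured above.
translate_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful assistant that translates English to French."),
        ("human", "Translate this sentence from English to French. {sentence}"),
    ]
)
translate_chain = translate_prompt | chat | StrOutputParser()

print(translate_chain.invoke({"sentence": "I love programming."}))
```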
||||
121
docs/docs/integrations/chat/pai_eas_chat_endpoint.ipynb
Normal file
@@ -0,0 +1,121 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# AliCloud PAI EAS\n",
|
||||
"Machine Learning Platform for AI of Alibaba Cloud is a machine learning or deep learning engineering platform intended for enterprises and developers. It provides easy-to-use, cost-effective, high-performance, and easy-to-scale plug-ins that can be applied to various industry scenarios. With over 140 built-in optimization algorithms, Machine Learning Platform for AI provides whole-process AI engineering capabilities including data labeling (PAI-iTAG), model building (PAI-Designer and PAI-DSW), model training (PAI-DLC), compilation optimization, and inference deployment (PAI-EAS). PAI-EAS supports different types of hardware resources, including CPUs and GPUs, and features high throughput and low latency. It allows you to deploy large-scale complex models with a few clicks and perform elastic scale-ins and scale-outs in real time. It also provides a comprehensive O&M and monitoring system."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup Eas Service\n",
|
||||
"\n",
|
||||
"One who want to use eas llms must set up eas service first. When the eas service is launched, eas_service_rul and eas_service token can be got. Users can refer to https://www.alibabacloud.com/help/en/pai/user-guide/service-deployment/ for more information. Try to set environment variables to init eas service url and token:\n",
|
||||
"\n",
|
||||
"```base\n",
|
||||
"export EAS_SERVICE_URL=XXX\n",
|
||||
"export EAS_SERVICE_TOKEN=XXX\n",
|
||||
"```\n",
|
||||
"or run as follow codes:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"from langchain.chat_models.base import HumanMessage\n",
|
||||
"from langchain.chat_models import PaiEasChatEndpoint\n",
|
||||
"os.environ[\"EAS_SERVICE_URL\"] = \"Your_EAS_Service_URL\"\n",
|
||||
"os.environ[\"EAS_SERVICE_TOKEN\"] = \"Your_EAS_Service_Token\"\n",
|
||||
"chat = PaiEasChatEndpoint(\n",
|
||||
" eas_service_url=os.environ[\"EAS_SERVICE_URL\"], \n",
|
||||
" eas_service_token=os.environ[\"EAS_SERVICE_TOKEN\"]\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Run Chat Model\n",
|
||||
"You can use the default settings to call eas service as follows:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"output = chat([HumanMessage(content=\"write a funny joke\")])\n",
|
||||
"print(\"output:\", output)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Or, call eas service with new inference params:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"kwargs = {\"temperature\": 0.8, \"top_p\": 0.8, \"top_k\": 5}\n",
|
||||
"output = chat([HumanMessage(content=\"write a funny joke\")], **kwargs)\n",
|
||||
"print(\"output:\", output)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Or, run a stream call to get a stream response:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\n",
|
||||
"outputs = chat.stream([HumanMessage(content=\"hi\")], streaming=True)\n",
|
||||
"for output in outputs:\n",
|
||||
" print(\"stream output:\", output)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.11"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
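The cells above call the endpoint directly with a message list. As a hedged sketch (the prompt text and chain wiring here are illustrative, not from the notebook), the same `chat` object can also be dropped into an LCEL pipeline like the other chat models in this changeset:

```python
from langchain.prompts import ChatPromptTemplate
from langchain.schema.output_parser import StrOutputParser

# Illustrative only: wire the PaiEasChatEndpoint configured above into a chain.
joke_prompt = ChatPromptTemplate.from_template("Write a funny joke about {topic}.")
joke_chain = joke_prompt | chat | StrOutputParser()

print(joke_chain.invoke({"topic": "machine learning"}))
```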
@@ -52,9 +52,9 @@
|
||||
"id": "8533ab63-d437-492a-aaec-ccca31167bf2",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 1. Select dataset\n",
|
||||
"## 1. Select a dataset\n",
|
||||
"\n",
|
||||
"This notebook fine-tunes a model directly on a selecting which runs to fine-tune on. You will often curate these from traced runs. You can learn more about LangSmith datasets in the docs [docs](https://docs.smith.langchain.com/evaluation/datasets).\n",
|
||||
"This notebook fine-tunes a model directly on selecting which runs to fine-tune on. You will often curate these from traced runs. You can learn more about LangSmith datasets in the docs [docs](https://docs.smith.langchain.com/evaluation/datasets).\n",
|
||||
"\n",
|
||||
"For the sake of this tutorial, we will upload an existing dataset here that you can use."
|
||||
]
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
"id": "735455a6-f82e-4252-b545-27385ef883f4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
" Telegram\n",
|
||||
"# Telegram\n",
|
||||
"\n",
|
||||
"This notebook shows how to use the Telegram chat loader. This class helps map exported Telegram conversations to LangChain chat messages.\n",
|
||||
"\n",
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
"source": [
|
||||
"# WhatsApp\n",
|
||||
"\n",
|
||||
"This notebook shows how to use the WhatsApp chat loader. This class helps map exported Telegram conversations to LangChain chat messages.\n",
|
||||
"This notebook shows how to use the WhatsApp chat loader. This class helps map exported WhatsApp conversations to LangChain chat messages.\n",
|
||||
"\n",
|
||||
"The process has three steps:\n",
|
||||
"1. Export the chat conversations to computer\n",
|
||||
|
||||
@@ -49,7 +49,7 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"`BibtexLoader` has these arguments:\n",
|
||||
"- `file_path`: the path the the `.bib` bibtex file\n",
|
||||
"- `file_path`: the path of the `.bib` bibtex file\n",
|
||||
"- optional `max_docs`: default=None, i.e. not limit. Use it to limit number of retrieved documents.\n",
|
||||
"- optional `max_content_chars`: default=4000. Use it to limit the number of characters in a single document.\n",
|
||||
"- optional `load_extra_meta`: default=False. By default only the most important fields from the bibtex entries: `Published` (publication year), `Title`, `Authors`, `Summary`, `Journal`, `Keywords`, and `URL`. If True, it will also try to load return `entry_id`, `note`, `doi`, and `links` fields. \n",
|
||||
|
||||
93
docs/docs/integrations/llms/pai_eas_endpoint.ipynb
Normal file
@@ -0,0 +1,93 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# AliCloud PAI EAS\n",
|
||||
"Machine Learning Platform for AI of Alibaba Cloud is a machine learning or deep learning engineering platform intended for enterprises and developers. It provides easy-to-use, cost-effective, high-performance, and easy-to-scale plug-ins that can be applied to various industry scenarios. With over 140 built-in optimization algorithms, Machine Learning Platform for AI provides whole-process AI engineering capabilities including data labeling (PAI-iTAG), model building (PAI-Designer and PAI-DSW), model training (PAI-DLC), compilation optimization, and inference deployment (PAI-EAS). PAI-EAS supports different types of hardware resources, including CPUs and GPUs, and features high throughput and low latency. It allows you to deploy large-scale complex models with a few clicks and perform elastic scale-ins and scale-outs in real time. It also provides a comprehensive O&M and monitoring system."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.llms.pai_eas_endpoint import PaiEasEndpoint\n",
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"from langchain.chains import LLMChain\n",
|
||||
"\n",
|
||||
"template = \"\"\"Question: {question}\n",
|
||||
"\n",
|
||||
"Answer: Let's think step by step.\"\"\"\n",
|
||||
"\n",
|
||||
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"One who want to use eas llms must set up eas service first. When the eas service is launched, eas_service_rul and eas_service token can be got. Users can refer to https://www.alibabacloud.com/help/en/pai/user-guide/service-deployment/ for more information,"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"os.environ[\"EAS_SERVICE_URL\"] = \"Your_EAS_Service_URL\"\n",
|
||||
"os.environ[\"EAS_SERVICE_TOKEN\"] = \"Your_EAS_Service_Token\"\n",
|
||||
"llm = PaiEasEndpoint(eas_service_url=os.environ[\"EAS_SERVICE_URL\"], eas_service_token=os.environ[\"EAS_SERVICE_TOKEN\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"' Thank you for asking! However, I must respectfully point out that the question contains an error. Justin Bieber was born in 1994, and the Super Bowl was first played in 1967. Therefore, it is not possible for any NFL team to have won the Super Bowl in the year Justin Bieber was born.\\n\\nI hope this clarifies things! If you have any other questions, please feel free to ask.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
|
||||
"\n",
|
||||
"question = \"What NFL team won the Super Bowl in the year Justin Beiber was born?\"\n",
|
||||
"llm_chain.run(question)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.11"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -1,272 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Google Cloud Enterprise Search\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"[Enterprise Search](https://cloud.google.com/enterprise-search) is a part of the Generative AI App Builder suite of tools offered by Google Cloud.\n",
|
||||
"\n",
|
||||
"Gen AI App Builder lets developers, even those with limited machine learning skills, quickly and easily tap into the power of Google’s foundation models, search expertise, and conversational AI technologies to create enterprise-grade generative AI applications. \n",
|
||||
"\n",
|
||||
"Enterprise Search lets organizations quickly build generative AI powered search engines for customers and employees.Enterprise Search is underpinned by a variety of Google Search technologies, including semantic search, which helps deliver more relevant results than traditional keyword-based search techniques by using natural language processing and machine learning techniques to infer relationships within the content and intent from the user’s query input. Enterprise Search also benefits from Google’s expertise in understanding how users search and factors in content relevance to order displayed results. \n",
|
||||
"\n",
|
||||
"Google Cloud offers Enterprise Search via Gen App Builder in Google Cloud Console and via an API for enterprise workflow integration. \n",
|
||||
"\n",
|
||||
"This notebook demonstrates how to configure Enterprise Search and use the Enterprise Search retriever. The Enterprise Search retriever encapsulates the [Generative AI App Builder Python client library](https://cloud.google.com/generative-ai-app-builder/docs/libraries#client-libraries-install-python) and uses it to access the Enterprise Search [Search Service API](https://cloud.google.com/python/docs/reference/discoveryengine/latest/google.cloud.discoveryengine_v1beta.services.search_service)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Install pre-requisites\n",
|
||||
"\n",
|
||||
"You need to install the `google-cloud-discoverengine` package to use the Enterprise Search retriever."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install google-cloud-discoveryengine"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Configure access to Google Cloud and Google Cloud Enterprise Search\n",
|
||||
"\n",
|
||||
"Enterprise Search is generally available for the allowlist (which means customers need to be approved for access) as of June 6, 2023. Contact your Google Cloud sales team for access and pricing details. We are previewing additional features that are coming soon to the generally available offering as part of our [Trusted Tester](https://cloud.google.com/ai/earlyaccess/join?hl=en) program. Sign up for [Trusted Tester](https://cloud.google.com/ai/earlyaccess/join?hl=en) and contact your Google Cloud sales team for an expedited trial.\n",
|
||||
"\n",
|
||||
"Before you can run this notebook you need to:\n",
|
||||
"- Set or create a Google Cloud project and turn on Gen App Builder\n",
|
||||
"- Create and populate an unstructured data store\n",
|
||||
"- Set credentials to access `Enterprise Search API`"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Set or create a Google Cloud poject and turn on Gen App Builder\n",
|
||||
"\n",
|
||||
"Follow the instructions in the [Enterprise Search Getting Started guide](https://cloud.google.com/generative-ai-app-builder/docs/before-you-begin) to set/create a GCP project and enable Gen App Builder.\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Create and populate an unstructured data store\n",
|
||||
"\n",
|
||||
"[Use Google Cloud Console to create an unstructured data store](https://cloud.google.com/generative-ai-app-builder/docs/create-engine-es#unstructured-data) and populate it with the example PDF documents from the `gs://cloud-samples-data/gen-app-builder/search/alphabet-investor-pdfs` Cloud Storage folder. Make sure to use the `Cloud Storage (without metadata)` option."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Set credentials to access Enterprise Search API\n",
|
||||
"\n",
|
||||
"The [Gen App Builder client libraries](https://cloud.google.com/generative-ai-app-builder/docs/libraries) used by the Enterprise Search retriever provide high-level language support for authenticating to Gen App Builder programmatically. Client libraries support [Application Default Credentials (ADC)](https://cloud.google.com/docs/authentication/application-default-credentials); the libraries look for credentials in a set of defined locations and use those credentials to authenticate requests to the API. With ADC, you can make credentials available to your application in a variety of environments, such as local development or production, without needing to modify your application code.\n",
|
||||
"\n",
|
||||
"If running in [Google Colab](https://colab.google) authenticate with `google.colab.google.auth` otherwise follow one of the [supported methods](https://cloud.google.com/docs/authentication/application-default-credentials) to make sure that you Application Default Credentials are properly set."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import sys\n",
|
||||
"\n",
|
||||
"if \"google.colab\" in sys.modules:\n",
|
||||
" from google.colab import auth as google_auth\n",
|
||||
"\n",
|
||||
" google_auth.authenticate_user()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Configure and use the Enterprise Search retriever\n",
|
||||
"\n",
|
||||
"The Enterprise Search retriever is implemented in the `langchain.retriever.GoogleCloudEntepriseSearchRetriever` class. The `get_relevant_documents` method returns a list of `langchain.schema.Document` documents where the `page_content` field of each document is populated the document content.\n",
|
||||
"Depending on the data type used in Enterprise search (structured or unstructured) the `page_content` field is populated as follows:\n",
|
||||
"- Structured data source: either an `extractive segment` or an `extractive answer` that matches a query. The `metadata` field is populated with metadata (if any) of the document from which the segments or answers were extracted.\n",
|
||||
"- Unstructured data source: a string json containing all the fields returned from the structured data source. The `metadata` field is populated with metadata (if any) of the document \n",
|
||||
"\n",
|
||||
"### Only for Unstructured data sources:\n",
|
||||
"An extractive answer is verbatim text that is returned with each search result. It is extracted directly from the original document. Extractive answers are typically displayed near the top of web pages to provide an end user with a brief answer that is contextually relevant to their query. Extractive answers are available for website and unstructured search.\n",
|
||||
"\n",
|
||||
"An extractive segment is verbatim text that is returned with each search result. An extractive segment is usually more verbose than an extractive answer. Extractive segments can be displayed as an answer to a query, and can be used to perform post-processing tasks and as input for large language models to generate answers or new text. Extractive segments are available for unstructured search.\n",
|
||||
"\n",
|
||||
"For more information about extractive segments and extractive answers refer to [product documentation](https://cloud.google.com/generative-ai-app-builder/docs/snippets).\n",
|
||||
"\n",
|
||||
"When creating an instance of the retriever you can specify a number of parameters that control which Enterprise data store to access and how a natural language query is processed, including configurations for extractive answers and segments.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"### The mandatory parameters are:\n",
|
||||
"\n",
|
||||
"- `project_id` - Your Google Cloud PROJECT_ID\n",
|
||||
"- `search_engine_id` - The ID of the data store you want to use. \n",
|
||||
"\n",
|
||||
"The `project_id` and `search_engine_id` parameters can be provided explicitly in the retriever's constructor or through the environment variables - `PROJECT_ID` and `SEARCH_ENGINE_ID`.\n",
|
||||
"\n",
|
||||
"You can also configure a number of optional parameters, including:\n",
|
||||
"\n",
|
||||
"- `max_documents` - The maximum number of documents used to provide extractive segments or extractive answers\n",
|
||||
"- `get_extractive_answers` - By default, the retriever is configured to return extractive segments. Set this field to `True` to return extractive answers. This is used only when `engine_data_type` set to 0 (unstructured) \n",
|
||||
"- `max_extractive_answer_count` - The maximum number of extractive answers returned in each search result.\n",
|
||||
" At most 5 answers will be returned. This is used only when `engine_data_type` set to 0 (unstructured) \n",
|
||||
"- `max_extractive_segment_count` - The maximum number of extractive segments returned in each search result.\n",
|
||||
" Currently one segment will be returned. This is used only when `engine_data_type` set to 0 (unstructured) \n",
|
||||
"- `filter` - The filter expression that allows you filter the search results based on the metadata associated with the documents in the searched data store. \n",
|
||||
"- `query_expansion_condition` - Specification to determine under which conditions query expansion should occur.\n",
|
||||
" 0 - Unspecified query expansion condition. In this case, server behavior defaults to disabled.\n",
|
||||
" 1 - Disabled query expansion. Only the exact search query is used, even if SearchResponse.total_size is zero.\n",
|
||||
" 2 - Automatic query expansion built by the Search API.\n",
|
||||
"- `engine_data_type` - Defines the enterprise search data type\n",
|
||||
" 0 - Unstructured data \n",
|
||||
" 1 - Structured data\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Configure and use the retriever for **unstructured** data with extractve segments "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.retrievers import GoogleCloudEnterpriseSearchRetriever\n",
|
||||
"\n",
|
||||
"PROJECT_ID = \"<YOUR PROJECT ID>\" # Set to your Project ID\n",
|
||||
"SEARCH_ENGINE_ID = \"<YOUR SEARCH ENGINE ID>\" # Set to your data store ID"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"retriever = GoogleCloudEnterpriseSearchRetriever(\n",
|
||||
" project_id=PROJECT_ID,\n",
|
||||
" search_engine_id=SEARCH_ENGINE_ID,\n",
|
||||
" max_documents=3,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query = \"What are Alphabet's Other Bets?\"\n",
|
||||
"\n",
|
||||
"result = retriever.get_relevant_documents(query)\n",
|
||||
"for doc in result:\n",
|
||||
" print(doc)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Configure and use the retriever for **unstructured** data with extractve answers "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"retriever = GoogleCloudEnterpriseSearchRetriever(\n",
|
||||
" project_id=PROJECT_ID,\n",
|
||||
" search_engine_id=SEARCH_ENGINE_ID,\n",
|
||||
" max_documents=3,\n",
|
||||
" max_extractive_answer_count=3,\n",
|
||||
" get_extractive_answers=True,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query = \"What are Alphabet's Other Bets?\"\n",
|
||||
"\n",
|
||||
"result = retriever.get_relevant_documents(query)\n",
|
||||
"for doc in result:\n",
|
||||
" print(doc)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Configure and use the retriever for **structured** data with extractve answers "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"retriever = GoogleCloudEnterpriseSearchRetriever(\n",
|
||||
" project_id=PROJECT_ID,\n",
|
||||
" search_engine_id=SEARCH_ENGINE_ID,\n",
|
||||
" max_documents=3,\n",
|
||||
" engine_data_type=1\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"result = retriever.get_relevant_documents(query)\n",
|
||||
"for doc in result:\n",
|
||||
" print(doc)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "base",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.10"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -30,7 +30,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install google-cloud-discoveryengine"
|
||||
"! pip install google-cloud-discoveryengine\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -80,7 +80,7 @@
|
||||
"if \"google.colab\" in sys.modules:\n",
|
||||
" from google.colab import auth as google_auth\n",
|
||||
"\n",
|
||||
" google_auth.authenticate_user()"
|
||||
" google_auth.authenticate_user()\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -90,12 +90,13 @@
|
||||
"## Configure and use the Vertex AI Search retriever\n",
|
||||
"\n",
|
||||
"The Vertex AI Search retriever is implemented in the `langchain.retriever.GoogleVertexAISearchRetriever` class. The `get_relevant_documents` method returns a list of `langchain.schema.Document` documents where the `page_content` field of each document is populated the document content.\n",
|
||||
"Depending on the data type used in Vertex AI Search (structured or unstructured) the `page_content` field is populated as follows:\n",
|
||||
"Depending on the data type used in Vertex AI Search (website, structured or unstructured) the `page_content` field is populated as follows:\n",
|
||||
"\n",
|
||||
"- Structured data source: either an `extractive segment` or an `extractive answer` that matches a query. The `metadata` field is populated with metadata (if any) of the document from which the segments or answers were extracted.\n",
|
||||
"- Unstructured data source: a string json containing all the fields returned from the structured data source. The `metadata` field is populated with metadata (if any) of the document\n",
|
||||
"- Website with advanced indexing: an `extractive answer` that matches a query. The `metadata` field is populated with metadata (if any) of the document from which the segments or answers were extracted.\n",
|
||||
"- Unstructured data source: either an `extractive segment` or an `extractive answer` that matches a query. The `metadata` field is populated with metadata (if any) of the document from which the segments or answers were extracted.\n",
|
||||
"- Structured data source: a string json containing all the fields returned from the structured data source. The `metadata` field is populated with metadata (if any) of the document\n",
|
||||
"\n",
|
||||
"### Only for Unstructured data sources:\n",
|
||||
"### Extractive answers & extractive segments\n",
|
||||
"\n",
|
||||
"An extractive answer is verbatim text that is returned with each search result. It is extracted directly from the original document. Extractive answers are typically displayed near the top of web pages to provide an end user with a brief answer that is contextually relevant to their query. Extractive answers are available for website and unstructured search.\n",
|
||||
"\n",
|
||||
@@ -136,6 +137,7 @@
|
||||
"- `engine_data_type` - Defines the Vertex AI Search data type\n",
|
||||
" - `0` - Unstructured data\n",
|
||||
" - `1` - Structured data\n",
|
||||
" - `2` - Website data with [Advanced Website Indexing](https://cloud.google.com/generative-ai-app-builder/docs/about-advanced-features#advanced-website-indexing)\n",
|
||||
"\n",
|
||||
"### Migration guide for `GoogleCloudEnterpriseSearchRetriever`\n",
|
||||
"\n",
|
||||
@@ -165,7 +167,7 @@
|
||||
"\n",
|
||||
"PROJECT_ID = \"<YOUR PROJECT ID>\" # Set to your Project ID\n",
|
||||
"LOCATION_ID = \"<YOUR LOCATION>\" # Set to your data store location\n",
|
||||
"DATA_STORE_ID = \"<YOUR DATA STORE ID>\" # Set to your data store ID"
|
||||
"DATA_STORE_ID = \"<YOUR DATA STORE ID>\" # Set to your data store ID\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -179,7 +181,7 @@
|
||||
" location_id=LOCATION_ID,\n",
|
||||
" data_store_id=DATA_STORE_ID,\n",
|
||||
" max_documents=3,\n",
|
||||
")"
|
||||
")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -192,7 +194,7 @@
|
||||
"\n",
|
||||
"result = retriever.get_relevant_documents(query)\n",
|
||||
"for doc in result:\n",
|
||||
" print(doc)"
|
||||
" print(doc)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -219,7 +221,7 @@
|
||||
"\n",
|
||||
"result = retriever.get_relevant_documents(query)\n",
|
||||
"for doc in result:\n",
|
||||
" print(doc)"
|
||||
" print(doc)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -245,21 +247,44 @@
|
||||
"\n",
|
||||
"result = retriever.get_relevant_documents(query)\n",
|
||||
"for doc in result:\n",
|
||||
" print(doc)"
|
||||
" print(doc)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Configure and use the retrieve for multi-turn search"
|
||||
"### Configure and use the retriever for **website** data with Advanced Website Indexing\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"retriever = GoogleVertexAISearchRetriever(\n",
|
||||
" project_id=PROJECT_ID,\n",
|
||||
" location_id=LOCATION_ID,\n",
|
||||
" data_store_id=DATA_STORE_ID,\n",
|
||||
" max_documents=3,\n",
|
||||
" max_extractive_answer_count=3,\n",
|
||||
" get_extractive_answers=True,\n",
|
||||
" engine_data_type=2,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"result = retriever.get_relevant_documents(query)\n",
|
||||
"for doc in result:\n",
|
||||
" print(doc)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Search with follow-ups is [based](https://cloud.google.com/generative-ai-app-builder/docs/multi-turn-search) on generative AI models and it is different from the regular unstructured data search."
|
||||
"### Configure and use the retriever for multi-turn search\n",
|
||||
"\n",
|
||||
"[Search with follow-ups](https://cloud.google.com/generative-ai-app-builder/docs/multi-turn-search) is based on generative AI models and it is different from the regular unstructured data search.\n"
|
||||
]
|
||||
},
|
||||
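The multi-turn code cell itself is not shown in this hunk; a minimal sketch, assuming the `GoogleVertexAIMultiTurnSearchRetriever` class shipped with this integration and the placeholders defined earlier, would look like:

```python
from langchain.retrievers import GoogleVertexAIMultiTurnSearchRetriever

# Assumed class name; follow-up questions are resolved against the
# conversation, unlike the single-shot searches above.
retriever = GoogleVertexAIMultiTurnSearchRetriever(
    project_id=PROJECT_ID,
    location_id=LOCATION_ID,
    data_store_id=DATA_STORE_ID,
)

result = retriever.get_relevant_documents(query)
for doc in result:
    print(doc)
```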
{
|
||||
@@ -276,7 +301,7 @@
|
||||
"\n",
|
||||
"result = retriever.get_relevant_documents(query)\n",
|
||||
"for doc in result:\n",
|
||||
" print(doc)"
|
||||
" print(doc)\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
|
||||
@@ -64,13 +64,13 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
"execution_count": 3,
|
||||
"id": "b4d4d386-2a6b-4942-863e-9202f5a9f1d6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.retrievers import KayAiRetriever\n",
|
||||
"import os\n",
|
||||
"from langchain.retrievers import KayAiRetriever\n",
|
||||
"from kay.rag.retrievers import KayRetriever\n",
|
||||
"os.environ[\"KAY_API_KEY\"] = KAY_API_KEY\n",
|
||||
"retriever = KayAiRetriever.create(dataset_id=\"company\", data_types=[\"10-K\", \"10-Q\", \"PressRelease\"], num_contexts=3)\n",
|
||||
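A hedged usage sketch for the retriever created above; the question is illustrative, and the `title` metadata key is the one visible in the output below:

```python
docs = retriever.get_relevant_documents(
    "What partnerships did Roku announce recently?"
)
for doc in docs:
    print(doc.metadata["title"])
```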
@@ -79,19 +79,19 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 21,
|
||||
"execution_count": 4,
|
||||
"id": "04ee2d6b-c2ab-4e15-8a8b-afaf6ef8c0f6",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='Company Name: ROKU INC\\nCompany Industry: CABLE & OTHER PAY TELEVISION SERVICES\\nArticle Title: Roku and FreeWheel Announce Strategic Partnership to Bring Roku’s Leading Ad Tech to FreeWheel Customers\\nText: Additionally, eMarketer Link: https://cts.businesswire.com/ct/CT?id=smartlink&url=https%3A%2F%2Fwww.insiderintelligence.com%2Finsights%2Favod-more-than-50-percent-of-us-digital-video-viewers%2F&esheet=53451144&newsitemid=20230712907788&lan=en-US&anchor=eMarketer&index=4&md5=b64dea72bcf6b6379474462602781d83 projects 57% of U.S. digital video users will stream an advertising-based video on demand (AVOD) service this year.\\nHaving solutions aimed at driving greater interoperability and automation will help accelerate this growth.\\nKey highlights of this collaboration include:\\nStreamlined Integration: Roku has now integrated its demand application programming interface (dAPI) with FreeWheel s TV platform. Roku s demand API gives publishers direct, automatic and real-time access to more advertiser demand. This enhanced integration allows for streamlined ad operation workflows and better inventory quality control, both of which will improve publisher yield and revenue.\\nSeamless Data Targeting: Publishers can now use Roku platform signals to enable advertisers to target audiences and measure campaign performance without relying on cookies. Additionally, FreeWheel and Roku will rely on data clean room technology to enable the activation of additional data sets providing better measurement and monetization to publishers and agencies.', metadata={'_additional': {'id': '962b79e0-f9d1-43ae-9f7a-8a9b42bc7a9a'}, 'chunk_type': 'text', 'chunk_years_mentioned': [], 'company_name': 'ROKU INC', 'company_sic_code_description': 'CABLE & OTHER PAY TELEVISION SERVICES', 'data_source': 'PressRelease', 'data_source_link': 'https://www.nasdaq.com/press-release/roku-and-freewheel-announce-strategic-partnership-to-bring-rokus-leading-ad-tech-to', 'data_source_publish_date': '2023-07-12T00:00:00Z', 'data_source_uid': 'a46f309c-705d-3946-96db-87aa4e73261f', 'title': 'ROKU INC | Roku and FreeWheel Announce Strategic Partnership to Bring Roku’s Leading Ad Tech to FreeWheel Customers'}),\n",
|
||||
" Document(page_content='Company Name: ROKU INC \\n Company Industry: CABLE & OTHER PAY TELEVISION SERVICES \\n Form Title: 10-K 2022-FY \\n Form Section: Risk Factors \\n Text: nd the Note Regarding Forward Looking Statements.This section of this Annual Report generally discusses fiscal years 2022 and 2021 and year to year comparisons between those years.Discussions of fiscal year 2020 and year to year comparisons between fiscal years 2021 and 2020 that are not included in this Annual Report can be found in Management\\'s Discussion and Analysis of Financial Condition and Results of Operations in Part II, Item 7 of our Annual Report for the fiscal year ended December 31, 2021 filed with the SEC on February 18, 2022.Overview Effective as of the fourth quarter of fiscal 2022, we reorganized our reportable segments to better align with management\\'s reporting of information reviewed by the Chief Operating Decision Maker (\"CODM\") for each segment.We renamed our \"player\" segment to \"devices\" which now includes our licensing arrangements with service operators and licensed Roku TV partners in addition to sales of our streaming players, audio products, smart home products and Roku branded TVs that will be designed, made, and sold by us in 2023.Our historical segment information is recast to conform to our new presentation in our financial statements and accompanying notes included in Item 8 of this Annual Report.Our two reportable segments are the platform segment and the devices segment.', metadata={'_additional': {'id': 'a76c5fed-5d63-45a7-b63a-2c30e05140fc'}, 'chunk_type': 'text', 'chunk_years_mentioned': [2020, 2021, 2022, 2023], 'company_name': 'ROKU INC', 'company_sic_code_description': 'CABLE & OTHER PAY TELEVISION SERVICES', 'data_source': '10-K', 'data_source_link': 'https://www.sec.gov/Archives/edgar/data/1428439/000142843923000007', 'data_source_publish_date': '2022-01-01T00:00:00Z', 'data_source_uid': '0001428439-23-000007', 'title': 'ROKU INC | 10-K 2022-FY '}),\n",
|
||||
" Document(page_content='Company Name: ROKU INC \\n Company Industry: CABLE & OTHER PAY TELEVISION SERVICES \\n Form Title: 10-Q 2023-Q1 \\n Form Section: Risk Factors \\n Text: Our current and potential partners include TV brands, cable and satellite companies, and telecommunication providers.Under these license arrangements, we generally have limited or no control over the amount and timing of resources these entities dedicate to the relationship.In the past, our licensed Roku TV partners have failed to meet their forecasts and anticipated market launch dates for distributing Roku TV models, and they may fail to meet their forecasts or such launches in the future.If our licensed Roku TV partners or service operator partners fail to meet their forecasts or such launches for distributing licensed streaming devices or choose to deploy competing streaming solutions within their product lines, our business may be harmed.We depend on a small number of content publishers for a majority of our streaming hours, and if we fail to maintain these relationships, our business could be harmed.*Historically, a small number of content publishers have accounted for a significant portion of the hours streamed on our platform.In the three months ended March 31, 2023, the top three streaming services represented over 50% of all hours streamed in the period.If, for any reason, we cease distributing channels that have historically streamed a large percentage of the aggregate streaming hours on our platform, our streaming hours, our active accounts, or Roku streaming device sales may be adversely affected, and our business may be harmed.', metadata={'_additional': {'id': '2a92b2bb-02a0-4e15-8b64-d7e04078a205'}, 'chunk_type': 'text', 'chunk_years_mentioned': [2023], 'company_name': 'ROKU INC', 'company_sic_code_description': 'CABLE & OTHER PAY TELEVISION SERVICES', 'data_source': '10-Q', 'data_source_link': 'https://www.sec.gov/Archives/edgar/data/1428439/000142843923000017', 'data_source_publish_date': '2023-01-01T00:00:00Z', 'data_source_uid': '0001428439-23-000017', 'title': 'ROKU INC | 10-Q 2023-Q1 '})]"
|
||||
"[Document(page_content='Company Name: ROKU INC\\nCompany Industry: CABLE & OTHER PAY TELEVISION SERVICES\\nArticle Title: Roku Is One of Fast Company\\'s Most Innovative Companies for 2023\\nText: The company launched several new devices, including the Roku Voice Remote Pro; upgraded its most premium player, the Roku Ultra; and expanded its products with a new line of smart home devices such as video doorbells, lights, and plugs integrated into the Roku ecosystem. Recently, the company announced it will launch Roku-branded TVs this spring to offer more choice and innovation to both consumers and Roku TV partners. Throughout 2022, Roku also updated its operating system (OS), the only OS purpose-built for TV, with more personalization features and enhancements across search, audio, and content discovery, launching The Buzz, Sports, and What to Watch, which provides tailored movie and TV recommendations on the Home Screen Menu. The company also released a new feature for streamers, Photo Streams, that allows customers to display and share photo albums through Roku streaming devices. Additionally, Roku unveiled Shoppable Ads, a new ad innovation that makes shopping on TV streaming as easy as it is on social media. Viewers simply press \"OK\" with their Roku remote on a shoppable ad and proceed to check out with their shipping and payment details pre-populated from Roku Pay, its proprietary payments platform. Walmart was the exclusive retailer for the launch, a first-of-its-kind partnership.', metadata={'chunk_type': 'text', 'chunk_years_mentioned': [2022, 2023], 'company_name': 'ROKU INC', 'company_sic_code_description': 'CABLE & OTHER PAY TELEVISION SERVICES', 'data_source': 'PressRelease', 'data_source_link': 'https://newsroom.roku.com/press-releases', 'data_source_publish_date': '2023-03-02T09:30:00-04:00', 'data_source_uid': '963d4a81-f58e-3093-af68-987fb1758c15', 'title': \"ROKU INC | Roku Is One of Fast Company's Most Innovative Companies for 2023\"}),\n",
|
||||
" Document(page_content='Company Name: ROKU INC\\nCompany Industry: CABLE & OTHER PAY TELEVISION SERVICES\\nArticle Title: Roku Is One of Fast Company\\'s Most Innovative Companies for 2023\\nText: Finally, Roku grew its content offering with thousands of apps and watching options for users, including content on The Roku Channel, a top five app by reach and engagement on the Roku platform in the U.S. in 2022. In November, Roku released its first feature film, \"WEIRD: The Weird Al\\' Yankovic Story,\" a biopic starring Daniel Radcliffe. Throughout the year, The Roku Channel added FAST channels from NBCUniversal and the National Hockey League, as well as an exclusive AMC channel featuring its signature drama \"Mad Men.\" This year, the company announced a deal with Warner Bros. Discovery, launching new channels that will include \"Westworld\" and \"The Bachelor,\" in addition to 2,000 hours of on-demand content. Read more about Roku\\'s journey here . Fast Company\\'s Most Innovative Companies issue (March/April 2023) is available online here , as well as in-app via iTunes and on newsstands beginning March 14. About Roku, Inc.\\nRoku pioneered streaming to the TV. We connect users to the streaming content they love, enable content publishers to build and monetize large audiences, and provide advertisers with unique capabilities to engage consumers. Roku streaming players and TV-related audio devices are available in the U.S. and in select countries through direct retail sales and licensing arrangements with service operators. Roku TV models are available in the U.S. and select countries through licensing arrangements with TV OEM brands.', metadata={'chunk_type': 'text', 'chunk_years_mentioned': [2022, 2023], 'company_name': 'ROKU INC', 'company_sic_code_description': 'CABLE & OTHER PAY TELEVISION SERVICES', 'data_source': 'PressRelease', 'data_source_link': 'https://newsroom.roku.com/press-releases', 'data_source_publish_date': '2023-03-02T09:30:00-04:00', 'data_source_uid': '963d4a81-f58e-3093-af68-987fb1758c15', 'title': \"ROKU INC | Roku Is One of Fast Company's Most Innovative Companies for 2023\"}),\n",
|
||||
" Document(page_content='Company Name: ROKU INC\\nCompany Industry: CABLE & OTHER PAY TELEVISION SERVICES\\nArticle Title: Roku\\'s New NFL Zone Gives Fans Easy Access to NFL Games Right On Time for 2023 Season\\nText: In partnership with the NFL, the new NFL Zone offers viewers an easy way to find where to watch NFL live games Today, Roku (NASDAQ: ROKU ) and the National Football League (NFL) announced the recently launched NFL Zone within the Roku Sports experience to kick off the 2023 NFL season. This strategic partnership between Roku and the NFL marks the first official league-branded zone within Roku\\'s Sports experience. Available now, the NFL Zone offers football fans a centralized location to find live and upcoming games, so they can spend less time figuring out where to watch the game and more time rooting for their favorite teams. Users can also tune in for weekly game previews, League highlights, and additional NFL content, all within the zone. This press release features multimedia. View the full release here: In partnership with the NFL, Roku\\'s new NFL Zone offers viewers an easy way to find where to watch NFL live games (Photo: Business Wire) \"Last year we introduced the Sports experience for our highly engaged sports audience, making it simpler for Roku users to watch sports programming,\" said Gidon Katz, President, Consumer Experience, at Roku. \"As we start the biggest sports season of the year, providing easy access to NFL games and content to our millions of users is a top priority for us. We look forward to fans immersing themselves within the NFL Zone and making it their destination to find NFL games.', metadata={'chunk_type': 'text', 'chunk_years_mentioned': [2023], 'company_name': 'ROKU INC', 'company_sic_code_description': 'CABLE & OTHER PAY TELEVISION SERVICES', 'data_source': 'PressRelease', 'data_source_link': 'https://newsroom.roku.com/press-releases', 'data_source_publish_date': '2023-09-12T09:00:00-04:00', 'data_source_uid': '963d4a81-f58e-3093-af68-987fb1758c15', 'title': \"ROKU INC | Roku's New NFL Zone Gives Fans Easy Access to NFL Games Right On Time for 2023 Season\"})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 21,
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
|
||||
@@ -28,19 +28,29 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": 11,
|
||||
"id": "63a8af5b",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[33mWARNING: You are using pip version 22.0.4; however, version 23.3 is available.\n",
|
||||
"You should consider upgrading via the '/Users/joe/projects/elastic/langchain/libs/langchain/.venv/bin/python3 -m pip install --upgrade pip' command.\u001b[0m\u001b[33m\n",
|
||||
"\u001b[0m"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"#!pip install lark elasticsearch"
|
||||
"#!pip install -qU lark elasticsearch"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 1,
|
||||
"id": "cb4a5787",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -60,7 +70,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"execution_count": 2,
|
||||
"id": "bcbe04d9",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -115,7 +125,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": 3,
|
||||
"id": "86e34dbf",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -164,17 +174,10 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"execution_count": 4,
|
||||
"id": "38a126e9",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"query='dinosaur' filter=None limit=None\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
@@ -184,7 +187,7 @@
|
||||
" Document(page_content='A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea', metadata={'year': 2006, 'director': 'Satoshi Kon', 'rating': 8.6})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -196,24 +199,17 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"execution_count": 5,
|
||||
"id": "b19d4da0",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"query='women' filter=Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='director', value='Greta Gerwig') limit=None\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='A bunch of normal-sized women are supremely wholesome and some men pine after them', metadata={'year': 2019, 'director': 'Greta Gerwig', 'rating': 8.3})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -237,7 +233,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"execution_count": 6,
|
||||
"id": "bff36b88-b506-4877-9c63-e5a1a8d78e64",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -256,19 +252,12 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"execution_count": 7,
|
||||
"id": "2758d229-4f97-499c-819f-888acaf8ee10",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"query='dinosaur' filter=None limit=2\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
@@ -276,7 +265,7 @@
|
||||
" Document(page_content='Toys come alive and have a blast doing so', metadata={'year': 1995, 'genre': 'animated'})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 13,
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -297,24 +286,17 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"execution_count": 8,
|
||||
"id": "e460da93",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"query='animated toys' filter=Operation(operator=<Operator.AND: 'and'>, arguments=[Operation(operator=<Operator.OR: 'or'>, arguments=[Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='genre', value='animated'), Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='genre', value='comedy')]), Comparison(comparator=<Comparator.GTE: 'gte'>, attribute='year', value=1990)]) limit=None\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='Toys come alive and have a blast doing so', metadata={'year': 1995, 'genre': 'animated'})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 18,
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -325,21 +307,10 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": null,
|
||||
"id": "0851fc42",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"ObjectApiResponse({'acknowledged': True})"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"vectorstore.client.indices.delete(index=\"elasticsearch-self-query-demo\")"
|
||||
]
|
||||
@@ -361,7 +332,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.12"
|
||||
"version": "3.10.3"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
120
docs/docs/integrations/retrievers/singlestoredb.ipynb
Normal file
@@ -0,0 +1,120 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ab66dd43",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# SingleStoreDB\n",
|
||||
"\n",
|
||||
">[SingleStoreDB](https://singlestore.com/) is a high-performance distributed SQL database that supports deployment both in the [cloud](https://www.singlestore.com/cloud/) and on-premises. It provides vector storage, and vector functions including [dot_product](https://docs.singlestore.com/managed-service/en/reference/sql-reference/vector-functions/dot_product.html) and [euclidean_distance](https://docs.singlestore.com/managed-service/en/reference/sql-reference/vector-functions/euclidean_distance.html), thereby supporting AI applications that require text similarity matching. \n",
|
||||
"\n",
|
||||
"\n",
|
||||
"This notebook shows how to use a retriever that uses `SingleStoreDB`.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "51b49135-a61a-49e8-869d-7c1d76794cd7",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Establishing a connection to the database is facilitated through the singlestoredb Python connector.\n",
|
||||
"# Please ensure that this connector is installed in your working environment.\n",
|
||||
"!pip install singlestoredb"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "aaf80e7f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create Retriever from vector store"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "bcb3c8c2",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import getpass\n",
|
||||
"\n",
|
||||
"# We want to use OpenAIEmbeddings so we have to get the OpenAI API Key.\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n",
|
||||
"\n",
|
||||
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
|
||||
"from langchain.text_splitter import CharacterTextSplitter\n",
|
||||
"from langchain.vectorstores import SingleStoreDB\n",
|
||||
"from langchain.document_loaders import TextLoader\n",
|
||||
"\n",
|
||||
"loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n",
|
||||
"documents = loader.load()\n",
|
||||
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
|
||||
"docs = text_splitter.split_documents(documents)\n",
|
||||
"\n",
|
||||
"embeddings = OpenAIEmbeddings()\n",
|
||||
"\n",
|
||||
"# Setup connection url as environment variable\n",
|
||||
"os.environ[\"SINGLESTOREDB_URL\"] = \"root:pass@localhost:3306/db\"\n",
|
||||
"\n",
|
||||
"# Load documents to the store\n",
|
||||
"docsearch = SingleStoreDB.from_documents(\n",
|
||||
" docs,\n",
|
||||
" embeddings,\n",
|
||||
" table_name=\"notebook\", # use table with a custom name\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# create retriever from the vector store\n",
|
||||
"retriever = docsearch.as_retriever(search_kwargs={\"k\": 2})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "fc0915db",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Search with retriever"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "b605284d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"result = retriever.get_relevant_documents(\"What did the president say about Ketanji Brown Jackson\")\n",
|
||||
"print(docs[0].page_content)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -55,7 +55,7 @@
|
||||
"id": "ac5c88ce",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's add some files to the the sandbox"
|
||||
"Let's add some files to the sandbox"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
140
docs/docs/integrations/tools/tavily_search.ipynb
Normal file
@@ -0,0 +1,140 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a6f91f20",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Tavily Search"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5e24a889",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Tavily Search is a robust search API tailored specifically for LLM Agents. It seamlessly integrates with diverse data sources to ensure a superior, relevant search experience.\n",
|
||||
"\n",
|
||||
"Set up API key [here](https://app.tavily.com/)."
|
||||
]
|
||||
},
|
||||
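Before wiring the tool into an agent, a minimal sketch of calling it directly (assuming `TAVILY_API_KEY` is already exported) looks like this; the query is illustrative:

```python
from langchain.utilities.tavily_search import TavilySearchAPIWrapper
from langchain.tools.tavily_search import TavilySearchResults

search = TavilySearchAPIWrapper()
tool = TavilySearchResults(api_wrapper=search)

# Returns a list of {"url": ..., "content": ...} result dicts.
print(tool.run("What happened in the latest burning man floods?"))
```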
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b50d6c92",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Try it out!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "8cc8ded6",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-10-21T13:15:37.974229Z",
|
||||
"start_time": "2023-10-21T13:15:10.007898Z"
|
||||
},
|
||||
"scrolled": false
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3mThought: I'm not aware of the current situation regarding the Burning Man event. I'll need to search for recent news about any flooding that might have affected it.\n",
|
||||
"Action:\n",
|
||||
"```\n",
|
||||
"{\n",
|
||||
" \"action\": \"tavily_search_results_json\",\n",
|
||||
" \"action_input\": {\"query\": \"Burning Man floods latest news\"}\n",
|
||||
"}\n",
|
||||
"```\u001b[0m\n",
|
||||
"Observation: \u001b[36;1m\u001b[1;3m[{'url': 'https://www.theguardian.com/culture/2023/sep/03/burning-man-nevada-festival-floods', 'content': 'More on this story\\nMore on this story\\nBurning Man revelers begin exodus from festival after road reopens\\nBurning Man festival-goers trapped in desert as rain turns site to mud\\n\\nOfficials investigate death at Burning Man as thousands stranded by floods\\n\\nBurning Man festivalgoers surrounded by mud in Nevada desert – video\\nBurning Man attendees roadblocked by climate activists: ‘They have a privileged mindset’\\n\\nin our favor. We will let you know. It could be sooner, and it could be later,” said an update on the Burning Man website on Saturday evening.'}, {'url': 'https://www.npr.org/2023/09/03/1197497458/the-latest-on-the-burning-man-flooding', 'content': \"National\\nThe latest on the Burning Man flooding\\nClaudia Peschiutta\\n\\nClaudia Peschiutta\\nAuthorities are investigating a death at the Burning Man festival in the Nevada desert after tens of thousands of people are stuck in camps because of rain.\\nSCOTT DETROW, HOST:\\n\\nDETROW: Well, that's NPR's Claudia Peschiutta covered and caked in a lot of mud at Burning Man. Thanks for talking to us.\\nPESCHIUTTA: Confirmed.\\nDETROW: Stay dry as much as you can.\\n\\nwith NPR's Claudia Peschiutta, who's at her first burn, and she told me it's muddy where she is, but that she and her camp family have been making the best of things.\"}, {'url': 'https://www.npr.org/2023/09/03/1197497458/the-latest-on-the-burning-man-flooding', 'content': \"National\\nThe latest on the Burning Man flooding\\nClaudia Peschiutta\\n\\nClaudia Peschiutta\\nAuthorities are investigating a death at the Burning Man festival in the Nevada desert after tens of thousands of people are stuck in camps because of rain.\\nSCOTT DETROW, HOST:\\n\\nDETROW: Well, that's NPR's Claudia Peschiutta covered and caked in a lot of mud at Burning Man. 
Thanks for talking to us.\\nPESCHIUTTA: Confirmed.\\nDETROW: Stay dry as much as you can.\\n\\nwith NPR's Claudia Peschiutta, who's at her first burn, and she told me it's muddy where she is, but that she and her camp family have been making the best of things.\"}, {'url': 'https://abcnews.go.com/US/burning-man-flooding-happened-stranded-festivalgoers/story?id=102908331', 'content': 'Tens of thousands of Burning Man attendees are now able to leave the festival after a downpour and massive flooding left them stranded over the weekend.\\n\\nIn 2013, according to a blog post in the \"Burning Man Journal,\" a rainstorm similarly rolled in, unexpectedly \"trapping 160 people on the playa overnight.\"\\n\\nABC News\\nVideo\\nLive\\nShows\\nElection 2024\\n538\\nStream on\\nBurning Man flooding: What happened to stranded festivalgoers?\\nSome 64,000 people were still on site Monday as the exodus began.\\n\\nBurning Man has been hosted for over 30 years, according to a statement from the organizers.'}, {'url': 'https://www.today.com/news/what-is-burning-man-flood-death-rcna103231', 'content': 'Tens of thousands of Burning Man festivalgoers are slowly making their way home from the Nevada desert after muddy conditions from heavy rains made it nearly impossible to leave over the weekend.\\n\\naccording to burningman.org.\\n\\nPresident Biden was notified of the situation and, according to a spokesperson, administration officials monitored and received updates on the latest details.\\nWhy are people stranded at Burning Man?\\n\\n\"Thank goodness this community knows how to take care of each other,\" the Instagram page for Burning Man Information Radio wrote on a post predicting more rain.'}]\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3mThe latest Burning Man event was severely affected by heavy rainfall that led to flooding. This resulted in tens of thousands of festival attendees getting stuck in their camps due to the muddy conditions. As a result, the exodus from the festival was delayed. An unfortunate incident also occurred, with a death being investigated at the festival. The situation was severe enough that President Biden was informed about it and administration officials were monitoring it. However, it seems that the festival goers were able to handle the situation well, as the Burning Man community is known for looking out for each other. This is not the first time a rainstorm has disrupted the Burning Man event; a similar incident occurred in 2013 where a sudden storm trapped people overnight. \n",
|
||||
"Action:\n",
|
||||
"```\n",
|
||||
"{\n",
|
||||
" \"action\": \"Final Answer\",\n",
|
||||
" \"action_input\": \"The latest Burning Man event was severely affected by heavy rainfall that led to flooding. This resulted in tens of thousands of festival attendees getting stuck in their camps due to the muddy conditions, delaying their exit from the festival. An unfortunate incident also occurred, with a death being investigated at the festival. The situation was severe enough that President Biden was informed about it and administration officials were monitoring it. However, the festival goers were able to handle the situation well, as the Burning Man community is known for looking out for each other. This is not the first time a rainstorm has disrupted the Burning Man event; a similar incident occurred in 2013 when a sudden storm trapped people overnight.\"\n",
|
||||
"}\n",
|
||||
"```\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'The latest Burning Man event was severely affected by heavy rainfall that led to flooding. This resulted in tens of thousands of festival attendees getting stuck in their camps due to the muddy conditions, delaying their exit from the festival. An unfortunate incident also occurred, with a death being investigated at the festival. The situation was severe enough that President Biden was informed about it and administration officials were monitoring it. However, the festival goers were able to handle the situation well, as the Burning Man community is known for looking out for each other. This is not the first time a rainstorm has disrupted the Burning Man event; a similar incident occurred in 2013 when a sudden storm trapped people overnight.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# libraries\n",
|
||||
"import os\n",
|
||||
"from langchain.utilities.tavily_search import TavilySearchAPIWrapper\n",
|
||||
"from langchain.agents import initialize_agent, AgentType\n",
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.tools.tavily_search import TavilySearchResults\n",
|
||||
"\n",
|
||||
"# set up API key\n",
|
||||
"os.environ[\"TAVILY_API_KEY\"] = \"...\"\n",
|
||||
"\n",
|
||||
"# set up the agent\n",
|
||||
"llm = ChatOpenAI(model_name=\"gpt-4\", temperature=0.7)\n",
|
||||
"search = TavilySearchAPIWrapper()\n",
|
||||
"tavily_tool = TavilySearchResults(api_wrapper=search)\n",
|
||||
"\n",
|
||||
"# initialize the agent\n",
|
||||
"agent_chain = initialize_agent(\n",
|
||||
" [tavily_tool],\n",
|
||||
" llm,\n",
|
||||
" agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,\n",
|
||||
" verbose=True,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# run the agent\n",
|
||||
"agent_chain.run(\n",
|
||||
" \"What happened in the latest burning man floods?\",\n",
|
||||
")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "86cd0a02",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -7,6 +7,8 @@
|
||||
"source": [
|
||||
"# Zapier Natural Language Actions\n",
|
||||
"\n",
|
||||
"**Deprecated** This API will be sunset on 2023-11-17: https://nla.zapier.com/start/\n",
|
||||
" \n",
|
||||
">[Zapier Natural Language Actions](https://nla.zapier.com/start/) gives you access to the 5k+ apps, 20k+ actions on Zapier's platform through a natural language API interface.\n",
|
||||
">\n",
|
||||
">NLA supports apps like `Gmail`, `Salesforce`, `Trello`, `Slack`, `Asana`, `HubSpot`, `Google Sheets`, `Microsoft Teams`, and thousands more apps: https://zapier.com/apps\n",
|
||||
|
||||
@@ -139,7 +139,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 1,
|
||||
"id": "67ab8afa-f7c6-4fbf-b596-cb512da949da",
|
||||
"metadata": {
|
||||
"id": "67ab8afa-f7c6-4fbf-b596-cb512da949da",
|
||||
@@ -172,7 +172,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 2,
|
||||
"id": "aac9563e",
|
||||
"metadata": {
|
||||
"id": "aac9563e",
|
||||
@@ -186,7 +186,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 3,
|
||||
"id": "a3c3999a",
|
||||
"metadata": {
|
||||
"id": "a3c3999a",
|
||||
@@ -207,7 +207,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"execution_count": 4,
|
||||
"id": "12eb86d8",
|
||||
"metadata": {
|
||||
"id": "12eb86d8",
|
||||
@@ -218,7 +218,7 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[Document(page_content='One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \\n\\nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.', metadata={'source': '../../modules/state_of_the_union.txt'}), Document(page_content='One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \\n\\nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.', metadata={'source': '../../modules/state_of_the_union.txt', 'date': '2016-01-01', 'rating': 2, 'author': 'John Doe'}), Document(page_content='One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \\n\\nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.', metadata={'source': '../../modules/state_of_the_union.txt', 'date': '2010-01-01', 'rating': 1, 'author': 'John Doe'}), Document(page_content='As I said last year, especially to our younger transgender Americans, I will always have your back as your President, so you can be yourself and reach your God-given potential. \\n\\nWhile it often appears that we never agree, that isn’t true. I signed 80 bipartisan bills into law last year. From preventing government shutdowns to protecting Asian-Americans from still-too-common hate crimes to reforming military justice.', metadata={'source': '../../modules/state_of_the_union.txt'})]\n"
|
||||
"[Document(page_content='One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \\n\\nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.', metadata={'source': '../../modules/state_of_the_union.txt'}), Document(page_content='As I said last year, especially to our younger transgender Americans, I will always have your back as your President, so you can be yourself and reach your God-given potential. \\n\\nWhile it often appears that we never agree, that isn’t true. I signed 80 bipartisan bills into law last year. From preventing government shutdowns to protecting Asian-Americans from still-too-common hate crimes to reforming military justice.', metadata={'source': '../../modules/state_of_the_union.txt'}), Document(page_content='A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. \\n\\nAnd if we are to advance liberty and justice, we need to secure the Border and fix the immigration system.', metadata={'source': '../../modules/state_of_the_union.txt'}), Document(page_content='This is personal to me and Jill, to Kamala, and to so many of you. \\n\\nCancer is the #2 cause of death in America–second only to heart disease. \\n\\nLast month, I announced our plan to supercharge \\nthe Cancer Moonshot that President Obama asked me to lead six years ago. \\n\\nOur goal is to cut the cancer death rate by at least 50% over the next 25 years, turn more cancers from death sentences into treatable diseases. \\n\\nMore support for patients and families.', metadata={'source': '../../modules/state_of_the_union.txt'})]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -247,7 +247,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": 5,
|
||||
"id": "5d076412",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -284,12 +284,13 @@
|
||||
"## Filtering Metadata\n",
|
||||
"With metadata added to the documents, you can add metadata filtering at query time. \n",
|
||||
"\n",
|
||||
"### Example: Filter by keyword"
|
||||
"### Example: Filter by Exact keyword\n",
|
||||
"Notice: We are using the keyword subfield thats not analyzed"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 57,
|
||||
"execution_count": 6,
|
||||
"id": "b2a4bd1b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -297,12 +298,42 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'source': '../../modules/state_of_the_union.txt', 'date': '2010-01-01', 'rating': 1, 'author': 'John Doe', 'geo_location': {'lat': 40.12, 'lon': -71.34}}\n"
|
||||
"{'source': '../../modules/state_of_the_union.txt', 'date': '2016-01-01', 'rating': 2, 'author': 'John Doe'}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"docs = db.similarity_search(query, filter=[{ \"match\": { \"metadata.author\": \"John Doe\"}}])\n",
|
||||
"docs = db.similarity_search(query, filter=[{ \"term\": { \"metadata.author.keyword\": \"John Doe\"}}])\n",
|
||||
"print(docs[0].metadata)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1898ab77",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Example: Filter by Partial Match\n",
|
||||
"This example shows how to filter by partial match. This is useful when you don't know the exact value of the metadata field. For example, if you want to filter by the metadata field `author` and you don't know the exact value of the author, you can use a partial match to filter by the author's last name. Fuzzy matching is also supported.\n",
|
||||
"\n",
|
||||
"\"Jon\" matches on \"John Doe\" as \"Jon\" is a close match to \"John\" token."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "f3d294ff",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'source': '../../modules/state_of_the_union.txt', 'date': '2016-01-01', 'rating': 2, 'author': 'John Doe'}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"docs = db.similarity_search(query, filter=[{ \"match\": { \"metadata.author\": { \"query\": \"Jon\", \"fuzziness\": \"AUTO\" } }}])\n",
|
||||
"print(docs[0].metadata)"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -27,7 +27,7 @@
|
||||
"# Setup\n",
|
||||
"\n",
|
||||
"You will need a Vectara account to use Vectara with LangChain. To get started, use the following steps (see our [quickstart](https://docs.vectara.com/docs/quickstart) guide):\n",
|
||||
"1. [Sign up](https://console.vectara.com/signup) for a Vectara account if you don't already have one. Once you have completed your sign up you will have a Vectara customer ID. You can find your customer ID by clicking on your name, on the top-right of the Vectara console window.\n",
|
||||
"1. [Sign up](https://vectara.com/integrations/langchain) for a Vectara account if you don't already have one. Once you have completed your sign up you will have a Vectara customer ID. You can find your customer ID by clicking on your name, on the top-right of the Vectara console window.\n",
|
||||
"2. Within your account you can create one or more corpora. Each corpus represents an area that stores text data upon ingest from input documents. To create a corpus, use the **\"Create Corpus\"** button. You then provide a name to your corpus as well as a description. Optionally you can define filtering attributes and apply some advanced options. If you click on your created corpus, you can see its name and corpus ID right on the top.\n",
|
||||
"3. Next you'll need to create API keys to access the corpus. Click on the **\"Authorization\"** tab in the corpus view and then the **\"Create API Key\"** button. Give your key a name, and choose whether you want query only or query+index for your key. Click \"Create\" and you now have an active API key. Keep this key confidential. \n",
|
||||
"\n",
|
||||
|
||||
@@ -127,9 +127,9 @@ len(docs)
|
||||
|
||||
## Auto-detect file encodings with TextLoader
|
||||
|
||||
In this example we will see some strategies that can be useful when loading a big list of arbitrary files from a directory using the `TextLoader` class.
|
||||
In this example we will see some strategies that can be useful when loading a large list of arbitrary files from a directory using the `TextLoader` class.
|
||||
|
||||
First to illustrate the problem, let's try to load multiple text with arbitrary encodings.
|
||||
First to illustrate the problem, let's try to load multiple texts with arbitrary encodings.
|
||||
|
||||
|
||||
```python
|
||||
|
||||
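# A hedged sketch of the scenario described above (the directory path and
# glob are illustrative): load every .txt file under a directory with
# TextLoader. Files with unexpected encodings fail to load unless
# autodetect_encoding=True is passed through to the loader.
from langchain.document_loaders import DirectoryLoader, TextLoader

loader = DirectoryLoader(
    "example_data/",  # hypothetical path
    glob="**/*.txt",
    loader_cls=TextLoader,
    loader_kwargs={"autodetect_encoding": True},
)
docs = loader.load()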
@@ -66,7 +66,7 @@
|
||||
"\n",
|
||||
"The record manager relies on a time-based mechanism to determine what content can be cleaned up (when using `full` or `incremental` cleanup modes).\n",
|
||||
"\n",
|
||||
"If two tasks run back-to-back, and the first task finishes before the the clock time changes, then the second task may not be able to clean up content.\n",
|
||||
"If two tasks run back-to-back, and the first task finishes before the clock time changes, then the second task may not be able to clean up content.\n",
|
||||
"\n",
|
||||
"This is unlikely to be an issue in actual settings for the following reasons:\n",
|
||||
"\n",
|
||||
|
||||
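For reference, a minimal sketch of the kind of run the record manager supports; the names `docs`, `record_manager` and `vectorstore` are assumed from the surrounding guide rather than defined here:

```python
from langchain.indexes import index

# Incremental cleanup: only content whose recorded timestamp is older than
# the current run is eligible for deletion, which is why clock resolution
# matters for back-to-back runs.
index(
    docs,
    record_manager,
    vectorstore,
    cleanup="incremental",
    source_id_key="source",
)
```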
@@ -12,7 +12,7 @@
|
||||
"- [Memory in LLMChain](/docs/modules/memory/how_to/adding_memory.html)\n",
|
||||
"- [Custom Agents](/docs/modules/agents/how_to/custom_agent.html)\n",
|
||||
"\n",
|
||||
"In order to add a memory to an agent we are going to the the following steps:\n",
|
||||
"In order to add a memory to an agent we are going to perform the following steps:\n",
|
||||
"\n",
|
||||
"1. We are going to create an `LLMChain` with memory.\n",
|
||||
"2. We are going to use that `LLMChain` to create a custom Agent.\n",
|
||||
|
||||
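A minimal sketch of step 1, assuming an OpenAI key is configured; the prompt wording is illustrative:

```python
from langchain.chains import LLMChain
from langchain.llms import OpenAI
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate

template = """You are chatting with a human.

{chat_history}
Human: {human_input}
AI:"""

prompt = PromptTemplate(
    input_variables=["chat_history", "human_input"], template=template
)
memory = ConversationBufferMemory(memory_key="chat_history")
llm_chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt, memory=memory)
```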
@@ -4,9 +4,8 @@ This output parser can be used when you want to return a list of comma-separated
|
||||
|
||||
```python
|
||||
from langchain.output_parsers import CommaSeparatedListOutputParser
|
||||
from langchain.prompts import PromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate
|
||||
from langchain.prompts import PromptTemplate
|
||||
from langchain.llms import OpenAI
|
||||
from langchain.chat_models import ChatOpenAI
|
||||
|
||||
output_parser = CommaSeparatedListOutputParser()
|
||||
|
||||
|
||||
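# Hedged usage sketch (not part of the original snippet): the parser turns a
# comma-separated completion into a Python list.
format_instructions = output_parser.get_format_instructions()
items = output_parser.parse("red, green, blue")
# items == ["red", "green", "blue"]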
@@ -7,11 +7,9 @@ But we can do other things besides throw errors. Specifically, we can pass the m
|
||||
For this example, we'll use the above Pydantic output parser. Here's what happens if we pass it a result that does not comply with the schema:
|
||||
|
||||
```python
|
||||
from langchain.prompts import PromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate
|
||||
from langchain.llms import OpenAI
|
||||
from langchain.chat_models import ChatOpenAI
|
||||
from langchain.output_parsers import PydanticOutputParser
|
||||
from pydantic import BaseModel, Field, validator
|
||||
from langchain.pydantic_v1 import BaseModel, Field
|
||||
from typing import List
|
||||
```
|
||||
|
||||
|
||||
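Continuing with a hedged sketch of the scenario the text describes: a small schema and a completion that violates it (treat the field names as assumptions):

```python
class Action(BaseModel):
    action: str = Field(description="action to take")
    action_input: str = Field(description="input to the action")

parser = PydanticOutputParser(pydantic_object=Action)

bad_response = '{"action": "search"}'  # missing the required "action_input"
# parser.parse(bad_response) raises an OutputParserException at this point.
```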
@@ -65,7 +65,7 @@ qa.run(query)
|
||||
|
||||
</CodeOutputBlock>
|
||||
|
||||
The above way allows you to really simply change the chain_type, but it doesn't provide a ton of flexibility over parameters to that chain type. If you want to control those parameters, you can load the chain directly (as you did in [this notebook](/docs/modules/chains/additional/question_answering.html)) and then pass that directly to the the RetrievalQA chain with the `combine_documents_chain` parameter. For example:
|
||||
The above way allows you to really simply change the chain_type, but it doesn't provide a ton of flexibility over parameters to that chain type. If you want to control those parameters, you can load the chain directly (as you did in [this notebook](/docs/modules/chains/additional/question_answering.html)) and then pass that directly to the RetrievalQA chain with the `combine_documents_chain` parameter. For example:
|
||||
|
||||
|
||||
```python
|
||||
|
||||
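# Hedged sketch of the example the sentence above refers to. `llm`, `docsearch`
# and `query` are assumed to exist from earlier on the page.
from langchain.chains import RetrievalQA
from langchain.chains.question_answering import load_qa_chain

qa_chain = load_qa_chain(llm, chain_type="stuff")
qa = RetrievalQA(
    combine_documents_chain=qa_chain,
    retriever=docsearch.as_retriever(),
)
qa.run(query)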
102
docs/docs_skeleton/docs/integrations/tools/google_scholar.ipynb
Normal file
@@ -0,0 +1,102 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Google Scholar\n",
|
||||
"\n",
|
||||
"This notebook goes through how to use Google Scholar Tool"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Requirement already satisfied: google-search-results in /home/mohtashimkhan/mambaforge/envs/langchain/lib/python3.9/site-packages (2.4.2)\n",
|
||||
"Requirement already satisfied: requests in /home/mohtashimkhan/mambaforge/envs/langchain/lib/python3.9/site-packages (from google-search-results) (2.31.0)\n",
|
||||
"Requirement already satisfied: charset-normalizer<4,>=2 in /home/mohtashimkhan/mambaforge/envs/langchain/lib/python3.9/site-packages (from requests->google-search-results) (3.3.0)\n",
|
||||
"Requirement already satisfied: idna<4,>=2.5 in /home/mohtashimkhan/mambaforge/envs/langchain/lib/python3.9/site-packages (from requests->google-search-results) (3.4)\n",
|
||||
"Requirement already satisfied: urllib3<3,>=1.21.1 in /home/mohtashimkhan/mambaforge/envs/langchain/lib/python3.9/site-packages (from requests->google-search-results) (1.26.17)\n",
|
||||
"Requirement already satisfied: certifi>=2017.4.17 in /home/mohtashimkhan/mambaforge/envs/langchain/lib/python3.9/site-packages (from requests->google-search-results) (2023.5.7)\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"!pip install google-search-results"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.tools.google_scholar import GoogleScholarQueryRun\n",
|
||||
"from langchain.utilities.google_scholar import GoogleScholarAPIWrapper\n",
|
||||
"import os"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Title: Large language models (LLM) and ChatGPT: what will the impact on nuclear medicine be?\\nAuthors: IL Alberts,K Shi\\nSummary: IL Alberts, L Mercolli, T Pyka, G Prenosil, K Shi… - European journal of …, 2023 - Springer\\nTotal-Citations: 28\\n\\nTitle: Dynamic Planning with a LLM\\nAuthors: G Dagan,F Keller,A Lascarides\\nSummary: G Dagan, F Keller, A Lascarides - arXiv preprint arXiv:2308.06391, 2023 - arxiv.org\\nTotal-Citations: 3\\n\\nTitle: Openagi: When llm meets domain experts\\nAuthors: Y Ge,W Hua,J Ji,J Tan,S Xu,Y Zhang\\nSummary: Y Ge, W Hua, J Ji, J Tan, S Xu, Y Zhang - arXiv preprint arXiv:2304.04370, 2023 - arxiv.org\\nTotal-Citations: 19\\n\\nTitle: Llm-planner: Few-shot grounded planning for embodied agents with large language models\\nAuthors: CH Song\\nSummary: CH Song, J Wu, C Washington… - Proceedings of the …, 2023 - openaccess.thecvf.com\\nTotal-Citations: 28\\n\\nTitle: The science of detecting llm-generated texts\\nAuthors: R Tang,YN Chuang,X Hu\\nSummary: R Tang, YN Chuang, X Hu - arXiv preprint arXiv:2303.07205, 2023 - arxiv.org\\nTotal-Citations: 23\\n\\nTitle: X-llm: Bootstrapping advanced large language models by treating multi-modalities as foreign languages\\nAuthors: F Chen,M Han,J Shi\\nSummary: F Chen, M Han, H Zhao, Q Zhang, J Shi, S Xu… - arXiv preprint arXiv …, 2023 - arxiv.org\\nTotal-Citations: 12\\n\\nTitle: 3d-llm: Injecting the 3d world into large language models\\nAuthors: Y Hong,H Zhen,P Chen,S Zheng,Y Du\\nSummary: Y Hong, H Zhen, P Chen, S Zheng, Y Du… - arXiv preprint arXiv …, 2023 - arxiv.org\\nTotal-Citations: 4\\n\\nTitle: The internal state of an llm knows when its lying\\nAuthors: A Azaria,T Mitchell\\nSummary: A Azaria, T Mitchell - arXiv preprint arXiv:2304.13734, 2023 - arxiv.org\\nTotal-Citations: 18\\n\\nTitle: LLM-Pruner: On the Structural Pruning of Large Language Models\\nAuthors: X Ma,G Fang,X Wang\\nSummary: X Ma, G Fang, X Wang - arXiv preprint arXiv:2305.11627, 2023 - arxiv.org\\nTotal-Citations: 15\\n\\nTitle: Large language models are few-shot testers: Exploring llm-based general bug reproduction\\nAuthors: S Kang,J Yoon,S Yoo\\nSummary: S Kang, J Yoon, S Yoo - 2023 IEEE/ACM 45th International …, 2023 - ieeexplore.ieee.org\\nTotal-Citations: 17'"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"os.environ[\"SERP_API_KEY\"] = \"\"\n",
|
||||
"tool = GoogleScholarQueryRun(api_wrapper=GoogleScholarAPIWrapper())\n",
|
||||
"tool.run(\"LLM Models\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.9.16 ('langchain')",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.16"
|
||||
},
|
||||
"orig_nbformat": 4,
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "15e58ce194949b77a891bd4339ce3d86a9bd138e905926019517993f97db9e6c"
|
||||
}
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -1,65 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "91c6a7ef",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# SingleStoreDB Chat Message History\n",
|
||||
"\n",
|
||||
"This notebook goes over how to use SingleStoreDB to store chat message history."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "d15e3302",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.memory import SingleStoreDBChatMessageHistory\n",
|
||||
"\n",
|
||||
"history = SingleStoreDBChatMessageHistory(\n",
|
||||
" session_id=\"foo\",\n",
|
||||
" host=\"root:pass@localhost:3306/db\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"history.add_user_message(\"hi!\")\n",
|
||||
"\n",
|
||||
"history.add_ai_message(\"whats up?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "64fc465e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"history.messages"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.2"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -1,8 +0,0 @@
|
||||
---
|
||||
title: Cookbook
|
||||
hide_table_of_contents: true
|
||||
---
|
||||
|
||||
# Cookbook
|
||||
|
||||
The page you're looking for has been moved to the [cookbook section of the repo](https://github.com/langchain-ai/langchain/tree/master/cookbook) as a notebook.
|
||||
BIN
docs/static/img/qa_privacy_protection.png
vendored
Binary file not shown.
|
Before: 150 KiB | After: 185 KiB
@@ -3848,8 +3848,8 @@
|
||||
"destination": "/docs/additional_resources/dependents"
|
||||
},
|
||||
{
|
||||
"source": "docs/integrations/retrievers/google_cloud_enterprise_search",
|
||||
"destination": "docs/integrations/retrievers/google_vertex_ai_search"
|
||||
"source": "/docs/integrations/retrievers/google_cloud_enterprise_search",
|
||||
"destination": "/docs/integrations/retrievers/google_vertex_ai_search"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
@@ -48,4 +48,5 @@ python3.11 -m pip install --upgrade pip
python3.11 -m pip install -r vercel_requirements.txt
python3.11 scripts/model_feat_table.py
nbdoc_build --srcdir docs
cp ../cookbook/README.md src/pages/cookbook.mdx
python3.11 scripts/generate_api_reference_links.py
@@ -0,0 +1 @@
"""CSV toolkit."""
@@ -0,0 +1,37 @@
from io import IOBase
from typing import Any, List, Optional, Union

from langchain.agents.agent import AgentExecutor
from langchain.schema.language_model import BaseLanguageModel

from langchain_experimental.agents.agent_toolkits.pandas.base import (
    create_pandas_dataframe_agent,
)


def create_csv_agent(
    llm: BaseLanguageModel,
    path: Union[str, IOBase, List[Union[str, IOBase]]],
    pandas_kwargs: Optional[dict] = None,
    **kwargs: Any,
) -> AgentExecutor:
    """Create csv agent by loading to a dataframe and using pandas agent."""
    try:
        import pandas as pd
    except ImportError:
        raise ImportError(
            "pandas package not found, please install with `pip install pandas`"
        )

    _kwargs = pandas_kwargs or {}
    if isinstance(path, (str, IOBase)):
        df = pd.read_csv(path, **_kwargs)
    elif isinstance(path, list):
        df = []
        for item in path:
            if not isinstance(item, (str, IOBase)):
                raise ValueError(f"Expected str or file-like object, got {type(path)}")
            df.append(pd.read_csv(item, **_kwargs))
    else:
        raise ValueError(f"Expected str, list, or file-like object, got {type(path)}")
    return create_pandas_dataframe_agent(llm, df, **kwargs)
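
A minimal usage sketch for the new create_csv_agent helper above, assuming the langchain_experimental import path shown in the hunk and an OpenAI chat model; the CSV file names and the ChatOpenAI choice are placeholders, not part of this diff:

from langchain.chat_models import ChatOpenAI  # assumed LLM; any BaseLanguageModel should do
from langchain_experimental.agents.agent_toolkits.csv.base import create_csv_agent  # assumed path

# One path yields a single DataFrame; a list of paths yields a list of DataFrames,
# both forwarded to create_pandas_dataframe_agent.
agent = create_csv_agent(
    llm=ChatOpenAI(temperature=0),
    path=["titanic.csv", "titanic_cleaned.csv"],  # hypothetical local files
    pandas_kwargs={"sep": ","},
)
agent.run("How many rows does the first dataframe have?")
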
@@ -1,10 +1,8 @@
from __future__ import annotations  # allows pydantic model to reference itself

import re
from typing import Any, Optional, Union
from typing import Any, List, Optional, Union

import duckdb
import pandas as pd
from langchain.graphs.networkx_graph import NetworkxEntityGraph

from langchain_experimental.cpal.constants import Constant
@@ -38,7 +36,7 @@ class EntityModel(BaseModel):
    name: str = Field(description="entity name")
    code: str = Field(description="entity actions")
    value: float = Field(description="entity initial value")
    depends_on: list[str] = Field(default=[], description="ancestor entities")
    depends_on: List[str] = Field(default=[], description="ancestor entities")

    # TODO: generalize to multivariate math
    # TODO: acyclic graph
@@ -54,7 +52,7 @@ class EntityModel(BaseModel):

class CausalModel(BaseModel):
    attribute: str = Field(description="name of the attribute to be calculated")
    entities: list[EntityModel] = Field(description="entities in the story")
    entities: List[EntityModel] = Field(description="entities in the story")

    # TODO: root validate each `entity.depends_on` using system's entity names

@@ -101,8 +99,8 @@ class InterventionModel(BaseModel):
        }
    """

    entity_settings: list[EntitySettingModel]
    system_settings: Optional[list[SystemSettingModel]] = None
    entity_settings: List[EntitySettingModel]
    system_settings: Optional[List[SystemSettingModel]] = None

    @validator("system_settings")
    def lower_case_name(cls, v: str) -> Union[str, None]:
@@ -129,7 +127,7 @@ class StoryModel(BaseModel):
    causal_operations: Any = Field(required=True)
    intervention: Any = Field(required=True)
    query: Any = Field(required=True)
    _outcome_table: pd.DataFrame = PrivateAttr(default=None)
    _outcome_table: Any = PrivateAttr(default=None)
    _networkx_wrapper: Any = PrivateAttr(default=None)

    def __init__(self, **kwargs: Any):
@@ -190,6 +188,12 @@ class StoryModel(BaseModel):
        self.causal_operations.entities.sort(key=lambda x: sorted_nodes.index(x.name))

    def _forward_propagate(self) -> None:
        try:
            import pandas as pd
        except ImportError as e:
            raise ImportError(
                "Unable to import pandas, please install with `pip install pandas`."
            ) from e
        entity_scope = {
            entity.name: entity for entity in self.causal_operations.entities
        }
@@ -217,11 +221,17 @@ class StoryModel(BaseModel):

        if self.query.llm_error_msg == "":
            try:
                import duckdb

                df = self._outcome_table  # noqa
                query_result = duckdb.sql(self.query.expression).df()
                self.query._result_table = query_result
            except duckdb.BinderException as e:
                self.query._result_table = humanize_sql_error_msg(str(e))
            except ImportError as e:
                raise ImportError(
                    "Unable to import duckdb, please install with `pip install duckdb`."
                ) from e
            except Exception as e:
                self.query._result_table = str(e)
        else:

@@ -12,7 +12,7 @@ class ToTDFSMemory:
    """

    def __init__(self, stack: Optional[List[Thought]] = None):
        self.stack: list[Thought] = stack or []
        self.stack: List[Thought] = stack or []

    def top(self) -> Optional[Thought]:
        "Get the top of the stack without popping it."
31 libs/experimental/poetry.lock generated
@@ -1044,7 +1044,7 @@ files = [
|
||||
{file = "greenlet-3.0.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0b72b802496cccbd9b31acea72b6f87e7771ccfd7f7927437d592e5c92ed703c"},
|
||||
{file = "greenlet-3.0.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:527cd90ba3d8d7ae7dceb06fda619895768a46a1b4e423bdb24c1969823b8362"},
|
||||
{file = "greenlet-3.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:37f60b3a42d8b5499be910d1267b24355c495064f271cfe74bf28b17b099133c"},
|
||||
{file = "greenlet-3.0.0-cp311-universal2-macosx_10_9_universal2.whl", hash = "sha256:c3692ecf3fe754c8c0f2c95ff19626584459eab110eaab66413b1e7425cd84e9"},
|
||||
{file = "greenlet-3.0.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:1482fba7fbed96ea7842b5a7fc11d61727e8be75a077e603e8ab49d24e234383"},
|
||||
{file = "greenlet-3.0.0-cp312-cp312-macosx_13_0_arm64.whl", hash = "sha256:be557119bf467d37a8099d91fbf11b2de5eb1fd5fc5b91598407574848dc910f"},
|
||||
{file = "greenlet-3.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:73b2f1922a39d5d59cc0e597987300df3396b148a9bd10b76a058a2f2772fc04"},
|
||||
{file = "greenlet-3.0.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d1e22c22f7826096ad503e9bb681b05b8c1f5a8138469b255eb91f26a76634f2"},
|
||||
@@ -1054,7 +1054,6 @@ files = [
|
||||
{file = "greenlet-3.0.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:952256c2bc5b4ee8df8dfc54fc4de330970bf5d79253c863fb5e6761f00dda35"},
|
||||
{file = "greenlet-3.0.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:269d06fa0f9624455ce08ae0179430eea61085e3cf6457f05982b37fd2cefe17"},
|
||||
{file = "greenlet-3.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:9adbd8ecf097e34ada8efde9b6fec4dd2a903b1e98037adf72d12993a1c80b51"},
|
||||
{file = "greenlet-3.0.0-cp312-universal2-macosx_10_9_universal2.whl", hash = "sha256:553d6fb2324e7f4f0899e5ad2c427a4579ed4873f42124beba763f16032959af"},
|
||||
{file = "greenlet-3.0.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6b5ce7f40f0e2f8b88c28e6691ca6806814157ff05e794cdd161be928550f4c"},
|
||||
{file = "greenlet-3.0.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ecf94aa539e97a8411b5ea52fc6ccd8371be9550c4041011a091eb8b3ca1d810"},
|
||||
{file = "greenlet-3.0.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80dcd3c938cbcac986c5c92779db8e8ce51a89a849c135172c88ecbdc8c056b7"},
|
||||
@@ -1087,6 +1086,7 @@ files = [
|
||||
{file = "greenlet-3.0.0-cp39-cp39-win32.whl", hash = "sha256:0d3f83ffb18dc57243e0151331e3c383b05e5b6c5029ac29f754745c800f8ed9"},
|
||||
{file = "greenlet-3.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:831d6f35037cf18ca5e80a737a27d822d87cd922521d18ed3dbc8a6967be50ce"},
|
||||
{file = "greenlet-3.0.0-cp39-universal2-macosx_11_0_x86_64.whl", hash = "sha256:a048293392d4e058298710a54dfaefcefdf49d287cd33fb1f7d63d55426e4355"},
|
||||
{file = "greenlet-3.0.0.tar.gz", hash = "sha256:19834e3f91f485442adc1ee440171ec5d9a4840a1f7bd5ed97833544719ce10b"},
|
||||
]
|
||||
|
||||
[package.extras]
|
||||
@@ -1794,6 +1794,16 @@ files = [
|
||||
{file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac"},
|
||||
{file = "MarkupSafe-2.1.3-cp311-cp311-win32.whl", hash = "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb"},
|
||||
{file = "MarkupSafe-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686"},
|
||||
{file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc"},
|
||||
{file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823"},
|
||||
{file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11"},
|
||||
{file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd"},
|
||||
{file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939"},
|
||||
{file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c"},
|
||||
{file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c"},
|
||||
{file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1"},
|
||||
{file = "MarkupSafe-2.1.3-cp312-cp312-win32.whl", hash = "sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007"},
|
||||
{file = "MarkupSafe-2.1.3-cp312-cp312-win_amd64.whl", hash = "sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb"},
|
||||
{file = "MarkupSafe-2.1.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2"},
|
||||
{file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b"},
|
||||
{file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707"},
|
||||
@@ -2960,6 +2970,7 @@ files = [
|
||||
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"},
|
||||
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"},
|
||||
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"},
|
||||
{file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"},
|
||||
{file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"},
|
||||
{file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"},
|
||||
{file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"},
|
||||
@@ -2967,8 +2978,15 @@ files = [
|
||||
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"},
|
||||
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"},
|
||||
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"},
|
||||
{file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"},
|
||||
{file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"},
|
||||
{file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
|
||||
{file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"},
|
||||
{file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"},
|
||||
{file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"},
|
||||
{file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"},
|
||||
{file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"},
|
||||
{file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"},
|
||||
{file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"},
|
||||
{file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"},
|
||||
{file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"},
|
||||
@@ -2985,6 +3003,7 @@ files = [
|
||||
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"},
|
||||
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"},
|
||||
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"},
|
||||
{file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"},
|
||||
{file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"},
|
||||
{file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"},
|
||||
{file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"},
|
||||
@@ -2992,6 +3011,7 @@ files = [
|
||||
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"},
|
||||
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"},
|
||||
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"},
|
||||
{file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"},
|
||||
{file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"},
|
||||
{file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"},
|
||||
{file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"},
|
||||
@@ -3589,6 +3609,11 @@ files = [
|
||||
{file = "scikit_learn-1.3.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f66eddfda9d45dd6cadcd706b65669ce1df84b8549875691b1f403730bdef217"},
|
||||
{file = "scikit_learn-1.3.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c6448c37741145b241eeac617028ba6ec2119e1339b1385c9720dae31367f2be"},
|
||||
{file = "scikit_learn-1.3.1-cp311-cp311-win_amd64.whl", hash = "sha256:c413c2c850241998168bbb3bd1bb59ff03b1195a53864f0b80ab092071af6028"},
|
||||
{file = "scikit_learn-1.3.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ef540e09873e31569bc8b02c8a9f745ee04d8e1263255a15c9969f6f5caa627f"},
|
||||
{file = "scikit_learn-1.3.1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:9147a3a4df4d401e618713880be023e36109c85d8569b3bf5377e6cd3fecdeac"},
|
||||
{file = "scikit_learn-1.3.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2cd3634695ad192bf71645702b3df498bd1e246fc2d529effdb45a06ab028b4"},
|
||||
{file = "scikit_learn-1.3.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c275a06c5190c5ce00af0acbb61c06374087949f643ef32d355ece12c4db043"},
|
||||
{file = "scikit_learn-1.3.1-cp312-cp312-win_amd64.whl", hash = "sha256:0e1aa8f206d0de814b81b41d60c1ce31f7f2c7354597af38fae46d9c47c45122"},
|
||||
{file = "scikit_learn-1.3.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:52b77cc08bd555969ec5150788ed50276f5ef83abb72e6f469c5b91a0009bbca"},
|
||||
{file = "scikit_learn-1.3.1-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:a683394bc3f80b7c312c27f9b14ebea7766b1f0a34faf1a2e9158d80e860ec26"},
|
||||
{file = "scikit_learn-1.3.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a15d964d9eb181c79c190d3dbc2fff7338786bf017e9039571418a1d53dab236"},
|
||||
@@ -4932,4 +4957,4 @@ extended-testing = ["faker", "presidio-analyzer", "presidio-anonymizer", "senten
[metadata]
lock-version = "2.0"
python-versions = ">=3.8.1,<4.0"
content-hash = "ef5619ee1a6129a65856d0e248011ee298ba0c1c838472332e6b3624e09aa0c6"
content-hash = "f23fcff0e5ab8f172578b4f4da8dd204375357c7a5ad10c97bb77de7208bc889"
@@ -36,6 +36,11 @@ setuptools = "^67.6.1"
# Any dependencies that do not meet that criteria will be removed.
pytest = "^7.3.0"


[tool.poetry.group.test_integration]
optional = true
dependencies = {}

# An extra used to be able to add extended testing.
# Please use new-line on formatting to make it easier to add new packages without
# merge-conflicts
@@ -84,5 +89,6 @@ addopts = "--strict-markers --strict-config --durations=5"
# https://docs.pytest.org/en/7.1.x/example/markers.html#registering-markers
markers = [
    "requires: mark tests as requiring a specific library",
    "asyncio: mark tests as requiring asyncio"
    "asyncio: mark tests as requiring asyncio",
    "compile: mark placeholder test used to compile integration tests without running them",
]
@@ -0,0 +1,7 @@
import pytest


@pytest.mark.compile
def test_placeholder() -> None:
    """Used for compiling integration tests without running any real tests."""
    pass
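
The placeholder test above pairs with the `compile` marker registered in pyproject.toml: selecting that marker collects and imports the integration tests without executing any real ones, so it can serve as a cheap import check after installing the optional test_integration group. A hedged sketch of such an invocation (the directory layout is assumed and may differ per package):

import pytest

# Collect and import the integration tests without running any real ones.
pytest.main(["-m", "compile", "tests/integration_tests"])
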
@@ -20,7 +20,14 @@ from langchain.utilities.clickup import ClickupAPIWrapper


class ClickupToolkit(BaseToolkit):
    """Clickup Toolkit."""
    """Clickup Toolkit.

    *Security Note*: This toolkit contains tools that can read and modify
    the state of a service; e.g., by reading, creating, updating, deleting
    data associated with this service.

    See https://python.langchain.com/docs/security for more information.
    """

    tools: List[BaseTool] = []
@@ -13,7 +13,19 @@ from langchain.utilities.requests import Requests


class NLAToolkit(BaseToolkit):
    """Natural Language API Toolkit."""
    """Natural Language API Toolkit.

    *Security Note*: This toolkit creates tools that enable making calls
    to an Open API compliant API.

    The tools created by this toolkit may be able to make GET, POST,
    PATCH, PUT, DELETE requests to any of the exposed endpoints on
    the API.

    Control access to who can use this toolkit.

    See https://python.langchain.com/docs/security for more information.
    """

    nla_tools: Sequence[NLATool] = Field(...)
    """List of API Endpoint Tools."""
@@ -28,7 +28,17 @@ from langchain.utilities.powerbi import PowerBIDataset


class PowerBIToolkit(BaseToolkit):
    """Toolkit for interacting with Power BI dataset."""
    """Toolkit for interacting with Power BI dataset.

    *Security Note*: This toolkit interacts with an external service.

    Control access to who can use this toolkit.

    Make sure that the capabilities given by this toolkit to the calling
    code are appropriately scoped to the application.

    See https://python.langchain.com/docs/security for more information.
    """

    powerbi: PowerBIDataset = Field(exclude=True)
    llm: Union[BaseLanguageModel, BaseChatModel] = Field(exclude=True)
@@ -1,6 +1,7 @@
"""Zapier Toolkit."""
"""[DEPRECATED] Zapier Toolkit."""
from typing import List

from langchain._api import warn_deprecated
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.tools import BaseTool
from langchain.tools.zapier.tool import ZapierNLARunAction
@@ -48,4 +49,11 @@ class ZapierToolkit(BaseToolkit):

    def get_tools(self) -> List[BaseTool]:
        """Get the tools in the toolkit."""
        warn_deprecated(
            since="0.0.319",
            message=(
                "This tool will be deprecated on 2023-11-17. See "
                "https://nla.zapier.com/sunset/ for details"
            ),
        )
        return self.tools
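
A small sketch of what the added warn_deprecated call means for callers, assuming the usual langchain import path for ZapierToolkit and that warn_deprecated surfaces as a standard Python warning (both assumptions, not stated in this diff); the empty toolkit exists only to trigger the message:

import warnings

from langchain.agents.agent_toolkits import ZapierToolkit  # import path assumed

toolkit = ZapierToolkit(tools=[])
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    toolkit.get_tools()  # expected to emit the 0.0.319 deprecation warning about the Zapier NLA sunset
print(caught[0].message if caught else "no warning captured")
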
@@ -34,6 +34,7 @@ from langchain.tools.base import BaseTool
|
||||
from langchain.tools.bing_search.tool import BingSearchRun
|
||||
from langchain.tools.ddg_search.tool import DuckDuckGoSearchRun
|
||||
from langchain.tools.google_search.tool import GoogleSearchResults, GoogleSearchRun
|
||||
from langchain.tools.google_scholar.tool import GoogleScholarQueryRun
|
||||
from langchain.tools.metaphor_search.tool import MetaphorSearchResults
|
||||
from langchain.tools.google_serper.tool import GoogleSerperResults, GoogleSerperRun
|
||||
from langchain.tools.searchapi.tool import SearchAPIResults, SearchAPIRun
|
||||
@@ -64,6 +65,7 @@ from langchain.utilities.bing_search import BingSearchAPIWrapper
|
||||
from langchain.utilities.duckduckgo_search import DuckDuckGoSearchAPIWrapper
|
||||
from langchain.utilities.google_search import GoogleSearchAPIWrapper
|
||||
from langchain.utilities.google_serper import GoogleSerperAPIWrapper
|
||||
from langchain.utilities.google_scholar import GoogleScholarAPIWrapper
|
||||
from langchain.utilities.metaphor_search import MetaphorSearchAPIWrapper
|
||||
from langchain.utilities.awslambda import LambdaWrapper
|
||||
from langchain.utilities.graphql import GraphQLAPIWrapper
|
||||
@@ -222,6 +224,10 @@ def _get_google_serper(**kwargs: Any) -> BaseTool:
|
||||
return GoogleSerperRun(api_wrapper=GoogleSerperAPIWrapper(**kwargs))
|
||||
|
||||
|
||||
def _get_google_scholar(**kwargs: Any) -> BaseTool:
|
||||
return GoogleScholarQueryRun(api_wrapper=GoogleScholarAPIWrapper(**kwargs))
|
||||
|
||||
|
||||
def _get_google_serper_results_json(**kwargs: Any) -> BaseTool:
|
||||
return GoogleSerperResults(api_wrapper=GoogleSerperAPIWrapper(**kwargs))
|
||||
|
||||
@@ -337,6 +343,10 @@ _EXTRA_OPTIONAL_TOOLS: Dict[str, Tuple[Callable[[KwArg(Any)], BaseTool], List[st
|
||||
"metaphor-search": (_get_metaphor_search, ["metaphor_api_key"]),
|
||||
"ddg-search": (_get_ddg_search, []),
|
||||
"google-serper": (_get_google_serper, ["serper_api_key", "aiosession"]),
|
||||
"google-scholar": (
|
||||
_get_google_scholar,
|
||||
["top_k_results", "hl", "lr", "serp_api_key"],
|
||||
),
|
||||
"google-serper-results-json": (
|
||||
_get_google_serper_results_json,
|
||||
["serper_api_key", "aiosession"],
|
||||
|
||||
@@ -1,12 +1,14 @@
|
||||
import importlib.metadata
|
||||
import logging
|
||||
import os
|
||||
import traceback
|
||||
import warnings
|
||||
from contextvars import ContextVar
|
||||
from datetime import datetime
|
||||
from typing import Any, Dict, List, Literal, Union
|
||||
from uuid import UUID
|
||||
|
||||
import requests
|
||||
from packaging.version import parse
|
||||
|
||||
from langchain.callbacks.base import BaseCallbackHandler
|
||||
from langchain.schema.agent import AgentAction, AgentFinish
|
||||
@@ -142,7 +144,7 @@ def _get_user_props(metadata: Any) -> Any:
|
||||
return user_props_ctx.get()
|
||||
|
||||
metadata = metadata or {}
|
||||
return metadata.get("user_props")
|
||||
return metadata.get("user_props", None)
|
||||
|
||||
|
||||
def _parse_lc_message(message: BaseMessage) -> Dict[str, Any]:
|
||||
@@ -191,6 +193,8 @@ class LLMonitorCallbackHandler(BaseCallbackHandler):
|
||||
__api_url: str
|
||||
__app_id: str
|
||||
__verbose: bool
|
||||
__llmonitor_version: str
|
||||
__has_valid_config: bool
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
@@ -200,37 +204,58 @@ class LLMonitorCallbackHandler(BaseCallbackHandler):
|
||||
) -> None:
|
||||
super().__init__()
|
||||
|
||||
self.__api_url = api_url or os.getenv("LLMONITOR_API_URL") or DEFAULT_API_URL
|
||||
self.__has_valid_config = True
|
||||
|
||||
try:
|
||||
import llmonitor
|
||||
|
||||
self.__llmonitor_version = importlib.metadata.version("llmonitor")
|
||||
self.__track_event = llmonitor.track_event
|
||||
|
||||
except ImportError:
|
||||
warnings.warn(
|
||||
"""[LLMonitor] To use the LLMonitor callback handler you need to
|
||||
have the `llmonitor` Python package installed. Please install it
|
||||
with `pip install llmonitor`"""
|
||||
)
|
||||
self.__has_valid_config = False
|
||||
|
||||
if parse(self.__llmonitor_version) < parse("0.0.20"):
|
||||
warnings.warn(
|
||||
f"""[LLMonitor] The installed `llmonitor` version is
|
||||
{self.__llmonitor_version} but `LLMonitorCallbackHandler` requires
|
||||
at least version 0.0.20 upgrade `llmonitor` with `pip install
|
||||
--upgrade llmonitor`"""
|
||||
)
|
||||
self.__has_valid_config = False
|
||||
|
||||
self.__has_valid_config = True
|
||||
|
||||
self.__api_url = api_url or os.getenv("LLMONITOR_API_URL") or DEFAULT_API_URL
|
||||
self.__verbose = verbose or bool(os.getenv("LLMONITOR_VERBOSE"))
|
||||
|
||||
_app_id = app_id or os.getenv("LLMONITOR_APP_ID")
|
||||
if _app_id is None:
|
||||
raise ValueError(
|
||||
"""app_id must be provided either as an argument or as
|
||||
warnings.warn(
|
||||
"""[LLMonitor] app_id must be provided either as an argument or as
|
||||
an environment variable"""
|
||||
)
|
||||
self.__app_id = _app_id
|
||||
self.__has_valid_config = False
|
||||
else:
|
||||
self.__app_id = _app_id
|
||||
|
||||
if self.__has_valid_config is False:
|
||||
return None
|
||||
|
||||
try:
|
||||
res = requests.get(f"{self.__api_url}/api/app/{self.__app_id}")
|
||||
if not res.ok:
|
||||
raise ConnectionError()
|
||||
except Exception as e:
|
||||
raise ConnectionError(
|
||||
f"Could not connect to the LLMonitor API at {self.__api_url}"
|
||||
) from e
|
||||
|
||||
def __send_event(self, event: Dict[str, Any]) -> None:
|
||||
headers = {"Content-Type": "application/json"}
|
||||
|
||||
event = {**event, "app": self.__app_id, "timestamp": str(datetime.utcnow())}
|
||||
|
||||
if self.__verbose:
|
||||
print("llmonitor_callback", event)
|
||||
|
||||
data = {"events": event}
|
||||
requests.post(headers=headers, url=f"{self.__api_url}/api/report", json=data)
|
||||
except Exception:
|
||||
warnings.warn(
|
||||
f"""[LLMonitor] Could not connect to the LLMonitor API at
|
||||
{self.__api_url}"""
|
||||
)
|
||||
|
||||
def on_llm_start(
|
||||
self,
|
||||
@@ -243,27 +268,28 @@ class LLMonitorCallbackHandler(BaseCallbackHandler):
|
||||
metadata: Union[Dict[str, Any], None] = None,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
if self.__has_valid_config is False:
|
||||
return
|
||||
try:
|
||||
user_id = _get_user_id(metadata)
|
||||
user_props = _get_user_props(metadata)
|
||||
name = kwargs.get("invocation_params", {}).get("model_name")
|
||||
input = _parse_input(prompts)
|
||||
|
||||
event = {
|
||||
"event": "start",
|
||||
"type": "llm",
|
||||
"userId": user_id,
|
||||
"runId": str(run_id),
|
||||
"parentRunId": str(parent_run_id) if parent_run_id else None,
|
||||
"input": _parse_input(prompts),
|
||||
"name": kwargs.get("invocation_params", {}).get("model_name"),
|
||||
"tags": tags,
|
||||
"metadata": metadata,
|
||||
}
|
||||
if user_props:
|
||||
event["userProps"] = user_props
|
||||
|
||||
self.__send_event(event)
|
||||
self.__track_event(
|
||||
"llm",
|
||||
"start",
|
||||
user_id=user_id,
|
||||
run_id=str(run_id),
|
||||
parent_run_id=str(parent_run_id) if parent_run_id else None,
|
||||
name=name,
|
||||
input=input,
|
||||
tags=tags,
|
||||
metadata=metadata,
|
||||
user_props=user_props,
|
||||
)
|
||||
except Exception as e:
|
||||
logging.warning(f"[LLMonitor] An error occurred in on_llm_start: {e}")
|
||||
warnings.warn(f"[LLMonitor] An error occurred in on_llm_start: {e}")
|
||||
|
||||
def on_chat_model_start(
|
||||
self,
|
||||
@@ -276,28 +302,29 @@ class LLMonitorCallbackHandler(BaseCallbackHandler):
|
||||
metadata: Union[Dict[str, Any], None] = None,
|
||||
**kwargs: Any,
|
||||
) -> Any:
|
||||
if self.__has_valid_config is False:
|
||||
return
|
||||
try:
|
||||
user_id = _get_user_id(metadata)
|
||||
user_props = _get_user_props(metadata)
|
||||
name = kwargs.get("invocation_params", {}).get("model_name")
|
||||
input = _parse_lc_messages(messages[0])
|
||||
|
||||
event = {
|
||||
"event": "start",
|
||||
"type": "llm",
|
||||
"userId": user_id,
|
||||
"runId": str(run_id),
|
||||
"parentRunId": str(parent_run_id) if parent_run_id else None,
|
||||
"input": _parse_lc_messages(messages[0]),
|
||||
"name": kwargs.get("invocation_params", {}).get("model_name"),
|
||||
"tags": tags,
|
||||
"metadata": metadata,
|
||||
}
|
||||
if user_props:
|
||||
event["userProps"] = user_props
|
||||
|
||||
self.__send_event(event)
|
||||
self.__track_event(
|
||||
"llm",
|
||||
"start",
|
||||
user_id=user_id,
|
||||
run_id=str(run_id),
|
||||
parent_run_id=str(parent_run_id) if parent_run_id else None,
|
||||
name=name,
|
||||
input=input,
|
||||
tags=tags,
|
||||
metadata=metadata,
|
||||
user_props=user_props,
|
||||
)
|
||||
except Exception as e:
|
||||
logging.warning(
|
||||
f"[LLMonitor] An error occurred in on_chat_model_start: " f"{e}"
|
||||
f"[LLMonitor] An error occurred in on_chat_model_start: {e}"
|
||||
)
|
||||
|
||||
def on_llm_end(
|
||||
@@ -308,9 +335,11 @@ class LLMonitorCallbackHandler(BaseCallbackHandler):
|
||||
parent_run_id: Union[UUID, None] = None,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
if self.__has_valid_config is False:
|
||||
return
|
||||
|
||||
try:
|
||||
token_usage = (response.llm_output or {}).get("token_usage", {})
|
||||
|
||||
parsed_output = [
|
||||
{
|
||||
"text": generation.text,
|
||||
@@ -330,20 +359,19 @@ class LLMonitorCallbackHandler(BaseCallbackHandler):
|
||||
for generation in response.generations[0]
|
||||
]
|
||||
|
||||
event = {
|
||||
"event": "end",
|
||||
"type": "llm",
|
||||
"runId": str(run_id),
|
||||
"parent_run_id": str(parent_run_id) if parent_run_id else None,
|
||||
"output": parsed_output,
|
||||
"tokensUsage": {
|
||||
self.__track_event(
|
||||
"llm",
|
||||
"end",
|
||||
run_id=str(run_id),
|
||||
parent_run_id=str(parent_run_id) if parent_run_id else None,
|
||||
output=parsed_output,
|
||||
token_usage={
|
||||
"prompt": token_usage.get("prompt_tokens"),
|
||||
"completion": token_usage.get("completion_tokens"),
|
||||
},
|
||||
}
|
||||
self.__send_event(event)
|
||||
)
|
||||
except Exception as e:
|
||||
logging.warning(f"[LLMonitor] An error occurred in on_llm_end: {e}")
|
||||
warnings.warn(f"[LLMonitor] An error occurred in on_llm_end: {e}")
|
||||
|
||||
def on_tool_start(
|
||||
self,
|
||||
@@ -356,27 +384,27 @@ class LLMonitorCallbackHandler(BaseCallbackHandler):
|
||||
metadata: Union[Dict[str, Any], None] = None,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
if self.__has_valid_config is False:
|
||||
return
|
||||
try:
|
||||
user_id = _get_user_id(metadata)
|
||||
user_props = _get_user_props(metadata)
|
||||
name = serialized.get("name")
|
||||
|
||||
event = {
|
||||
"event": "start",
|
||||
"type": "tool",
|
||||
"userId": user_id,
|
||||
"runId": str(run_id),
|
||||
"parentRunId": str(parent_run_id) if parent_run_id else None,
|
||||
"name": serialized.get("name"),
|
||||
"input": input_str,
|
||||
"tags": tags,
|
||||
"metadata": metadata,
|
||||
}
|
||||
if user_props:
|
||||
event["userProps"] = user_props
|
||||
|
||||
self.__send_event(event)
|
||||
self.__track_event(
|
||||
"tool",
|
||||
"start",
|
||||
user_id=user_id,
|
||||
run_id=str(run_id),
|
||||
parent_run_id=str(parent_run_id) if parent_run_id else None,
|
||||
name=name,
|
||||
input=input_str,
|
||||
tags=tags,
|
||||
metadata=metadata,
|
||||
user_props=user_props,
|
||||
)
|
||||
except Exception as e:
|
||||
logging.warning(f"[LLMonitor] An error occurred in on_tool_start: {e}")
|
||||
warnings.warn(f"[LLMonitor] An error occurred in on_tool_start: {e}")
|
||||
|
||||
def on_tool_end(
|
||||
self,
|
||||
@@ -387,17 +415,18 @@ class LLMonitorCallbackHandler(BaseCallbackHandler):
|
||||
tags: Union[List[str], None] = None,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
if self.__has_valid_config is False:
|
||||
return
|
||||
try:
|
||||
event = {
|
||||
"event": "end",
|
||||
"type": "tool",
|
||||
"runId": str(run_id),
|
||||
"parent_run_id": str(parent_run_id) if parent_run_id else None,
|
||||
"output": output,
|
||||
}
|
||||
self.__send_event(event)
|
||||
self.__track_event(
|
||||
"tool",
|
||||
"end",
|
||||
run_id=str(run_id),
|
||||
parent_run_id=str(parent_run_id) if parent_run_id else None,
|
||||
output=output,
|
||||
)
|
||||
except Exception as e:
|
||||
logging.warning(f"[LLMonitor] An error occurred in on_tool_end: {e}")
|
||||
warnings.warn(f"[LLMonitor] An error occurred in on_tool_end: {e}")
|
||||
|
||||
def on_chain_start(
|
||||
self,
|
||||
@@ -410,6 +439,8 @@ class LLMonitorCallbackHandler(BaseCallbackHandler):
|
||||
metadata: Union[Dict[str, Any], None] = None,
|
||||
**kwargs: Any,
|
||||
) -> Any:
|
||||
if self.__has_valid_config is False:
|
||||
return
|
||||
try:
|
||||
name = serialized.get("id", [None, None, None, None])[3]
|
||||
type = "chain"
|
||||
@@ -419,35 +450,32 @@ class LLMonitorCallbackHandler(BaseCallbackHandler):
|
||||
if agentName is None:
|
||||
agentName = metadata.get("agentName")
|
||||
|
||||
if name == "AgentExecutor" or name == "PlanAndExecute":
|
||||
type = "agent"
|
||||
if agentName is not None:
|
||||
type = "agent"
|
||||
name = agentName
|
||||
if name == "AgentExecutor" or name == "PlanAndExecute":
|
||||
type = "agent"
|
||||
|
||||
if parent_run_id is not None:
|
||||
type = "chain"
|
||||
|
||||
user_id = _get_user_id(metadata)
|
||||
user_props = _get_user_props(metadata)
|
||||
input = _parse_input(inputs)
|
||||
|
||||
event = {
|
||||
"event": "start",
|
||||
"type": type,
|
||||
"userId": user_id,
|
||||
"runId": str(run_id),
|
||||
"parentRunId": str(parent_run_id) if parent_run_id else None,
|
||||
"input": _parse_input(inputs),
|
||||
"tags": tags,
|
||||
"metadata": metadata,
|
||||
"name": name,
|
||||
}
|
||||
if user_props:
|
||||
event["userProps"] = user_props
|
||||
|
||||
self.__send_event(event)
|
||||
self.__track_event(
|
||||
type,
|
||||
"start",
|
||||
user_id=user_id,
|
||||
run_id=str(run_id),
|
||||
parent_run_id=str(parent_run_id) if parent_run_id else None,
|
||||
name=name,
|
||||
input=input,
|
||||
tags=tags,
|
||||
metadata=metadata,
|
||||
user_props=user_props,
|
||||
)
|
||||
except Exception as e:
|
||||
logging.warning(f"[LLMonitor] An error occurred in on_chain_start: {e}")
|
||||
warnings.warn(f"[LLMonitor] An error occurred in on_chain_start: {e}")
|
||||
|
||||
def on_chain_end(
|
||||
self,
|
||||
@@ -457,14 +485,18 @@ class LLMonitorCallbackHandler(BaseCallbackHandler):
|
||||
parent_run_id: Union[UUID, None] = None,
|
||||
**kwargs: Any,
|
||||
) -> Any:
|
||||
if self.__has_valid_config is False:
|
||||
return
|
||||
try:
|
||||
event = {
|
||||
"event": "end",
|
||||
"type": "chain",
|
||||
"runId": str(run_id),
|
||||
"output": _parse_output(outputs),
|
||||
}
|
||||
self.__send_event(event)
|
||||
output = _parse_output(outputs)
|
||||
|
||||
self.__track_event(
|
||||
"chain",
|
||||
"end",
|
||||
run_id=str(run_id),
|
||||
parent_run_id=str(parent_run_id) if parent_run_id else None,
|
||||
output=output,
|
||||
)
|
||||
except Exception as e:
|
||||
logging.warning(f"[LLMonitor] An error occurred in on_chain_end: {e}")
|
||||
|
||||
@@ -476,16 +508,20 @@ class LLMonitorCallbackHandler(BaseCallbackHandler):
|
||||
parent_run_id: Union[UUID, None] = None,
|
||||
**kwargs: Any,
|
||||
) -> Any:
|
||||
if self.__has_valid_config is False:
|
||||
return
|
||||
try:
|
||||
event = {
|
||||
"event": "start",
|
||||
"type": "tool",
|
||||
"runId": str(run_id),
|
||||
"parentRunId": str(parent_run_id) if parent_run_id else None,
|
||||
"name": action.tool,
|
||||
"input": _parse_input(action.tool_input),
|
||||
}
|
||||
self.__send_event(event)
|
||||
name = action.tool
|
||||
input = _parse_input(action.tool_input)
|
||||
|
||||
self.__track_event(
|
||||
"tool",
|
||||
"start",
|
||||
run_id=str(run_id),
|
||||
parent_run_id=str(parent_run_id) if parent_run_id else None,
|
||||
name=name,
|
||||
input=input,
|
||||
)
|
||||
except Exception as e:
|
||||
logging.warning(f"[LLMonitor] An error occurred in on_agent_action: {e}")
|
||||
|
||||
@@ -497,15 +533,18 @@ class LLMonitorCallbackHandler(BaseCallbackHandler):
|
||||
parent_run_id: Union[UUID, None] = None,
|
||||
**kwargs: Any,
|
||||
) -> Any:
|
||||
if self.__has_valid_config is False:
|
||||
return
|
||||
try:
|
||||
event = {
|
||||
"event": "end",
|
||||
"type": "agent",
|
||||
"runId": str(run_id),
|
||||
"parentRunId": str(parent_run_id) if parent_run_id else None,
|
||||
"output": _parse_output(finish.return_values),
|
||||
}
|
||||
self.__send_event(event)
|
||||
output = _parse_output(finish.return_values)
|
||||
|
||||
self.__track_event(
|
||||
"agent",
|
||||
"end",
|
||||
run_id=str(run_id),
|
||||
parent_run_id=str(parent_run_id) if parent_run_id else None,
|
||||
output=output,
|
||||
)
|
||||
except Exception as e:
|
||||
logging.warning(f"[LLMonitor] An error occurred in on_agent_finish: {e}")
|
||||
|
||||
@@ -517,15 +556,16 @@ class LLMonitorCallbackHandler(BaseCallbackHandler):
|
||||
parent_run_id: Union[UUID, None] = None,
|
||||
**kwargs: Any,
|
||||
) -> Any:
|
||||
if self.__has_valid_config is False:
|
||||
return
|
||||
try:
|
||||
event = {
|
||||
"event": "error",
|
||||
"type": "chain",
|
||||
"runId": str(run_id),
|
||||
"parent_run_id": str(parent_run_id) if parent_run_id else None,
|
||||
"error": {"message": str(error), "stack": traceback.format_exc()},
|
||||
}
|
||||
self.__send_event(event)
|
||||
self.__track_event(
|
||||
"chain",
|
||||
"error",
|
||||
run_id=str(run_id),
|
||||
parent_run_id=str(parent_run_id) if parent_run_id else None,
|
||||
error={"message": str(error), "stack": traceback.format_exc()},
|
||||
)
|
||||
except Exception as e:
|
||||
logging.warning(f"[LLMonitor] An error occurred in on_chain_error: {e}")
|
||||
|
||||
@@ -537,15 +577,16 @@ class LLMonitorCallbackHandler(BaseCallbackHandler):
|
||||
parent_run_id: Union[UUID, None] = None,
|
||||
**kwargs: Any,
|
||||
) -> Any:
|
||||
if self.__has_valid_config is False:
|
||||
return
|
||||
try:
|
||||
event = {
|
||||
"event": "error",
|
||||
"type": "tool",
|
||||
"runId": str(run_id),
|
||||
"parent_run_id": str(parent_run_id) if parent_run_id else None,
|
||||
"error": {"message": str(error), "stack": traceback.format_exc()},
|
||||
}
|
||||
self.__send_event(event)
|
||||
self.__track_event(
|
||||
"tool",
|
||||
"error",
|
||||
run_id=str(run_id),
|
||||
parent_run_id=str(parent_run_id) if parent_run_id else None,
|
||||
error={"message": str(error), "stack": traceback.format_exc()},
|
||||
)
|
||||
except Exception as e:
|
||||
logging.warning(f"[LLMonitor] An error occurred in on_tool_error: {e}")
|
||||
|
||||
@@ -557,15 +598,16 @@ class LLMonitorCallbackHandler(BaseCallbackHandler):
|
||||
parent_run_id: Union[UUID, None] = None,
|
||||
**kwargs: Any,
|
||||
) -> Any:
|
||||
if self.__has_valid_config is False:
|
||||
return
|
||||
try:
|
||||
event = {
|
||||
"event": "error",
|
||||
"type": "llm",
|
||||
"runId": str(run_id),
|
||||
"parent_run_id": str(parent_run_id) if parent_run_id else None,
|
||||
"error": {"message": str(error), "stack": traceback.format_exc()},
|
||||
}
|
||||
self.__send_event(event)
|
||||
self.__track_event(
|
||||
"llm",
|
||||
"error",
|
||||
run_id=str(run_id),
|
||||
parent_run_id=str(parent_run_id) if parent_run_id else None,
|
||||
error={"message": str(error), "stack": traceback.format_exc()},
|
||||
)
|
||||
except Exception as e:
|
||||
logging.warning(f"[LLMonitor] An error occurred in on_llm_error: {e}")
|
||||
|
||||
|
||||
@@ -1892,6 +1892,7 @@ def _configure(
|
||||
callback_manager = callback_manager_cls(
|
||||
handlers=inheritable_callbacks_.copy(),
|
||||
inheritable_handlers=inheritable_callbacks_.copy(),
|
||||
parent_run_id=parent_run_id,
|
||||
)
|
||||
else:
|
||||
callback_manager = callback_manager_cls(
|
||||
|
||||
@@ -116,8 +116,28 @@ class EvaluatorCallbackHandler(BaseTracer):
|
||||
eval_result = self.client.evaluate_run(run, evaluator)
|
||||
with manager.tracing_v2_enabled(
|
||||
project_name=self.project_name, tags=["eval"], client=self.client
|
||||
):
|
||||
eval_result = self.client.evaluate_run(run, evaluator)
|
||||
) as cb:
|
||||
reference_example = (
|
||||
self.client.read_example(run.reference_example_id)
|
||||
if run.reference_example_id
|
||||
else None
|
||||
)
|
||||
evaluation_result = evaluator.evaluate_run(
|
||||
run,
|
||||
example=reference_example,
|
||||
)
|
||||
run_id = cb.latest_run.id if cb.latest_run is not None else None
|
||||
self.client.create_feedback(
|
||||
run.id,
|
||||
evaluation_result.key,
|
||||
score=evaluation_result.score,
|
||||
value=evaluation_result.value,
|
||||
comment=evaluation_result.comment,
|
||||
correction=evaluation_result.correction,
|
||||
source_info=evaluation_result.evaluator_info,
|
||||
source_run_id=evaluation_result.source_run_id or run_id,
|
||||
feedback_source_type=langsmith.schemas.FeedbackSourceType.MODEL,
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"Error evaluating run {run.id} with "
|
||||
|
||||
@@ -61,15 +61,17 @@ class Chain(RunnableSerializable[Dict[str, Any], Dict[str, Any]], ABC):
|
||||
chains and cannot return as rich of an output as `__call__`.
|
||||
"""
|
||||
|
||||
@property
|
||||
def input_schema(self) -> Type[BaseModel]:
|
||||
def get_input_schema(
|
||||
self, config: Optional[RunnableConfig] = None
|
||||
) -> Type[BaseModel]:
|
||||
# This is correct, but pydantic typings/mypy don't think so.
|
||||
return create_model( # type: ignore[call-overload]
|
||||
"ChainInput", **{k: (Any, None) for k in self.input_keys}
|
||||
)
|
||||
|
||||
@property
|
||||
def output_schema(self) -> Type[BaseModel]:
|
||||
def get_output_schema(
|
||||
self, config: Optional[RunnableConfig] = None
|
||||
) -> Type[BaseModel]:
|
||||
# This is correct, but pydantic typings/mypy don't think so.
|
||||
return create_model( # type: ignore[call-overload]
|
||||
"ChainOutput", **{k: (Any, None) for k in self.output_keys}
|
||||
|
||||
@@ -10,6 +10,7 @@ from langchain.callbacks.manager import (
|
||||
from langchain.chains.base import Chain
|
||||
from langchain.docstore.document import Document
|
||||
from langchain.pydantic_v1 import BaseModel, Field, create_model
|
||||
from langchain.schema.runnable.config import RunnableConfig
|
||||
from langchain.text_splitter import RecursiveCharacterTextSplitter, TextSplitter
|
||||
|
||||
|
||||
@@ -28,15 +29,17 @@ class BaseCombineDocumentsChain(Chain, ABC):
|
||||
input_key: str = "input_documents" #: :meta private:
|
||||
output_key: str = "output_text" #: :meta private:
|
||||
|
||||
@property
|
||||
def input_schema(self) -> Type[BaseModel]:
|
||||
def get_input_schema(
|
||||
self, config: Optional[RunnableConfig] = None
|
||||
) -> Type[BaseModel]:
|
||||
return create_model(
|
||||
"CombineDocumentsInput",
|
||||
**{self.input_key: (List[Document], None)}, # type: ignore[call-overload]
|
||||
)
|
||||
|
||||
@property
|
||||
def output_schema(self) -> Type[BaseModel]:
|
||||
def get_output_schema(
|
||||
self, config: Optional[RunnableConfig] = None
|
||||
) -> Type[BaseModel]:
|
||||
return create_model(
|
||||
"CombineDocumentsOutput",
|
||||
**{self.output_key: (str, None)}, # type: ignore[call-overload]
|
||||
@@ -167,16 +170,18 @@ class AnalyzeDocumentChain(Chain):
|
||||
"""
|
||||
return self.combine_docs_chain.output_keys
|
||||
|
||||
@property
|
||||
def input_schema(self) -> Type[BaseModel]:
|
||||
def get_input_schema(
|
||||
self, config: Optional[RunnableConfig] = None
|
||||
) -> Type[BaseModel]:
|
||||
return create_model(
|
||||
"AnalyzeDocumentChain",
|
||||
**{self.input_key: (str, None)}, # type: ignore[call-overload]
|
||||
)
|
||||
|
||||
@property
|
||||
def output_schema(self) -> Type[BaseModel]:
|
||||
return self.combine_docs_chain.output_schema
|
||||
def get_output_schema(
|
||||
self, config: Optional[RunnableConfig] = None
|
||||
) -> Type[BaseModel]:
|
||||
return self.combine_docs_chain.get_output_schema(config)
|
||||
|
||||
def _call(
|
||||
self,
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, Dict, List, Optional, Tuple
|
||||
from typing import Any, Dict, List, Optional, Tuple, Type
|
||||
|
||||
from langchain.callbacks.manager import Callbacks
|
||||
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
|
||||
@@ -10,6 +10,7 @@ from langchain.chains.combine_documents.reduce import ReduceDocumentsChain
|
||||
from langchain.chains.llm import LLMChain
|
||||
from langchain.docstore.document import Document
|
||||
from langchain.pydantic_v1 import BaseModel, Extra, create_model, root_validator
|
||||
from langchain.schema.runnable.config import RunnableConfig
|
||||
|
||||
|
||||
class MapReduceDocumentsChain(BaseCombineDocumentsChain):
|
||||
@@ -98,8 +99,9 @@ class MapReduceDocumentsChain(BaseCombineDocumentsChain):
|
||||
return_intermediate_steps: bool = False
|
||||
"""Return the results of the map steps in the output."""
|
||||
|
||||
@property
|
||||
def output_schema(self) -> type[BaseModel]:
|
||||
def get_output_schema(
|
||||
self, config: Optional[RunnableConfig] = None
|
||||
) -> Type[BaseModel]:
|
||||
if self.return_intermediate_steps:
|
||||
return create_model(
|
||||
"MapReduceDocumentsOutput",
|
||||
@@ -109,7 +111,7 @@ class MapReduceDocumentsChain(BaseCombineDocumentsChain):
|
||||
}, # type: ignore[call-overload]
|
||||
)
|
||||
|
||||
return super().output_schema
|
||||
return super().get_output_schema(config)
|
||||
|
||||
@property
|
||||
def output_keys(self) -> List[str]:
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union, cast
|
||||
from typing import Any, Dict, List, Optional, Sequence, Tuple, Type, Union, cast
|
||||
|
||||
from langchain.callbacks.manager import Callbacks
|
||||
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
|
||||
@@ -10,6 +10,7 @@ from langchain.chains.llm import LLMChain
|
||||
from langchain.docstore.document import Document
|
||||
from langchain.output_parsers.regex import RegexParser
|
||||
from langchain.pydantic_v1 import BaseModel, Extra, create_model, root_validator
|
||||
from langchain.schema.runnable.config import RunnableConfig
|
||||
|
||||
|
||||
class MapRerankDocumentsChain(BaseCombineDocumentsChain):
|
||||
@@ -77,8 +78,9 @@ class MapRerankDocumentsChain(BaseCombineDocumentsChain):
|
||||
extra = Extra.forbid
|
||||
arbitrary_types_allowed = True
|
||||
|
||||
@property
|
||||
def output_schema(self) -> type[BaseModel]:
|
||||
def get_output_schema(
|
||||
self, config: Optional[RunnableConfig] = None
|
||||
) -> Type[BaseModel]:
|
||||
schema: Dict[str, Any] = {
|
||||
self.output_key: (str, None),
|
||||
}
|
||||
|
||||
@@ -22,6 +22,7 @@ from langchain.pydantic_v1 import BaseModel, Extra, Field, root_validator
|
||||
from langchain.schema import BasePromptTemplate, BaseRetriever, Document
|
||||
from langchain.schema.language_model import BaseLanguageModel
|
||||
from langchain.schema.messages import BaseMessage
|
||||
from langchain.schema.runnable.config import RunnableConfig
|
||||
from langchain.schema.vectorstore import VectorStore
|
||||
|
||||
# Depending on the memory type and configuration, the chat history format may differ.
|
||||
@@ -52,7 +53,7 @@ def _get_chat_history(chat_history: List[CHAT_TURN_TYPE]) -> str:
|
||||
|
||||
class InputType(BaseModel):
|
||||
question: str
|
||||
chat_history: List[CHAT_TURN_TYPE]
|
||||
chat_history: List[CHAT_TURN_TYPE] = Field(default_factory=list)
|
||||
|
||||
|
||||
class BaseConversationalRetrievalChain(Chain):
|
||||
@@ -95,8 +96,9 @@ class BaseConversationalRetrievalChain(Chain):
|
||||
"""Input keys."""
|
||||
return ["question", "chat_history"]
|
||||
|
||||
@property
|
||||
def input_schema(self) -> Type[BaseModel]:
|
||||
def get_input_schema(
|
||||
self, config: Optional[RunnableConfig] = None
|
||||
) -> Type[BaseModel]:
|
||||
return InputType
|
||||
|
||||
@property
|
||||
|
||||
@@ -19,7 +19,19 @@ from langchain.schema import BasePromptTemplate
|
||||
|
||||
|
||||
class ArangoGraphQAChain(Chain):
|
||||
"""Chain for question-answering against a graph by generating AQL statements."""
|
||||
"""Chain for question-answering against a graph by generating AQL statements.
|
||||
|
||||
*Security note*: Make sure that the database connection uses credentials
|
||||
that are narrowly-scoped to only include necessary permissions.
|
||||
Failure to do so may result in data corruption or loss, since the calling
|
||||
code may attempt commands that would result in deletion, mutation
|
||||
of data if appropriately prompted or reading sensitive data if such
|
||||
data is present in the database.
|
||||
The best way to guard against such negative outcomes is to (as appropriate)
|
||||
limit the permissions granted to the credentials used with this tool.
|
||||
|
||||
See https://python.langchain.com/docs/security for more information.
|
||||
"""
|
||||
|
||||
graph: ArangoGraph = Field(exclude=True)
|
||||
aql_generation_chain: LLMChain
|
||||
|
||||
@@ -14,7 +14,19 @@ from langchain.schema.language_model import BaseLanguageModel
|
||||
|
||||
|
||||
class GraphQAChain(Chain):
|
||||
"""Chain for question-answering against a graph."""
|
||||
"""Chain for question-answering against a graph.
|
||||
|
||||
*Security note*: Make sure that the database connection uses credentials
|
||||
that are narrowly-scoped to only include necessary permissions.
|
||||
Failure to do so may result in data corruption or loss, since the calling
|
||||
code may attempt commands that would result in deletion, mutation
|
||||
of data if appropriately prompted or reading sensitive data if such
|
||||
data is present in the database.
|
||||
The best way to guard against such negative outcomes is to (as appropriate)
|
||||
limit the permissions granted to the credentials used with this tool.
|
||||
|
||||
See https://python.langchain.com/docs/security for more information.
|
||||
"""
|
||||
|
||||
graph: NetworkxEntityGraph = Field(exclude=True)
|
||||
entity_extraction_chain: LLMChain
|
||||
|
||||
@@ -77,7 +77,19 @@ def construct_schema(
|
||||
|
||||
|
||||
class GraphCypherQAChain(Chain):
|
||||
"""Chain for question-answering against a graph by generating Cypher statements."""
|
||||
"""Chain for question-answering against a graph by generating Cypher statements.
|
||||
|
||||
*Security note*: Make sure that the database connection uses credentials
|
||||
that are narrowly-scoped to only include necessary permissions.
|
||||
Failure to do so may result in data corruption or loss, since the calling
|
||||
code may attempt commands that would result in deletion, mutation
|
||||
of data if appropriately prompted or reading sensitive data if such
|
||||
data is present in the database.
|
||||
The best way to guard against such negative outcomes is to (as appropriate)
|
||||
limit the permissions granted to the credentials used with this tool.
|
||||
|
||||
See https://python.langchain.com/docs/security for more information.
|
||||
"""
|
||||
|
||||
graph: GraphStore = Field(exclude=True)
|
||||
cypher_generation_chain: LLMChain
|
||||
|
||||
@@ -35,7 +35,19 @@ def extract_cypher(text: str) -> str:
|
||||
|
||||
|
||||
class FalkorDBQAChain(Chain):
|
||||
"""Chain for question-answering against a graph by generating Cypher statements."""
|
||||
"""Chain for question-answering against a graph by generating Cypher statements.
|
||||
|
||||
*Security note*: Make sure that the database connection uses credentials
|
||||
that are narrowly-scoped to only include necessary permissions.
|
||||
Failure to do so may result in data corruption or loss, since the calling
|
||||
code may attempt commands that would result in deletion, mutation
|
||||
of data if appropriately prompted or reading sensitive data if such
|
||||
data is present in the database.
|
||||
The best way to guard against such negative outcomes is to (as appropriate)
|
||||
limit the permissions granted to the credentials used with this tool.
|
||||
|
||||
See https://python.langchain.com/docs/security for more information.
|
||||
"""
|
||||
|
||||
graph: FalkorDBGraph = Field(exclude=True)
|
||||
cypher_generation_chain: LLMChain
|
||||
|
||||
@@ -17,7 +17,19 @@ from langchain.schema.language_model import BaseLanguageModel
|
||||
|
||||
|
||||
class HugeGraphQAChain(Chain):
|
||||
"""Chain for question-answering against a graph by generating gremlin statements."""
|
||||
"""Chain for question-answering against a graph by generating gremlin statements.
|
||||
|
||||
*Security note*: Make sure that the database connection uses credentials
|
||||
that are narrowly-scoped to only include necessary permissions.
|
||||
Failure to do so may result in data corruption or loss, since the calling
|
||||
code may attempt commands that would result in deletion, mutation
|
||||
of data if appropriately prompted or reading sensitive data if such
|
||||
data is present in the database.
|
||||
The best way to guard against such negative outcomes is to (as appropriate)
|
||||
limit the permissions granted to the credentials used with this tool.
|
||||
|
||||
See https://python.langchain.com/docs/security for more information.
|
||||
"""
|
||||
|
||||
graph: HugeGraph = Field(exclude=True)
|
||||
gremlin_generation_chain: LLMChain
|
||||
|
||||
@@ -14,8 +14,18 @@ from langchain.schema.language_model import BaseLanguageModel


class KuzuQAChain(Chain):
"""Chain for question-answering against a graph by generating Cypher statements for
Kùzu.
"""Question-answering against a graph by generating Cypher statements for Kùzu.

*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.

See https://python.langchain.com/docs/security for more information.
"""

graph: KuzuGraph = Field(exclude=True)

@@ -14,7 +14,19 @@ from langchain.schema.language_model import BaseLanguageModel


class NebulaGraphQAChain(Chain):
"""Chain for question-answering against a graph by generating nGQL statements."""
"""Chain for question-answering against a graph by generating nGQL statements.

*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.

See https://python.langchain.com/docs/security for more information.
"""

graph: NebulaGraph = Field(exclude=True)
ngql_generation_chain: LLMChain

@@ -85,6 +85,17 @@ class NeptuneOpenCypherQAChain(Chain):
"""Chain for question-answering against a Neptune graph
by generating openCypher statements.

*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.

See https://python.langchain.com/docs/security for more information.

Example:
.. code-block:: python

@@ -21,9 +21,18 @@ from langchain.schema.language_model import BaseLanguageModel


class GraphSparqlQAChain(Chain):
"""
Chain for question-answering against an RDF or OWL graph by generating
SPARQL statements.
"""Question-answering against an RDF or OWL graph by generating SPARQL statements.

*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.

See https://python.langchain.com/docs/security for more information.
"""

graph: RdfGraph = Field(exclude=True)

@@ -22,7 +22,7 @@ from langchain.utilities.openapi import OpenAPISpec
from langchain.utils.input import get_colored_text

if TYPE_CHECKING:
from openapi_schema_pydantic import Parameter
from openapi_pydantic import Parameter


def _get_description(o: Any, prefer_short: bool) -> Optional[str]:

@@ -52,7 +52,7 @@ class SequentialChain(Chain):
if set(input_variables).intersection(set(memory_keys)):
overlapping_keys = set(input_variables) & set(memory_keys)
raise ValueError(
f"The the input key(s) {''.join(overlapping_keys)} are found "
f"The input key(s) {''.join(overlapping_keys)} are found "
f"in the Memory keys ({memory_keys}) - please use input and "
f"memory keys that don't overlap."
)

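A minimal sketch of the condition this hunk cleans up (chain list and key names are illustrative): constructing a SequentialChain whose input variable collides with a memory key triggers the corrected ValueError.

from langchain.chains import SequentialChain
from langchain.memory import SimpleMemory

try:
    SequentialChain(
        chains=[],
        input_variables=["location"],
        # "location" is also a memory key, so validation fails.
        memory=SimpleMemory(memories={"location": "Berlin"}),
    )
except ValueError as err:
    print(err)  # "The input key(s) location are found in the Memory keys ..."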
@@ -30,6 +30,7 @@ from langchain.chat_models.fake import FakeListChatModel
from langchain.chat_models.fireworks import ChatFireworks
from langchain.chat_models.google_palm import ChatGooglePalm
from langchain.chat_models.human import HumanInputChatModel
from langchain.chat_models.hunyuan import ChatHunyuan
from langchain.chat_models.javelin_ai_gateway import ChatJavelinAIGateway
from langchain.chat_models.jinachat import JinaChat
from langchain.chat_models.konko import ChatKonko
@@ -38,6 +39,7 @@ from langchain.chat_models.minimax import MiniMaxChat
from langchain.chat_models.mlflow_ai_gateway import ChatMLflowAIGateway
from langchain.chat_models.ollama import ChatOllama
from langchain.chat_models.openai import ChatOpenAI
from langchain.chat_models.pai_eas_endpoint import PaiEasChatEndpoint
from langchain.chat_models.promptlayer_openai import PromptLayerChatOpenAI
from langchain.chat_models.vertexai import ChatVertexAI
from langchain.chat_models.yandex import ChatYandexGPT
@@ -63,8 +65,10 @@ __all__ = [
"ErnieBotChat",
"ChatJavelinAIGateway",
"ChatKonko",
"PaiEasChatEndpoint",
"QianfanChatEndpoint",
"ChatFireworks",
"ChatYandexGPT",
"ChatBaichuan",
"ChatHunyuan",
]

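With these entries in place, both new chat models are importable straight from the package, e.g.:

# Both classes are new in this changeset; see their modules later in the diff.
from langchain.chat_models import ChatHunyuan, PaiEasChatEndpoint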
@@ -76,7 +76,7 @@ class LlamaContentFormatter(ContentFormatterBase):
return self.format_request_payload(prompt=prompt, model_kwargs=model_kwargs)

def format_request_payload(self, prompt: str, model_kwargs: Dict) -> bytes:
"""Formats the request according the the chosen api"""
"""Formats the request according to the chosen api"""
return str.encode(prompt)

def format_response_payload(self, output: bytes) -> str:
@@ -118,7 +118,7 @@ class AzureMLChatOnlineEndpoint(SimpleChatModel):
@validator("http_client", always=True, allow_reuse=True)
@classmethod
def validate_client(cls, field_value: Any, values: Dict) -> AzureMLEndpointClient:
"""Validate that api key and python package exists in environment."""
"""Validate that api key and python package exist in environment."""
endpoint_key = get_from_dict_or_env(
values, "endpoint_api_key", "AZUREML_ENDPOINT_API_KEY"
)

@@ -2,13 +2,13 @@ import hashlib
import json
import logging
import time
from typing import Any, Dict, Iterator, List, Mapping, Optional, Type
from typing import Any, Dict, Iterator, List, Mapping, Optional, Type, Union

import requests

from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.chat_models.base import BaseChatModel, _generate_from_stream
from langchain.pydantic_v1 import Field, root_validator
from langchain.pydantic_v1 import Field, SecretStr, root_validator
from langchain.schema import (
AIMessage,
BaseMessage,
@@ -29,7 +29,7 @@ from langchain.utils import get_from_dict_or_env, get_pydantic_field_names
logger = logging.getLogger(__name__)


def convert_message_to_dict(message: BaseMessage) -> dict:
def _convert_message_to_dict(message: BaseMessage) -> dict:
message_dict: Dict[str, Any]
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
@@ -69,6 +69,21 @@ def _convert_delta_to_message_chunk(
return default_class(content=content)


def _to_secret(value: Union[SecretStr, str]) -> SecretStr:
"""Convert a string to a SecretStr if needed."""
if isinstance(value, SecretStr):
return value
return SecretStr(value)


# signature generation
def _signature(secret_key: SecretStr, payload: Dict[str, Any], timestamp: int) -> str:
input_str = secret_key.get_secret_value() + json.dumps(payload) + str(timestamp)
md5 = hashlib.md5()
md5.update(input_str.encode("utf-8"))
return md5.hexdigest()

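A short sketch of how these module-level helpers are meant to be combined into request headers (the key values and payload are illustrative; it assumes the `_to_secret` and `_signature` helpers above are in scope, and mirrors the header names used in `_chat` further down in this diff):

import time

secret_key = _to_secret("placeholder-baichuan-secret-key")
timestamp = int(time.time())
payload = {
    "model": "Baichuan2-53B",
    "messages": [{"role": "user", "content": "Hi"}],
    "parameters": {"temperature": 0.3},
}

headers = {
    "Content-Type": "application/json",
    "Authorization": "Bearer placeholder-baichuan-api-key",
    "X-BC-Timestamp": str(timestamp),
    # MD5 over secret key + JSON payload + timestamp, as in _signature above.
    "X-BC-Signature": _signature(
        secret_key=secret_key, payload=payload, timestamp=timestamp
    ),
    "X-BC-Sign-Algo": "MD5",
}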
class ChatBaichuan(BaseChatModel):
"""Baichuan chat models API by Baichuan Intelligent Technology.

@@ -90,21 +105,25 @@ class ChatBaichuan(BaseChatModel):
"""Baichuan custom endpoints"""
baichuan_api_key: Optional[str] = None
"""Baichuan API Key"""
baichuan_secret_key: Optional[str] = None
baichuan_secret_key: Optional[SecretStr] = None
"""Baichuan Secret Key"""
streaming: Optional[bool] = False
"""streaming mode."""
request_timeout: Optional[int] = 60
streaming: bool = False
"""Whether to stream the results or not."""
request_timeout: int = 60
"""request timeout for chat http requests"""

model = "Baichuan2-53B"
"""model name of Baichuan, default is `Baichuan2-53B`."""
temperature: float = 0.3
"""What sampling temperature to use."""
top_k: int = 5
"""What search sampling control to use."""
top_p: float = 0.85
"""What probability mass to use."""
with_search_enhance: bool = False
"""Whether to use search enhance, default is False."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for API call not explicitly specified."""

class Config:
"""Configuration for this pydantic object."""
@@ -149,10 +168,12 @@ class ChatBaichuan(BaseChatModel):
"baichuan_api_key",
"BAICHUAN_API_KEY",
)
values["baichuan_secret_key"] = get_from_dict_or_env(
values,
"baichuan_secret_key",
"BAICHUAN_SECRET_KEY",
values["baichuan_secret_key"] = _to_secret(
get_from_dict_or_env(
values,
"baichuan_secret_key",
"BAICHUAN_SECRET_KEY",
)
)

return values
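For orientation, a hedged usage sketch of the class as configured above (the environment variable names are the ones read by the validator, the key values are placeholders, and a real call needs valid credentials):

import os

from langchain.chat_models import ChatBaichuan
from langchain.schema import HumanMessage

os.environ["BAICHUAN_API_KEY"] = "placeholder-api-key"
os.environ["BAICHUAN_SECRET_KEY"] = "placeholder-secret-key"

chat = ChatBaichuan(temperature=0.3, top_k=5)
# The secret key is now stored as a SecretStr, so it is masked when printed.
print(chat.baichuan_secret_key)  # **********
# chat([HumanMessage(content="你好")])  # requires valid credentials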
@@ -169,15 +190,6 @@ class ChatBaichuan(BaseChatModel):

return {**normal_params, **self.model_kwargs}

def _signature(self, data: Dict[str, Any], timestamp: int) -> str:
if self.baichuan_secret_key is None:
raise ValueError("Baichuan secret key is not set.")

input_str = self.baichuan_secret_key + json.dumps(data) + str(timestamp)
md5 = hashlib.md5()
md5.update(input_str.encode("utf-8"))
return md5.hexdigest()

def _generate(
self,
messages: List[BaseMessage],
@@ -224,6 +236,9 @@ class ChatBaichuan(BaseChatModel):
run_manager.on_llm_new_token(chunk.content)

def _chat(self, messages: List[BaseMessage], **kwargs: Any) -> requests.Response:
if self.baichuan_secret_key is None:
raise ValueError("Baichuan secret key is not set.")

parameters = {**self._default_params, **kwargs}

model = parameters.pop("model")
@@ -231,7 +246,7 @@ class ChatBaichuan(BaseChatModel):

payload = {
"model": model,
"messages": [convert_message_to_dict(m) for m in messages],
"messages": [_convert_message_to_dict(m) for m in messages],
"parameters": parameters,
}

@@ -249,7 +264,11 @@ class ChatBaichuan(BaseChatModel):
"Content-Type": "application/json",
"Authorization": f"Bearer {self.baichuan_api_key}",
"X-BC-Timestamp": str(timestamp),
"X-BC-Signature": self._signature(payload, timestamp),
"X-BC-Signature": _signature(
secret_key=self.baichuan_secret_key,
payload=payload,
timestamp=timestamp,
),
"X-BC-Sign-Algo": "MD5",
**headers,
},

@@ -11,7 +11,6 @@ from typing import (
List,
Optional,
Sequence,
Union,
cast,
)

@@ -38,12 +37,10 @@ from langchain.schema import (
from langchain.schema.language_model import BaseLanguageModel, LanguageModelInput
from langchain.schema.messages import (
AIMessage,
AnyMessage,
BaseMessage,
BaseMessageChunk,
ChatMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
)
from langchain.schema.output import ChatGenerationChunk
from langchain.schema.runnable import RunnableConfig
@@ -79,7 +76,7 @@ async def _agenerate_from_stream(
return ChatResult(generations=[generation])


class BaseChatModel(BaseLanguageModel[BaseMessageChunk], ABC):
class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
"""Base class for Chat models."""

cache: Optional[bool] = None
@@ -116,9 +113,7 @@ class BaseChatModel(BaseLanguageModel[BaseMessageChunk], ABC):
@property
def OutputType(self) -> Any:
"""Get the output type for this runnable."""
return Union[
HumanMessage, AIMessage, ChatMessage, FunctionMessage, SystemMessage
]
return AnyMessage

def _convert_input(self, input: LanguageModelInput) -> PromptValue:
if isinstance(input, PromptValue):
@@ -140,23 +135,20 @@ class BaseChatModel(BaseLanguageModel[BaseMessageChunk], ABC):
*,
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> BaseMessageChunk:
) -> BaseMessage:
config = config or {}
return cast(
BaseMessageChunk,
cast(
ChatGeneration,
self.generate_prompt(
[self._convert_input(input)],
stop=stop,
callbacks=config.get("callbacks"),
tags=config.get("tags"),
metadata=config.get("metadata"),
run_name=config.get("run_name"),
**kwargs,
).generations[0][0],
).message,
)
ChatGeneration,
self.generate_prompt(
[self._convert_input(input)],
stop=stop,
callbacks=config.get("callbacks"),
tags=config.get("tags"),
metadata=config.get("metadata"),
run_name=config.get("run_name"),
**kwargs,
).generations[0][0],
).message

async def ainvoke(
self,
@@ -165,7 +157,7 @@ class BaseChatModel(BaseLanguageModel[BaseMessageChunk], ABC):
*,
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> BaseMessageChunk:
) -> BaseMessage:
config = config or {}
llm_result = await self.agenerate_prompt(
[self._convert_input(input)],
@@ -176,9 +168,7 @@ class BaseChatModel(BaseLanguageModel[BaseMessageChunk], ABC):
run_name=config.get("run_name"),
**kwargs,
)
return cast(
BaseMessageChunk, cast(ChatGeneration, llm_result.generations[0][0]).message
)
return cast(ChatGeneration, llm_result.generations[0][0]).message

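A small sketch of what the relaxed return types mean for callers (the fake model is used only to keep the example offline): `invoke` and `ainvoke` are now typed as returning a plain `BaseMessage` instead of a `BaseMessageChunk`.

from langchain.chat_models import FakeListChatModel
from langchain.schema.messages import AIMessage, BaseMessage

chat = FakeListChatModel(responses=["hello"])

msg = chat.invoke("hi")
assert isinstance(msg, BaseMessage)  # the declared return type
assert isinstance(msg, AIMessage)    # the concrete message produced here
print(msg.content)  # hello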
def stream(
self,
@@ -190,7 +180,9 @@ class BaseChatModel(BaseLanguageModel[BaseMessageChunk], ABC):
) -> Iterator[BaseMessageChunk]:
if type(self)._stream == BaseChatModel._stream:
# model doesn't implement streaming, so use default implementation
yield self.invoke(input, config=config, stop=stop, **kwargs)
yield cast(
BaseMessageChunk, self.invoke(input, config=config, stop=stop, **kwargs)
)
else:
config = config or {}
messages = self._convert_input(input).to_messages()
@@ -241,7 +233,9 @@ class BaseChatModel(BaseLanguageModel[BaseMessageChunk], ABC):
) -> AsyncIterator[BaseMessageChunk]:
if type(self)._astream == BaseChatModel._astream:
# model doesn't implement streaming, so use default implementation
yield self.invoke(input, config=config, stop=stop, **kwargs)
yield cast(
BaseMessageChunk, self.invoke(input, config=config, stop=stop, **kwargs)
)
else:
config = config or {}
messages = self._convert_input(input).to_messages()

libs/langchain/langchain/chat_models/hunyuan.py (new file, 325 lines)
@@ -0,0 +1,325 @@
|
||||
import base64
|
||||
import hashlib
|
||||
import hmac
|
||||
import json
|
||||
import logging
|
||||
import time
|
||||
from typing import Any, Dict, Iterator, List, Mapping, Optional, Type, Union
|
||||
from urllib.parse import urlparse
|
||||
|
||||
import requests
|
||||
|
||||
from langchain.callbacks.manager import CallbackManagerForLLMRun
|
||||
from langchain.chat_models.base import BaseChatModel, _generate_from_stream
|
||||
from langchain.pydantic_v1 import Field, SecretStr, root_validator
|
||||
from langchain.schema import (
|
||||
AIMessage,
|
||||
BaseMessage,
|
||||
ChatGeneration,
|
||||
ChatMessage,
|
||||
ChatResult,
|
||||
HumanMessage,
|
||||
)
|
||||
from langchain.schema.messages import (
|
||||
AIMessageChunk,
|
||||
BaseMessageChunk,
|
||||
ChatMessageChunk,
|
||||
HumanMessageChunk,
|
||||
)
|
||||
from langchain.schema.output import ChatGenerationChunk
|
||||
from langchain.utils import get_from_dict_or_env, get_pydantic_field_names
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
DEFAULT_HUNYUAN_API_BASE = "https://hunyuan.cloud.tencent.com"
|
||||
DEFAULT_HUNYUAN_PATH = "/hyllm/v1/chat/completions"
|
||||
|
||||
|
||||
def _convert_message_to_dict(message: BaseMessage) -> dict:
|
||||
message_dict: Dict[str, Any]
|
||||
if isinstance(message, ChatMessage):
|
||||
message_dict = {"role": message.role, "content": message.content}
|
||||
elif isinstance(message, HumanMessage):
|
||||
message_dict = {"role": "user", "content": message.content}
|
||||
elif isinstance(message, AIMessage):
|
||||
message_dict = {"role": "assistant", "content": message.content}
|
||||
else:
|
||||
raise TypeError(f"Got unknown type {message}")
|
||||
|
||||
return message_dict
|
||||
|
||||
|
||||
def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
|
||||
role = _dict["role"]
|
||||
if role == "user":
|
||||
return HumanMessage(content=_dict["content"])
|
||||
elif role == "assistant":
|
||||
return AIMessage(content=_dict.get("content", "") or "")
|
||||
else:
|
||||
return ChatMessage(content=_dict["content"], role=role)
|
||||
|
||||
|
||||
def _convert_delta_to_message_chunk(
|
||||
_dict: Mapping[str, Any], default_class: Type[BaseMessageChunk]
|
||||
) -> BaseMessageChunk:
|
||||
role = _dict.get("role")
|
||||
content = _dict.get("content") or ""
|
||||
|
||||
if role == "user" or default_class == HumanMessageChunk:
|
||||
return HumanMessageChunk(content=content)
|
||||
elif role == "assistant" or default_class == AIMessageChunk:
|
||||
return AIMessageChunk(content=content)
|
||||
elif role or default_class == ChatMessageChunk:
|
||||
return ChatMessageChunk(content=content, role=role)
|
||||
else:
|
||||
return default_class(content=content)
|
||||
|
||||
|
||||
# signature generation
# https://cloud.tencent.com/document/product/1729/97732#532252ce-e960-48a7-8821-940a9ce2ccf3
def _signature(secret_key: SecretStr, url: str, payload: Dict[str, Any]) -> str:
sorted_keys = sorted(payload.keys())

url_info = urlparse(url)

sign_str = url_info.netloc + url_info.path + "?"

for key in sorted_keys:
value = payload[key]

if isinstance(value, list) or isinstance(value, dict):
value = json.dumps(value, separators=(",", ":"))
elif isinstance(value, float):
value = "%g" % value

sign_str = sign_str + key + "=" + str(value) + "&"

sign_str = sign_str[:-1]

hmacstr = hmac.new(
key=secret_key.get_secret_value().encode("utf-8"),
msg=sign_str.encode("utf-8"),
digestmod=hashlib.sha1,
).digest()

return base64.b64encode(hmacstr).decode("utf-8")

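A hedged sketch of the signing scheme above (the secret key, ids, and timestamps are placeholders; it assumes the module-level names defined earlier in this file are in scope): the payload is flattened into a sorted query string, HMAC-SHA1-signed with the secret key, and base64-encoded into the Authorization header.

from langchain.pydantic_v1 import SecretStr

secret_key = SecretStr("placeholder-hunyuan-secret-key")
url = DEFAULT_HUNYUAN_API_BASE + DEFAULT_HUNYUAN_PATH
payload = {
    "app_id": 1234567,                     # placeholder app id
    "secret_id": "placeholder-secret-id",
    "timestamp": 1700000000,
    "expired": 1700086400,
    "messages": [{"role": "user", "content": "Hi"}],
}

# Lists/dicts are JSON-encoded and floats go through "%g" before the
# "netloc/path?k1=v1&k2=v2" string is HMAC-SHA1 signed, per _signature above.
authorization = _signature(secret_key=secret_key, url=url, payload=payload)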
def _create_chat_result(response: Mapping[str, Any]) -> ChatResult:
|
||||
generations = []
|
||||
for choice in response["choices"]:
|
||||
message = _convert_dict_to_message(choice["messages"])
|
||||
generations.append(ChatGeneration(message=message))
|
||||
|
||||
token_usage = response["usage"]
|
||||
llm_output = {"token_usage": token_usage}
|
||||
return ChatResult(generations=generations, llm_output=llm_output)
|
||||
|
||||
|
||||
def _to_secret(value: Union[SecretStr, str]) -> SecretStr:
|
||||
"""Convert a string to a SecretStr if needed."""
|
||||
if isinstance(value, SecretStr):
|
||||
return value
|
||||
return SecretStr(value)
|
||||
|
||||
|
||||
class ChatHunyuan(BaseChatModel):
|
||||
"""Tencent Hunyuan chat models API by Tencent.
|
||||
|
||||
For more information, see https://cloud.tencent.com/document/product/1729
|
||||
"""
|
||||
|
||||
@property
|
||||
def lc_secrets(self) -> Dict[str, str]:
|
||||
return {
|
||||
"hunyuan_app_id": "HUNYUAN_APP_ID",
|
||||
"hunyuan_secret_id": "HUNYUAN_SECRET_ID",
|
||||
"hunyuan_secret_key": "HUNYUAN_SECRET_KEY",
|
||||
}
|
||||
|
||||
@property
|
||||
def lc_serializable(self) -> bool:
|
||||
return True
|
||||
|
||||
hunyuan_api_base: str = "https://hunyuan.cloud.tencent.com"
|
||||
"""Hunyuan custom endpoints"""
|
||||
hunyuan_app_id: Optional[str] = None
|
||||
"""Hunyuan App ID"""
|
||||
hunyuan_secret_id: Optional[str] = None
|
||||
"""Hunyuan Secret ID"""
|
||||
hunyuan_secret_key: Optional[SecretStr] = None
|
||||
"""Hunyuan Secret Key"""
|
||||
streaming: bool = False
|
||||
"""Whether to stream the results or not."""
|
||||
request_timeout: int = 60
|
||||
"""Timeout for requests to Hunyuan API. Default is 60 seconds."""
|
||||
|
||||
query_id: Optional[str] = None
|
||||
"""Query id for troubleshooting"""
|
||||
temperature: float = 1.0
|
||||
"""What sampling temperature to use."""
|
||||
top_p: float = 1.0
|
||||
"""What probability mass to use."""
|
||||
|
||||
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
|
||||
"""Holds any model parameters valid for API call not explicitly specified."""
|
||||
|
||||
class Config:
|
||||
"""Configuration for this pydantic object."""
|
||||
|
||||
allow_population_by_field_name = True
|
||||
|
||||
@root_validator(pre=True)
|
||||
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""Build extra kwargs from additional params that were passed in."""
|
||||
all_required_field_names = get_pydantic_field_names(cls)
|
||||
extra = values.get("model_kwargs", {})
|
||||
for field_name in list(values):
|
||||
if field_name in extra:
|
||||
raise ValueError(f"Found {field_name} supplied twice.")
|
||||
if field_name not in all_required_field_names:
|
||||
logger.warning(
|
||||
f"""WARNING! {field_name} is not default parameter.
|
||||
{field_name} was transferred to model_kwargs.
|
||||
Please confirm that {field_name} is what you intended."""
|
||||
)
|
||||
extra[field_name] = values.pop(field_name)
|
||||
|
||||
invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
|
||||
if invalid_model_kwargs:
|
||||
raise ValueError(
|
||||
f"Parameters {invalid_model_kwargs} should be specified explicitly. "
|
||||
f"Instead they were passed in as part of `model_kwargs` parameter."
|
||||
)
|
||||
|
||||
values["model_kwargs"] = extra
|
||||
return values
|
||||
|
||||
@root_validator()
|
||||
def validate_environment(cls, values: Dict) -> Dict:
|
||||
values["hunyuan_api_base"] = get_from_dict_or_env(
|
||||
values,
|
||||
"hunyuan_api_base",
|
||||
"HUNYUAN_API_BASE",
|
||||
)
|
||||
values["hunyuan_app_id"] = get_from_dict_or_env(
|
||||
values,
|
||||
"hunyuan_app_id",
|
||||
"HUNYUAN_APP_ID",
|
||||
)
|
||||
values["hunyuan_secret_id"] = get_from_dict_or_env(
|
||||
values,
|
||||
"hunyuan_secret_id",
|
||||
"HUNYUAN_SECRET_ID",
|
||||
)
|
||||
values["hunyuan_secret_key"] = _to_secret(
|
||||
get_from_dict_or_env(
|
||||
values,
|
||||
"hunyuan_secret_key",
|
||||
"HUNYUAN_SECRET_KEY",
|
||||
)
|
||||
)
|
||||
|
||||
return values
|
||||
|
||||
@property
|
||||
def _default_params(self) -> Dict[str, Any]:
|
||||
"""Get the default parameters for calling Hunyuan API."""
|
||||
normal_params = {
|
||||
"app_id": self.hunyuan_app_id,
|
||||
"secret_id": self.hunyuan_secret_id,
|
||||
"temperature": self.temperature,
|
||||
"top_p": self.top_p,
|
||||
}
|
||||
|
||||
if self.query_id is not None:
|
||||
normal_params["query_id"] = self.query_id
|
||||
|
||||
return {**normal_params, **self.model_kwargs}
|
||||
|
||||
def _generate(
|
||||
self,
|
||||
messages: List[BaseMessage],
|
||||
stop: Optional[List[str]] = None,
|
||||
run_manager: Optional[CallbackManagerForLLMRun] = None,
|
||||
**kwargs: Any,
|
||||
) -> ChatResult:
|
||||
if self.streaming:
|
||||
stream_iter = self._stream(
|
||||
messages=messages, stop=stop, run_manager=run_manager, **kwargs
|
||||
)
|
||||
return _generate_from_stream(stream_iter)
|
||||
|
||||
res = self._chat(messages, **kwargs)
|
||||
|
||||
response = res.json()
|
||||
|
||||
if "error" in response:
|
||||
raise ValueError(f"Error from Hunyuan api response: {response}")
|
||||
|
||||
return _create_chat_result(response)
|
||||
|
||||
def _stream(
|
||||
self,
|
||||
messages: List[BaseMessage],
|
||||
stop: Optional[List[str]] = None,
|
||||
run_manager: Optional[CallbackManagerForLLMRun] = None,
|
||||
**kwargs: Any,
|
||||
) -> Iterator[ChatGenerationChunk]:
|
||||
res = self._chat(messages, **kwargs)
|
||||
|
||||
default_chunk_class = AIMessageChunk
|
||||
for chunk in res.iter_lines():
|
||||
response = json.loads(chunk)
|
||||
if "error" in response:
|
||||
raise ValueError(f"Error from Hunyuan api response: {response}")
|
||||
|
||||
for choice in response["choices"]:
|
||||
chunk = _convert_delta_to_message_chunk(
|
||||
choice["delta"], default_chunk_class
|
||||
)
|
||||
default_chunk_class = chunk.__class__
|
||||
yield ChatGenerationChunk(message=chunk)
|
||||
if run_manager:
|
||||
run_manager.on_llm_new_token(chunk.content)
|
||||
|
||||
def _chat(self, messages: List[BaseMessage], **kwargs: Any) -> requests.Response:
|
||||
if self.hunyuan_secret_key is None:
|
||||
raise ValueError("Hunyuan secret key is not set.")
|
||||
|
||||
parameters = {**self._default_params, **kwargs}
|
||||
|
||||
headers = parameters.pop("headers", {})
|
||||
timestamp = parameters.pop("timestamp", int(time.time()))
|
||||
expired = parameters.pop("expired", timestamp + 24 * 60 * 60)
|
||||
|
||||
payload = {
|
||||
"timestamp": timestamp,
|
||||
"expired": expired,
|
||||
"messages": [_convert_message_to_dict(m) for m in messages],
|
||||
**parameters,
|
||||
}
|
||||
|
||||
if self.streaming:
|
||||
payload["stream"] = 1
|
||||
|
||||
url = self.hunyuan_api_base + DEFAULT_HUNYUAN_PATH
|
||||
|
||||
res = requests.post(
|
||||
url=url,
|
||||
timeout=self.request_timeout,
|
||||
headers={
|
||||
"Content-Type": "application/json",
|
||||
"Authorization": _signature(
|
||||
secret_key=self.hunyuan_secret_key, url=url, payload=payload
|
||||
),
|
||||
**headers,
|
||||
},
|
||||
json=payload,
|
||||
stream=self.streaming,
|
||||
)
|
||||
return res
|
||||
|
||||
@property
|
||||
def _llm_type(self) -> str:
|
||||
return "hunyuan-chat"
|
||||
libs/langchain/langchain/chat_models/pai_eas_endpoint.py (new file, 324 lines)
@@ -0,0 +1,324 @@
|
||||
import asyncio
|
||||
import json
|
||||
import logging
|
||||
from functools import partial
|
||||
from typing import Any, AsyncIterator, Dict, List, Optional
|
||||
|
||||
import requests
|
||||
|
||||
from langchain.callbacks.manager import (
|
||||
AsyncCallbackManagerForLLMRun,
|
||||
CallbackManagerForLLMRun,
|
||||
)
|
||||
from langchain.chat_models.base import BaseChatModel
|
||||
from langchain.llms.utils import enforce_stop_tokens
|
||||
from langchain.pydantic_v1 import root_validator
|
||||
from langchain.schema import ChatGeneration, ChatResult
|
||||
from langchain.schema.messages import (
|
||||
AIMessage,
|
||||
AIMessageChunk,
|
||||
BaseMessage,
|
||||
ChatMessage,
|
||||
HumanMessage,
|
||||
SystemMessage,
|
||||
)
|
||||
from langchain.schema.output import ChatGenerationChunk
|
||||
from langchain.utils import get_from_dict_or_env
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class PaiEasChatEndpoint(BaseChatModel):
|
||||
"""Eas LLM Service chat model API.
|
||||
|
||||
To use, must have a deployed eas chat llm service on AliCloud. One can set the
|
||||
environment variable ``eas_service_url`` and ``eas_service_token`` set with your eas
|
||||
service url and service token.
|
||||
|
||||
Example:
|
||||
.. code-block:: python
|
||||
|
||||
from langchain.chat_models import PaiEasChatEndpoint
|
||||
eas_chat_endpoint = PaiEasChatEndpoint(
|
||||
eas_service_url="your_service_url",
|
||||
eas_service_token="your_service_token"
|
||||
)
|
||||
"""
|
||||
|
||||
"""PAI-EAS Service URL"""
|
||||
eas_service_url: str
|
||||
|
||||
"""PAI-EAS Service TOKEN"""
|
||||
eas_service_token: str
|
||||
|
||||
"""PAI-EAS Service Infer Params"""
|
||||
max_new_tokens: Optional[int] = 512
|
||||
temperature: Optional[float] = 0.8
|
||||
top_p: Optional[float] = 0.1
|
||||
top_k: Optional[int] = 10
|
||||
do_sample: Optional[bool] = False
|
||||
use_cache: Optional[bool] = True
|
||||
stop_sequences: Optional[List[str]] = None
|
||||
|
||||
"""Enable stream chat mode."""
|
||||
streaming: bool = False
|
||||
|
||||
"""Key/value arguments to pass to the model. Reserved for future use"""
|
||||
model_kwargs: Optional[dict] = None
|
||||
|
||||
version: Optional[str] = "2.0"
|
||||
|
||||
timeout: Optional[int] = 5000
|
||||
|
||||
@root_validator()
|
||||
def validate_environment(cls, values: Dict) -> Dict:
|
||||
"""Validate that api key and python package exists in environment."""
|
||||
values["eas_service_url"] = get_from_dict_or_env(
|
||||
values, "eas_service_url", "EAS_SERVICE_URL"
|
||||
)
|
||||
values["eas_service_token"] = get_from_dict_or_env(
|
||||
values, "eas_service_token", "EAS_SERVICE_TOKEN"
|
||||
)
|
||||
|
||||
return values
|
||||
|
||||
@property
|
||||
def _identifying_params(self) -> Dict[str, Any]:
|
||||
"""Get the identifying parameters."""
|
||||
_model_kwargs = self.model_kwargs or {}
|
||||
return {
|
||||
"eas_service_url": self.eas_service_url,
|
||||
"eas_service_token": self.eas_service_token,
|
||||
**{"model_kwargs": _model_kwargs},
|
||||
}
|
||||
|
||||
@property
|
||||
def _llm_type(self) -> str:
|
||||
"""Return type of llm."""
|
||||
return "pai_eas_chat_endpoint"
|
||||
|
||||
@property
|
||||
def _default_params(self) -> Dict[str, Any]:
|
||||
"""Get the default parameters for calling Cohere API."""
|
||||
return {
|
||||
"max_new_tokens": self.max_new_tokens,
|
||||
"temperature": self.temperature,
|
||||
"top_k": self.top_k,
|
||||
"top_p": self.top_p,
|
||||
"stop_sequences": [],
|
||||
"do_sample": self.do_sample,
|
||||
"use_cache": self.use_cache,
|
||||
}
|
||||
|
||||
def _invocation_params(
|
||||
self, stop_sequences: Optional[List[str]], **kwargs: Any
|
||||
) -> dict:
|
||||
params = self._default_params
|
||||
if self.model_kwargs:
|
||||
params.update(self.model_kwargs)
|
||||
if self.stop_sequences is not None and stop_sequences is not None:
|
||||
raise ValueError("`stop` found in both the input and default params.")
|
||||
elif self.stop_sequences is not None:
|
||||
params["stop"] = self.stop_sequences
|
||||
else:
|
||||
params["stop"] = stop_sequences
|
||||
return {**params, **kwargs}
|
||||
|
||||
def format_request_payload(
|
||||
self, messages: List[BaseMessage], **model_kwargs: Any
|
||||
) -> dict:
|
||||
prompt: Dict[str, Any] = {}
|
||||
user_content: List[str] = []
|
||||
assistant_content: List[str] = []
|
||||
|
||||
for message in messages:
|
||||
"""Converts message to a dict according to role"""
|
||||
if isinstance(message, HumanMessage):
|
||||
user_content = user_content + [message.content]
|
||||
elif isinstance(message, AIMessage):
|
||||
assistant_content = assistant_content + [message.content]
|
||||
elif isinstance(message, SystemMessage):
|
||||
prompt["system_prompt"] = message.content
|
||||
elif isinstance(message, ChatMessage) and message.role in [
|
||||
"user",
|
||||
"assistant",
|
||||
"system",
|
||||
]:
|
||||
if message.role == "system":
|
||||
prompt["system_prompt"] = message.content
|
||||
elif message.role == "user":
|
||||
user_content = user_content + [message.content]
|
||||
elif message.role == "assistant":
|
||||
assistant_content = assistant_content + [message.content]
|
||||
else:
|
||||
supported = ",".join([role for role in ["user", "assistant", "system"]])
|
||||
raise ValueError(
|
||||
f"""Received unsupported role.
|
||||
Supported roles for the LLaMa Foundation Model: {supported}"""
|
||||
)
|
||||
prompt["prompt"] = user_content[len(user_content) - 1]
|
||||
history = [
|
||||
history_item
|
||||
for _, history_item in enumerate(zip(user_content[:-1], assistant_content))
|
||||
]
|
||||
|
||||
prompt["history"] = history
|
||||
|
||||
return {**prompt, **model_kwargs}
|
||||
|
||||
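A hedged illustration of what `format_request_payload` above produces (the endpoint URL, token, and message contents are made up): alternating human/AI messages become `history` pairs, the final human message becomes `prompt`, and a system message becomes `system_prompt`.

from langchain.chat_models import PaiEasChatEndpoint
from langchain.schema.messages import AIMessage, HumanMessage, SystemMessage

endpoint = PaiEasChatEndpoint(
    eas_service_url="http://example.invalid/eas",  # placeholder service URL
    eas_service_token="placeholder-token",         # placeholder token
)

messages = [
    SystemMessage(content="You are a terse assistant."),
    HumanMessage(content="What is PAI-EAS?"),
    AIMessage(content="A model serving platform on AliCloud."),
    HumanMessage(content="How do I call it?"),
]

payload = endpoint.format_request_payload(messages)
# payload["system_prompt"] == "You are a terse assistant."
# payload["prompt"] == "How do I call it?"
# payload["history"] == [("What is PAI-EAS?", "A model serving platform on AliCloud.")]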
def _format_response_payload(
|
||||
self, output: bytes, stop_sequences: Optional[List[str]]
|
||||
) -> str:
|
||||
"""Formats response"""
|
||||
try:
|
||||
text = json.loads(output)["response"]
|
||||
if stop_sequences:
|
||||
text = enforce_stop_tokens(text, stop_sequences)
|
||||
return text
|
||||
except Exception as e:
|
||||
if isinstance(e, json.decoder.JSONDecodeError):
|
||||
return output.decode("utf-8")
|
||||
raise e
|
||||
|
||||
def _generate(
|
||||
self,
|
||||
messages: List[BaseMessage],
|
||||
stop: Optional[List[str]] = None,
|
||||
run_manager: Optional[CallbackManagerForLLMRun] = None,
|
||||
**kwargs: Any,
|
||||
) -> ChatResult:
|
||||
output_str = self._call(messages, stop=stop, run_manager=run_manager, **kwargs)
|
||||
message = AIMessage(content=output_str)
|
||||
generation = ChatGeneration(message=message)
|
||||
return ChatResult(generations=[generation])
|
||||
|
||||
def _call(
|
||||
self,
|
||||
messages: List[BaseMessage],
|
||||
stop: Optional[List[str]] = None,
|
||||
run_manager: Optional[CallbackManagerForLLMRun] = None,
|
||||
**kwargs: Any,
|
||||
) -> str:
|
||||
params = self._invocation_params(stop, **kwargs)
|
||||
|
||||
request_payload = self.format_request_payload(messages, **params)
|
||||
response_payload = self._call_eas(request_payload)
|
||||
generated_text = self._format_response_payload(response_payload, params["stop"])
|
||||
|
||||
if run_manager:
|
||||
run_manager.on_llm_new_token(generated_text)
|
||||
|
||||
return generated_text
|
||||
|
||||
def _call_eas(self, query_body: dict) -> Any:
|
||||
"""Generate text from the eas service."""
|
||||
headers = {
|
||||
"Content-Type": "application/json",
|
||||
"Accept": "application/json",
|
||||
"Authorization": f"{self.eas_service_token}",
|
||||
}
|
||||
|
||||
# make request
|
||||
response = requests.post(
|
||||
self.eas_service_url, headers=headers, json=query_body, timeout=self.timeout
|
||||
)
|
||||
|
||||
if response.status_code != 200:
|
||||
raise Exception(
|
||||
f"Request failed with status code {response.status_code}"
|
||||
f" and message {response.text}"
|
||||
)
|
||||
|
||||
return response.text
|
||||
|
||||
def _call_eas_stream(self, query_body: dict) -> Any:
|
||||
"""Generate text from the eas service."""
|
||||
headers = {
|
||||
"Content-Type": "application/json",
|
||||
"Accept": "application/json",
|
||||
"Authorization": f"{self.eas_service_token}",
|
||||
}
|
||||
|
||||
# make request
|
||||
response = requests.post(
|
||||
self.eas_service_url, headers=headers, json=query_body, timeout=self.timeout
|
||||
)
|
||||
|
||||
if response.status_code != 200:
|
||||
raise Exception(
|
||||
f"Request failed with status code {response.status_code}"
|
||||
f" and message {response.text}"
|
||||
)
|
||||
|
||||
return response
|
||||
|
||||
def _convert_chunk_to_message_message(
|
||||
self,
|
||||
chunk: str,
|
||||
) -> AIMessageChunk:
|
||||
data = json.loads(chunk.encode("utf-8"))
|
||||
return AIMessageChunk(content=data.get("response", ""))
|
||||
|
||||
async def _astream(
|
||||
self,
|
||||
messages: List[BaseMessage],
|
||||
stop: Optional[List[str]] = None,
|
||||
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
|
||||
**kwargs: Any,
|
||||
) -> AsyncIterator[ChatGenerationChunk]:
|
||||
params = self._invocation_params(stop, **kwargs)
|
||||
|
||||
request_payload = self.format_request_payload(messages, **params)
|
||||
request_payload["use_stream_chat"] = True
|
||||
|
||||
response = self._call_eas_stream(request_payload)
|
||||
for chunk in response.iter_lines(
|
||||
chunk_size=8192, decode_unicode=False, delimiter=b"\0"
|
||||
):
|
||||
if chunk:
|
||||
content = self._convert_chunk_to_message_message(chunk)
|
||||
|
||||
# identify stop sequence in generated text, if any
|
||||
stop_seq_found: Optional[str] = None
|
||||
for stop_seq in params["stop"]:
|
||||
if stop_seq in content.content:
|
||||
stop_seq_found = stop_seq
|
||||
|
||||
# identify text to yield
|
||||
text: Optional[str] = None
|
||||
if stop_seq_found:
|
||||
content.content = content.content[
|
||||
: content.content.index(stop_seq_found)
|
||||
]
|
||||
|
||||
# yield text, if any
|
||||
if text:
|
||||
if run_manager:
|
||||
await run_manager.on_llm_new_token(content.content)
|
||||
yield ChatGenerationChunk(message=content)
|
||||
|
||||
# break if stop sequence found
|
||||
if stop_seq_found:
|
||||
break
|
||||
|
||||
async def _agenerate(
|
||||
self,
|
||||
messages: List[BaseMessage],
|
||||
stop: Optional[List[str]] = None,
|
||||
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
|
||||
stream: Optional[bool] = None,
|
||||
**kwargs: Any,
|
||||
) -> ChatResult:
|
||||
if stream if stream is not None else self.streaming:
|
||||
generation: Optional[ChatGenerationChunk] = None
|
||||
async for chunk in self._astream(
|
||||
messages=messages, stop=stop, run_manager=run_manager, **kwargs
|
||||
):
|
||||
generation = chunk
|
||||
assert generation is not None
|
||||
return ChatResult(generations=[generation])
|
||||
|
||||
func = partial(
|
||||
self._generate, messages, stop=stop, run_manager=run_manager, **kwargs
|
||||
)
|
||||
return await asyncio.get_event_loop().run_in_executor(None, func)
|
||||
@@ -10,6 +10,7 @@ from typing import (
Mapping,
Optional,
Tuple,
Type,
)

from requests.exceptions import HTTPError
@@ -153,7 +154,7 @@ def _create_retry_decorator(

def _convert_delta_to_message_chunk(
_dict: Mapping[str, Any],
default_class: type[BaseMessageChunk],
default_class: Type[BaseMessageChunk],
length: int,
) -> BaseMessageChunk:
role = _dict.get("role")
@@ -318,7 +319,14 @@ class ChatTongyi(BaseChatModel):
)
return _generate_from_stream(stream_iter)

if not messages:
raise ValueError("No messages provided.")

message_dicts, params = self._create_message_dicts(messages, stop)

if message_dicts[-1]["role"] != "user":
raise ValueError("Last message should be user message.")

params = {**params, **kwargs}
response = self.completion_with_retry(
messages=message_dicts, run_manager=run_manager, **params
@@ -374,7 +382,7 @@ class ChatTongyi(BaseChatModel):
def _client_params(self) -> Dict[str, Any]:
"""Get the parameters used for the openai client."""
creds: Dict[str, Any] = {
"dashscope_api_key": self.dashscope_api_key,
"api_key": self.dashscope_api_key,
}
return {**self._default_params, **creds}

@@ -19,7 +19,7 @@ jobs:
- "3.10"
- "3.11"
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: Install poetry
run: |
pipx install poetry==$POETRY_VERSION

@@ -19,7 +19,7 @@ jobs:
&& ${{ contains(github.event.pull_request.labels.*.name, 'release') }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: Install poetry
run: pipx install poetry==$POETRY_VERSION
- name: Set up Python 3.10
Block a user